Diffstat (limited to 'libvpx/vp9')
201 files changed, 28140 insertions, 17316 deletions
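Most of this change is mechanical renaming rather than new logic: the redundant "_1d" tag is dropped from the inverse-transform helpers (vp9_short_idct* becomes vp9_idct*, idct16_1d_rows_dspr2 becomes idct16_rows_dspr2, and so on), and the loop-filter entry points move to the shorter vp9_lpf_{horizontal,vertical}_{4,8,16} scheme, the number reflecting the filter's span in pixels. As a reading aid, one representative before/after pair taken from the hunks below:

    #include <stdint.h>

    /* old */
    void vp9_loop_filter_horizontal_edge_neon(uint8_t *s, int p,
                                              const uint8_t *blimit,
                                              const uint8_t *limit,
                                              const uint8_t *thresh, int count);
    /* new */
    void vp9_lpf_horizontal_4_neon(uint8_t *s, int p,
                                   const uint8_t *blimit,
                                   const uint8_t *limit,
                                   const uint8_t *thresh, int count);

The remaining hunks add new NEON code (dual loop filters, intra predictors), frame-buffer reference counting, and C++ include guards.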
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_1_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_idct16x16_1_add_neon.asm index b1fd21b..b1fd21b 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_1_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_idct16x16_1_add_neon.asm diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_idct16x16_add_neon.asm index a13c0d0..a13c0d0 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_idct16x16_add_neon.asm diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct32x32_1_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_idct32x32_1_add_neon.asm index d290d07..d290d07 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_idct32x32_1_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_idct32x32_1_add_neon.asm diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct32x32_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_idct32x32_add_neon.asm index 388a7d7..72e933e 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_idct32x32_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_idct32x32_add_neon.asm @@ -72,7 +72,7 @@ cospi_31_64 EQU 804 ; reg1 = output[first_offset] ; reg2 = output[second_offset] ; for proper address calculation, the last offset used when manipulating - ; output, wethere reading or storing) must be passed in. use 0 for first + ; output, whether reading or storing) must be passed in. use 0 for first ; use. MACRO LOAD_FROM_OUTPUT $prev_offset, $first_offset, $second_offset, $reg1, $reg2 @@ -88,7 +88,7 @@ cospi_31_64 EQU 804 ; output[first_offset] = reg1 ; output[second_offset] = reg2 ; for proper address calculation, the last offset used when manipulating - ; output, wethere reading or storing) must be passed in. use 0 for first + ; output, whether reading or storing) must be passed in. use 0 for first ; use. MACRO STORE_IN_OUTPUT $prev_offset, $first_offset, $second_offset, $reg1, $reg2 @@ -242,7 +242,7 @@ cospi_31_64 EQU 804 ; TODO(cd): have special case to re-use constants when they are similar for ; consecutive butterflies ; TODO(cd): have special case when both constants are the same, do the - ; additions/substractions before the multiplies. + ; additions/subtractions before the multiplies. 
; generate the constants ; generate scalar constants mov r8, #$first_constant & 0xFF00 @@ -260,7 +260,7 @@ cospi_31_64 EQU 804 vmull.s16 q11, $regB, d31 vmull.s16 q12, $regC, d31 ; (used) five for intermediate (q8-q12), one for constants (q15) - ; do some addition/substractions (to get back two register) + ; do some addition/subtractions (to get back two register) vsub.s32 q8, q8, q10 vsub.s32 q9, q9, q11 ; do more multiplications (ordered for maximum latency hiding) @@ -268,7 +268,7 @@ cospi_31_64 EQU 804 vmull.s16 q11, $regA, d30 vmull.s16 q15, $regB, d30 ; (used) six for intermediate (q8-q12, q15) - ; do more addition/substractions + ; do more addition/subtractions vadd.s32 q11, q12, q11 vadd.s32 q10, q10, q15 ; (used) four for intermediate (q8-q11) diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_1_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_idct4x4_1_add_neon.asm index 0d4a721..0d4a721 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_1_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_idct4x4_1_add_neon.asm diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_idct4x4_add_neon.asm index 00283fc..00283fc 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_idct4x4_add_neon.asm diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_1_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_idct8x8_1_add_neon.asm index 421d202..421d202 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_1_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_idct8x8_1_add_neon.asm diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_idct8x8_add_neon.asm index 5476400..5476400 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_idct8x8_add_neon.asm diff --git a/libvpx/vp9/common/arm/neon/vp9_short_iht4x4_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_iht4x4_add_neon.asm index 2f326e2..2f326e2 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_iht4x4_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_iht4x4_add_neon.asm diff --git a/libvpx/vp9/common/arm/neon/vp9_short_iht8x8_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.asm index 93d3af3..b41f566 100644 --- a/libvpx/vp9/common/arm/neon/vp9_short_iht8x8_add_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.asm @@ -576,6 +576,7 @@ vld1.s16 {q14,q15}, [r0]! push {r0-r10} + vpush {d8-d15} ; transpose the input data TRANSPOSE8X8 @@ -636,6 +637,7 @@ iadst_iadst IADST8X8_1D end_vp9_iht8x8_64_add_neon + vpop {d8-d15} pop {r0-r10} ; ROUND_POWER_OF_TWO(temp_out[j], 5) diff --git a/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.asm b/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.asm new file mode 100644 index 0000000..5b8ec20 --- /dev/null +++ b/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.asm @@ -0,0 +1,199 @@ +; +; Copyright (c) 2013 The WebM project authors. All Rights Reserved. +; +; Use of this source code is governed by a BSD-style license +; that can be found in the LICENSE file in the root of the source +; tree. An additional intellectual property rights grant can be found +; in the file PATENTS. All contributing project authors may +; be found in the AUTHORS file in the root of the source tree. 
+; + + EXPORT |vp9_lpf_horizontal_4_dual_neon| + ARM + + AREA ||.text||, CODE, READONLY, ALIGN=2 + +;void vp9_lpf_horizontal_4_dual_neon(uint8_t *s, int p, +; const uint8_t *blimit0, +; const uint8_t *limit0, +; const uint8_t *thresh0, +; const uint8_t *blimit1, +; const uint8_t *limit1, +; const uint8_t *thresh1) +; r0 uint8_t *s, +; r1 int p, +; r2 const uint8_t *blimit0, +; r3 const uint8_t *limit0, +; sp const uint8_t *thresh0, +; sp+4 const uint8_t *blimit1, +; sp+8 const uint8_t *limit1, +; sp+12 const uint8_t *thresh1, + +|vp9_lpf_horizontal_4_dual_neon| PROC + push {lr} + + ldr r12, [sp, #4] ; load thresh0 + vld1.8 {d0}, [r2] ; load blimit0 to first half q + vld1.8 {d2}, [r3] ; load limit0 to first half q + + add r1, r1, r1 ; double pitch + ldr r2, [sp, #8] ; load blimit1 + + vld1.8 {d4}, [r12] ; load thresh0 to first half q + + ldr r3, [sp, #12] ; load limit1 + ldr r12, [sp, #16] ; load thresh1 + vld1.8 {d1}, [r2] ; load blimit1 to 2nd half q + + sub r2, r0, r1, lsl #1 ; s[-4 * p] + + vld1.8 {d3}, [r3] ; load limit1 to 2nd half q + vld1.8 {d5}, [r12] ; load thresh1 to 2nd half q + + vpush {d8-d15} ; save neon registers + + add r3, r2, r1, lsr #1 ; s[-3 * p] + + vld1.u8 {q3}, [r2@64], r1 ; p3 + vld1.u8 {q4}, [r3@64], r1 ; p2 + vld1.u8 {q5}, [r2@64], r1 ; p1 + vld1.u8 {q6}, [r3@64], r1 ; p0 + vld1.u8 {q7}, [r2@64], r1 ; q0 + vld1.u8 {q8}, [r3@64], r1 ; q1 + vld1.u8 {q9}, [r2@64] ; q2 + vld1.u8 {q10}, [r3@64] ; q3 + + sub r2, r2, r1, lsl #1 + sub r3, r3, r1, lsl #1 + + bl vp9_loop_filter_neon_16 + + vst1.u8 {q5}, [r2@64], r1 ; store op1 + vst1.u8 {q6}, [r3@64], r1 ; store op0 + vst1.u8 {q7}, [r2@64], r1 ; store oq0 + vst1.u8 {q8}, [r3@64], r1 ; store oq1 + + vpop {d8-d15} ; restore neon registers + + pop {pc} + ENDP ; |vp9_lpf_horizontal_4_dual_neon| + +; void vp9_loop_filter_neon_16(); +; This is a helper function for the loopfilters. The individual functions do the +; necessary load, transpose (if necessary) and store. This function uses +; registers d8-d15, so the calling function must save those registers.
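Before the register map and vector implementation that continue below, it may help to see the per-pixel arithmetic this helper applies in scalar form. The following C sketch mirrors the comments in the NEON body (and what the generic C loop filter does); the mask/hev computation is elided, and signed_char_clamp is a local helper for illustration, not a symbol from the file above:

    #include <stdint.h>

    static int8_t signed_char_clamp(int t) {
      return (int8_t)(t < -128 ? -128 : t > 127 ? 127 : t);
    }

    /* One pixel position; p1/p0/q0/q1 straddle the edge being filtered.
     * mask and hev are 0 or -1 (all bits set), matching the vector compares.
     * The ^ 0x80 converts between unsigned pixels and signed arithmetic. */
    static void filter4(int8_t mask, int8_t hev,
                        uint8_t *p1, uint8_t *p0, uint8_t *q0, uint8_t *q1) {
      const int8_t ps1 = (int8_t)(*p1 ^ 0x80), ps0 = (int8_t)(*p0 ^ 0x80);
      const int8_t qs0 = (int8_t)(*q0 ^ 0x80), qs1 = (int8_t)(*q1 ^ 0x80);
      int8_t filter, filter1, filter2;

      filter = signed_char_clamp(ps1 - qs1) & hev;              /* vqsub, vand */
      filter = signed_char_clamp(filter + 3 * (qs0 - ps0)) & mask;

      filter1 = signed_char_clamp(filter + 4) >> 3;             /* vqadd, vshr */
      filter2 = signed_char_clamp(filter + 3) >> 3;

      *q0 = (uint8_t)(signed_char_clamp(qs0 - filter1) ^ 0x80);
      *p0 = (uint8_t)(signed_char_clamp(ps0 + filter2) ^ 0x80);

      /* outer tap adjustment, suppressed where hev is set */
      filter = (int8_t)(((filter1 + 1) >> 1) & ~hev);           /* vrshr, vbic */
      *q1 = (uint8_t)(signed_char_clamp(qs1 - filter) ^ 0x80);
      *p1 = (uint8_t)(signed_char_clamp(ps1 + filter) ^ 0x80);
    }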
+; +; r0-r3, r12 PRESERVE +; q0 blimit +; q1 limit +; q2 thresh +; q3 p3 +; q4 p2 +; q5 p1 +; q6 p0 +; q7 q0 +; q8 q1 +; q9 q2 +; q10 q3 +; +; Outputs: +; q5 op1 +; q6 op0 +; q7 oq0 +; q8 oq1 +|vp9_loop_filter_neon_16| PROC + + ; filter_mask + vabd.u8 q11, q3, q4 ; m1 = abs(p3 - p2) + vabd.u8 q12, q4, q5 ; m2 = abs(p2 - p1) + vabd.u8 q13, q5, q6 ; m3 = abs(p1 - p0) + vabd.u8 q14, q8, q7 ; m4 = abs(q1 - q0) + vabd.u8 q3, q9, q8 ; m5 = abs(q2 - q1) + vabd.u8 q4, q10, q9 ; m6 = abs(q3 - q2) + + ; only compare the largest value to limit + vmax.u8 q11, q11, q12 ; m7 = max(m1, m2) + vmax.u8 q12, q13, q14 ; m8 = max(m3, m4) + + vabd.u8 q9, q6, q7 ; abs(p0 - q0) + + vmax.u8 q3, q3, q4 ; m9 = max(m5, m6) + + vmov.u8 q10, #0x80 + + vmax.u8 q15, q11, q12 ; m10 = max(m7, m8) + + vcgt.u8 q13, q13, q2 ; (abs(p1 - p0) > thresh)*-1 + vcgt.u8 q14, q14, q2 ; (abs(q1 - q0) > thresh)*-1 + vmax.u8 q15, q15, q3 ; m11 = max(m10, m9) + + vabd.u8 q2, q5, q8 ; a = abs(p1 - q1) + vqadd.u8 q9, q9, q9 ; b = abs(p0 - q0) * 2 + + veor q7, q7, q10 ; qs0 + + vcge.u8 q15, q1, q15 ; abs(m11) > limit + + vshr.u8 q2, q2, #1 ; a = a / 2 + veor q6, q6, q10 ; ps0 + + veor q5, q5, q10 ; ps1 + vqadd.u8 q9, q9, q2 ; a = b + a + + veor q8, q8, q10 ; qs1 + + vmov.u16 q4, #3 + + vsubl.s8 q2, d14, d12 ; ( qs0 - ps0) + vsubl.s8 q11, d15, d13 + + vcge.u8 q9, q0, q9 ; a > blimit + + vqsub.s8 q1, q5, q8 ; filter = clamp(ps1-qs1) + vorr q14, q13, q14 ; hev + + vmul.i16 q2, q2, q4 ; 3 * ( qs0 - ps0) + vmul.i16 q11, q11, q4 + + vand q1, q1, q14 ; filter &= hev + vand q15, q15, q9 ; mask + + vmov.u8 q4, #3 + + vaddw.s8 q2, q2, d2 ; filter + 3 * (qs0 - ps0) + vaddw.s8 q11, q11, d3 + + vmov.u8 q9, #4 + + ; filter = clamp(filter + 3 * ( qs0 - ps0)) + vqmovn.s16 d2, q2 + vqmovn.s16 d3, q11 + vand q1, q1, q15 ; filter &= mask + + vqadd.s8 q2, q1, q4 ; filter2 = clamp(filter+3) + vqadd.s8 q1, q1, q9 ; filter1 = clamp(filter+4) + vshr.s8 q2, q2, #3 ; filter2 >>= 3 + vshr.s8 q1, q1, #3 ; filter1 >>= 3 + + + vqadd.s8 q11, q6, q2 ; u = clamp(ps0 + filter2) + vqsub.s8 q0, q7, q1 ; u = clamp(qs0 - filter1) + + ; outer tap adjustments + vrshr.s8 q1, q1, #1 ; filter = ++filter1 >> 1 + + veor q7, q0, q10 ; *oq0 = u^0x80 + + vbic q1, q1, q14 ; filter &= ~hev + + vqadd.s8 q13, q5, q1 ; u = clamp(ps1 + filter) + vqsub.s8 q12, q8, q1 ; u = clamp(qs1 - filter) + + veor q6, q11, q10 ; *op0 = u^0x80 + veor q5, q13, q10 ; *op1 = u^0x80 + veor q8, q12, q10 ; *oq1 = u^0x80 + + bx lr + ENDP ; |vp9_loop_filter_neon_16| + + END diff --git a/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.c b/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.c new file mode 100644 index 0000000..0820db2 --- /dev/null +++ b/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.c @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "./vp9_rtcd.h" + +void vp9_lpf_horizontal_8_dual_neon(uint8_t *s, int p /* pitch */, + const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_horizontal_8(s, p, blimit0, limit0, thresh0, 1); + vp9_lpf_horizontal_8(s + 8, p, blimit1, limit1, thresh1, 1); +} + +void vp9_lpf_vertical_4_dual_neon(uint8_t *s, int p, + const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0, 1); + vp9_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1); +} + +void vp9_lpf_vertical_8_dual_neon(uint8_t *s, int p, + const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0, 1); + vp9_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1); +} + +void vp9_lpf_vertical_16_dual_neon(uint8_t *s, int p, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh) { + vp9_lpf_vertical_16_neon(s, p, blimit, limit, thresh); + vp9_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh); +} diff --git a/libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.asm b/libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.asm index 8b4fe5d..4430322 100644 --- a/libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.asm @@ -8,10 +8,10 @@ ; be found in the AUTHORS file in the root of the source tree. ; - EXPORT |vp9_loop_filter_horizontal_edge_neon| - EXPORT |vp9_loop_filter_vertical_edge_neon| - EXPORT |vp9_mbloop_filter_horizontal_edge_neon| - EXPORT |vp9_mbloop_filter_vertical_edge_neon| + EXPORT |vp9_lpf_horizontal_4_neon| + EXPORT |vp9_lpf_vertical_4_neon| + EXPORT |vp9_lpf_horizontal_8_neon| + EXPORT |vp9_lpf_vertical_8_neon| ARM AREA ||.text||, CODE, READONLY, ALIGN=2 @@ -21,12 +21,12 @@ ; TODO(fgalligan): See about removing the count code as this function is only ; called with a count of 1. ; -; void vp9_loop_filter_horizontal_edge_neon(uint8_t *s, -; int p /* pitch */, -; const uint8_t *blimit, -; const uint8_t *limit, -; const uint8_t *thresh, -; int count) +; void vp9_lpf_horizontal_4_neon(uint8_t *s, +; int p /* pitch */, +; const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh, +; int count) ; ; r0 uint8_t *s, ; r1 int p, /* pitch */ @@ -34,7 +34,7 @@ ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, ; sp+4 int count -|vp9_loop_filter_horizontal_edge_neon| PROC +|vp9_lpf_horizontal_4_neon| PROC push {lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit @@ -77,19 +77,19 @@ count_lf_h_loop end_vp9_lf_h_edge pop {pc} - ENDP ; |vp9_loop_filter_horizontal_edge_neon| + ENDP ; |vp9_lpf_horizontal_4_neon| ; Currently vp9 only works on iterations 8 at a time. The vp8 loop filter ; works on 16 iterations at a time. ; TODO(fgalligan): See about removing the count code as this function is only ; called with a count of 1. 
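One detail worth calling out in the C shim above: each *_dual wrapper covers a 16-pixel edge by calling the corresponding 8-pixel primitive twice, and the offset to the second half depends on the edge orientation. A hedged sketch of the pattern (the function and its name are illustrative, not part of the file; the real wrappers also pass independent blimit/limit/thresh pointers for each half):

    #include <stdint.h>

    typedef void (*lpf_fn)(uint8_t *s, int p, const uint8_t *blimit,
                           const uint8_t *limit, const uint8_t *thresh,
                           int count);

    /* A horizontal edge is contiguous in memory, so its second 8-pixel half
     * starts 8 bytes in; a vertical edge runs down the image, so its second
     * half starts 8 rows (8 * p bytes) down. */
    static void lpf_dual_sketch(lpf_fn f, int vertical_edge, uint8_t *s, int p,
                                const uint8_t *blimit, const uint8_t *limit,
                                const uint8_t *thresh) {
      f(s, p, blimit, limit, thresh, 1);
      f(s + (vertical_edge ? 8 * p : 8), p, blimit, limit, thresh, 1);
    }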
; -; void vp9_loop_filter_vertical_edge_neon(uint8_t *s, -; int p /* pitch */, -; const uint8_t *blimit, -; const uint8_t *limit, -; const uint8_t *thresh, -; int count) +; void vp9_lpf_vertical_4_neon(uint8_t *s, +; int p /* pitch */, +; const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh, +; int count) ; ; r0 uint8_t *s, ; r1 int p, /* pitch */ @@ -97,7 +97,7 @@ end_vp9_lf_h_edge ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, ; sp+4 int count -|vp9_loop_filter_vertical_edge_neon| PROC +|vp9_lpf_vertical_4_neon| PROC push {lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit @@ -158,7 +158,7 @@ count_lf_v_loop end_vp9_lf_v_edge pop {pc} - ENDP ; |vp9_loop_filter_vertical_edge_neon| + ENDP ; |vp9_lpf_vertical_4_neon| ; void vp9_loop_filter_neon(); ; This is a helper function for the loopfilters. The individual functions do the @@ -276,18 +276,18 @@ end_vp9_lf_v_edge bx lr ENDP ; |vp9_loop_filter_neon| -; void vp9_mbloop_filter_horizontal_edge_neon(uint8_t *s, int p, -; const uint8_t *blimit, -; const uint8_t *limit, -; const uint8_t *thresh, -; int count) +; void vp9_lpf_horizontal_8_neon(uint8_t *s, int p, +; const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh, +; int count) ; r0 uint8_t *s, ; r1 int p, /* pitch */ ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, ; sp+4 int count -|vp9_mbloop_filter_horizontal_edge_neon| PROC +|vp9_lpf_horizontal_8_neon| PROC push {r4-r5, lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit @@ -333,14 +333,14 @@ count_mblf_h_loop end_vp9_mblf_h_edge pop {r4-r5, pc} - ENDP ; |vp9_mbloop_filter_horizontal_edge_neon| + ENDP ; |vp9_lpf_horizontal_8_neon| -; void vp9_mbloop_filter_vertical_edge_neon(uint8_t *s, -; int pitch, -; const uint8_t *blimit, -; const uint8_t *limit, -; const uint8_t *thresh, -; int count) +; void vp9_lpf_vertical_8_neon(uint8_t *s, +; int pitch, +; const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh, +; int count) ; ; r0 uint8_t *s, ; r1 int pitch, @@ -348,7 +348,7 @@ end_vp9_mblf_h_edge ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, ; sp+4 int count -|vp9_mbloop_filter_vertical_edge_neon| PROC +|vp9_lpf_vertical_8_neon| PROC push {r4-r5, lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit @@ -420,7 +420,7 @@ count_mblf_v_loop end_vp9_mblf_v_edge pop {r4-r5, pc} - ENDP ; |vp9_mbloop_filter_vertical_edge_neon| + ENDP ; |vp9_lpf_vertical_8_neon| ; void vp9_mbloop_filter_neon(); ; This is a helper function for the loopfilters. The individual functions do the diff --git a/libvpx/vp9/common/arm/neon/vp9_mb_lpf_neon.asm b/libvpx/vp9/common/arm/neon/vp9_mb_lpf_neon.asm index 2e8001b..5fe2bba 100644 --- a/libvpx/vp9/common/arm/neon/vp9_mb_lpf_neon.asm +++ b/libvpx/vp9/common/arm/neon/vp9_mb_lpf_neon.asm @@ -8,23 +8,23 @@ ; be found in the AUTHORS file in the root of the source tree.
; - EXPORT |vp9_mb_lpf_horizontal_edge_w_neon| - EXPORT |vp9_mb_lpf_vertical_edge_w_neon| + EXPORT |vp9_lpf_horizontal_16_neon| + EXPORT |vp9_lpf_vertical_16_neon| ARM AREA ||.text||, CODE, READONLY, ALIGN=2 -; void vp9_mb_lpf_horizontal_edge_w_neon(uint8_t *s, int p, -; const uint8_t *blimit, -; const uint8_t *limit, -; const uint8_t *thresh -; int count) +; void vp9_lpf_horizontal_16_neon(uint8_t *s, int p, +; const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh +; int count) ; r0 uint8_t *s, ; r1 int p, /* pitch */ ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -|vp9_mb_lpf_horizontal_edge_w_neon| PROC +|vp9_lpf_horizontal_16_neon| PROC push {r4-r8, lr} vpush {d8-d15} ldr r4, [sp, #88] ; load thresh @@ -115,18 +115,18 @@ h_next vpop {d8-d15} pop {r4-r8, pc} - ENDP ; |vp9_mb_lpf_horizontal_edge_w_neon| + ENDP ; |vp9_lpf_horizontal_16_neon| -; void vp9_mb_lpf_vertical_edge_w_neon(uint8_t *s, int p, -; const uint8_t *blimit, -; const uint8_t *limit, -; const uint8_t *thresh) +; void vp9_lpf_vertical_16_neon(uint8_t *s, int p, +; const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh) ; r0 uint8_t *s, ; r1 int p, /* pitch */ ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -|vp9_mb_lpf_vertical_edge_w_neon| PROC +|vp9_lpf_vertical_16_neon| PROC push {r4-r8, lr} vpush {d8-d15} ldr r4, [sp, #88] ; load thresh @@ -279,7 +279,7 @@ v_end vpop {d8-d15} pop {r4-r8, pc} - ENDP ; |vp9_mb_lpf_vertical_edge_w_neon| + ENDP ; |vp9_lpf_vertical_16_neon| ; void vp9_wide_mbfilter_neon(); ; This is a helper function for the loopfilters. The individual functions do the @@ -439,6 +439,9 @@ v_end tst r7, #1 bxne lr + orrs r5, r5, r6 ; Check for 0 + orreq r7, r7, #2 ; Only do mbfilter branch + ; mbfilter flat && mask branch ; TODO(fgalligan): Can I decrease the cycles shifting to consecutive d's ; and using vbit on the q's? diff --git a/libvpx/vp9/common/arm/neon/vp9_reconintra_neon.asm b/libvpx/vp9/common/arm/neon/vp9_reconintra_neon.asm new file mode 100644 index 0000000..dc9856f --- /dev/null +++ b/libvpx/vp9/common/arm/neon/vp9_reconintra_neon.asm @@ -0,0 +1,634 @@ +; +; Copyright (c) 2014 The WebM project authors. All Rights Reserved. +; +; Use of this source code is governed by a BSD-style license +; that can be found in the LICENSE file in the root of the source +; tree. An additional intellectual property rights grant can be found +; in the file PATENTS. All contributing project authors may +; be found in the AUTHORS file in the root of the source tree.
+; + + EXPORT |vp9_v_predictor_4x4_neon| + EXPORT |vp9_v_predictor_8x8_neon| + EXPORT |vp9_v_predictor_16x16_neon| + EXPORT |vp9_v_predictor_32x32_neon| + EXPORT |vp9_h_predictor_4x4_neon| + EXPORT |vp9_h_predictor_8x8_neon| + EXPORT |vp9_h_predictor_16x16_neon| + EXPORT |vp9_h_predictor_32x32_neon| + EXPORT |vp9_tm_predictor_4x4_neon| + EXPORT |vp9_tm_predictor_8x8_neon| + EXPORT |vp9_tm_predictor_16x16_neon| + EXPORT |vp9_tm_predictor_32x32_neon| + ARM + REQUIRE8 + PRESERVE8 + + AREA ||.text||, CODE, READONLY, ALIGN=2 + +;void vp9_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_v_predictor_4x4_neon| PROC + vld1.32 {d0[0]}, [r2] + vst1.32 {d0[0]}, [r0], r1 + vst1.32 {d0[0]}, [r0], r1 + vst1.32 {d0[0]}, [r0], r1 + vst1.32 {d0[0]}, [r0], r1 + bx lr + ENDP ; |vp9_v_predictor_4x4_neon| + +;void vp9_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_v_predictor_8x8_neon| PROC + vld1.8 {d0}, [r2] + vst1.8 {d0}, [r0], r1 + vst1.8 {d0}, [r0], r1 + vst1.8 {d0}, [r0], r1 + vst1.8 {d0}, [r0], r1 + vst1.8 {d0}, [r0], r1 + vst1.8 {d0}, [r0], r1 + vst1.8 {d0}, [r0], r1 + vst1.8 {d0}, [r0], r1 + bx lr + ENDP ; |vp9_v_predictor_8x8_neon| + +;void vp9_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_v_predictor_16x16_neon| PROC + vld1.8 {q0}, [r2] + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + vst1.8 {q0}, [r0], r1 + bx lr + ENDP ; |vp9_v_predictor_16x16_neon| + +;void vp9_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_v_predictor_32x32_neon| PROC + vld1.8 {q0, q1}, [r2] + mov r2, #2 +loop_v + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + vst1.8 {q0, q1}, [r0], r1 + subs r2, r2, #1 + bgt loop_v + bx lr + ENDP ; |vp9_v_predictor_32x32_neon| + +;void vp9_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_h_predictor_4x4_neon| PROC + vld1.32 {d1[0]}, [r3] + vdup.8 d0, d1[0] + vst1.32 {d0[0]}, [r0], r1 + vdup.8 d0, d1[1] + vst1.32 {d0[0]}, [r0], r1 + vdup.8 d0, d1[2] + vst1.32 {d0[0]}, [r0], r1 + vdup.8 d0, d1[3] + vst1.32 {d0[0]}, [r0], r1 + bx lr + ENDP ; |vp9_h_predictor_4x4_neon| + +;void vp9_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t 
y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_h_predictor_8x8_neon| PROC + vld1.64 {d1}, [r3] + vdup.8 d0, d1[0] + vst1.64 {d0}, [r0], r1 + vdup.8 d0, d1[1] + vst1.64 {d0}, [r0], r1 + vdup.8 d0, d1[2] + vst1.64 {d0}, [r0], r1 + vdup.8 d0, d1[3] + vst1.64 {d0}, [r0], r1 + vdup.8 d0, d1[4] + vst1.64 {d0}, [r0], r1 + vdup.8 d0, d1[5] + vst1.64 {d0}, [r0], r1 + vdup.8 d0, d1[6] + vst1.64 {d0}, [r0], r1 + vdup.8 d0, d1[7] + vst1.64 {d0}, [r0], r1 + bx lr + ENDP ; |vp9_h_predictor_8x8_neon| + +;void vp9_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_h_predictor_16x16_neon| PROC + vld1.8 {q1}, [r3] + vdup.8 q0, d2[0] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[1] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[2] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[3] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[4] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[5] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[6] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[7] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[0] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[1] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[2] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[3] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[4] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[5] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[6] + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[7] + vst1.8 {q0}, [r0], r1 + bx lr + ENDP ; |vp9_h_predictor_16x16_neon| + +;void vp9_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_h_predictor_32x32_neon| PROC + sub r1, r1, #16 + mov r2, #2 +loop_h + vld1.8 {q1}, [r3]! + vdup.8 q0, d2[0] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[1] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[2] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[3] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[4] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[5] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[6] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d2[7] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[0] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[1] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[2] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[3] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[4] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[5] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[6] + vst1.8 {q0}, [r0]! + vst1.8 {q0}, [r0], r1 + vdup.8 q0, d3[7] + vst1.8 {q0}, [r0]! 
+ vst1.8 {q0}, [r0], r1 + subs r2, r2, #1 + bgt loop_h + bx lr + ENDP ; |vp9_h_predictor_32x32_neon| + +;void vp9_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_tm_predictor_4x4_neon| PROC + ; Load ytop_left = above[-1]; + sub r12, r2, #1 + ldrb r12, [r12] + vdup.u8 d0, r12 + + ; Load above 4 pixels + vld1.32 {d2[0]}, [r2] + + ; Compute above - ytop_left + vsubl.u8 q3, d2, d0 + + ; Load left row by row and compute left + (above - ytop_left) + ; 1st row and 2nd row + ldrb r12, [r3], #1 + ldrb r2, [r3], #1 + vdup.u16 q1, r12 + vdup.u16 q2, r2 + vadd.s16 q1, q1, q3 + vadd.s16 q2, q2, q3 + vqmovun.s16 d0, q1 + vqmovun.s16 d1, q2 + vst1.32 {d0[0]}, [r0], r1 + vst1.32 {d1[0]}, [r0], r1 + + ; 3rd row and 4th row + ldrb r12, [r3], #1 + ldrb r2, [r3], #1 + vdup.u16 q1, r12 + vdup.u16 q2, r2 + vadd.s16 q1, q1, q3 + vadd.s16 q2, q2, q3 + vqmovun.s16 d0, q1 + vqmovun.s16 d1, q2 + vst1.32 {d0[0]}, [r0], r1 + vst1.32 {d1[0]}, [r0], r1 + bx lr + ENDP ; |vp9_tm_predictor_4x4_neon| + +;void vp9_tm_predictor_8x8_neon (uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_tm_predictor_8x8_neon| PROC + ; Load ytop_left = above[-1]; + sub r12, r2, #1 + ldrb r12, [r12] + vdup.u8 d0, r12 + + ; preload 8 left + vld1.8 {d30}, [r3] + + ; Load above 8 pixels + vld1.64 {d2}, [r2] + + vmovl.u8 q10, d30 + + ; Compute above - ytop_left + vsubl.u8 q3, d2, d0 + + ; Load left row by row and compute left + (above - ytop_left) + ; 1st row and 2nd row + vdup.16 q0, d20[0] + vdup.16 q1, d20[1] + vadd.s16 q0, q3, q0 + vadd.s16 q1, q3, q1 + + ; 3rd row and 4th row + vdup.16 q8, d20[2] + vdup.16 q9, d20[3] + vadd.s16 q8, q3, q8 + vadd.s16 q9, q3, q9 + + vqmovun.s16 d0, q0 + vqmovun.s16 d1, q1 + vqmovun.s16 d2, q8 + vqmovun.s16 d3, q9 + + vst1.64 {d0}, [r0], r1 + vst1.64 {d1}, [r0], r1 + vst1.64 {d2}, [r0], r1 + vst1.64 {d3}, [r0], r1 + + ; 5th row and 6th row + vdup.16 q0, d21[0] + vdup.16 q1, d21[1] + vadd.s16 q0, q3, q0 + vadd.s16 q1, q3, q1 + + ; 7th row and 8th row + vdup.16 q8, d21[2] + vdup.16 q9, d21[3] + vadd.s16 q8, q3, q8 + vadd.s16 q9, q3, q9 + + vqmovun.s16 d0, q0 + vqmovun.s16 d1, q1 + vqmovun.s16 d2, q8 + vqmovun.s16 d3, q9 + + vst1.64 {d0}, [r0], r1 + vst1.64 {d1}, [r0], r1 + vst1.64 {d2}, [r0], r1 + vst1.64 {d3}, [r0], r1 + + bx lr + ENDP ; |vp9_tm_predictor_8x8_neon| + +;void vp9_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_tm_predictor_16x16_neon| PROC + ; Load ytop_left = above[-1]; + sub r12, r2, #1 + ldrb r12, [r12] + vdup.u8 q0, r12 + + ; Load above 8 pixels + vld1.8 {q1}, [r2] + + ; preload 8 left into r12 + vld1.8 {d18}, [r3]! + + ; Compute above - ytop_left + vsubl.u8 q2, d2, d0 + vsubl.u8 q3, d3, d1 + + vmovl.u8 q10, d18 + + ; Load left row by row and compute left + (above - ytop_left) + ; Process 8 rows in each single loop and loop 2 times to process 16 rows. + mov r2, #2 + +loop_16x16_neon + ; Process two rows. 
+ vdup.16 q0, d20[0] + vdup.16 q8, d20[1] + vadd.s16 q1, q0, q2 + vadd.s16 q0, q0, q3 + vadd.s16 q11, q8, q2 + vadd.s16 q8, q8, q3 + vqmovun.s16 d2, q1 + vqmovun.s16 d3, q0 + vqmovun.s16 d22, q11 + vqmovun.s16 d23, q8 + vdup.16 q0, d20[2] ; preload next 2 rows data + vdup.16 q8, d20[3] + vst1.64 {d2,d3}, [r0], r1 + vst1.64 {d22,d23}, [r0], r1 + + ; Process two rows. + vadd.s16 q1, q0, q2 + vadd.s16 q0, q0, q3 + vadd.s16 q11, q8, q2 + vadd.s16 q8, q8, q3 + vqmovun.s16 d2, q1 + vqmovun.s16 d3, q0 + vqmovun.s16 d22, q11 + vqmovun.s16 d23, q8 + vdup.16 q0, d21[0] ; preload next 2 rows data + vdup.16 q8, d21[1] + vst1.64 {d2,d3}, [r0], r1 + vst1.64 {d22,d23}, [r0], r1 + + vadd.s16 q1, q0, q2 + vadd.s16 q0, q0, q3 + vadd.s16 q11, q8, q2 + vadd.s16 q8, q8, q3 + vqmovun.s16 d2, q1 + vqmovun.s16 d3, q0 + vqmovun.s16 d22, q11 + vqmovun.s16 d23, q8 + vdup.16 q0, d21[2] ; preload next 2 rows data + vdup.16 q8, d21[3] + vst1.64 {d2,d3}, [r0], r1 + vst1.64 {d22,d23}, [r0], r1 + + + vadd.s16 q1, q0, q2 + vadd.s16 q0, q0, q3 + vadd.s16 q11, q8, q2 + vadd.s16 q8, q8, q3 + vqmovun.s16 d2, q1 + vqmovun.s16 d3, q0 + vqmovun.s16 d22, q11 + vqmovun.s16 d23, q8 + vld1.8 {d18}, [r3]! ; preload 8 left into r12 + vmovl.u8 q10, d18 + vst1.64 {d2,d3}, [r0], r1 + vst1.64 {d22,d23}, [r0], r1 + + subs r2, r2, #1 + bgt loop_16x16_neon + + bx lr + ENDP ; |vp9_tm_predictor_16x16_neon| + +;void vp9_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride, +; const uint8_t *above, +; const uint8_t *left) +; r0 uint8_t *dst +; r1 ptrdiff_t y_stride +; r2 const uint8_t *above +; r3 const uint8_t *left + +|vp9_tm_predictor_32x32_neon| PROC + ; Load ytop_left = above[-1]; + sub r12, r2, #1 + ldrb r12, [r12] + vdup.u8 q0, r12 + + ; Load above 32 pixels + vld1.8 {q1}, [r2]! + vld1.8 {q2}, [r2] + + ; preload 8 left pixels + vld1.8 {d26}, [r3]! + + ; Compute above - ytop_left + vsubl.u8 q8, d2, d0 + vsubl.u8 q9, d3, d1 + vsubl.u8 q10, d4, d0 + vsubl.u8 q11, d5, d1 + + vmovl.u8 q3, d26 + + ; Load left row by row and compute left + (above - ytop_left) + ; Process 8 rows in each single loop and loop 4 times to process 32 rows. + mov r2, #4 + +loop_32x32_neon + ; Process two rows. + vdup.16 q0, d6[0] + vdup.16 q2, d6[1] + vadd.s16 q12, q0, q8 + vadd.s16 q13, q0, q9 + vadd.s16 q14, q0, q10 + vadd.s16 q15, q0, q11 + vqmovun.s16 d0, q12 + vqmovun.s16 d1, q13 + vadd.s16 q12, q2, q8 + vadd.s16 q13, q2, q9 + vqmovun.s16 d2, q14 + vqmovun.s16 d3, q15 + vadd.s16 q14, q2, q10 + vadd.s16 q15, q2, q11 + vst1.64 {d0-d3}, [r0], r1 + vqmovun.s16 d24, q12 + vqmovun.s16 d25, q13 + vqmovun.s16 d26, q14 + vqmovun.s16 d27, q15 + vdup.16 q1, d6[2] + vdup.16 q2, d6[3] + vst1.64 {d24-d27}, [r0], r1 + + ; Process two rows. + vadd.s16 q12, q1, q8 + vadd.s16 q13, q1, q9 + vadd.s16 q14, q1, q10 + vadd.s16 q15, q1, q11 + vqmovun.s16 d0, q12 + vqmovun.s16 d1, q13 + vadd.s16 q12, q2, q8 + vadd.s16 q13, q2, q9 + vqmovun.s16 d2, q14 + vqmovun.s16 d3, q15 + vadd.s16 q14, q2, q10 + vadd.s16 q15, q2, q11 + vst1.64 {d0-d3}, [r0], r1 + vqmovun.s16 d24, q12 + vqmovun.s16 d25, q13 + vqmovun.s16 d26, q14 + vqmovun.s16 d27, q15 + vdup.16 q0, d7[0] + vdup.16 q2, d7[1] + vst1.64 {d24-d27}, [r0], r1 + + ; Process two rows.
+ vadd.s16 q12, q0, q8 + vadd.s16 q13, q0, q9 + vadd.s16 q14, q0, q10 + vadd.s16 q15, q0, q11 + vqmovun.s16 d0, q12 + vqmovun.s16 d1, q13 + vadd.s16 q12, q2, q8 + vadd.s16 q13, q2, q9 + vqmovun.s16 d2, q14 + vqmovun.s16 d3, q15 + vadd.s16 q14, q2, q10 + vadd.s16 q15, q2, q11 + vst1.64 {d0-d3}, [r0], r1 + vqmovun.s16 d24, q12 + vqmovun.s16 d25, q13 + vqmovun.s16 d26, q14 + vqmovun.s16 d27, q15 + vdup.16 q0, d7[2] + vdup.16 q2, d7[3] + vst1.64 {d24-d27}, [r0], r1 + + ; Process two rows. + vadd.s16 q12, q0, q8 + vadd.s16 q13, q0, q9 + vadd.s16 q14, q0, q10 + vadd.s16 q15, q0, q11 + vqmovun.s16 d0, q12 + vqmovun.s16 d1, q13 + vadd.s16 q12, q2, q8 + vadd.s16 q13, q2, q9 + vqmovun.s16 d2, q14 + vqmovun.s16 d3, q15 + vadd.s16 q14, q2, q10 + vadd.s16 q15, q2, q11 + vst1.64 {d0-d3}, [r0], r1 + vqmovun.s16 d24, q12 + vqmovun.s16 d25, q13 + vld1.8 {d0}, [r3]! ; preload 8 left pixels + vqmovun.s16 d26, q14 + vqmovun.s16 d27, q15 + vmovl.u8 q3, d0 + vst1.64 {d24-d27}, [r0], r1 + + subs r2, r2, #1 + bgt loop_32x32_neon + + bx lr + ENDP ; |vp9_tm_predictor_32x32_neon| + + END diff --git a/libvpx/vp9/common/generic/vp9_systemdependent.c b/libvpx/vp9/common/generic/vp9_systemdependent.c deleted file mode 100644 index 536febb..0000000 --- a/libvpx/vp9/common/generic/vp9_systemdependent.c +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#include "./vpx_config.h" -#include "./vp9_rtcd.h" -#include "vp9/common/vp9_onyxc_int.h" - -void vp9_machine_specific_config(VP9_COMMON *cm) { - (void)cm; - vp9_rtcd(); -} diff --git a/libvpx/vp9/common/mips/dspr2/vp9_common_dspr2.h b/libvpx/vp9/common/mips/dspr2/vp9_common_dspr2.h index 644264f..6ebea9f 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_common_dspr2.h +++ b/libvpx/vp9/common/mips/dspr2/vp9_common_dspr2.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. 
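Returning to the vp9_tm_predictor_* routines added in vp9_reconintra_neon.asm above: every size applies the same TrueMotion rule, pred(r, c) = clip(left[r] + above[c] - above[-1]), which is exactly what the vsubl.u8 / vadd.s16 / vqmovun.s16 sequences compute eight pixels at a time. A scalar C sketch for reference (tm_predictor_sketch is illustrative, not a function from this diff):

    #include <stddef.h>
    #include <stdint.h>

    /* Every output pixel is left[r] + (above[c] - ytop_left), saturated to
     * 0..255; bs is the block size (4, 8, 16 or 32). */
    static void tm_predictor_sketch(uint8_t *dst, ptrdiff_t stride, int bs,
                                    const uint8_t *above, const uint8_t *left) {
      const int ytop_left = above[-1];  /* pixel above and left of the block */
      int r, c;
      for (r = 0; r < bs; ++r) {
        for (c = 0; c < bs; ++c) {
          const int v = left[r] + above[c] - ytop_left;
          dst[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
        }
        dst += stride;
      }
    }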
*/ -#ifndef VP9_COMMON_VP9_COMMON_DSPR2_H_ -#define VP9_COMMON_VP9_COMMON_DSPR2_H_ +#ifndef VP9_COMMON_MIPS_DSPR2_VP9_COMMON_DSPR2_H_ +#define VP9_COMMON_MIPS_DSPR2_VP9_COMMON_DSPR2_H_ #include <assert.h> @@ -17,6 +17,10 @@ #include "vpx/vpx_integer.h" #include "vp9/common/vp9_common.h" +#ifdef __cplusplus +extern "C" { +#endif + #if HAVE_DSPR2 #define CROP_WIDTH 512 extern uint8_t *vp9_ff_cropTbl; @@ -81,8 +85,8 @@ static INLINE void vp9_prefetch_store_streamed(unsigned char *dst) { ); } -void vp9_idct32_1d_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, - int dest_stride); +void vp9_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, + int dest_stride); void vp9_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, @@ -114,4 +118,8 @@ void vp9_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, int w, int h); #endif // #if HAVE_DSPR2 -#endif // VP9_COMMON_VP9_COMMON_DSPR2_H_ +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_COMMON_MIPS_DSPR2_VP9_COMMON_DSPR2_H_ diff --git a/libvpx/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c b/libvpx/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c index 1b2f550..19c582f 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c +++ b/libvpx/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c @@ -19,8 +19,8 @@ #include "vp9/common/mips/dspr2/vp9_common_dspr2.h" #if HAVE_DSPR2 -static void idct16_1d_rows_dspr2(const int16_t *input, int16_t *output, - uint32_t no_rows) { +static void idct16_rows_dspr2(const int16_t *input, int16_t *output, + uint32_t no_rows) { int i; int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7; int step1_10, step1_11, step1_12, step1_13; @@ -404,8 +404,8 @@ static void idct16_1d_rows_dspr2(const int16_t *input, int16_t *output, } } -static void idct16_1d_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, - int dest_stride) { +static void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, + int dest_stride) { int i; int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7; int step1_8, step1_9, step1_10, step1_11; @@ -905,13 +905,13 @@ void vp9_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, ); // First transform rows - idct16_1d_rows_dspr2(input, out, 16); + idct16_rows_dspr2(input, out, 16); // Then transform columns and add to dest - idct16_1d_cols_add_blk_dspr2(out, dest, dest_stride); + idct16_cols_add_blk_dspr2(out, dest, dest_stride); } -static void iadst16_1d(const int16_t *input, int16_t *output) { +static void iadst16(const int16_t *input, int16_t *output) { int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15; int x0 = input[15]; @@ -1099,16 +1099,16 @@ void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, switch (tx_type) { case DCT_DCT: // DCT in both horizontal and vertical - idct16_1d_rows_dspr2(input, outptr, 16); - idct16_1d_cols_add_blk_dspr2(out, dest, pitch); + idct16_rows_dspr2(input, outptr, 16); + idct16_cols_add_blk_dspr2(out, dest, pitch); break; case ADST_DCT: // ADST in vertical, DCT in horizontal - idct16_1d_rows_dspr2(input, outptr, 16); + idct16_rows_dspr2(input, outptr, 16); outptr = out; for (i = 0; i < 16; ++i) { - iadst16_1d(outptr, temp_out); + iadst16(outptr, temp_out); for (j = 0; j < 16; ++j) dest[j * pitch + i] = @@ -1125,7 +1125,7 @@ void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, /* prefetch row */ vp9_prefetch_load((const uint8_t *)(input + 16)); - iadst16_1d(input, outptr); + iadst16(input, outptr); input += 16; 
outptr += 16; } @@ -1134,7 +1134,7 @@ void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, for (j = 0; j < 16; ++j) temp_in[j * 16 + i] = out[i * 16 + j]; - idct16_1d_cols_add_blk_dspr2(temp_in, dest, pitch); + idct16_cols_add_blk_dspr2(temp_in, dest, pitch); } break; case ADST_ADST: // ADST in both directions @@ -1145,7 +1145,7 @@ void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, /* prefetch row */ vp9_prefetch_load((const uint8_t *)(input + 16)); - iadst16_1d(input, outptr); + iadst16(input, outptr); input += 16; outptr += 16; } @@ -1153,7 +1153,7 @@ void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, for (i = 0; i < 16; ++i) { for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; - iadst16_1d(temp_in, temp_out); + iadst16(temp_in, temp_out); for (j = 0; j < 16; ++j) dest[j * pitch + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) @@ -1183,7 +1183,7 @@ void vp9_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest, // First transform rows. Since all non-zero dct coefficients are in // upper-left 4x4 area, we only need to calculate first 4 rows here. - idct16_1d_rows_dspr2(input, outptr, 4); + idct16_rows_dspr2(input, outptr, 4); outptr += 4; for (i = 0; i < 6; ++i) { @@ -1213,7 +1213,7 @@ void vp9_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest, } // Then transform columns - idct16_1d_cols_add_blk_dspr2(out, dest, dest_stride); + idct16_cols_add_blk_dspr2(out, dest, dest_stride); } void vp9_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest, diff --git a/libvpx/vp9/common/mips/dspr2/vp9_itrans32_cols_dspr2.c b/libvpx/vp9/common/mips/dspr2/vp9_itrans32_cols_dspr2.c index 5e92db3..132d88c 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_itrans32_cols_dspr2.c +++ b/libvpx/vp9/common/mips/dspr2/vp9_itrans32_cols_dspr2.c @@ -18,8 +18,8 @@ #include "vp9/common/mips/dspr2/vp9_common_dspr2.h" #if HAVE_DSPR2 -void vp9_idct32_1d_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, - int dest_stride) { +void vp9_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, + int dest_stride) { int16_t step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6; int16_t step1_7, step1_8, step1_9, step1_10, step1_11, step1_12, step1_13; int16_t step1_14, step1_15, step1_16, step1_17, step1_18, step1_19; diff --git a/libvpx/vp9/common/mips/dspr2/vp9_itrans32_dspr2.c b/libvpx/vp9/common/mips/dspr2/vp9_itrans32_dspr2.c index bc67594..74a90b0 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_itrans32_dspr2.c +++ b/libvpx/vp9/common/mips/dspr2/vp9_itrans32_dspr2.c @@ -19,8 +19,8 @@ #include "vp9/common/mips/dspr2/vp9_common_dspr2.h" #if HAVE_DSPR2 -static void idct32_1d_rows_dspr2(const int16_t *input, int16_t *output, - uint32_t no_rows) { +static void idct32_rows_dspr2(const int16_t *input, int16_t *output, + uint32_t no_rows) { int16_t step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6; int16_t step1_7, step1_8, step1_9, step1_10, step1_11, step1_12, step1_13; int16_t step1_14, step1_15, step1_16, step1_17, step1_18, step1_19, step1_20; @@ -882,10 +882,10 @@ void vp9_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest, ); // Rows - idct32_1d_rows_dspr2(input, outptr, 32); + idct32_rows_dspr2(input, outptr, 32); // Columns - vp9_idct32_1d_cols_add_blk_dspr2(out, dest, dest_stride); + vp9_idct32_cols_add_blk_dspr2(out, dest, dest_stride); } void vp9_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, @@ -903,7 +903,7 @@ void vp9_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, ); // Rows - 
idct32_1d_rows_dspr2(input, outptr, 8); + idct32_rows_dspr2(input, outptr, 8); outptr += 8; __asm__ __volatile__ ( @@ -947,7 +947,7 @@ void vp9_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, } // Columns - vp9_idct32_1d_cols_add_blk_dspr2(out, dest, stride); + vp9_idct32_cols_add_blk_dspr2(out, dest, stride); } void vp9_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest, diff --git a/libvpx/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c b/libvpx/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c index 5b7aa5e..1990348 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c +++ b/libvpx/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c @@ -19,7 +19,7 @@ #include "vp9/common/mips/dspr2/vp9_common_dspr2.h" #if HAVE_DSPR2 -static void vp9_idct4_1d_rows_dspr2(const int16_t *input, int16_t *output) { +static void vp9_idct4_rows_dspr2(const int16_t *input, int16_t *output) { int16_t step_0, step_1, step_2, step_3; int Temp0, Temp1, Temp2, Temp3; const int const_2_power_13 = 8192; @@ -104,7 +104,7 @@ static void vp9_idct4_1d_rows_dspr2(const int16_t *input, int16_t *output) { } } -static void vp9_idct4_1d_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, +static void vp9_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride) { int16_t step_0, step_1, step_2, step_3; int Temp0, Temp1, Temp2, Temp3; @@ -240,10 +240,10 @@ void vp9_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, ); // Rows - vp9_idct4_1d_rows_dspr2(input, outptr); + vp9_idct4_rows_dspr2(input, outptr); // Columns - vp9_idct4_1d_columns_add_blk_dspr2(&out[0], dest, dest_stride); + vp9_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride); } void vp9_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest, @@ -319,7 +319,7 @@ void vp9_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest, } } -static void iadst4_1d_dspr2(const int16_t *input, int16_t *output) { +static void iadst4_dspr2(const int16_t *input, int16_t *output) { int s0, s1, s2, s3, s4, s5, s6, s7; int x0, x1, x2, x3; @@ -379,16 +379,16 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, switch (tx_type) { case DCT_DCT: // DCT in both horizontal and vertical - vp9_idct4_1d_rows_dspr2(input, outptr); - vp9_idct4_1d_columns_add_blk_dspr2(&out[0], dest, dest_stride); + vp9_idct4_rows_dspr2(input, outptr); + vp9_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride); break; case ADST_DCT: // ADST in vertical, DCT in horizontal - vp9_idct4_1d_rows_dspr2(input, outptr); + vp9_idct4_rows_dspr2(input, outptr); outptr = out; for (i = 0; i < 4; ++i) { - iadst4_1d_dspr2(outptr, temp_out); + iadst4_dspr2(outptr, temp_out); for (j = 0; j < 4; ++j) dest[j * dest_stride + i] = @@ -400,7 +400,7 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, break; case DCT_ADST: // DCT in vertical, ADST in horizontal for (i = 0; i < 4; ++i) { - iadst4_1d_dspr2(input, outptr); + iadst4_dspr2(input, outptr); input += 4; outptr += 4; } @@ -410,11 +410,11 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, temp_in[i * 4 + j] = out[j * 4 + i]; } } - vp9_idct4_1d_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride); + vp9_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride); break; case ADST_ADST: // ADST in both directions for (i = 0; i < 4; ++i) { - iadst4_1d_dspr2(input, outptr); + iadst4_dspr2(input, outptr); input += 4; outptr += 4; } @@ -422,7 +422,7 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; - 
iadst4_1d_dspr2(temp_in, temp_out); + iadst4_dspr2(temp_in, temp_out); for (j = 0; j < 4; ++j) dest[j * dest_stride + i] = diff --git a/libvpx/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c b/libvpx/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c index 93a0840..acccaea 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c +++ b/libvpx/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c @@ -19,8 +19,8 @@ #include "vp9/common/mips/dspr2/vp9_common_dspr2.h" #if HAVE_DSPR2 -static void idct8_1d_rows_dspr2(const int16_t *input, int16_t *output, - uint32_t no_rows) { +static void idct8_rows_dspr2(const int16_t *input, int16_t *output, + uint32_t no_rows) { int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7; const int const_2_power_13 = 8192; int Temp0, Temp1, Temp2, Temp3, Temp4; @@ -200,8 +200,8 @@ static void idct8_1d_rows_dspr2(const int16_t *input, int16_t *output, } } -static void idct8_1d_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, - int dest_stride) { +static void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, + int dest_stride) { int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7; int Temp0, Temp1, Temp2, Temp3; int i; @@ -462,13 +462,13 @@ void vp9_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, ); // First transform rows - idct8_1d_rows_dspr2(input, outptr, 8); + idct8_rows_dspr2(input, outptr, 8); // Then transform columns and add to dest - idct8_1d_columns_add_blk_dspr2(&out[0], dest, dest_stride); + idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride); } -static void iadst8_1d_dspr2(const int16_t *input, int16_t *output) { +static void iadst8_dspr2(const int16_t *input, int16_t *output) { int s0, s1, s2, s3, s4, s5, s6, s7; int x0, x1, x2, x3, x4, x5, x6, x7; @@ -563,14 +563,14 @@ void vp9_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, switch (tx_type) { case DCT_DCT: // DCT in both horizontal and vertical - idct8_1d_rows_dspr2(input, outptr, 8); - idct8_1d_columns_add_blk_dspr2(&out[0], dest, dest_stride); + idct8_rows_dspr2(input, outptr, 8); + idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride); break; case ADST_DCT: // ADST in vertical, DCT in horizontal - idct8_1d_rows_dspr2(input, outptr, 8); + idct8_rows_dspr2(input, outptr, 8); for (i = 0; i < 8; ++i) { - iadst8_1d_dspr2(&out[i * 8], temp_out); + iadst8_dspr2(&out[i * 8], temp_out); for (j = 0; j < 8; ++j) dest[j * dest_stride + i] = @@ -580,7 +580,7 @@ void vp9_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, break; case DCT_ADST: // DCT in vertical, ADST in horizontal for (i = 0; i < 8; ++i) { - iadst8_1d_dspr2(input, outptr); + iadst8_dspr2(input, outptr); input += 8; outptr += 8; } @@ -590,11 +590,11 @@ void vp9_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, temp_in[i * 8 + j] = out[j * 8 + i]; } } - idct8_1d_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride); + idct8_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride); break; case ADST_ADST: // ADST in both directions for (i = 0; i < 8; ++i) { - iadst8_1d_dspr2(input, outptr); + iadst8_dspr2(input, outptr); input += 8; outptr += 8; } @@ -603,7 +603,7 @@ void vp9_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; - iadst8_1d_dspr2(temp_in, temp_out); + iadst8_dspr2(temp_in, temp_out); for (j = 0; j < 8; ++j) dest[j * dest_stride + i] = @@ -631,7 +631,7 @@ void vp9_idct8x8_10_add_dspr2(const int16_t *input, uint8_t *dest, ); // First transform rows - idct8_1d_rows_dspr2(input, outptr, 4); + 
idct8_rows_dspr2(input, outptr, 4); outptr += 4; @@ -659,7 +659,7 @@ void vp9_idct8x8_10_add_dspr2(const int16_t *input, uint8_t *dest, // Then transform columns and add to dest - idct8_1d_columns_add_blk_dspr2(&out[0], dest, dest_stride); + idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride); } void vp9_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest, diff --git a/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.c b/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.c index 36cfc83..3df7f4c 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.c +++ b/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.c @@ -20,12 +20,12 @@ #include "vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.h" #if HAVE_DSPR2 -void vp9_loop_filter_horizontal_edge_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_horizontal_4_dspr2(unsigned char *s, + int pitch, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, + int count) { uint8_t i; uint32_t mask; uint32_t hev; @@ -114,12 +114,12 @@ void vp9_loop_filter_horizontal_edge_dspr2(unsigned char *s, } } -void vp9_loop_filter_vertical_edge_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_vertical_4_dspr2(unsigned char *s, + int pitch, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, + int count) { uint8_t i; uint32_t mask, hev; uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; @@ -306,4 +306,57 @@ void vp9_loop_filter_vertical_edge_dspr2(unsigned char *s, } } } + +void vp9_lpf_horizontal_4_dual_dspr2(uint8_t *s, int p /* pitch */, + const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0, 1); + vp9_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1, 1); +} + +void vp9_lpf_horizontal_8_dual_dspr2(uint8_t *s, int p /* pitch */, + const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0, 1); + vp9_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1, 1); +} + +void vp9_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, + const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0, 1); + vp9_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, 1); +} + +void vp9_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, + const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0, 1); + vp9_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, + 1); +} + +void vp9_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh) { + vp9_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh); + vp9_lpf_vertical_16_dspr2(s + 8 * p, p, blimit, limit, thresh); +} #endif // #if HAVE_DSPR2 diff --git a/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.h b/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.h index 98bfcfa..008cf8c 
100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.h +++ b/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.h @@ -17,6 +17,10 @@ #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_onyxc_int.h" +#ifdef __cplusplus +extern "C" { +#endif + #if HAVE_DSPR2 /* inputs & outputs are quad-byte vectors */ static INLINE void vp9_filter_dspr2(uint32_t mask, uint32_t hev, @@ -752,4 +756,8 @@ static INLINE void vp9_wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6, *oq6 = res_oq6; } #endif // #if HAVE_DSPR2 +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_FILTERS_DSPR2_H_ diff --git a/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_macros_dspr2.h b/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_macros_dspr2.h index 4cb2ebb..ca01a6a 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_macros_dspr2.h +++ b/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_macros_dspr2.h @@ -17,6 +17,10 @@ #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_onyxc_int.h" +#ifdef __cplusplus +extern "C" { +#endif + #if HAVE_DSPR2 #define STORE_F0() { \ __asm__ __volatile__ ( \ @@ -467,4 +471,8 @@ } #endif // #if HAVE_DSPR2 +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_MACROS_DSPR2_H_ diff --git a/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_masks_dspr2.h b/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_masks_dspr2.h index b9e0aca..5b0d9cc 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_masks_dspr2.h +++ b/libvpx/vp9/common/mips/dspr2/vp9_loopfilter_masks_dspr2.h @@ -17,6 +17,10 @@ #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_onyxc_int.h" +#ifdef __cplusplus +extern "C" { +#endif + #if HAVE_DSPR2 /* processing 4 pixels at the same time * compute hev and mask in the same function */ @@ -362,4 +366,8 @@ static INLINE void vp9_flatmask5(uint32_t p4, uint32_t p3, *flat2 = flat1; } #endif // #if HAVE_DSPR2 +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_MASKS_DSPR2_H_ diff --git a/libvpx/vp9/common/mips/dspr2/vp9_mbloop_loopfilter_dspr2.c b/libvpx/vp9/common/mips/dspr2/vp9_mbloop_loopfilter_dspr2.c index adfd755..7cd0b63 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_mbloop_loopfilter_dspr2.c +++ b/libvpx/vp9/common/mips/dspr2/vp9_mbloop_loopfilter_dspr2.c @@ -20,12 +20,12 @@ #include "vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.h" #if HAVE_DSPR2 -void vp9_mbloop_filter_horizontal_edge_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_horizontal_8_dspr2(unsigned char *s, + int pitch, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, + int count) { uint32_t mask; uint32_t hev, flat; uint8_t i; @@ -319,12 +319,12 @@ void vp9_mbloop_filter_horizontal_edge_dspr2(unsigned char *s, } } -void vp9_mbloop_filter_vertical_edge_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_vertical_8_dspr2(unsigned char *s, + int pitch, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, + int count) { uint8_t i; uint32_t mask, hev, flat; uint8_t *s1, *s2, *s3, *s4; diff --git a/libvpx/vp9/common/mips/dspr2/vp9_mblpf_horiz_loopfilter_dspr2.c b/libvpx/vp9/common/mips/dspr2/vp9_mblpf_horiz_loopfilter_dspr2.c index 0759755..6c94674 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_mblpf_horiz_loopfilter_dspr2.c +++ 
b/libvpx/vp9/common/mips/dspr2/vp9_mblpf_horiz_loopfilter_dspr2.c @@ -20,12 +20,12 @@ #include "vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.h" #if HAVE_DSPR2 -void vp9_mb_lpf_horizontal_edge_w_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_horizontal_16_dspr2(unsigned char *s, + int pitch, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, + int count) { uint32_t mask; uint32_t hev, flat, flat2; uint8_t i; diff --git a/libvpx/vp9/common/mips/dspr2/vp9_mblpf_vert_loopfilter_dspr2.c b/libvpx/vp9/common/mips/dspr2/vp9_mblpf_vert_loopfilter_dspr2.c index 9e9171c..851fc6c 100644 --- a/libvpx/vp9/common/mips/dspr2/vp9_mblpf_vert_loopfilter_dspr2.c +++ b/libvpx/vp9/common/mips/dspr2/vp9_mblpf_vert_loopfilter_dspr2.c @@ -20,11 +20,11 @@ #include "vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.h" #if HAVE_DSPR2 -void vp9_mb_lpf_vertical_edge_w_dspr2(uint8_t *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh) { +void vp9_lpf_vertical_16_dspr2(uint8_t *s, + int pitch, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh) { uint8_t i; uint32_t mask, hev, flat, flat2; uint8_t *s1, *s2, *s3, *s4; diff --git a/libvpx/vp9/common/vp9_alloccommon.c b/libvpx/vp9/common/vp9_alloccommon.c index d298160..a72821b 100644 --- a/libvpx/vp9/common/vp9_alloccommon.c +++ b/libvpx/vp9/common/vp9_alloccommon.c @@ -15,7 +15,6 @@ #include "vp9/common/vp9_blockd.h" #include "vp9/common/vp9_entropymode.h" #include "vp9/common/vp9_entropymv.h" -#include "vp9/common/vp9_findnearmv.h" #include "vp9/common/vp9_onyxc_int.h" #include "vp9/common/vp9_systemdependent.h" @@ -34,8 +33,15 @@ void vp9_update_mode_info_border(VP9_COMMON *cm, MODE_INFO *mi) { void vp9_free_frame_buffers(VP9_COMMON *cm) { int i; - for (i = 0; i < NUM_YV12_BUFFERS; i++) - vp9_free_frame_buffer(&cm->yv12_fb[i]); + for (i = 0; i < FRAME_BUFFERS; i++) { + vp9_free_frame_buffer(&cm->frame_bufs[i].buf); + + if (cm->frame_bufs[i].ref_count > 0 && + cm->frame_bufs[i].raw_frame_buffer.data != NULL) { + cm->release_fb_cb(cm->cb_priv, &cm->frame_bufs[i].raw_frame_buffer); + cm->frame_bufs[i].ref_count = 0; + } + } vp9_free_frame_buffer(&cm->post_proc_buffer); @@ -75,7 +81,6 @@ static void setup_mi(VP9_COMMON *cm) { cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base)); - vp9_update_mode_info_border(cm, cm->mip); vp9_update_mode_info_border(cm, cm->prev_mip); } @@ -87,7 +92,7 @@ int vp9_resize_frame_buffers(VP9_COMMON *cm, int width, int height) { int mi_size; if (vp9_realloc_frame_buffer(&cm->post_proc_buffer, width, height, ss_x, ss_y, - VP9BORDERINPIXELS) < 0) + VP9_DEC_BORDER_IN_PIXELS, NULL, NULL, NULL) < 0) goto fail; set_mb_mi(cm, aligned_width, aligned_height); @@ -141,26 +146,23 @@ int vp9_alloc_frame_buffers(VP9_COMMON *cm, int width, int height) { vp9_free_frame_buffers(cm); - for (i = 0; i < NUM_YV12_BUFFERS; i++) { - cm->fb_idx_ref_cnt[i] = 0; - if (vp9_alloc_frame_buffer(&cm->yv12_fb[i], width, height, ss_x, ss_y, - VP9BORDERINPIXELS) < 0) + for (i = 0; i < FRAME_BUFFERS; i++) { + cm->frame_bufs[i].ref_count = 0; + if (vp9_alloc_frame_buffer(&cm->frame_bufs[i].buf, width, height, + ss_x, ss_y, VP9_ENC_BORDER_IN_PIXELS) < 0) goto fail; } - cm->new_fb_idx = NUM_YV12_BUFFERS - 1; - cm->fb_idx_ref_cnt[cm->new_fb_idx] = 1; - - for (i = 0; i < ALLOWED_REFS_PER_FRAME; i++) - cm->active_ref_idx[i] = i; + cm->new_fb_idx = FRAME_BUFFERS - 1; + 
cm->frame_bufs[cm->new_fb_idx].ref_count = 1; - for (i = 0; i < NUM_REF_FRAMES; i++) { + for (i = 0; i < REF_FRAMES; i++) { cm->ref_frame_map[i] = i; - cm->fb_idx_ref_cnt[i] = 1; + cm->frame_bufs[i].ref_count = 1; } if (vp9_alloc_frame_buffer(&cm->post_proc_buffer, width, height, ss_x, ss_y, - VP9BORDERINPIXELS) < 0) + VP9_ENC_BORDER_IN_PIXELS) < 0) goto fail; set_mb_mi(cm, aligned_width, aligned_height); @@ -198,22 +200,13 @@ int vp9_alloc_frame_buffers(VP9_COMMON *cm, int width, int height) { return 1; } -void vp9_create_common(VP9_COMMON *cm) { - vp9_machine_specific_config(cm); - - cm->tx_mode = ONLY_4X4; - cm->comp_pred_mode = HYBRID_PREDICTION; -} - void vp9_remove_common(VP9_COMMON *cm) { vp9_free_frame_buffers(cm); + vp9_free_internal_frame_buffers(&cm->int_frame_buffers); } void vp9_initialize_common() { vp9_init_neighbors(); - vp9_coef_tree_initialize(); - vp9_entropy_mode_init(); - vp9_entropy_mv_init(); } void vp9_update_frame_size(VP9_COMMON *cm) { diff --git a/libvpx/vp9/common/vp9_alloccommon.h b/libvpx/vp9/common/vp9_alloccommon.h index cf8dca5..066c778 100644 --- a/libvpx/vp9/common/vp9_alloccommon.h +++ b/libvpx/vp9/common/vp9_alloccommon.h @@ -14,11 +14,14 @@ #include "vp9/common/vp9_onyxc_int.h" +#ifdef __cplusplus +extern "C" { +#endif + void vp9_initialize_common(); void vp9_update_mode_info_border(VP9_COMMON *cm, MODE_INFO *mi); -void vp9_create_common(VP9_COMMON *cm); void vp9_remove_common(VP9_COMMON *cm); int vp9_resize_frame_buffers(VP9_COMMON *cm, int width, int height); @@ -28,4 +31,8 @@ void vp9_free_frame_buffers(VP9_COMMON *cm); void vp9_update_frame_size(VP9_COMMON *cm); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_ALLOCCOMMON_H_ diff --git a/libvpx/vp9/common/vp9_blockd.c b/libvpx/vp9/common/vp9_blockd.c new file mode 100644 index 0000000..e1d1318 --- /dev/null +++ b/libvpx/vp9/common/vp9_blockd.c @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "vp9/common/vp9_blockd.h" + +MB_PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi, + const MODE_INFO *left_mi, int b) { + if (b == 0 || b == 2) { + if (!left_mi || is_inter_block(&left_mi->mbmi)) + return DC_PRED; + + return get_y_mode(left_mi, b + 1); + } else { + assert(b == 1 || b == 3); + return cur_mi->bmi[b - 1].as_mode; + } +} + +MB_PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi, + const MODE_INFO *above_mi, int b) { + if (b == 0 || b == 1) { + if (!above_mi || is_inter_block(&above_mi->mbmi)) + return DC_PRED; + + return get_y_mode(above_mi, b + 2); + } else { + assert(b == 2 || b == 3); + return cur_mi->bmi[b - 2].as_mode; + } +} + +void vp9_foreach_transformed_block_in_plane( + const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, + foreach_transformed_block_visitor visit, void *arg) { + const struct macroblockd_plane *const pd = &xd->plane[plane]; + const MB_MODE_INFO* mbmi = &xd->mi_8x8[0]->mbmi; + // block and transform sizes, in number of 4x4 blocks log 2 ("*_b") + // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 + // transform size varies per plane, look it up in a common way. + const TX_SIZE tx_size = plane ? 
get_uv_tx_size(mbmi) + : mbmi->tx_size; + const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); + const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; + const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; + const int step = 1 << (tx_size << 1); + int i; + + // If mb_to_right_edge is < 0 we are in a situation in which + // the current block size extends into the UMV and we won't + // visit the sub blocks that are wholly within the UMV. + if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) { + int r, c; + + int max_blocks_wide = num_4x4_w; + int max_blocks_high = num_4x4_h; + + // xd->mb_to_right_edge is in units of pixels * 8. This converts + // it to 4x4 block sizes. + if (xd->mb_to_right_edge < 0) + max_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); + + if (xd->mb_to_bottom_edge < 0) + max_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); + + i = 0; + // Unlike the normal case - in here we have to keep track of the + // row and column of the blocks we use so that we know if we are in + // the unrestricted motion border. + for (r = 0; r < num_4x4_h; r += (1 << tx_size)) { + for (c = 0; c < num_4x4_w; c += (1 << tx_size)) { + if (r < max_blocks_high && c < max_blocks_wide) + visit(plane, i, plane_bsize, tx_size, arg); + i += step; + } + } + } else { + for (i = 0; i < num_4x4_w * num_4x4_h; i += step) + visit(plane, i, plane_bsize, tx_size, arg); + } +} + +void vp9_foreach_transformed_block(const MACROBLOCKD* const xd, + BLOCK_SIZE bsize, + foreach_transformed_block_visitor visit, + void *arg) { + int plane; + + for (plane = 0; plane < MAX_MB_PLANE; plane++) + vp9_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg); +} + +void vp9_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob, + int aoff, int loff) { + ENTROPY_CONTEXT *const a = pd->above_context + aoff; + ENTROPY_CONTEXT *const l = pd->left_context + loff; + const int tx_size_in_blocks = 1 << tx_size; + + // above + if (has_eob && xd->mb_to_right_edge < 0) { + int i; + const int blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize] + + (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); + int above_contexts = tx_size_in_blocks; + if (above_contexts + aoff > blocks_wide) + above_contexts = blocks_wide - aoff; + + for (i = 0; i < above_contexts; ++i) + a[i] = has_eob; + for (i = above_contexts; i < tx_size_in_blocks; ++i) + a[i] = 0; + } else { + vpx_memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); + } + + // left + if (has_eob && xd->mb_to_bottom_edge < 0) { + int i; + const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] + + (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); + int left_contexts = tx_size_in_blocks; + if (left_contexts + loff > blocks_high) + left_contexts = blocks_high - loff; + + for (i = 0; i < left_contexts; ++i) + l[i] = has_eob; + for (i = left_contexts; i < tx_size_in_blocks; ++i) + l[i] = 0; + } else { + vpx_memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); + } +} + +void vp9_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) { + int i; + + for (i = 0; i < MAX_MB_PLANE; i++) { + xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y; + xd->plane[i].subsampling_x = i ? ss_x : 0; + xd->plane[i].subsampling_y = i ? 
ss_y : 0; + } +#if CONFIG_ALPHA + // TODO(jkoleszar): Using the Y w/h for now + xd->plane[3].plane_type = PLANE_TYPE_Y; + xd->plane[3].subsampling_x = 0; + xd->plane[3].subsampling_y = 0; +#endif +} diff --git a/libvpx/vp9/common/vp9_blockd.h b/libvpx/vp9/common/vp9_blockd.h index c5da375..84403ae 100644 --- a/libvpx/vp9/common/vp9_blockd.h +++ b/libvpx/vp9/common/vp9_blockd.h @@ -24,10 +24,14 @@ #include "vp9/common/vp9_mv.h" #include "vp9/common/vp9_scale.h" #include "vp9/common/vp9_seg_common.h" -#include "vp9/common/vp9_treecoder.h" -#define BLOCK_SIZE_GROUPS 4 -#define MBSKIP_CONTEXTS 3 +#ifdef __cplusplus +extern "C" { +#endif + +#define BLOCK_SIZE_GROUPS 4 +#define SKIP_CONTEXTS 3 +#define INTER_MODE_CONTEXTS 7 /* Segment Feature Masks */ #define MAX_MV_REF_CANDIDATES 2 @@ -37,8 +41,9 @@ #define REF_CONTEXTS 5 typedef enum { - PLANE_TYPE_Y_WITH_DC, - PLANE_TYPE_UV, + PLANE_TYPE_Y = 0, + PLANE_TYPE_UV = 1, + PLANE_TYPES } PLANE_TYPE; typedef char ENTROPY_CONTEXT; @@ -84,7 +89,6 @@ static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) { #define INTER_OFFSET(mode) ((mode) - NEARESTMV) - /* For keyframes, intra block modes are predicted by the (already decoded) modes for the Y blocks to the left and above us; for interframes, there is a single probability table. */ @@ -114,10 +118,6 @@ static INLINE int mi_width_log2(BLOCK_SIZE sb_type) { return mi_width_log2_lookup[sb_type]; } -static INLINE int mi_height_log2(BLOCK_SIZE sb_type) { - return mi_height_log2_lookup[sb_type]; -} - // This structure now relates to 8x8 block regions. typedef struct { MB_PREDICTION_MODE mode, uv_mode; @@ -125,17 +125,16 @@ typedef struct { TX_SIZE tx_size; int_mv mv[2]; // for each reference frame used int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES]; - int_mv best_mv[2]; uint8_t mode_context[MAX_REF_FRAMES]; - unsigned char skip_coeff; // 0=need to decode coeffs, 1=no coefficients + unsigned char skip; // 0=need to decode coeffs, 1=no coefficients unsigned char segment_id; // Segment id for this block. // Flags used for prediction status of various bit-stream signals unsigned char seg_id_predicted; - INTERPOLATION_TYPE interp_filter; + INTERP_FILTER interp_filter; BLOCK_SIZE sb_type; } MB_MODE_INFO; @@ -145,6 +144,11 @@ typedef struct { b_mode_info bmi[4]; } MODE_INFO; +static INLINE MB_PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) { + return mi->mbmi.sb_type < BLOCK_8X8 ? 
mi->bmi[block].as_mode + : mi->mbmi.mode; +} + static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) { return mbmi->ref_frame[0] > INTRA_FRAME; } @@ -153,6 +157,12 @@ static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) { return mbmi->ref_frame[1] > INTRA_FRAME; } +MB_PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi, + const MODE_INFO *left_mi, int b); + +MB_PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi, + const MODE_INFO *above_mi, int b); + enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 @@ -170,26 +180,30 @@ struct buf_2d { }; struct macroblockd_plane { - int16_t *qcoeff; int16_t *dqcoeff; - uint16_t *eobs; PLANE_TYPE plane_type; int subsampling_x; int subsampling_y; struct buf_2d dst; struct buf_2d pre[2]; - int16_t *dequant; + const int16_t *dequant; ENTROPY_CONTEXT *above_context; ENTROPY_CONTEXT *left_context; }; #define BLOCK_OFFSET(x, i) ((x) + (i) * 16) +typedef struct RefBuffer { + // TODO(dkovalev): idx is not really required and should be removed, now it + // is used in vp9_onyxd_if.c + int idx; + YV12_BUFFER_CONFIG *buf; + struct scale_factors sf; +} RefBuffer; + typedef struct macroblockd { struct macroblockd_plane plane[MAX_MB_PLANE]; - struct scale_factors scale_factor[2]; - MODE_INFO *last_mi; int mode_info_stride; @@ -207,11 +221,20 @@ typedef struct macroblockd { int mb_to_top_edge; int mb_to_bottom_edge; + /* pointers to reference frames */ + RefBuffer *block_refs[2]; + + /* pointer to current frame */ + const YV12_BUFFER_CONFIG *cur_buf; + + /* mc buffer */ + DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]); + int lossless; /* Inverse transform function pointers. */ void (*itxm_add)(const int16_t *input, uint8_t *dest, int stride, int eob); - struct subpix_fn_table subpix; + const InterpKernel *interp_kernel; int corrupted; @@ -225,182 +248,74 @@ typedef struct macroblockd { -static BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, PARTITION_TYPE partition) { +static INLINE BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, + PARTITION_TYPE partition) { const BLOCK_SIZE subsize = subsize_lookup[partition][bsize]; assert(subsize < BLOCK_SIZES); return subsize; } -extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT]; +extern const TX_TYPE mode2txfm_map[INTRA_MODES]; + +static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type, + const MACROBLOCKD *xd) { + const MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; + + if (plane_type != PLANE_TYPE_Y || is_inter_block(mbmi)) + return DCT_DCT; + return mode2txfm_map[mbmi->mode]; +} static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type, const MACROBLOCKD *xd, int ib) { const MODE_INFO *const mi = xd->mi_8x8[0]; - const MB_MODE_INFO *const mbmi = &mi->mbmi; - if (plane_type != PLANE_TYPE_Y_WITH_DC || - xd->lossless || - is_inter_block(mbmi)) + if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(&mi->mbmi)) return DCT_DCT; - return mode2txfm_map[mbmi->sb_type < BLOCK_8X8 ? - mi->bmi[ib].as_mode : mbmi->mode]; -} - -static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type, - const MACROBLOCKD *xd) { - return plane_type == PLANE_TYPE_Y_WITH_DC ? - mode2txfm_map[xd->mi_8x8[0]->mbmi.mode] : DCT_DCT; + return mode2txfm_map[get_y_mode(mi, ib)]; } -static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type, - const MACROBLOCKD *xd) { - return plane_type == PLANE_TYPE_Y_WITH_DC ? 
- mode2txfm_map[xd->mi_8x8[0]->mbmi.mode] : DCT_DCT; -} - -static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) { - int i; +void vp9_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y); - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y_WITH_DC; - xd->plane[i].subsampling_x = i ? ss_x : 0; - xd->plane[i].subsampling_y = i ? ss_y : 0; +static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize) { + if (bsize < BLOCK_8X8) { + return TX_4X4; + } else { + // TODO(dkovalev): Assuming YUV420 (ss_x == 1, ss_y == 1) + const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][1][1]; + return MIN(y_tx_size, max_txsize_lookup[plane_bsize]); } -#if CONFIG_ALPHA - // TODO(jkoleszar): Using the Y w/h for now - xd->plane[3].subsampling_x = 0; - xd->plane[3].subsampling_y = 0; -#endif } - static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) { - return MIN(mbmi->tx_size, max_uv_txsize_lookup[mbmi->sb_type]); + return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type); } -static BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize, - const struct macroblockd_plane *pd) { +static INLINE BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize, + const struct macroblockd_plane *pd) { BLOCK_SIZE bs = ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y]; assert(bs < BLOCK_SIZES); return bs; } -static INLINE int plane_block_width(BLOCK_SIZE bsize, - const struct macroblockd_plane* plane) { - return 4 << (b_width_log2(bsize) - plane->subsampling_x); -} - -static INLINE int plane_block_height(BLOCK_SIZE bsize, - const struct macroblockd_plane* plane) { - return 4 << (b_height_log2(bsize) - plane->subsampling_y); -} - typedef void (*foreach_transformed_block_visitor)(int plane, int block, BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg); -static INLINE void foreach_transformed_block_in_plane( +void vp9_foreach_transformed_block_in_plane( const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, - foreach_transformed_block_visitor visit, void *arg) { - const struct macroblockd_plane *const pd = &xd->plane[plane]; - const MB_MODE_INFO* mbmi = &xd->mi_8x8[0]->mbmi; - // block and transform sizes, in number of 4x4 blocks log 2 ("*_b") - // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 - // transform size varies per plane, look it up in a common way. - const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) - : mbmi->tx_size; - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); - const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; - const int step = 1 << (tx_size << 1); - int i; - - // If mb_to_right_edge is < 0 we are in a situation in which - // the current block size extends into the UMV and we won't - // visit the sub blocks that are wholly within the UMV. - if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) { - int r, c; - - int max_blocks_wide = num_4x4_w; - int max_blocks_high = num_4x4_h; - - // xd->mb_to_right_edge is in units of pixels * 8. This converts - // it to 4x4 block sizes. - if (xd->mb_to_right_edge < 0) - max_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - - if (xd->mb_to_bottom_edge < 0) - max_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); - - i = 0; - // Unlike the normal case - in here we have to keep track of the - // row and column of the blocks we use so that we know if we are in - // the unrestricted motion border. 
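// A quick check of the units conversion above (a sketch, not part of the
// library source): mb_to_right_edge is kept in pixels * 8, so a right
// shift by 3 undoes the scaling and a further shift by 2 converts pixels
// to 4x4-block columns, hence the >> (5 + subsampling_x). For a luma
// block running 16 pixels past the right edge, mb_to_right_edge is
// -16 * 8 == -128, and -128 >> 5 == -4: four fewer 4x4 columns are
// visited.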
- for (r = 0; r < num_4x4_h; r += (1 << tx_size)) { - for (c = 0; c < num_4x4_w; c += (1 << tx_size)) { - if (r < max_blocks_high && c < max_blocks_wide) - visit(plane, i, plane_bsize, tx_size, arg); - i += step; - } - } - } else { - for (i = 0; i < num_4x4_w * num_4x4_h; i += step) - visit(plane, i, plane_bsize, tx_size, arg); - } -} - -static INLINE void foreach_transformed_block( - const MACROBLOCKD* const xd, BLOCK_SIZE bsize, - foreach_transformed_block_visitor visit, void *arg) { - int plane; + foreach_transformed_block_visitor visit, void *arg); - for (plane = 0; plane < MAX_MB_PLANE; plane++) - foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg); -} -static INLINE void foreach_transformed_block_uv( +void vp9_foreach_transformed_block( const MACROBLOCKD* const xd, BLOCK_SIZE bsize, - foreach_transformed_block_visitor visit, void *arg) { - int plane; - - for (plane = 1; plane < MAX_MB_PLANE; plane++) - foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg); -} - -static int raster_block_offset(BLOCK_SIZE plane_bsize, - int raster_block, int stride) { - const int bw = b_width_log2(plane_bsize); - const int y = 4 * (raster_block >> bw); - const int x = 4 * (raster_block & ((1 << bw) - 1)); - return y * stride + x; -} -static int16_t* raster_block_offset_int16(BLOCK_SIZE plane_bsize, - int raster_block, int16_t *base) { - const int stride = 4 << b_width_log2(plane_bsize); - return base + raster_block_offset(plane_bsize, raster_block, stride); -} -static uint8_t* raster_block_offset_uint8(BLOCK_SIZE plane_bsize, - int raster_block, uint8_t *base, - int stride) { - return base + raster_block_offset(plane_bsize, raster_block, stride); -} - -static int txfrm_block_to_raster_block(BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, int block) { - const int bwl = b_width_log2(plane_bsize); - const int tx_cols_log2 = bwl - tx_size; - const int tx_cols = 1 << tx_cols_log2; - const int raster_mb = block >> (tx_size << 1); - const int x = (raster_mb & (tx_cols - 1)) << tx_size; - const int y = (raster_mb >> tx_cols_log2) << tx_size; - return x + (y << bwl); -} + foreach_transformed_block_visitor visit, void *arg); -static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, int block, - int *x, int *y) { +static INLINE void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize, + TX_SIZE tx_size, int block, + int *x, int *y) { const int bwl = b_width_log2(plane_bsize); const int tx_cols_log2 = bwl - tx_size; const int tx_cols = 1 << tx_cols_log2; @@ -409,93 +324,12 @@ static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize, *y = (raster_mb >> tx_cols_log2) << tx_size; } -static void extend_for_intra(MACROBLOCKD *xd, BLOCK_SIZE plane_bsize, - int plane, int block, TX_SIZE tx_size) { - struct macroblockd_plane *const pd = &xd->plane[plane]; - uint8_t *const buf = pd->dst.buf; - const int stride = pd->dst.stride; - - int x, y; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); - x = x * 4 - 1; - y = y * 4 - 1; - // Copy a pixel into the umv if we are in a situation where the block size - // extends into the UMV. - // TODO(JBB): Should be able to do the full extend in place so we don't have - // to do this multiple times. 
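The index mapping in txfrm_block_to_raster_xy above can be checked on one concrete case; the helper below is a hypothetical mirror of its shift arithmetic, not library code:

#include <assert.h>

// Mirror of the shifts in txfrm_block_to_raster_xy: map a transform-block
// scan index to its (x, y) position within the plane block, in 4x4 units.
static void raster_xy(int bwl, int tx_size, int block, int *x, int *y) {
  const int tx_cols_log2 = bwl - tx_size;
  const int tx_cols = 1 << tx_cols_log2;
  const int raster_mb = block >> (tx_size << 1);
  *x = (raster_mb & (tx_cols - 1)) << tx_size;
  *y = (raster_mb >> tx_cols_log2) << tx_size;
}

int main(void) {
  int x, y;
  // A 16x16 plane block (bwl == 2, i.e. four 4x4 columns) tiled with 8x8
  // transforms (tx_size == 1) steps the scan index by 1 << (tx_size << 1)
  // == 4; index 12 is the bottom-right transform, at 4x4 offset (2, 2).
  raster_xy(2, 1, 12, &x, &y);
  assert(x == 2 && y == 2);
  return 0;
}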
- if (xd->mb_to_right_edge < 0) { - const int bw = 4 << b_width_log2(plane_bsize); - const int umv_border_start = bw + (xd->mb_to_right_edge >> - (3 + pd->subsampling_x)); - - if (x + bw > umv_border_start) - vpx_memset(&buf[y * stride + umv_border_start], - buf[y * stride + umv_border_start - 1], bw); - } - - if (xd->mb_to_bottom_edge < 0) { - if (xd->left_available || x >= 0) { - const int bh = 4 << b_height_log2(plane_bsize); - const int umv_border_start = - bh + (xd->mb_to_bottom_edge >> (3 + pd->subsampling_y)); - - if (y + bh > umv_border_start) { - const uint8_t c = buf[(umv_border_start - 1) * stride + x]; - uint8_t *d = &buf[umv_border_start * stride + x]; - int i; - for (i = 0; i < bh; ++i, d += stride) - *d = c; - } - } - } -} +void vp9_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob, + int aoff, int loff); -static void set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size, - int has_eob, int aoff, int loff) { - ENTROPY_CONTEXT *const a = pd->above_context + aoff; - ENTROPY_CONTEXT *const l = pd->left_context + loff; - const int tx_size_in_blocks = 1 << tx_size; - - // above - if (has_eob && xd->mb_to_right_edge < 0) { - int i; - const int blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize] + - (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - int above_contexts = tx_size_in_blocks; - if (above_contexts + aoff > blocks_wide) - above_contexts = blocks_wide - aoff; - - for (i = 0; i < above_contexts; ++i) - a[i] = has_eob; - for (i = above_contexts; i < tx_size_in_blocks; ++i) - a[i] = 0; - } else { - vpx_memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); - } - - // left - if (has_eob && xd->mb_to_bottom_edge < 0) { - int i; - const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] + - (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); - int left_contexts = tx_size_in_blocks; - if (left_contexts + loff > blocks_high) - left_contexts = blocks_high - loff; - - for (i = 0; i < left_contexts; ++i) - l[i] = has_eob; - for (i = left_contexts; i < tx_size_in_blocks; ++i) - l[i] = 0; - } else { - vpx_memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); - } -} - -static int get_tx_eob(const struct segmentation *seg, int segment_id, - TX_SIZE tx_size) { - const int eob_max = 16 << (tx_size << 1); - return vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max; -} +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_COMMON_VP9_BLOCKD_H_ diff --git a/libvpx/vp9/common/vp9_common.h b/libvpx/vp9/common/vp9_common.h index 36d1cdf..2dccb70 100644 --- a/libvpx/vp9/common/vp9_common.h +++ b/libvpx/vp9/common/vp9_common.h @@ -18,6 +18,11 @@ #include "./vpx_config.h" #include "vpx_mem/vpx_mem.h" #include "vpx/vpx_integer.h" +#include "vp9/common/vp9_systemdependent.h" + +#ifdef __cplusplus +extern "C" { +#endif #define MIN(x, y) (((x) < (y)) ? (x) : (y)) #define MAX(x, y) (((x) > (y)) ? (x) : (y)) @@ -55,16 +60,8 @@ static INLINE double fclamp(double value, double low, double high) { return value < low ? low : (value > high ? high : value); } -static int get_unsigned_bits(unsigned int num_values) { - int cat = 0; - if (num_values <= 1) - return 0; - num_values--; - while (num_values > 0) { - cat++; - num_values >>= 1; - } - return cat; +static INLINE int get_unsigned_bits(unsigned int num_values) { + return num_values > 0 ? 
get_msb(num_values) + 1 : 0; } #if CONFIG_DEBUG @@ -91,4 +88,8 @@ static int get_unsigned_bits(unsigned int num_values) { #define VP9_FRAME_MARKER 0x2 +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_COMMON_H_ diff --git a/libvpx/vp9/common/vp9_common_data.c b/libvpx/vp9/common/vp9_common_data.c index f858900..a927823 100644 --- a/libvpx/vp9/common/vp9_common_data.c +++ b/libvpx/vp9/common/vp9_common_data.c @@ -26,8 +26,6 @@ const int mi_width_log2_lookup[BLOCK_SIZES] = {0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3}; const int num_8x8_blocks_wide_lookup[BLOCK_SIZES] = {1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8}; -const int mi_height_log2_lookup[BLOCK_SIZES] = - {0, 0, 0, 0, 1, 0, 1, 2, 1, 2, 3, 2, 3}; const int num_8x8_blocks_high_lookup[BLOCK_SIZES] = {1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8}; @@ -108,12 +106,6 @@ const TX_SIZE max_txsize_lookup[BLOCK_SIZES] = { TX_16X16, TX_16X16, TX_16X16, TX_32X32, TX_32X32, TX_32X32, TX_32X32 }; -const TX_SIZE max_uv_txsize_lookup[BLOCK_SIZES] = { - TX_4X4, TX_4X4, TX_4X4, - TX_4X4, TX_4X4, TX_4X4, - TX_8X8, TX_8X8, TX_8X8, - TX_16X16, TX_16X16, TX_16X16, TX_32X32 -}; const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES] = { TX_4X4, // ONLY_4X4 @@ -123,8 +115,6 @@ const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES] = { TX_32X32, // TX_MODE_SELECT }; - - const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2] = { // ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1 // ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1 @@ -143,4 +133,24 @@ const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2] = { {{BLOCK_64X64, BLOCK_64X32}, {BLOCK_32X64, BLOCK_32X32}}, }; - +// Generates 4 bit field in which each bit set to 1 represents +// a blocksize partition 1111 means we split 64x64, 32x32, 16x16 +// and 8x8. 1000 means we just split the 64x64 to 32x32 +const struct { + PARTITION_CONTEXT above; + PARTITION_CONTEXT left; +} partition_context_lookup[BLOCK_SIZES]= { + {15, 15}, // 4X4 - {0b1111, 0b1111} + {15, 14}, // 4X8 - {0b1111, 0b1110} + {14, 15}, // 8X4 - {0b1110, 0b1111} + {14, 14}, // 8X8 - {0b1110, 0b1110} + {14, 12}, // 8X16 - {0b1110, 0b1100} + {12, 14}, // 16X8 - {0b1100, 0b1110} + {12, 12}, // 16X16 - {0b1100, 0b1100} + {12, 8 }, // 16X32 - {0b1100, 0b1000} + {8, 12}, // 32X16 - {0b1000, 0b1100} + {8, 8 }, // 32X32 - {0b1000, 0b1000} + {8, 0 }, // 32X64 - {0b1000, 0b0000} + {0, 8 }, // 64X32 - {0b0000, 0b1000} + {0, 0 }, // 64X64 - {0b0000, 0b0000} +}; diff --git a/libvpx/vp9/common/vp9_common_data.h b/libvpx/vp9/common/vp9_common_data.h index c1f6405..f419627 100644 --- a/libvpx/vp9/common/vp9_common_data.h +++ b/libvpx/vp9/common/vp9_common_data.h @@ -13,10 +13,13 @@ #include "vp9/common/vp9_enums.h" +#ifdef __cplusplus +extern "C" { +#endif + extern const int b_width_log2_lookup[BLOCK_SIZES]; extern const int b_height_log2_lookup[BLOCK_SIZES]; extern const int mi_width_log2_lookup[BLOCK_SIZES]; -extern const int mi_height_log2_lookup[BLOCK_SIZES]; extern const int num_8x8_blocks_wide_lookup[BLOCK_SIZES]; extern const int num_8x8_blocks_high_lookup[BLOCK_SIZES]; extern const int num_4x4_blocks_high_lookup[BLOCK_SIZES]; @@ -26,8 +29,11 @@ extern const int num_pels_log2_lookup[BLOCK_SIZES]; extern const PARTITION_TYPE partition_lookup[][BLOCK_SIZES]; extern const BLOCK_SIZE subsize_lookup[PARTITION_TYPES][BLOCK_SIZES]; extern const TX_SIZE max_txsize_lookup[BLOCK_SIZES]; -extern const TX_SIZE max_uv_txsize_lookup[BLOCK_SIZES]; extern const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES]; extern const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2]; -#endif // 
VP9_COMMON_VP9_COMMON_DATA_H +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_COMMON_VP9_COMMON_DATA_H_ diff --git a/libvpx/vp9/common/vp9_convolve.c b/libvpx/vp9/common/vp9_convolve.c index a2d864c..d30e0b4 100644 --- a/libvpx/vp9/common/vp9_convolve.c +++ b/libvpx/vp9/common/vp9_convolve.c @@ -18,40 +18,21 @@ #include "vpx/vpx_integer.h" #include "vpx_ports/mem.h" -static void convolve_horiz_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x0, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h, int taps) { - int x, y, k; - - /* NOTE: This assumes that the filter table is 256-byte aligned. */ - /* TODO(agrange) Modify to make independent of table alignment. */ - const int16_t *const filter_x_base = - (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff); - - /* Adjust base pointer address for this source line */ - src -= taps / 2 - 1; - +static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const InterpKernel *x_filters, + int x0_q4, int x_step_q4, int w, int h) { + int x, y; + src -= SUBPEL_TAPS / 2 - 1; for (y = 0; y < h; ++y) { - /* Initial phase offset */ - int x_q4 = (int)(filter_x0 - filter_x_base) / taps; - + int x_q4 = x0_q4; for (x = 0; x < w; ++x) { - /* Per-pixel src offset */ - const int src_x = x_q4 >> SUBPEL_BITS; - int sum = 0; - - /* Pointer to filter to use */ - const int16_t *const filter_x = filter_x_base + - (x_q4 & SUBPEL_MASK) * taps; - - for (k = 0; k < taps; ++k) - sum += src[src_x + k] * filter_x[k]; - + const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; + const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK]; + int k, sum = 0; + for (k = 0; k < SUBPEL_TAPS; ++k) + sum += src_x[k] * x_filter[k]; dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); - - /* Move to the next source pixel */ x_q4 += x_step_q4; } src += src_stride; @@ -59,41 +40,22 @@ static void convolve_horiz_c(const uint8_t *src, ptrdiff_t src_stride, } } -static void convolve_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x0, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h, int taps) { - int x, y, k; - - /* NOTE: This assumes that the filter table is 256-byte aligned. */ - /* TODO(agrange) Modify to make independent of table alignment. 
*/ - const int16_t *const filter_x_base = - (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff); - - /* Adjust base pointer address for this source line */ - src -= taps / 2 - 1; - +static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const InterpKernel *x_filters, + int x0_q4, int x_step_q4, int w, int h) { + int x, y; + src -= SUBPEL_TAPS / 2 - 1; for (y = 0; y < h; ++y) { - /* Initial phase offset */ - int x_q4 = (int)(filter_x0 - filter_x_base) / taps; - + int x_q4 = x0_q4; for (x = 0; x < w; ++x) { - /* Per-pixel src offset */ - const int src_x = x_q4 >> SUBPEL_BITS; - int sum = 0; - - /* Pointer to filter to use */ - const int16_t *const filter_x = filter_x_base + - (x_q4 & SUBPEL_MASK) * taps; - - for (k = 0; k < taps; ++k) - sum += src[src_x + k] * filter_x[k]; - + const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; + const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK]; + int k, sum = 0; + for (k = 0; k < SUBPEL_TAPS; ++k) + sum += src_x[k] * x_filter[k]; dst[x] = ROUND_POWER_OF_TWO(dst[x] + - clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1); - - /* Move to the next source pixel */ + clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1); x_q4 += x_step_q4; } src += src_stride; @@ -101,41 +63,22 @@ static void convolve_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, } } -static void convolve_vert_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y0, int y_step_q4, - int w, int h, int taps) { - int x, y, k; - - /* NOTE: This assumes that the filter table is 256-byte aligned. */ - /* TODO(agrange) Modify to make independent of table alignment. */ - const int16_t *const filter_y_base = - (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff); - - /* Adjust base pointer address for this source column */ - src -= src_stride * (taps / 2 - 1); +static void convolve_vert(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const InterpKernel *y_filters, + int y0_q4, int y_step_q4, int w, int h) { + int x, y; + src -= src_stride * (SUBPEL_TAPS / 2 - 1); for (x = 0; x < w; ++x) { - /* Initial phase offset */ - int y_q4 = (int)(filter_y0 - filter_y_base) / taps; - + int y_q4 = y0_q4; for (y = 0; y < h; ++y) { - /* Per-pixel src offset */ - const int src_y = y_q4 >> SUBPEL_BITS; - int sum = 0; - - /* Pointer to filter to use */ - const int16_t *const filter_y = filter_y_base + - (y_q4 & SUBPEL_MASK) * taps; - - for (k = 0; k < taps; ++k) - sum += src[(src_y + k) * src_stride] * filter_y[k]; - - dst[y * dst_stride] = - clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); - - /* Move to the next source pixel */ + const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; + const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK]; + int k, sum = 0; + for (k = 0; k < SUBPEL_TAPS; ++k) + sum += src_y[k * src_stride] * y_filter[k]; + dst[y * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); y_q4 += y_step_q4; } ++src; @@ -143,41 +86,23 @@ static void convolve_vert_c(const uint8_t *src, ptrdiff_t src_stride, } } -static void convolve_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y0, int y_step_q4, - int w, int h, int taps) { - int x, y, k; - - /* NOTE: This assumes that the filter table is 256-byte aligned. 
*/ - /* TODO(agrange) Modify to make independent of table alignment. */ - const int16_t *const filter_y_base = - (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff); - - /* Adjust base pointer address for this source column */ - src -= src_stride * (taps / 2 - 1); +static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const InterpKernel *y_filters, + int y0_q4, int y_step_q4, int w, int h) { + int x, y; + src -= src_stride * (SUBPEL_TAPS / 2 - 1); for (x = 0; x < w; ++x) { - /* Initial phase offset */ - int y_q4 = (int)(filter_y0 - filter_y_base) / taps; - + int y_q4 = y0_q4; for (y = 0; y < h; ++y) { - /* Per-pixel src offset */ - const int src_y = y_q4 >> SUBPEL_BITS; - int sum = 0; - - /* Pointer to filter to use */ - const int16_t *const filter_y = filter_y_base + - (y_q4 & SUBPEL_MASK) * taps; - - for (k = 0; k < taps; ++k) - sum += src[(src_y + k) * src_stride] * filter_y[k]; - + const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; + const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK]; + int k, sum = 0; + for (k = 0; k < SUBPEL_TAPS; ++k) + sum += src_y[k * src_stride] * y_filter[k]; dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] + - clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1); - - /* Move to the next source pixel */ + clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1); y_q4 += y_step_q4; } ++src; @@ -185,33 +110,42 @@ static void convolve_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, } } -static void convolve_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h, int taps) { - /* Fixed size intermediate buffer places limits on parameters. - * Maximum intermediate_height is 324, for y_step_q4 == 80, - * h == 64, taps == 8. - * y_step_q4 of 80 allows for 1/10 scale for 5 layer svc - */ +static void convolve(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const InterpKernel *const x_filters, + int x0_q4, int x_step_q4, + const InterpKernel *const y_filters, + int y0_q4, int y_step_q4, + int w, int h) { + // Fixed size intermediate buffer places limits on parameters. + // Maximum intermediate_height is 324, for y_step_q4 == 80, + // h == 64, taps == 8. + // y_step_q4 of 80 allows for 1/10 scale for 5 layer svc uint8_t temp[64 * 324]; - int intermediate_height = (((h - 1) * y_step_q4 + 15) >> 4) + taps; + int intermediate_height = (((h - 1) * y_step_q4 + 15) >> 4) + SUBPEL_TAPS; assert(w <= 64); assert(h <= 64); - assert(taps <= 8); assert(y_step_q4 <= 80); assert(x_step_q4 <= 80); if (intermediate_height < h) intermediate_height = h; - convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride, temp, 64, - filter_x, x_step_q4, filter_y, y_step_q4, w, - intermediate_height, taps); - convolve_vert_c(temp + 64 * (taps / 2 - 1), 64, dst, dst_stride, filter_x, - x_step_q4, filter_y, y_step_q4, w, h, taps); + convolve_horiz(src - src_stride * (SUBPEL_TAPS / 2 - 1), src_stride, temp, 64, + x_filters, x0_q4, x_step_q4, w, intermediate_height); + convolve_vert(temp + 64 * (SUBPEL_TAPS / 2 - 1), 64, dst, dst_stride, + y_filters, y0_q4, y_step_q4, w, h); +} + +static const InterpKernel *get_filter_base(const int16_t *filter) { + // NOTE: This assumes that the filter table is 256-byte aligned. + // TODO(agrange) Modify to make independent of table alignment. 
+ return (const InterpKernel *)(((intptr_t)filter) & ~((intptr_t)0xFF)); +} + +static int get_filter_offset(const int16_t *f, const InterpKernel *base) { + return (int)((const InterpKernel *)(intptr_t)f - base); } void vp9_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, @@ -219,8 +153,11 @@ void vp9_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - convolve_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8); + const InterpKernel *const filters_x = get_filter_base(filter_x); + const int x0_q4 = get_filter_offset(filter_x, filters_x); + + convolve_horiz(src, src_stride, dst, dst_stride, filters_x, + x0_q4, x_step_q4, w, h); } void vp9_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, @@ -228,8 +165,11 @@ void vp9_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - convolve_avg_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8); + const InterpKernel *const filters_x = get_filter_base(filter_x); + const int x0_q4 = get_filter_offset(filter_x, filters_x); + + convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, + x0_q4, x_step_q4, w, h); } void vp9_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, @@ -237,8 +177,10 @@ void vp9_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - convolve_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8); + const InterpKernel *const filters_y = get_filter_base(filter_y); + const int y0_q4 = get_filter_offset(filter_y, filters_y); + convolve_vert(src, src_stride, dst, dst_stride, filters_y, + y0_q4, y_step_q4, w, h); } void vp9_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, @@ -246,8 +188,10 @@ void vp9_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - convolve_avg_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8); + const InterpKernel *const filters_y = get_filter_base(filter_y); + const int y0_q4 = get_filter_offset(filter_y, filters_y); + convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y, + y0_q4, y_step_q4, w, h); } void vp9_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, @@ -255,8 +199,15 @@ void vp9_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - convolve_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8); + const InterpKernel *const filters_x = get_filter_base(filter_x); + const int x0_q4 = get_filter_offset(filter_x, filters_x); + + const InterpKernel *const filters_y = get_filter_base(filter_y); + const int y0_q4 = get_filter_offset(filter_y, filters_y); + + convolve(src, src_stride, dst, dst_stride, + filters_x, x0_q4, x_step_q4, + filters_y, y0_q4, y_step_q4, w, h); } void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, @@ -269,9 +220,9 @@ void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, assert(w <= 64); assert(h <= 64); - vp9_convolve8(src, src_stride, temp, 64, - filter_x, x_step_q4, filter_y, y_step_q4, 
w, h); - vp9_convolve_avg(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h); + vp9_convolve8_c(src, src_stride, temp, 64, + filter_x, x_step_q4, filter_y, y_step_q4, w, h); + vp9_convolve_avg_c(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h); } void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, diff --git a/libvpx/vp9/common/vp9_convolve.h b/libvpx/vp9/common/vp9_convolve.h index 29d4990..6bf71fc 100644 --- a/libvpx/vp9/common/vp9_convolve.h +++ b/libvpx/vp9/common/vp9_convolve.h @@ -13,10 +13,18 @@ #include "./vpx_config.h" #include "vpx/vpx_integer.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_CONVOLVE_H_ diff --git a/libvpx/vp9/common/vp9_debugmodes.c b/libvpx/vp9/common/vp9_debugmodes.c index 355ac1a..24c785f 100644 --- a/libvpx/vp9/common/vp9_debugmodes.c +++ b/libvpx/vp9/common/vp9_debugmodes.c @@ -58,7 +58,7 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, char *file) { print_mi_data(cm, mvs, "Partitions:", offsetof(MB_MODE_INFO, sb_type)); print_mi_data(cm, mvs, "Modes:", offsetof(MB_MODE_INFO, mode)); - print_mi_data(cm, mvs, "Skips:", offsetof(MB_MODE_INFO, skip_coeff)); + print_mi_data(cm, mvs, "Skips:", offsetof(MB_MODE_INFO, skip)); print_mi_data(cm, mvs, "Ref frame:", offsetof(MB_MODE_INFO, ref_frame[0])); print_mi_data(cm, mvs, "Transform:", offsetof(MB_MODE_INFO, tx_size)); print_mi_data(cm, mvs, "UV Modes:", offsetof(MB_MODE_INFO, uv_mode)); diff --git a/libvpx/vp9/common/vp9_default_coef_probs.h b/libvpx/vp9/common/vp9_default_coef_probs.h deleted file mode 100644 index 3b512be..0000000 --- a/libvpx/vp9/common/vp9_default_coef_probs.h +++ /dev/null @@ -1,699 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
-*/ -#ifndef VP9_COMMON_DEFAULT_COEF_PROBS_H_ -#define VP9_COMMON_DEFAULT_COEF_PROBS_H_ - -/*Generated file, included by vp9_entropy.c*/ -static const vp9_coeff_probs_model default_coef_probs_4x4[BLOCK_TYPES] = { - { /* block Type 0 */ - { /* Intra */ - { /* Coeff Band 0 */ - { 195, 29, 183 }, - { 84, 49, 136 }, - { 8, 42, 71 } - }, { /* Coeff Band 1 */ - { 31, 107, 169 }, - { 35, 99, 159 }, - { 17, 82, 140 }, - { 8, 66, 114 }, - { 2, 44, 76 }, - { 1, 19, 32 } - }, { /* Coeff Band 2 */ - { 40, 132, 201 }, - { 29, 114, 187 }, - { 13, 91, 157 }, - { 7, 75, 127 }, - { 3, 58, 95 }, - { 1, 28, 47 } - }, { /* Coeff Band 3 */ - { 69, 142, 221 }, - { 42, 122, 201 }, - { 15, 91, 159 }, - { 6, 67, 121 }, - { 1, 42, 77 }, - { 1, 17, 31 } - }, { /* Coeff Band 4 */ - { 102, 148, 228 }, - { 67, 117, 204 }, - { 17, 82, 154 }, - { 6, 59, 114 }, - { 2, 39, 75 }, - { 1, 15, 29 } - }, { /* Coeff Band 5 */ - { 156, 57, 233 }, - { 119, 57, 212 }, - { 58, 48, 163 }, - { 29, 40, 124 }, - { 12, 30, 81 }, - { 3, 12, 31 } - } - }, { /* Inter */ - { /* Coeff Band 0 */ - { 191, 107, 226 }, - { 124, 117, 204 }, - { 25, 99, 155 } - }, { /* Coeff Band 1 */ - { 29, 148, 210 }, - { 37, 126, 194 }, - { 8, 93, 157 }, - { 2, 68, 118 }, - { 1, 39, 69 }, - { 1, 17, 33 } - }, { /* Coeff Band 2 */ - { 41, 151, 213 }, - { 27, 123, 193 }, - { 3, 82, 144 }, - { 1, 58, 105 }, - { 1, 32, 60 }, - { 1, 13, 26 } - }, { /* Coeff Band 3 */ - { 59, 159, 220 }, - { 23, 126, 198 }, - { 4, 88, 151 }, - { 1, 66, 114 }, - { 1, 38, 71 }, - { 1, 18, 34 } - }, { /* Coeff Band 4 */ - { 114, 136, 232 }, - { 51, 114, 207 }, - { 11, 83, 155 }, - { 3, 56, 105 }, - { 1, 33, 65 }, - { 1, 17, 34 } - }, { /* Coeff Band 5 */ - { 149, 65, 234 }, - { 121, 57, 215 }, - { 61, 49, 166 }, - { 28, 36, 114 }, - { 12, 25, 76 }, - { 3, 16, 42 } - } - } - }, { /* block Type 1 */ - { /* Intra */ - { /* Coeff Band 0 */ - { 214, 49, 220 }, - { 132, 63, 188 }, - { 42, 65, 137 } - }, { /* Coeff Band 1 */ - { 85, 137, 221 }, - { 104, 131, 216 }, - { 49, 111, 192 }, - { 21, 87, 155 }, - { 2, 49, 87 }, - { 1, 16, 28 } - }, { /* Coeff Band 2 */ - { 89, 163, 230 }, - { 90, 137, 220 }, - { 29, 100, 183 }, - { 10, 70, 135 }, - { 2, 42, 81 }, - { 1, 17, 33 } - }, { /* Coeff Band 3 */ - { 108, 167, 237 }, - { 55, 133, 222 }, - { 15, 97, 179 }, - { 4, 72, 135 }, - { 1, 45, 85 }, - { 1, 19, 38 } - }, { /* Coeff Band 4 */ - { 124, 146, 240 }, - { 66, 124, 224 }, - { 17, 88, 175 }, - { 4, 58, 122 }, - { 1, 36, 75 }, - { 1, 18, 37 } - }, { /* Coeff Band 5 */ - { 141, 79, 241 }, - { 126, 70, 227 }, - { 66, 58, 182 }, - { 30, 44, 136 }, - { 12, 34, 96 }, - { 2, 20, 47 } - } - }, { /* Inter */ - { /* Coeff Band 0 */ - { 229, 99, 249 }, - { 143, 111, 235 }, - { 46, 109, 192 } - }, { /* Coeff Band 1 */ - { 82, 158, 236 }, - { 94, 146, 224 }, - { 25, 117, 191 }, - { 9, 87, 149 }, - { 3, 56, 99 }, - { 1, 33, 57 } - }, { /* Coeff Band 2 */ - { 83, 167, 237 }, - { 68, 145, 222 }, - { 10, 103, 177 }, - { 2, 72, 131 }, - { 1, 41, 79 }, - { 1, 20, 39 } - }, { /* Coeff Band 3 */ - { 99, 167, 239 }, - { 47, 141, 224 }, - { 10, 104, 178 }, - { 2, 73, 133 }, - { 1, 44, 85 }, - { 1, 22, 47 } - }, { /* Coeff Band 4 */ - { 127, 145, 243 }, - { 71, 129, 228 }, - { 17, 93, 177 }, - { 3, 61, 124 }, - { 1, 41, 84 }, - { 1, 21, 52 } - }, { /* Coeff Band 5 */ - { 157, 78, 244 }, - { 140, 72, 231 }, - { 69, 58, 184 }, - { 31, 44, 137 }, - { 14, 38, 105 }, - { 8, 23, 61 } - } - } - } -}; -static const vp9_coeff_probs_model default_coef_probs_8x8[BLOCK_TYPES] = { - { /* block Type 0 */ - { /* Intra */ - { /* Coeff 
Band 0 */ - { 125, 34, 187 }, - { 52, 41, 133 }, - { 6, 31, 56 } - }, { /* Coeff Band 1 */ - { 37, 109, 153 }, - { 51, 102, 147 }, - { 23, 87, 128 }, - { 8, 67, 101 }, - { 1, 41, 63 }, - { 1, 19, 29 } - }, { /* Coeff Band 2 */ - { 31, 154, 185 }, - { 17, 127, 175 }, - { 6, 96, 145 }, - { 2, 73, 114 }, - { 1, 51, 82 }, - { 1, 28, 45 } - }, { /* Coeff Band 3 */ - { 23, 163, 200 }, - { 10, 131, 185 }, - { 2, 93, 148 }, - { 1, 67, 111 }, - { 1, 41, 69 }, - { 1, 14, 24 } - }, { /* Coeff Band 4 */ - { 29, 176, 217 }, - { 12, 145, 201 }, - { 3, 101, 156 }, - { 1, 69, 111 }, - { 1, 39, 63 }, - { 1, 14, 23 } - }, { /* Coeff Band 5 */ - { 57, 192, 233 }, - { 25, 154, 215 }, - { 6, 109, 167 }, - { 3, 78, 118 }, - { 1, 48, 69 }, - { 1, 21, 29 } - } - }, { /* Inter */ - { /* Coeff Band 0 */ - { 202, 105, 245 }, - { 108, 106, 216 }, - { 18, 90, 144 } - }, { /* Coeff Band 1 */ - { 33, 172, 219 }, - { 64, 149, 206 }, - { 14, 117, 177 }, - { 5, 90, 141 }, - { 2, 61, 95 }, - { 1, 37, 57 } - }, { /* Coeff Band 2 */ - { 33, 179, 220 }, - { 11, 140, 198 }, - { 1, 89, 148 }, - { 1, 60, 104 }, - { 1, 33, 57 }, - { 1, 12, 21 } - }, { /* Coeff Band 3 */ - { 30, 181, 221 }, - { 8, 141, 198 }, - { 1, 87, 145 }, - { 1, 58, 100 }, - { 1, 31, 55 }, - { 1, 12, 20 } - }, { /* Coeff Band 4 */ - { 32, 186, 224 }, - { 7, 142, 198 }, - { 1, 86, 143 }, - { 1, 58, 100 }, - { 1, 31, 55 }, - { 1, 12, 22 } - }, { /* Coeff Band 5 */ - { 57, 192, 227 }, - { 20, 143, 204 }, - { 3, 96, 154 }, - { 1, 68, 112 }, - { 1, 42, 69 }, - { 1, 19, 32 } - } - } - }, { /* block Type 1 */ - { /* Intra */ - { /* Coeff Band 0 */ - { 212, 35, 215 }, - { 113, 47, 169 }, - { 29, 48, 105 } - }, { /* Coeff Band 1 */ - { 74, 129, 203 }, - { 106, 120, 203 }, - { 49, 107, 178 }, - { 19, 84, 144 }, - { 4, 50, 84 }, - { 1, 15, 25 } - }, { /* Coeff Band 2 */ - { 71, 172, 217 }, - { 44, 141, 209 }, - { 15, 102, 173 }, - { 6, 76, 133 }, - { 2, 51, 89 }, - { 1, 24, 42 } - }, { /* Coeff Band 3 */ - { 64, 185, 231 }, - { 31, 148, 216 }, - { 8, 103, 175 }, - { 3, 74, 131 }, - { 1, 46, 81 }, - { 1, 18, 30 } - }, { /* Coeff Band 4 */ - { 65, 196, 235 }, - { 25, 157, 221 }, - { 5, 105, 174 }, - { 1, 67, 120 }, - { 1, 38, 69 }, - { 1, 15, 30 } - }, { /* Coeff Band 5 */ - { 65, 204, 238 }, - { 30, 156, 224 }, - { 7, 107, 177 }, - { 2, 70, 124 }, - { 1, 42, 73 }, - { 1, 18, 34 } - } - }, { /* Inter */ - { /* Coeff Band 0 */ - { 225, 86, 251 }, - { 144, 104, 235 }, - { 42, 99, 181 } - }, { /* Coeff Band 1 */ - { 85, 175, 239 }, - { 112, 165, 229 }, - { 29, 136, 200 }, - { 12, 103, 162 }, - { 6, 77, 123 }, - { 2, 53, 84 } - }, { /* Coeff Band 2 */ - { 75, 183, 239 }, - { 30, 155, 221 }, - { 3, 106, 171 }, - { 1, 74, 128 }, - { 1, 44, 76 }, - { 1, 17, 28 } - }, { /* Coeff Band 3 */ - { 73, 185, 240 }, - { 27, 159, 222 }, - { 2, 107, 172 }, - { 1, 75, 127 }, - { 1, 42, 73 }, - { 1, 17, 29 } - }, { /* Coeff Band 4 */ - { 62, 190, 238 }, - { 21, 159, 222 }, - { 2, 107, 172 }, - { 1, 72, 122 }, - { 1, 40, 71 }, - { 1, 18, 32 } - }, { /* Coeff Band 5 */ - { 61, 199, 240 }, - { 27, 161, 226 }, - { 4, 113, 180 }, - { 1, 76, 129 }, - { 1, 46, 80 }, - { 1, 23, 41 } - } - } - } -}; -static const vp9_coeff_probs_model default_coef_probs_16x16[BLOCK_TYPES] = { - { /* block Type 0 */ - { /* Intra */ - { /* Coeff Band 0 */ - { 7, 27, 153 }, - { 5, 30, 95 }, - { 1, 16, 30 } - }, { /* Coeff Band 1 */ - { 50, 75, 127 }, - { 57, 75, 124 }, - { 27, 67, 108 }, - { 10, 54, 86 }, - { 1, 33, 52 }, - { 1, 12, 18 } - }, { /* Coeff Band 2 */ - { 43, 125, 151 }, - { 26, 108, 148 }, - { 7, 83, 122 }, 
- { 2, 59, 89 }, - { 1, 38, 60 }, - { 1, 17, 27 } - }, { /* Coeff Band 3 */ - { 23, 144, 163 }, - { 13, 112, 154 }, - { 2, 75, 117 }, - { 1, 50, 81 }, - { 1, 31, 51 }, - { 1, 14, 23 } - }, { /* Coeff Band 4 */ - { 18, 162, 185 }, - { 6, 123, 171 }, - { 1, 78, 125 }, - { 1, 51, 86 }, - { 1, 31, 54 }, - { 1, 14, 23 } - }, { /* Coeff Band 5 */ - { 15, 199, 227 }, - { 3, 150, 204 }, - { 1, 91, 146 }, - { 1, 55, 95 }, - { 1, 30, 53 }, - { 1, 11, 20 } - } - }, { /* Inter */ - { /* Coeff Band 0 */ - { 19, 55, 240 }, - { 19, 59, 196 }, - { 3, 52, 105 } - }, { /* Coeff Band 1 */ - { 41, 166, 207 }, - { 104, 153, 199 }, - { 31, 123, 181 }, - { 14, 101, 152 }, - { 5, 72, 106 }, - { 1, 36, 52 } - }, { /* Coeff Band 2 */ - { 35, 176, 211 }, - { 12, 131, 190 }, - { 2, 88, 144 }, - { 1, 60, 101 }, - { 1, 36, 60 }, - { 1, 16, 28 } - }, { /* Coeff Band 3 */ - { 28, 183, 213 }, - { 8, 134, 191 }, - { 1, 86, 142 }, - { 1, 56, 96 }, - { 1, 30, 53 }, - { 1, 12, 20 } - }, { /* Coeff Band 4 */ - { 20, 190, 215 }, - { 4, 135, 192 }, - { 1, 84, 139 }, - { 1, 53, 91 }, - { 1, 28, 49 }, - { 1, 11, 20 } - }, { /* Coeff Band 5 */ - { 13, 196, 216 }, - { 2, 137, 192 }, - { 1, 86, 143 }, - { 1, 57, 99 }, - { 1, 32, 56 }, - { 1, 13, 24 } - } - } - }, { /* block Type 1 */ - { /* Intra */ - { /* Coeff Band 0 */ - { 211, 29, 217 }, - { 96, 47, 156 }, - { 22, 43, 87 } - }, { /* Coeff Band 1 */ - { 78, 120, 193 }, - { 111, 116, 186 }, - { 46, 102, 164 }, - { 15, 80, 128 }, - { 2, 49, 76 }, - { 1, 18, 28 } - }, { /* Coeff Band 2 */ - { 71, 161, 203 }, - { 42, 132, 192 }, - { 10, 98, 150 }, - { 3, 69, 109 }, - { 1, 44, 70 }, - { 1, 18, 29 } - }, { /* Coeff Band 3 */ - { 57, 186, 211 }, - { 30, 140, 196 }, - { 4, 93, 146 }, - { 1, 62, 102 }, - { 1, 38, 65 }, - { 1, 16, 27 } - }, { /* Coeff Band 4 */ - { 47, 199, 217 }, - { 14, 145, 196 }, - { 1, 88, 142 }, - { 1, 57, 98 }, - { 1, 36, 62 }, - { 1, 15, 26 } - }, { /* Coeff Band 5 */ - { 26, 219, 229 }, - { 5, 155, 207 }, - { 1, 94, 151 }, - { 1, 60, 104 }, - { 1, 36, 62 }, - { 1, 16, 28 } - } - }, { /* Inter */ - { /* Coeff Band 0 */ - { 233, 29, 248 }, - { 146, 47, 220 }, - { 43, 52, 140 } - }, { /* Coeff Band 1 */ - { 100, 163, 232 }, - { 179, 161, 222 }, - { 63, 142, 204 }, - { 37, 113, 174 }, - { 26, 89, 137 }, - { 18, 68, 97 } - }, { /* Coeff Band 2 */ - { 85, 181, 230 }, - { 32, 146, 209 }, - { 7, 100, 164 }, - { 3, 71, 121 }, - { 1, 45, 77 }, - { 1, 18, 30 } - }, { /* Coeff Band 3 */ - { 65, 187, 230 }, - { 20, 148, 207 }, - { 2, 97, 159 }, - { 1, 68, 116 }, - { 1, 40, 70 }, - { 1, 14, 29 } - }, { /* Coeff Band 4 */ - { 40, 194, 227 }, - { 8, 147, 204 }, - { 1, 94, 155 }, - { 1, 65, 112 }, - { 1, 39, 66 }, - { 1, 14, 26 } - }, { /* Coeff Band 5 */ - { 16, 208, 228 }, - { 3, 151, 207 }, - { 1, 98, 160 }, - { 1, 67, 117 }, - { 1, 41, 74 }, - { 1, 17, 31 } - } - } - } -}; -static const vp9_coeff_probs_model default_coef_probs_32x32[BLOCK_TYPES] = { - { /* block Type 0 */ - { /* Intra */ - { /* Coeff Band 0 */ - { 17, 38, 140 }, - { 7, 34, 80 }, - { 1, 17, 29 } - }, { /* Coeff Band 1 */ - { 37, 75, 128 }, - { 41, 76, 128 }, - { 26, 66, 116 }, - { 12, 52, 94 }, - { 2, 32, 55 }, - { 1, 10, 16 } - }, { /* Coeff Band 2 */ - { 50, 127, 154 }, - { 37, 109, 152 }, - { 16, 82, 121 }, - { 5, 59, 85 }, - { 1, 35, 54 }, - { 1, 13, 20 } - }, { /* Coeff Band 3 */ - { 40, 142, 167 }, - { 17, 110, 157 }, - { 2, 71, 112 }, - { 1, 44, 72 }, - { 1, 27, 45 }, - { 1, 11, 17 } - }, { /* Coeff Band 4 */ - { 30, 175, 188 }, - { 9, 124, 169 }, - { 1, 74, 116 }, - { 1, 48, 78 }, - { 1, 30, 49 }, - { 1, 
11, 18 } - }, { /* Coeff Band 5 */ - { 10, 222, 223 }, - { 2, 150, 194 }, - { 1, 83, 128 }, - { 1, 48, 79 }, - { 1, 27, 45 }, - { 1, 11, 17 } - } - }, { /* Inter */ - { /* Coeff Band 0 */ - { 36, 41, 235 }, - { 29, 36, 193 }, - { 10, 27, 111 } - }, { /* Coeff Band 1 */ - { 85, 165, 222 }, - { 177, 162, 215 }, - { 110, 135, 195 }, - { 57, 113, 168 }, - { 23, 83, 120 }, - { 10, 49, 61 } - }, { /* Coeff Band 2 */ - { 85, 190, 223 }, - { 36, 139, 200 }, - { 5, 90, 146 }, - { 1, 60, 103 }, - { 1, 38, 65 }, - { 1, 18, 30 } - }, { /* Coeff Band 3 */ - { 72, 202, 223 }, - { 23, 141, 199 }, - { 2, 86, 140 }, - { 1, 56, 97 }, - { 1, 36, 61 }, - { 1, 16, 27 } - }, { /* Coeff Band 4 */ - { 55, 218, 225 }, - { 13, 145, 200 }, - { 1, 86, 141 }, - { 1, 57, 99 }, - { 1, 35, 61 }, - { 1, 13, 22 } - }, { /* Coeff Band 5 */ - { 15, 235, 212 }, - { 1, 132, 184 }, - { 1, 84, 139 }, - { 1, 57, 97 }, - { 1, 34, 56 }, - { 1, 14, 23 } - } - } - }, { /* block Type 1 */ - { /* Intra */ - { /* Coeff Band 0 */ - { 181, 21, 201 }, - { 61, 37, 123 }, - { 10, 38, 71 } - }, { /* Coeff Band 1 */ - { 47, 106, 172 }, - { 95, 104, 173 }, - { 42, 93, 159 }, - { 18, 77, 131 }, - { 4, 50, 81 }, - { 1, 17, 23 } - }, { /* Coeff Band 2 */ - { 62, 147, 199 }, - { 44, 130, 189 }, - { 28, 102, 154 }, - { 18, 75, 115 }, - { 2, 44, 65 }, - { 1, 12, 19 } - }, { /* Coeff Band 3 */ - { 55, 153, 210 }, - { 24, 130, 194 }, - { 3, 93, 146 }, - { 1, 61, 97 }, - { 1, 31, 50 }, - { 1, 10, 16 } - }, { /* Coeff Band 4 */ - { 49, 186, 223 }, - { 17, 148, 204 }, - { 1, 96, 142 }, - { 1, 53, 83 }, - { 1, 26, 44 }, - { 1, 11, 17 } - }, { /* Coeff Band 5 */ - { 13, 217, 212 }, - { 2, 136, 180 }, - { 1, 78, 124 }, - { 1, 50, 83 }, - { 1, 29, 49 }, - { 1, 14, 23 } - } - }, { /* Inter */ - { /* Coeff Band 0 */ - { 197, 13, 247 }, - { 82, 17, 222 }, - { 25, 17, 162 } - }, { /* Coeff Band 1 */ - { 126, 186, 247 }, - { 234, 191, 243 }, - { 176, 177, 234 }, - { 104, 158, 220 }, - { 66, 128, 186 }, - { 55, 90, 137 } - }, { /* Coeff Band 2 */ - { 111, 197, 242 }, - { 46, 158, 219 }, - { 9, 104, 171 }, - { 2, 65, 125 }, - { 1, 44, 80 }, - { 1, 17, 91 } - }, { /* Coeff Band 3 */ - { 104, 208, 245 }, - { 39, 168, 224 }, - { 3, 109, 162 }, - { 1, 79, 124 }, - { 1, 50, 102 }, - { 1, 43, 102 } - }, { /* Coeff Band 4 */ - { 84, 220, 246 }, - { 31, 177, 231 }, - { 2, 115, 180 }, - { 1, 79, 134 }, - { 1, 55, 77 }, - { 1, 60, 79 } - }, { /* Coeff Band 5 */ - { 43, 243, 240 }, - { 8, 180, 217 }, - { 1, 115, 166 }, - { 1, 84, 121 }, - { 1, 51, 67 }, - { 1, 16, 6 } - } - } - } -}; - -#endif // VP9_COMMON_DEFAULT_COEF_PROBS_H_ diff --git a/libvpx/vp9/common/vp9_entropy.c b/libvpx/vp9/common/vp9_entropy.c index feceb66..bc12f9a 100644 --- a/libvpx/vp9/common/vp9_entropy.c +++ b/libvpx/vp9/common/vp9_entropy.c @@ -15,29 +15,8 @@ #include "vpx_mem/vpx_mem.h" #include "vpx/vpx_integer.h" -#define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES) -DECLARE_ALIGNED(16, const uint8_t, vp9_norm[256]) = { - 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 -}; - -DECLARE_ALIGNED(16, const uint8_t, - vp9_coefband_trans_8x8plus[1024]) = { +const uint8_t vp9_coefband_trans_8x8plus[1024] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, // beyond MAXBAND_INDEX+1 all values are filled as 5 @@ -106,50 +85,17 @@ DECLARE_ALIGNED(16, const uint8_t, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, }; -DECLARE_ALIGNED(16, const uint8_t, - vp9_coefband_trans_4x4[16]) = { +const uint8_t vp9_coefband_trans_4x4[16] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, }; -DECLARE_ALIGNED(16, const uint8_t, vp9_pt_energy_class[MAX_ENTROPY_TOKENS]) = { +const uint8_t vp9_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 5 }; - - -/* Array indices are identical to previously-existing CONTEXT_NODE indices */ - -const vp9_tree_index vp9_coef_tree[TREE_SIZE(MAX_ENTROPY_TOKENS)] = { - -DCT_EOB_TOKEN, 2, /* 0 = EOB */ - -ZERO_TOKEN, 4, /* 1 = ZERO */ - -ONE_TOKEN, 6, /* 2 = ONE */ - 8, 12, /* 3 = LOW_VAL */ - -TWO_TOKEN, 10, /* 4 = TWO */ - -THREE_TOKEN, -FOUR_TOKEN, /* 5 = THREE */ - 14, 16, /* 6 = HIGH_LOW */ - -DCT_VAL_CATEGORY1, -DCT_VAL_CATEGORY2, /* 7 = CAT_ONE */ - 18, 20, /* 8 = CAT_THREEFOUR */ - -DCT_VAL_CATEGORY3, -DCT_VAL_CATEGORY4, /* 9 = CAT_THREE */ - -DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */ -}; - -struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS]; - -/* Trees for extra bits. Probabilities are constant and - do not depend on previously encoded bits */ - -static const vp9_prob Pcat1[] = { 159}; -static const vp9_prob Pcat2[] = { 165, 145}; -static const vp9_prob Pcat3[] = { 173, 148, 140}; -static const vp9_prob Pcat4[] = { 176, 155, 140, 135}; -static const vp9_prob Pcat5[] = { 180, 157, 141, 134, 130}; -static const vp9_prob Pcat6[] = { - 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129 -}; - -const vp9_tree_index vp9_coefmodel_tree[6] = { - -DCT_EOB_MODEL_TOKEN, 2, /* 0 = EOB */ - -ZERO_TOKEN, 4, /* 1 = ZERO */ +const vp9_tree_index vp9_coefmodel_tree[TREE_SIZE(UNCONSTRAINED_NODES + 1)] = { + -EOB_MODEL_TOKEN, 2, + -ZERO_TOKEN, 4, -ONE_TOKEN, -TWO_TOKEN, }; @@ -162,198 +108,617 @@ const vp9_tree_index vp9_coefmodel_tree[6] = { // the probabilities for the rest of the nodes. 
// beta = 8 -static const vp9_prob modelcoefprobs_pareto8[COEFPROB_MODELS][MODEL_NODES] = { + +// Every odd line in this table can be generated from the even lines +// by averaging : +// vp9_pareto8_full[l][node] = (vp9_pareto8_full[l-1][node] + +// vp9_pareto8_full[l+1][node] ) >> 1; +const vp9_prob vp9_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = { { 3, 86, 128, 6, 86, 23, 88, 29}, + { 6, 86, 128, 11, 87, 42, 91, 52}, { 9, 86, 129, 17, 88, 61, 94, 76}, + { 12, 86, 129, 22, 88, 77, 97, 93}, { 15, 87, 129, 28, 89, 93, 100, 110}, + { 17, 87, 129, 33, 90, 105, 103, 123}, { 20, 88, 130, 38, 91, 118, 106, 136}, + { 23, 88, 130, 43, 91, 128, 108, 146}, { 26, 89, 131, 48, 92, 139, 111, 156}, + { 28, 89, 131, 53, 93, 147, 114, 163}, { 31, 90, 131, 58, 94, 156, 117, 171}, + { 34, 90, 131, 62, 94, 163, 119, 177}, { 37, 90, 132, 66, 95, 171, 122, 184}, + { 39, 90, 132, 70, 96, 177, 124, 189}, { 42, 91, 132, 75, 97, 183, 127, 194}, + { 44, 91, 132, 79, 97, 188, 129, 198}, { 47, 92, 133, 83, 98, 193, 132, 202}, + { 49, 92, 133, 86, 99, 197, 134, 205}, { 52, 93, 133, 90, 100, 201, 137, 208}, + { 54, 93, 133, 94, 100, 204, 139, 211}, { 57, 94, 134, 98, 101, 208, 142, 214}, + { 59, 94, 134, 101, 102, 211, 144, 216}, { 62, 94, 135, 105, 103, 214, 146, 218}, + { 64, 94, 135, 108, 103, 216, 148, 220}, { 66, 95, 135, 111, 104, 219, 151, 222}, + { 68, 95, 135, 114, 105, 221, 153, 223}, { 71, 96, 136, 117, 106, 224, 155, 225}, + { 73, 96, 136, 120, 106, 225, 157, 226}, { 76, 97, 136, 123, 107, 227, 159, 228}, + { 78, 97, 136, 126, 108, 229, 160, 229}, { 80, 98, 137, 129, 109, 231, 162, 231}, + { 82, 98, 137, 131, 109, 232, 164, 232}, { 84, 98, 138, 134, 110, 234, 166, 233}, + { 86, 98, 138, 137, 111, 235, 168, 234}, { 89, 99, 138, 140, 112, 236, 170, 235}, + { 91, 99, 138, 142, 112, 237, 171, 235}, { 93, 100, 139, 145, 113, 238, 173, 236}, + { 95, 100, 139, 147, 114, 239, 174, 237}, { 97, 101, 140, 149, 115, 240, 176, 238}, + { 99, 101, 140, 151, 115, 241, 177, 238}, {101, 102, 140, 154, 116, 242, 179, 239}, + {103, 102, 140, 156, 117, 242, 180, 239}, {105, 103, 141, 158, 118, 243, 182, 240}, + {107, 103, 141, 160, 118, 243, 183, 240}, {109, 104, 141, 162, 119, 244, 185, 241}, + {111, 104, 141, 164, 119, 244, 186, 241}, {113, 104, 142, 166, 120, 245, 187, 242}, + {114, 104, 142, 168, 121, 245, 188, 242}, {116, 105, 143, 170, 122, 246, 190, 243}, + {118, 105, 143, 171, 122, 246, 191, 243}, {120, 106, 143, 173, 123, 247, 192, 244}, + {121, 106, 143, 175, 124, 247, 193, 244}, {123, 107, 144, 177, 125, 248, 195, 244}, + {125, 107, 144, 178, 125, 248, 196, 244}, {127, 108, 145, 180, 126, 249, 197, 245}, + {128, 108, 145, 181, 127, 249, 198, 245}, {130, 109, 145, 183, 128, 249, 199, 245}, + {132, 109, 145, 184, 128, 249, 200, 245}, {134, 110, 146, 186, 129, 250, 201, 246}, + {135, 110, 146, 187, 130, 250, 202, 246}, {137, 111, 147, 189, 131, 251, 203, 246}, + {138, 111, 147, 190, 131, 251, 204, 246}, {140, 112, 147, 192, 132, 251, 205, 247}, + {141, 112, 147, 193, 132, 251, 206, 247}, {143, 113, 148, 194, 133, 251, 207, 247}, + {144, 113, 148, 195, 134, 251, 207, 247}, {146, 114, 149, 197, 135, 252, 208, 248}, + {147, 114, 149, 198, 135, 252, 209, 248}, {149, 115, 149, 199, 136, 252, 210, 248}, + {150, 115, 149, 200, 137, 252, 210, 248}, {152, 115, 150, 201, 138, 252, 211, 248}, + {153, 115, 150, 202, 138, 252, 212, 248}, {155, 116, 151, 204, 139, 253, 213, 249}, + {156, 116, 151, 205, 139, 253, 213, 249}, {158, 117, 151, 206, 140, 253, 214, 249}, + {159, 117, 151, 207, 141, 253, 215, 249}, {161, 118, 152, 208, 
142, 253, 216, 249}, + {162, 118, 152, 209, 142, 253, 216, 249}, {163, 119, 153, 210, 143, 253, 217, 249}, + {164, 119, 153, 211, 143, 253, 217, 249}, {166, 120, 153, 212, 144, 254, 218, 250}, + {167, 120, 153, 212, 145, 254, 219, 250}, {168, 121, 154, 213, 146, 254, 220, 250}, + {169, 121, 154, 214, 146, 254, 220, 250}, {171, 122, 155, 215, 147, 254, 221, 250}, + {172, 122, 155, 216, 147, 254, 221, 250}, {173, 123, 155, 217, 148, 254, 222, 250}, + {174, 123, 155, 217, 149, 254, 222, 250}, {176, 124, 156, 218, 150, 254, 223, 250}, + {177, 124, 156, 219, 150, 254, 223, 250}, {178, 125, 157, 220, 151, 254, 224, 251}, + {179, 125, 157, 220, 151, 254, 224, 251}, {180, 126, 157, 221, 152, 254, 225, 251}, + {181, 126, 157, 221, 152, 254, 225, 251}, {183, 127, 158, 222, 153, 254, 226, 251}, + {184, 127, 158, 223, 154, 254, 226, 251}, {185, 128, 159, 224, 155, 255, 227, 251}, + {186, 128, 159, 224, 155, 255, 227, 251}, {187, 129, 160, 225, 156, 255, 228, 251}, + {188, 130, 160, 225, 156, 255, 228, 251}, {189, 131, 160, 226, 157, 255, 228, 251}, + {190, 131, 160, 226, 158, 255, 228, 251}, {191, 132, 161, 227, 159, 255, 229, 251}, + {192, 132, 161, 227, 159, 255, 229, 251}, {193, 133, 162, 228, 160, 255, 230, 252}, + {194, 133, 162, 229, 160, 255, 230, 252}, {195, 134, 163, 230, 161, 255, 231, 252}, + {196, 134, 163, 230, 161, 255, 231, 252}, {197, 135, 163, 231, 162, 255, 231, 252}, + {198, 135, 163, 231, 162, 255, 231, 252}, {199, 136, 164, 232, 163, 255, 232, 252}, + {200, 136, 164, 232, 164, 255, 232, 252}, + {201, 137, 165, 233, 165, 255, 233, 252}, {201, 137, 165, 233, 165, 255, 233, 252}, {202, 138, 166, 233, 166, 255, 233, 252}, + {203, 138, 166, 233, 166, 255, 233, 252}, {204, 139, 166, 234, 167, 255, 234, 252}, + {205, 139, 166, 234, 167, 255, 234, 252}, + {206, 140, 167, 235, 168, 255, 235, 252}, {206, 140, 167, 235, 168, 255, 235, 252}, {207, 141, 168, 236, 169, 255, 235, 252}, + {208, 141, 168, 236, 170, 255, 235, 252}, {209, 142, 169, 237, 171, 255, 236, 252}, + {209, 143, 169, 237, 171, 255, 236, 252}, {210, 144, 169, 237, 172, 255, 236, 252}, + {211, 144, 169, 237, 172, 255, 236, 252}, {212, 145, 170, 238, 173, 255, 237, 252}, + {213, 145, 170, 238, 173, 255, 237, 252}, {214, 146, 171, 239, 174, 255, 237, 253}, + {214, 146, 171, 239, 174, 255, 237, 253}, + {215, 147, 172, 240, 175, 255, 238, 253}, {215, 147, 172, 240, 175, 255, 238, 253}, {216, 148, 173, 240, 176, 255, 238, 253}, + {217, 148, 173, 240, 176, 255, 238, 253}, {218, 149, 173, 241, 177, 255, 239, 253}, + {218, 149, 173, 241, 178, 255, 239, 253}, {219, 150, 174, 241, 179, 255, 239, 253}, + {219, 151, 174, 241, 179, 255, 239, 253}, {220, 152, 175, 242, 180, 255, 240, 253}, + {221, 152, 175, 242, 180, 255, 240, 253}, {222, 153, 176, 242, 181, 255, 240, 253}, + {222, 153, 176, 242, 181, 255, 240, 253}, + {223, 154, 177, 243, 182, 255, 240, 253}, {223, 154, 177, 243, 182, 255, 240, 253}, {224, 155, 178, 244, 183, 255, 241, 253}, + {224, 155, 178, 244, 183, 255, 241, 253}, {225, 156, 178, 244, 184, 255, 241, 253}, + {225, 157, 178, 244, 184, 255, 241, 253}, {226, 158, 179, 244, 185, 255, 242, 253}, + {227, 158, 179, 244, 185, 255, 242, 253}, + {228, 159, 180, 245, 186, 255, 242, 253}, {228, 159, 180, 245, 186, 255, 242, 253}, {229, 160, 181, 245, 187, 255, 242, 253}, + {229, 160, 181, 245, 187, 255, 242, 253}, {230, 161, 182, 246, 188, 255, 243, 253}, + {230, 162, 182, 246, 188, 255, 243, 253}, + {231, 163, 183, 246, 189, 255, 243, 253}, {231, 163, 183, 246, 189, 255, 243, 253}, {232, 164, 184, 247, 190, 255, 243, 253}, + {232, 
164, 184, 247, 190, 255, 243, 253}, + {233, 165, 185, 247, 191, 255, 244, 253}, {233, 165, 185, 247, 191, 255, 244, 253}, {234, 166, 185, 247, 192, 255, 244, 253}, + {234, 167, 185, 247, 192, 255, 244, 253}, {235, 168, 186, 248, 193, 255, 244, 253}, + {235, 168, 186, 248, 193, 255, 244, 253}, + {236, 169, 187, 248, 194, 255, 244, 253}, {236, 169, 187, 248, 194, 255, 244, 253}, {236, 170, 188, 248, 195, 255, 245, 253}, + {236, 170, 188, 248, 195, 255, 245, 253}, {237, 171, 189, 249, 196, 255, 245, 254}, + {237, 172, 189, 249, 196, 255, 245, 254}, + {238, 173, 190, 249, 197, 255, 245, 254}, {238, 173, 190, 249, 197, 255, 245, 254}, {239, 174, 191, 249, 198, 255, 245, 254}, + {239, 174, 191, 249, 198, 255, 245, 254}, {240, 175, 192, 249, 199, 255, 246, 254}, + {240, 176, 192, 249, 199, 255, 246, 254}, + {240, 177, 193, 250, 200, 255, 246, 254}, {240, 177, 193, 250, 200, 255, 246, 254}, {241, 178, 194, 250, 201, 255, 246, 254}, + {241, 178, 194, 250, 201, 255, 246, 254}, {242, 179, 195, 250, 202, 255, 246, 254}, + {242, 180, 195, 250, 202, 255, 246, 254}, + {242, 181, 196, 250, 203, 255, 247, 254}, {242, 181, 196, 250, 203, 255, 247, 254}, {243, 182, 197, 251, 204, 255, 247, 254}, + {243, 183, 197, 251, 204, 255, 247, 254}, {244, 184, 198, 251, 205, 255, 247, 254}, + {244, 184, 198, 251, 205, 255, 247, 254}, + {244, 185, 199, 251, 206, 255, 247, 254}, {244, 185, 199, 251, 206, 255, 247, 254}, {245, 186, 200, 251, 207, 255, 247, 254}, + {245, 187, 200, 251, 207, 255, 247, 254}, + {246, 188, 201, 252, 207, 255, 248, 254}, {246, 188, 201, 252, 207, 255, 248, 254}, {246, 189, 202, 252, 208, 255, 248, 254}, + {246, 190, 202, 252, 208, 255, 248, 254}, + {247, 191, 203, 252, 209, 255, 248, 254}, {247, 191, 203, 252, 209, 255, 248, 254}, {247, 192, 204, 252, 210, 255, 248, 254}, + {247, 193, 204, 252, 210, 255, 248, 254}, + {248, 194, 205, 252, 211, 255, 248, 254}, {248, 194, 205, 252, 211, 255, 248, 254}, {248, 195, 206, 252, 212, 255, 249, 254}, + {248, 196, 206, 252, 212, 255, 249, 254}, + {249, 197, 207, 253, 213, 255, 249, 254}, {249, 197, 207, 253, 213, 255, 249, 254}, {249, 198, 208, 253, 214, 255, 249, 254}, + {249, 199, 209, 253, 214, 255, 249, 254}, + {250, 200, 210, 253, 215, 255, 249, 254}, {250, 200, 210, 253, 215, 255, 249, 254}, {250, 201, 211, 253, 215, 255, 249, 254}, + {250, 202, 211, 253, 215, 255, 249, 254}, + {250, 203, 212, 253, 216, 255, 249, 254}, {250, 203, 212, 253, 216, 255, 249, 254}, {251, 204, 213, 253, 217, 255, 250, 254}, + {251, 205, 213, 253, 217, 255, 250, 254}, {251, 206, 214, 254, 218, 255, 250, 254}, + {251, 206, 215, 254, 218, 255, 250, 254}, {252, 207, 216, 254, 219, 255, 250, 254}, + {252, 208, 216, 254, 219, 255, 250, 254}, {252, 209, 217, 254, 220, 255, 250, 254}, + {252, 210, 217, 254, 220, 255, 250, 254}, {252, 211, 218, 254, 221, 255, 250, 254}, + {252, 212, 218, 254, 221, 255, 250, 254}, {253, 213, 219, 254, 222, 255, 250, 254}, + {253, 213, 220, 254, 222, 255, 250, 254}, {253, 214, 221, 254, 223, 255, 250, 254}, + {253, 215, 221, 254, 223, 255, 250, 254}, {253, 216, 222, 254, 224, 255, 251, 254}, + {253, 217, 223, 254, 224, 255, 251, 254}, {253, 218, 224, 254, 225, 255, 251, 254}, + {253, 219, 224, 254, 225, 255, 251, 254}, {254, 220, 225, 254, 225, 255, 251, 254}, + {254, 221, 226, 254, 225, 255, 251, 254}, {254, 222, 227, 255, 226, 255, 251, 254}, + {254, 223, 227, 255, 226, 255, 251, 254}, {254, 224, 228, 255, 227, 255, 251, 254}, + {254, 225, 229, 255, 227, 255, 251, 254}, {254, 226, 230, 255, 228, 255, 251, 254}, + {254, 227, 230, 255, 229, 255, 
251, 254}, {255, 228, 231, 255, 230, 255, 251, 254}, + {255, 229, 232, 255, 230, 255, 251, 254}, {255, 230, 233, 255, 231, 255, 252, 254}, + {255, 231, 234, 255, 231, 255, 252, 254}, {255, 232, 235, 255, 232, 255, 252, 254}, + {255, 233, 236, 255, 232, 255, 252, 254}, {255, 235, 237, 255, 233, 255, 252, 254}, + {255, 236, 238, 255, 234, 255, 252, 254}, {255, 238, 240, 255, 235, 255, 252, 255}, + {255, 239, 241, 255, 235, 255, 252, 254}, {255, 241, 243, 255, 236, 255, 252, 254}, - {255, 246, 247, 255, 239, 255, 253, 255} + {255, 243, 245, 255, 237, 255, 252, 254}, + {255, 246, 247, 255, 239, 255, 253, 255}, + {255, 246, 247, 255, 239, 255, 253, 255}, }; -static void extend_model_to_full_distribution(vp9_prob p, - vp9_prob *tree_probs) { - const int l = (p - 1) / 2; - const vp9_prob (*model)[MODEL_NODES] = modelcoefprobs_pareto8; - if (p & 1) { - vpx_memcpy(tree_probs + UNCONSTRAINED_NODES, - model[l], MODEL_NODES * sizeof(vp9_prob)); - } else { - // interpolate - int i; - for (i = UNCONSTRAINED_NODES; i < ENTROPY_NODES; ++i) - tree_probs[i] = (model[l][i - UNCONSTRAINED_NODES] + - model[l + 1][i - UNCONSTRAINED_NODES]) >> 1; +static const vp9_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = { + { // Y plane + { // Intra + { // Band 0 + { 195, 29, 183 }, { 84, 49, 136 }, { 8, 42, 71 } + }, { // Band 1 + { 31, 107, 169 }, { 35, 99, 159 }, { 17, 82, 140 }, + { 8, 66, 114 }, { 2, 44, 76 }, { 1, 19, 32 } + }, { // Band 2 + { 40, 132, 201 }, { 29, 114, 187 }, { 13, 91, 157 }, + { 7, 75, 127 }, { 3, 58, 95 }, { 1, 28, 47 } + }, { // Band 3 + { 69, 142, 221 }, { 42, 122, 201 }, { 15, 91, 159 }, + { 6, 67, 121 }, { 1, 42, 77 }, { 1, 17, 31 } + }, { // Band 4 + { 102, 148, 228 }, { 67, 117, 204 }, { 17, 82, 154 }, + { 6, 59, 114 }, { 2, 39, 75 }, { 1, 15, 29 } + }, { // Band 5 + { 156, 57, 233 }, { 119, 57, 212 }, { 58, 48, 163 }, + { 29, 40, 124 }, { 12, 30, 81 }, { 3, 12, 31 } + } + }, { // Inter + { // Band 0 + { 191, 107, 226 }, { 124, 117, 204 }, { 25, 99, 155 } + }, { // Band 1 + { 29, 148, 210 }, { 37, 126, 194 }, { 8, 93, 157 }, + { 2, 68, 118 }, { 1, 39, 69 }, { 1, 17, 33 } + }, { // Band 2 + { 41, 151, 213 }, { 27, 123, 193 }, { 3, 82, 144 }, + { 1, 58, 105 }, { 1, 32, 60 }, { 1, 13, 26 } + }, { // Band 3 + { 59, 159, 220 }, { 23, 126, 198 }, { 4, 88, 151 }, + { 1, 66, 114 }, { 1, 38, 71 }, { 1, 18, 34 } + }, { // Band 4 + { 114, 136, 232 }, { 51, 114, 207 }, { 11, 83, 155 }, + { 3, 56, 105 }, { 1, 33, 65 }, { 1, 17, 34 } + }, { // Band 5 + { 149, 65, 234 }, { 121, 57, 215 }, { 61, 49, 166 }, + { 28, 36, 114 }, { 12, 25, 76 }, { 3, 16, 42 } + } + } + }, { // UV plane + { // Intra + { // Band 0 + { 214, 49, 220 }, { 132, 63, 188 }, { 42, 65, 137 } + }, { // Band 1 + { 85, 137, 221 }, { 104, 131, 216 }, { 49, 111, 192 }, + { 21, 87, 155 }, { 2, 49, 87 }, { 1, 16, 28 } + }, { // Band 2 + { 89, 163, 230 }, { 90, 137, 220 }, { 29, 100, 183 }, + { 10, 70, 135 }, { 2, 42, 81 }, { 1, 17, 33 } + }, { // Band 3 + { 108, 167, 237 }, { 55, 133, 222 }, { 15, 97, 179 }, + { 4, 72, 135 }, { 1, 45, 85 }, { 1, 19, 38 } + }, { // Band 4 + { 124, 146, 240 }, { 66, 124, 224 }, { 17, 88, 175 }, + { 4, 58, 122 }, { 1, 36, 75 }, { 1, 18, 37 } + }, { // Band 5 + { 141, 79, 241 }, { 126, 70, 227 }, { 66, 58, 182 }, + { 30, 44, 136 }, { 12, 34, 96 }, { 2, 20, 47 } + } + }, { // Inter + { // Band 0 + { 229, 99, 249 }, { 143, 111, 235 }, { 46, 109, 192 } + }, { // Band 1 + { 82, 158, 236 }, { 94, 146, 224 }, { 25, 117, 191 }, + { 9, 87, 149 }, { 3, 56, 99 }, { 1, 33, 57 } + }, { // Band 2 + { 83, 167, 237 }, { 
68, 145, 222 }, { 10, 103, 177 }, + { 2, 72, 131 }, { 1, 41, 79 }, { 1, 20, 39 } + }, { // Band 3 + { 99, 167, 239 }, { 47, 141, 224 }, { 10, 104, 178 }, + { 2, 73, 133 }, { 1, 44, 85 }, { 1, 22, 47 } + }, { // Band 4 + { 127, 145, 243 }, { 71, 129, 228 }, { 17, 93, 177 }, + { 3, 61, 124 }, { 1, 41, 84 }, { 1, 21, 52 } + }, { // Band 5 + { 157, 78, 244 }, { 140, 72, 231 }, { 69, 58, 184 }, + { 31, 44, 137 }, { 14, 38, 105 }, { 8, 23, 61 } + } + } } -} - -void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full) { - if (full != model) - vpx_memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES); - extend_model_to_full_distribution(model[PIVOT_NODE], full); -} +}; -static vp9_tree_index cat1[2], cat2[4], cat3[6], cat4[8], cat5[10], cat6[28]; +static const vp9_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = { + { // Y plane + { // Intra + { // Band 0 + { 125, 34, 187 }, { 52, 41, 133 }, { 6, 31, 56 } + }, { // Band 1 + { 37, 109, 153 }, { 51, 102, 147 }, { 23, 87, 128 }, + { 8, 67, 101 }, { 1, 41, 63 }, { 1, 19, 29 } + }, { // Band 2 + { 31, 154, 185 }, { 17, 127, 175 }, { 6, 96, 145 }, + { 2, 73, 114 }, { 1, 51, 82 }, { 1, 28, 45 } + }, { // Band 3 + { 23, 163, 200 }, { 10, 131, 185 }, { 2, 93, 148 }, + { 1, 67, 111 }, { 1, 41, 69 }, { 1, 14, 24 } + }, { // Band 4 + { 29, 176, 217 }, { 12, 145, 201 }, { 3, 101, 156 }, + { 1, 69, 111 }, { 1, 39, 63 }, { 1, 14, 23 } + }, { // Band 5 + { 57, 192, 233 }, { 25, 154, 215 }, { 6, 109, 167 }, + { 3, 78, 118 }, { 1, 48, 69 }, { 1, 21, 29 } + } + }, { // Inter + { // Band 0 + { 202, 105, 245 }, { 108, 106, 216 }, { 18, 90, 144 } + }, { // Band 1 + { 33, 172, 219 }, { 64, 149, 206 }, { 14, 117, 177 }, + { 5, 90, 141 }, { 2, 61, 95 }, { 1, 37, 57 } + }, { // Band 2 + { 33, 179, 220 }, { 11, 140, 198 }, { 1, 89, 148 }, + { 1, 60, 104 }, { 1, 33, 57 }, { 1, 12, 21 } + }, { // Band 3 + { 30, 181, 221 }, { 8, 141, 198 }, { 1, 87, 145 }, + { 1, 58, 100 }, { 1, 31, 55 }, { 1, 12, 20 } + }, { // Band 4 + { 32, 186, 224 }, { 7, 142, 198 }, { 1, 86, 143 }, + { 1, 58, 100 }, { 1, 31, 55 }, { 1, 12, 22 } + }, { // Band 5 + { 57, 192, 227 }, { 20, 143, 204 }, { 3, 96, 154 }, + { 1, 68, 112 }, { 1, 42, 69 }, { 1, 19, 32 } + } + } + }, { // UV plane + { // Intra + { // Band 0 + { 212, 35, 215 }, { 113, 47, 169 }, { 29, 48, 105 } + }, { // Band 1 + { 74, 129, 203 }, { 106, 120, 203 }, { 49, 107, 178 }, + { 19, 84, 144 }, { 4, 50, 84 }, { 1, 15, 25 } + }, { // Band 2 + { 71, 172, 217 }, { 44, 141, 209 }, { 15, 102, 173 }, + { 6, 76, 133 }, { 2, 51, 89 }, { 1, 24, 42 } + }, { // Band 3 + { 64, 185, 231 }, { 31, 148, 216 }, { 8, 103, 175 }, + { 3, 74, 131 }, { 1, 46, 81 }, { 1, 18, 30 } + }, { // Band 4 + { 65, 196, 235 }, { 25, 157, 221 }, { 5, 105, 174 }, + { 1, 67, 120 }, { 1, 38, 69 }, { 1, 15, 30 } + }, { // Band 5 + { 65, 204, 238 }, { 30, 156, 224 }, { 7, 107, 177 }, + { 2, 70, 124 }, { 1, 42, 73 }, { 1, 18, 34 } + } + }, { // Inter + { // Band 0 + { 225, 86, 251 }, { 144, 104, 235 }, { 42, 99, 181 } + }, { // Band 1 + { 85, 175, 239 }, { 112, 165, 229 }, { 29, 136, 200 }, + { 12, 103, 162 }, { 6, 77, 123 }, { 2, 53, 84 } + }, { // Band 2 + { 75, 183, 239 }, { 30, 155, 221 }, { 3, 106, 171 }, + { 1, 74, 128 }, { 1, 44, 76 }, { 1, 17, 28 } + }, { // Band 3 + { 73, 185, 240 }, { 27, 159, 222 }, { 2, 107, 172 }, + { 1, 75, 127 }, { 1, 42, 73 }, { 1, 17, 29 } + }, { // Band 4 + { 62, 190, 238 }, { 21, 159, 222 }, { 2, 107, 172 }, + { 1, 72, 122 }, { 1, 40, 71 }, { 1, 18, 32 } + }, { // Band 5 + { 61, 199, 240 }, { 27, 161, 226 }, { 4, 113, 180 
}, + { 1, 76, 129 }, { 1, 46, 80 }, { 1, 23, 41 } + } + } + } +}; -static void init_bit_tree(vp9_tree_index *p, int n) { - int i = 0; +static const vp9_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = { + { // Y plane + { // Intra + { // Band 0 + { 7, 27, 153 }, { 5, 30, 95 }, { 1, 16, 30 } + }, { // Band 1 + { 50, 75, 127 }, { 57, 75, 124 }, { 27, 67, 108 }, + { 10, 54, 86 }, { 1, 33, 52 }, { 1, 12, 18 } + }, { // Band 2 + { 43, 125, 151 }, { 26, 108, 148 }, { 7, 83, 122 }, + { 2, 59, 89 }, { 1, 38, 60 }, { 1, 17, 27 } + }, { // Band 3 + { 23, 144, 163 }, { 13, 112, 154 }, { 2, 75, 117 }, + { 1, 50, 81 }, { 1, 31, 51 }, { 1, 14, 23 } + }, { // Band 4 + { 18, 162, 185 }, { 6, 123, 171 }, { 1, 78, 125 }, + { 1, 51, 86 }, { 1, 31, 54 }, { 1, 14, 23 } + }, { // Band 5 + { 15, 199, 227 }, { 3, 150, 204 }, { 1, 91, 146 }, + { 1, 55, 95 }, { 1, 30, 53 }, { 1, 11, 20 } + } + }, { // Inter + { // Band 0 + { 19, 55, 240 }, { 19, 59, 196 }, { 3, 52, 105 } + }, { // Band 1 + { 41, 166, 207 }, { 104, 153, 199 }, { 31, 123, 181 }, + { 14, 101, 152 }, { 5, 72, 106 }, { 1, 36, 52 } + }, { // Band 2 + { 35, 176, 211 }, { 12, 131, 190 }, { 2, 88, 144 }, + { 1, 60, 101 }, { 1, 36, 60 }, { 1, 16, 28 } + }, { // Band 3 + { 28, 183, 213 }, { 8, 134, 191 }, { 1, 86, 142 }, + { 1, 56, 96 }, { 1, 30, 53 }, { 1, 12, 20 } + }, { // Band 4 + { 20, 190, 215 }, { 4, 135, 192 }, { 1, 84, 139 }, + { 1, 53, 91 }, { 1, 28, 49 }, { 1, 11, 20 } + }, { // Band 5 + { 13, 196, 216 }, { 2, 137, 192 }, { 1, 86, 143 }, + { 1, 57, 99 }, { 1, 32, 56 }, { 1, 13, 24 } + } + } + }, { // UV plane + { // Intra + { // Band 0 + { 211, 29, 217 }, { 96, 47, 156 }, { 22, 43, 87 } + }, { // Band 1 + { 78, 120, 193 }, { 111, 116, 186 }, { 46, 102, 164 }, + { 15, 80, 128 }, { 2, 49, 76 }, { 1, 18, 28 } + }, { // Band 2 + { 71, 161, 203 }, { 42, 132, 192 }, { 10, 98, 150 }, + { 3, 69, 109 }, { 1, 44, 70 }, { 1, 18, 29 } + }, { // Band 3 + { 57, 186, 211 }, { 30, 140, 196 }, { 4, 93, 146 }, + { 1, 62, 102 }, { 1, 38, 65 }, { 1, 16, 27 } + }, { // Band 4 + { 47, 199, 217 }, { 14, 145, 196 }, { 1, 88, 142 }, + { 1, 57, 98 }, { 1, 36, 62 }, { 1, 15, 26 } + }, { // Band 5 + { 26, 219, 229 }, { 5, 155, 207 }, { 1, 94, 151 }, + { 1, 60, 104 }, { 1, 36, 62 }, { 1, 16, 28 } + } + }, { // Inter + { // Band 0 + { 233, 29, 248 }, { 146, 47, 220 }, { 43, 52, 140 } + }, { // Band 1 + { 100, 163, 232 }, { 179, 161, 222 }, { 63, 142, 204 }, + { 37, 113, 174 }, { 26, 89, 137 }, { 18, 68, 97 } + }, { // Band 2 + { 85, 181, 230 }, { 32, 146, 209 }, { 7, 100, 164 }, + { 3, 71, 121 }, { 1, 45, 77 }, { 1, 18, 30 } + }, { // Band 3 + { 65, 187, 230 }, { 20, 148, 207 }, { 2, 97, 159 }, + { 1, 68, 116 }, { 1, 40, 70 }, { 1, 14, 29 } + }, { // Band 4 + { 40, 194, 227 }, { 8, 147, 204 }, { 1, 94, 155 }, + { 1, 65, 112 }, { 1, 39, 66 }, { 1, 14, 26 } + }, { // Band 5 + { 16, 208, 228 }, { 3, 151, 207 }, { 1, 98, 160 }, + { 1, 67, 117 }, { 1, 41, 74 }, { 1, 17, 31 } + } + } + } +}; - while (++i < n) { - p[0] = p[1] = i << 1; - p += 2; +static const vp9_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = { + { // Y plane + { // Intra + { // Band 0 + { 17, 38, 140 }, { 7, 34, 80 }, { 1, 17, 29 } + }, { // Band 1 + { 37, 75, 128 }, { 41, 76, 128 }, { 26, 66, 116 }, + { 12, 52, 94 }, { 2, 32, 55 }, { 1, 10, 16 } + }, { // Band 2 + { 50, 127, 154 }, { 37, 109, 152 }, { 16, 82, 121 }, + { 5, 59, 85 }, { 1, 35, 54 }, { 1, 13, 20 } + }, { // Band 3 + { 40, 142, 167 }, { 17, 110, 157 }, { 2, 71, 112 }, + { 1, 44, 72 }, { 1, 27, 45 }, { 1, 11, 17 } + }, { // Band 4 + { 
30, 175, 188 }, { 9, 124, 169 }, { 1, 74, 116 }, + { 1, 48, 78 }, { 1, 30, 49 }, { 1, 11, 18 } + }, { // Band 5 + { 10, 222, 223 }, { 2, 150, 194 }, { 1, 83, 128 }, + { 1, 48, 79 }, { 1, 27, 45 }, { 1, 11, 17 } + } + }, { // Inter + { // Band 0 + { 36, 41, 235 }, { 29, 36, 193 }, { 10, 27, 111 } + }, { // Band 1 + { 85, 165, 222 }, { 177, 162, 215 }, { 110, 135, 195 }, + { 57, 113, 168 }, { 23, 83, 120 }, { 10, 49, 61 } + }, { // Band 2 + { 85, 190, 223 }, { 36, 139, 200 }, { 5, 90, 146 }, + { 1, 60, 103 }, { 1, 38, 65 }, { 1, 18, 30 } + }, { // Band 3 + { 72, 202, 223 }, { 23, 141, 199 }, { 2, 86, 140 }, + { 1, 56, 97 }, { 1, 36, 61 }, { 1, 16, 27 } + }, { // Band 4 + { 55, 218, 225 }, { 13, 145, 200 }, { 1, 86, 141 }, + { 1, 57, 99 }, { 1, 35, 61 }, { 1, 13, 22 } + }, { // Band 5 + { 15, 235, 212 }, { 1, 132, 184 }, { 1, 84, 139 }, + { 1, 57, 97 }, { 1, 34, 56 }, { 1, 14, 23 } + } + } + }, { // UV plane + { // Intra + { // Band 0 + { 181, 21, 201 }, { 61, 37, 123 }, { 10, 38, 71 } + }, { // Band 1 + { 47, 106, 172 }, { 95, 104, 173 }, { 42, 93, 159 }, + { 18, 77, 131 }, { 4, 50, 81 }, { 1, 17, 23 } + }, { // Band 2 + { 62, 147, 199 }, { 44, 130, 189 }, { 28, 102, 154 }, + { 18, 75, 115 }, { 2, 44, 65 }, { 1, 12, 19 } + }, { // Band 3 + { 55, 153, 210 }, { 24, 130, 194 }, { 3, 93, 146 }, + { 1, 61, 97 }, { 1, 31, 50 }, { 1, 10, 16 } + }, { // Band 4 + { 49, 186, 223 }, { 17, 148, 204 }, { 1, 96, 142 }, + { 1, 53, 83 }, { 1, 26, 44 }, { 1, 11, 17 } + }, { // Band 5 + { 13, 217, 212 }, { 2, 136, 180 }, { 1, 78, 124 }, + { 1, 50, 83 }, { 1, 29, 49 }, { 1, 14, 23 } + } + }, { // Inter + { // Band 0 + { 197, 13, 247 }, { 82, 17, 222 }, { 25, 17, 162 } + }, { // Band 1 + { 126, 186, 247 }, { 234, 191, 243 }, { 176, 177, 234 }, + { 104, 158, 220 }, { 66, 128, 186 }, { 55, 90, 137 } + }, { // Band 2 + { 111, 197, 242 }, { 46, 158, 219 }, { 9, 104, 171 }, + { 2, 65, 125 }, { 1, 44, 80 }, { 1, 17, 91 } + }, { // Band 3 + { 104, 208, 245 }, { 39, 168, 224 }, { 3, 109, 162 }, + { 1, 79, 124 }, { 1, 50, 102 }, { 1, 43, 102 } + }, { // Band 4 + { 84, 220, 246 }, { 31, 177, 231 }, { 2, 115, 180 }, + { 1, 79, 134 }, { 1, 55, 77 }, { 1, 60, 79 } + }, { // Band 5 + { 43, 243, 240 }, { 8, 180, 217 }, { 1, 115, 166 }, + { 1, 84, 121 }, { 1, 51, 67 }, { 1, 16, 6 } + } + } } +}; - p[0] = p[1] = 0; +static void extend_to_full_distribution(vp9_prob *probs, vp9_prob p) { + vpx_memcpy(probs, vp9_pareto8_full[p = 0 ? 
0 : p - 1], + MODEL_NODES * sizeof(vp9_prob)); } -static void init_bit_trees() { - init_bit_tree(cat1, 1); - init_bit_tree(cat2, 2); - init_bit_tree(cat3, 3); - init_bit_tree(cat4, 4); - init_bit_tree(cat5, 5); - init_bit_tree(cat6, 14); +void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full) { + if (full != model) + vpx_memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES); + extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]); } -const vp9_extra_bit vp9_extra_bits[MAX_ENTROPY_TOKENS] = { - { 0, 0, 0, 0}, - { 0, 0, 0, 1}, - { 0, 0, 0, 2}, - { 0, 0, 0, 3}, - { 0, 0, 0, 4}, - { cat1, Pcat1, 1, 5}, - { cat2, Pcat2, 2, 7}, - { cat3, Pcat3, 3, 11}, - { cat4, Pcat4, 4, 19}, - { cat5, Pcat5, 5, 35}, - { cat6, Pcat6, 14, 67}, - { 0, 0, 0, 0} -}; - -#include "vp9/common/vp9_default_coef_probs.h" - void vp9_default_coef_probs(VP9_COMMON *cm) { vp9_copy(cm->fc.coef_probs[TX_4X4], default_coef_probs_4x4); vp9_copy(cm->fc.coef_probs[TX_8X8], default_coef_probs_8x8); @@ -361,13 +726,6 @@ void vp9_default_coef_probs(VP9_COMMON *cm) { vp9_copy(cm->fc.coef_probs[TX_32X32], default_coef_probs_32x32); } -void vp9_coef_tree_initialize() { - init_bit_trees(); - vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree); -} - -// #define COEF_COUNT_TESTING - #define COEF_COUNT_SAT 24 #define COEF_MAX_UPDATE_FACTOR 112 #define COEF_COUNT_SAT_KEY 24 @@ -379,29 +737,30 @@ static void adapt_coef_probs(VP9_COMMON *cm, TX_SIZE tx_size, unsigned int count_sat, unsigned int update_factor) { const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx]; - - vp9_coeff_probs_model *dst_coef_probs = cm->fc.coef_probs[tx_size]; - const vp9_coeff_probs_model *pre_coef_probs = pre_fc->coef_probs[tx_size]; - vp9_coeff_count_model *coef_counts = cm->counts.coef[tx_size]; - unsigned int (*eob_branch_count)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] = + vp9_coeff_probs_model *const probs = cm->fc.coef_probs[tx_size]; + const vp9_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size]; + vp9_coeff_count_model *counts = cm->counts.coef[tx_size]; + unsigned int (*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = cm->counts.eob_branch[tx_size]; int i, j, k, l, m; - unsigned int branch_ct[UNCONSTRAINED_NODES][2]; - for (i = 0; i < BLOCK_TYPES; ++i) + for (i = 0; i < PLANE_TYPES; ++i) for (j = 0; j < REF_TYPES; ++j) for (k = 0; k < COEF_BANDS; ++k) - for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { - if (l >= 3 && k == 0) - continue; - vp9_tree_probs_from_distribution(vp9_coefmodel_tree, branch_ct, - coef_counts[i][j][k][l]); - branch_ct[0][1] = eob_branch_count[i][j][k][l] - branch_ct[0][0]; + for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { + const int n0 = counts[i][j][k][l][ZERO_TOKEN]; + const int n1 = counts[i][j][k][l][ONE_TOKEN]; + const int n2 = counts[i][j][k][l][TWO_TOKEN]; + const int neob = counts[i][j][k][l][EOB_MODEL_TOKEN]; + const unsigned int branch_ct[UNCONSTRAINED_NODES][2] = { + { neob, eob_counts[i][j][k][l] - neob }, + { n0, n1 + n2 }, + { n1, n2 } + }; for (m = 0; m < UNCONSTRAINED_NODES; ++m) - dst_coef_probs[i][j][k][l][m] = merge_probs( - pre_coef_probs[i][j][k][l][m], - branch_ct[m], - count_sat, update_factor); + probs[i][j][k][l][m] = merge_probs(pre_probs[i][j][k][l][m], + branch_ct[m], + count_sat, update_factor); } } diff --git a/libvpx/vp9/common/vp9_entropy.h b/libvpx/vp9/common/vp9_entropy.h index e133d65..f009777 100644 --- a/libvpx/vp9/common/vp9_entropy.h +++ b/libvpx/vp9/common/vp9_entropy.h @@ -16,40 +16,36 @@ #include 
"vp9/common/vp9_blockd.h" #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_scan.h" -#include "vp9/common/vp9_treecoder.h" -#define DIFF_UPDATE_PROB 252 +#ifdef __cplusplus +extern "C" { +#endif -/* Coefficient token alphabet */ +#define DIFF_UPDATE_PROB 252 -#define ZERO_TOKEN 0 /* 0 Extra Bits 0+0 */ -#define ONE_TOKEN 1 /* 1 Extra Bits 0+1 */ -#define TWO_TOKEN 2 /* 2 Extra Bits 0+1 */ -#define THREE_TOKEN 3 /* 3 Extra Bits 0+1 */ -#define FOUR_TOKEN 4 /* 4 Extra Bits 0+1 */ -#define DCT_VAL_CATEGORY1 5 /* 5-6 Extra Bits 1+1 */ -#define DCT_VAL_CATEGORY2 6 /* 7-10 Extra Bits 2+1 */ -#define DCT_VAL_CATEGORY3 7 /* 11-18 Extra Bits 3+1 */ -#define DCT_VAL_CATEGORY4 8 /* 19-34 Extra Bits 4+1 */ -#define DCT_VAL_CATEGORY5 9 /* 35-66 Extra Bits 5+1 */ -#define DCT_VAL_CATEGORY6 10 /* 67+ Extra Bits 14+1 */ -#define DCT_EOB_TOKEN 11 /* EOB Extra Bits 0+0 */ -#define MAX_ENTROPY_TOKENS 12 -#define ENTROPY_NODES 11 -#define EOSB_TOKEN 127 /* Not signalled, encoder only */ +// Coefficient token alphabet +#define ZERO_TOKEN 0 // 0 Extra Bits 0+0 +#define ONE_TOKEN 1 // 1 Extra Bits 0+1 +#define TWO_TOKEN 2 // 2 Extra Bits 0+1 +#define THREE_TOKEN 3 // 3 Extra Bits 0+1 +#define FOUR_TOKEN 4 // 4 Extra Bits 0+1 +#define CATEGORY1_TOKEN 5 // 5-6 Extra Bits 1+1 +#define CATEGORY2_TOKEN 6 // 7-10 Extra Bits 2+1 +#define CATEGORY3_TOKEN 7 // 11-18 Extra Bits 3+1 +#define CATEGORY4_TOKEN 8 // 19-34 Extra Bits 4+1 +#define CATEGORY5_TOKEN 9 // 35-66 Extra Bits 5+1 +#define CATEGORY6_TOKEN 10 // 67+ Extra Bits 14+1 +#define EOB_TOKEN 11 // EOB Extra Bits 0+0 -#define INTER_MODE_CONTEXTS 7 +#define ENTROPY_TOKENS 12 -extern DECLARE_ALIGNED(16, const uint8_t, - vp9_pt_energy_class[MAX_ENTROPY_TOKENS]); +#define ENTROPY_NODES 11 -extern const vp9_tree_index vp9_coef_tree[TREE_SIZE(MAX_ENTROPY_TOKENS)]; +DECLARE_ALIGNED(16, extern const uint8_t, vp9_pt_energy_class[ENTROPY_TOKENS]); -#define DCT_EOB_MODEL_TOKEN 3 /* EOB Extra Bits 0+0 */ +#define EOB_MODEL_TOKEN 3 extern const vp9_tree_index vp9_coefmodel_tree[]; -extern struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS]; - typedef struct { const vp9_tree_index *tree; const vp9_prob *prob; @@ -58,15 +54,12 @@ typedef struct { } vp9_extra_bit; // indexed by token value -extern const vp9_extra_bit vp9_extra_bits[MAX_ENTROPY_TOKENS]; +extern const vp9_extra_bit vp9_extra_bits[ENTROPY_TOKENS]; -#define MAX_PROB 255 #define DCT_MAX_VALUE 16384 /* Coefficients are predicted via a 3-dimensional probability table. */ -/* Outside dimension. 0 = Y with DC, 1 = UV */ -#define BLOCK_TYPES 2 #define REF_TYPES 2 // intra=0, inter=1 /* Middle dimension reflects the coefficient position within the transform. */ @@ -88,13 +81,14 @@ extern const vp9_extra_bit vp9_extra_bits[MAX_ENTROPY_TOKENS]; coefficient band (and since zigzag positions 0, 1, and 2 are in distinct bands). */ -#define PREV_COEF_CONTEXTS 6 +#define COEFF_CONTEXTS 6 +#define BAND_COEFF_CONTEXTS(band) ((band) == 0 ? 
3 : COEFF_CONTEXTS) // #define ENTROPY_STATS -typedef unsigned int vp9_coeff_count[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] - [MAX_ENTROPY_TOKENS]; -typedef unsigned int vp9_coeff_stats[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] +typedef unsigned int vp9_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] + [ENTROPY_TOKENS]; +typedef unsigned int vp9_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] [ENTROPY_NODES][2]; #define SUBEXP_PARAM 4 /* Subexponential code parameter */ @@ -102,8 +96,6 @@ typedef unsigned int vp9_coeff_stats[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] struct VP9Common; void vp9_default_coef_probs(struct VP9Common *cm); - -void vp9_coef_tree_initialize(); void vp9_adapt_coef_probs(struct VP9Common *cm); static INLINE void reset_skip_context(MACROBLOCKD *xd, BLOCK_SIZE bsize) { @@ -123,10 +115,10 @@ static INLINE void reset_skip_context(MACROBLOCKD *xd, BLOCK_SIZE bsize) { // This macro is currently unused but may be used by certain implementations #define MAXBAND_INDEX 21 -extern const uint8_t vp9_coefband_trans_8x8plus[1024]; -extern const uint8_t vp9_coefband_trans_4x4[16]; +DECLARE_ALIGNED(16, extern const uint8_t, vp9_coefband_trans_8x8plus[1024]); +DECLARE_ALIGNED(16, extern const uint8_t, vp9_coefband_trans_4x4[16]); -static const uint8_t *get_band_translate(TX_SIZE tx_size) { +static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) { return tx_size == TX_4X4 ? vp9_coefband_trans_4x4 : vp9_coefband_trans_8x8plus; } @@ -135,24 +127,26 @@ static const uint8_t *get_band_translate(TX_SIZE tx_size) { // 1, 3, 5, 7, ..., 253, 255 // In between probabilities are interpolated linearly -#define COEFPROB_MODELS 128 +#define COEFF_PROB_MODELS 256 #define UNCONSTRAINED_NODES 3 #define PIVOT_NODE 2 // which node is pivot +#define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES) +extern const vp9_prob vp9_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES]; + typedef vp9_prob vp9_coeff_probs_model[REF_TYPES][COEF_BANDS] - [PREV_COEF_CONTEXTS] - [UNCONSTRAINED_NODES]; + [COEFF_CONTEXTS][UNCONSTRAINED_NODES]; typedef unsigned int vp9_coeff_count_model[REF_TYPES][COEF_BANDS] - [PREV_COEF_CONTEXTS] + [COEFF_CONTEXTS] [UNCONSTRAINED_NODES + 1]; void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full); -static int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a, - const ENTROPY_CONTEXT *l) { +static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a, + const ENTROPY_CONTEXT *l) { ENTROPY_CONTEXT above_ec = 0, left_ec = 0; switch (tx_size) { @@ -173,32 +167,26 @@ static int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a, left_ec = !!*(const uint64_t *)l; break; default: - assert(!"Invalid transform size."); + assert(0 && "Invalid transform size."); } return combine_entropy_contexts(above_ec, left_ec); } -static void get_scan(const MACROBLOCKD *xd, TX_SIZE tx_size, - PLANE_TYPE type, int block_idx, - const int16_t **scan, const int16_t **scan_nb) { - switch (tx_size) { - case TX_4X4: - get_scan_nb_4x4(get_tx_type_4x4(type, xd, block_idx), scan, scan_nb); - break; - case TX_8X8: - get_scan_nb_8x8(get_tx_type_8x8(type, xd), scan, scan_nb); - break; - case TX_16X16: - get_scan_nb_16x16(get_tx_type_16x16(type, xd), scan, scan_nb); - break; - case TX_32X32: - *scan = vp9_default_scan_32x32; - *scan_nb = vp9_default_scan_32x32_neighbors; - break; - default: - assert(!"Invalid transform size."); +static const INLINE scan_order *get_scan(const MACROBLOCKD *xd, TX_SIZE tx_size, + PLANE_TYPE type, int block_idx) { + const 
MODE_INFO *const mi = xd->mi_8x8[0]; + + if (is_inter_block(&mi->mbmi) || type != PLANE_TYPE_Y || xd->lossless) { + return &vp9_default_scan_orders[tx_size]; + } else { + const MB_PREDICTION_MODE mode = get_y_mode(mi, block_idx); + return &vp9_scan_orders[tx_size][mode2txfm_map[mode]]; } } +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_ENTROPY_H_ diff --git a/libvpx/vp9/common/vp9_entropymode.c b/libvpx/vp9/common/vp9_entropymode.c index 3b2510d..f2c81bc 100644 --- a/libvpx/vp9/common/vp9_entropymode.c +++ b/libvpx/vp9/common/vp9_entropymode.c @@ -10,7 +10,6 @@ #include "vpx_mem/vpx_mem.h" -#include "vp9/common/vp9_alloccommon.h" #include "vp9/common/vp9_onyxc_int.h" #include "vp9/common/vp9_seg_common.h" @@ -232,21 +231,18 @@ const vp9_tree_index vp9_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = { -D63_PRED, 16, /* 7 = D63_NODE */ -D153_PRED, -D207_PRED /* 8 = D153_NODE */ }; -struct vp9_token vp9_intra_mode_encodings[INTRA_MODES]; const vp9_tree_index vp9_inter_mode_tree[TREE_SIZE(INTER_MODES)] = { -INTER_OFFSET(ZEROMV), 2, -INTER_OFFSET(NEARESTMV), 4, -INTER_OFFSET(NEARMV), -INTER_OFFSET(NEWMV) }; -struct vp9_token vp9_inter_mode_encodings[INTER_MODES]; const vp9_tree_index vp9_partition_tree[TREE_SIZE(PARTITION_TYPES)] = { -PARTITION_NONE, 2, -PARTITION_HORZ, 4, -PARTITION_VERT, -PARTITION_SPLIT }; -struct vp9_token vp9_partition_encodings[PARTITION_TYPES]; static const vp9_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = { 9, 102, 187, 225 @@ -306,7 +302,7 @@ void tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p, ct_8x8p[0][1] = tx_count_8x8p[TX_8X8]; } -static const vp9_prob default_mbskip_probs[MBSKIP_CONTEXTS] = { +static const vp9_prob default_skip_probs[SKIP_CONTEXTS] = { 192, 128, 64 }; @@ -318,17 +314,18 @@ static const vp9_prob default_switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS] { 149, 144, }, }; -void vp9_init_mbmode_probs(VP9_COMMON *cm) { - vp9_copy(cm->fc.uv_mode_prob, default_if_uv_probs); - vp9_copy(cm->fc.y_mode_prob, default_if_y_probs); - vp9_copy(cm->fc.switchable_interp_prob, default_switchable_interp_prob); - vp9_copy(cm->fc.partition_prob, default_partition_probs); - vp9_copy(cm->fc.intra_inter_prob, default_intra_inter_p); - vp9_copy(cm->fc.comp_inter_prob, default_comp_inter_p); - vp9_copy(cm->fc.comp_ref_prob, default_comp_ref_p); - vp9_copy(cm->fc.single_ref_prob, default_single_ref_p); - cm->fc.tx_probs = default_tx_probs; - vp9_copy(cm->fc.mbskip_probs, default_mbskip_probs); +void vp9_init_mode_probs(FRAME_CONTEXT *fc) { + vp9_copy(fc->uv_mode_prob, default_if_uv_probs); + vp9_copy(fc->y_mode_prob, default_if_y_probs); + vp9_copy(fc->switchable_interp_prob, default_switchable_interp_prob); + vp9_copy(fc->partition_prob, default_partition_probs); + vp9_copy(fc->intra_inter_prob, default_intra_inter_p); + vp9_copy(fc->comp_inter_prob, default_comp_inter_p); + vp9_copy(fc->comp_ref_prob, default_comp_ref_p); + vp9_copy(fc->single_ref_prob, default_single_ref_p); + fc->tx_probs = default_tx_probs; + vp9_copy(fc->skip_probs, default_skip_probs); + vp9_copy(fc->inter_mode_probs, default_inter_mode_probs); } const vp9_tree_index vp9_switchable_interp_tree @@ -336,15 +333,6 @@ const vp9_tree_index vp9_switchable_interp_tree -EIGHTTAP, 2, -EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP }; -struct vp9_token vp9_switchable_interp_encodings[SWITCHABLE_FILTERS]; - -void vp9_entropy_mode_init() { - vp9_tokens_from_tree(vp9_intra_mode_encodings, vp9_intra_mode_tree); - vp9_tokens_from_tree(vp9_switchable_interp_encodings, - 
vp9_switchable_interp_tree); - vp9_tokens_from_tree(vp9_partition_encodings, vp9_partition_tree); - vp9_tokens_from_tree(vp9_inter_mode_encodings, vp9_inter_mode_tree); -} #define COUNT_SAT 20 #define MAX_UPDATE_FACTOR 128 @@ -356,7 +344,7 @@ static int adapt_prob(vp9_prob pre_prob, const unsigned int ct[2]) { static void adapt_probs(const vp9_tree_index *tree, const vp9_prob *pre_probs, const unsigned int *counts, vp9_prob *probs) { - tree_merge_probs(tree, pre_probs, counts, COUNT_SAT, MAX_UPDATE_FACTOR, + vp9_tree_merge_probs(tree, pre_probs, counts, COUNT_SAT, MAX_UPDATE_FACTOR, probs); } @@ -396,7 +384,7 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) { adapt_probs(vp9_partition_tree, pre_fc->partition_prob[i], counts->partition[i], fc->partition_prob[i]); - if (cm->mcomp_filter_type == SWITCHABLE) { + if (cm->interp_filter == SWITCHABLE) { for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) adapt_probs(vp9_switchable_interp_tree, pre_fc->switchable_interp_prob[i], counts->switchable_interp[i], fc->switchable_interp_prob[i]); @@ -426,9 +414,8 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) { } } - for (i = 0; i < MBSKIP_CONTEXTS; ++i) - fc->mbskip_probs[i] = adapt_prob(pre_fc->mbskip_probs[i], - counts->mbskip[i]); + for (i = 0; i < SKIP_CONTEXTS; ++i) + fc->skip_probs[i] = adapt_prob(pre_fc->skip_probs[i], counts->skip[i]); } static void set_default_lf_deltas(struct loopfilter *lf) { @@ -464,28 +451,26 @@ void vp9_setup_past_independence(VP9_COMMON *cm) { lf->last_sharpness_level = -1; vp9_default_coef_probs(cm); - vp9_init_mbmode_probs(cm); + vp9_init_mode_probs(&cm->fc); vp9_init_mv_probs(cm); - vp9_copy(cm->fc.inter_mode_probs, default_inter_mode_probs); if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode || cm->reset_frame_context == 3) { // Reset all frame contexts. - for (i = 0; i < NUM_FRAME_CONTEXTS; ++i) + for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = cm->fc; } else if (cm->reset_frame_context == 2) { // Reset only the frame context specified in the frame header. 
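// (For reference: reset_frame_context is a two-bit field from the
// uncompressed frame header. Values 0 and 1 leave all stored frame contexts
// untouched, 2 resets only the context selected by frame_context_idx, and 3,
// like key frames and error-resilient mode above, resets every entry in
// frame_contexts.)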
cm->frame_contexts[cm->frame_context_idx] = cm->fc; } - vpx_memset(cm->prev_mip, 0, - cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO)); + if (frame_is_intra_only(cm)) + vpx_memset(cm->prev_mip, 0, + cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO)); + vpx_memset(cm->mip, 0, cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO)); - vp9_update_mode_info_border(cm, cm->mip); - vp9_update_mode_info_border(cm, cm->prev_mip); - vp9_zero(cm->ref_frame_sign_bias); cm->frame_context_idx = 0; diff --git a/libvpx/vp9/common/vp9_entropymode.h b/libvpx/vp9/common/vp9_entropymode.h index 38b4199..c7b1911 100644 --- a/libvpx/vp9/common/vp9_entropymode.h +++ b/libvpx/vp9/common/vp9_entropymode.h @@ -12,14 +12,17 @@ #define VP9_COMMON_VP9_ENTROPYMODE_H_ #include "vp9/common/vp9_blockd.h" -#include "vp9/common/vp9_treecoder.h" +#include "vp9/common/vp9_entropy.h" +#include "vp9/common/vp9_entropymv.h" + +#ifdef __cplusplus +extern "C" { +#endif #define TX_SIZE_CONTEXTS 2 #define SWITCHABLE_FILTERS 3 // number of switchable filters #define SWITCHABLE_FILTER_CONTEXTS (SWITCHABLE_FILTERS + 1) -// #define MODE_STATS - struct VP9Common; struct tx_probs { @@ -34,31 +37,56 @@ struct tx_counts { unsigned int p8x8[TX_SIZE_CONTEXTS][TX_SIZES - 2]; }; +typedef struct frame_contexts { + vp9_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1]; + vp9_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1]; + vp9_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1]; + vp9_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES]; + vp9_prob switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS] + [SWITCHABLE_FILTERS - 1]; + vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1]; + vp9_prob intra_inter_prob[INTRA_INTER_CONTEXTS]; + vp9_prob comp_inter_prob[COMP_INTER_CONTEXTS]; + vp9_prob single_ref_prob[REF_CONTEXTS][2]; + vp9_prob comp_ref_prob[REF_CONTEXTS]; + struct tx_probs tx_probs; + vp9_prob skip_probs[SKIP_CONTEXTS]; + nmv_context nmvc; +} FRAME_CONTEXT; + +typedef struct { + unsigned int y_mode[BLOCK_SIZE_GROUPS][INTRA_MODES]; + unsigned int uv_mode[INTRA_MODES][INTRA_MODES]; + unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES]; + vp9_coeff_count_model coef[TX_SIZES][PLANE_TYPES]; + unsigned int eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES] + [COEF_BANDS][COEFF_CONTEXTS]; + unsigned int switchable_interp[SWITCHABLE_FILTER_CONTEXTS] + [SWITCHABLE_FILTERS]; + unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES]; + unsigned int intra_inter[INTRA_INTER_CONTEXTS][2]; + unsigned int comp_inter[COMP_INTER_CONTEXTS][2]; + unsigned int single_ref[REF_CONTEXTS][2][2]; + unsigned int comp_ref[REF_CONTEXTS][2]; + struct tx_counts tx; + unsigned int skip[SKIP_CONTEXTS][2]; + nmv_context_counts mv; +} FRAME_COUNTS; + extern const vp9_prob vp9_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1]; extern const vp9_prob vp9_kf_y_mode_prob[INTRA_MODES][INTRA_MODES] [INTRA_MODES - 1]; - extern const vp9_prob vp9_kf_partition_probs[PARTITION_CONTEXTS] [PARTITION_TYPES - 1]; - extern const vp9_tree_index vp9_intra_mode_tree[TREE_SIZE(INTRA_MODES)]; -extern struct vp9_token vp9_intra_mode_encodings[INTRA_MODES]; - extern const vp9_tree_index vp9_inter_mode_tree[TREE_SIZE(INTER_MODES)]; -extern struct vp9_token vp9_inter_mode_encodings[INTER_MODES]; - extern const vp9_tree_index vp9_partition_tree[TREE_SIZE(PARTITION_TYPES)]; -extern struct vp9_token vp9_partition_encodings[PARTITION_TYPES]; - extern const vp9_tree_index vp9_switchable_interp_tree [TREE_SIZE(SWITCHABLE_FILTERS)]; -extern struct 
vp9_token vp9_switchable_interp_encodings[SWITCHABLE_FILTERS]; - -void vp9_entropy_mode_init(); void vp9_setup_past_independence(struct VP9Common *cm); -void vp9_init_mbmode_probs(struct VP9Common *cm); +void vp9_init_mode_probs(FRAME_CONTEXT *fc); void vp9_adapt_mode_probs(struct VP9Common *cm); @@ -69,4 +97,17 @@ void tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p, void tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p, unsigned int (*ct_8x8p)[2]); +static INLINE const vp9_prob *get_y_mode_probs(const MODE_INFO *mi, + const MODE_INFO *above_mi, + const MODE_INFO *left_mi, + int block) { + const MB_PREDICTION_MODE above = vp9_above_block_mode(mi, above_mi, block); + const MB_PREDICTION_MODE left = vp9_left_block_mode(mi, left_mi, block); + return vp9_kf_y_mode_prob[above][left]; +} + +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_ENTROPYMODE_H_ diff --git a/libvpx/vp9/common/vp9_entropymv.c b/libvpx/vp9/common/vp9_entropymv.c index 290dcdd..e1f5ef7 100644 --- a/libvpx/vp9/common/vp9_entropymv.c +++ b/libvpx/vp9/common/vp9_entropymv.c @@ -23,7 +23,6 @@ const vp9_tree_index vp9_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = { -MV_JOINT_HNZVZ, 4, -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ }; -struct vp9_token vp9_mv_joint_encodings[MV_JOINTS]; const vp9_tree_index vp9_mv_class_tree[TREE_SIZE(MV_CLASSES)] = { -MV_CLASS_0, 2, @@ -37,19 +36,16 @@ const vp9_tree_index vp9_mv_class_tree[TREE_SIZE(MV_CLASSES)] = { -MV_CLASS_7, -MV_CLASS_8, -MV_CLASS_9, -MV_CLASS_10, }; -struct vp9_token vp9_mv_class_encodings[MV_CLASSES]; const vp9_tree_index vp9_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = { -0, -1, }; -struct vp9_token vp9_mv_class0_encodings[CLASS0_SIZE]; -const vp9_tree_index vp9_mv_fp_tree[TREE_SIZE(4)] = { +const vp9_tree_index vp9_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2, -1, 4, -2, -3 }; -struct vp9_token vp9_mv_fp_encodings[4]; static const nmv_context default_nmv_context = { {32, 64, 96}, @@ -196,8 +192,8 @@ static vp9_prob adapt_prob(vp9_prob prep, const unsigned int ct[2]) { static void adapt_probs(const vp9_tree_index *tree, const vp9_prob *pre_probs, const unsigned int *counts, vp9_prob *probs) { - tree_merge_probs(tree, pre_probs, counts, MV_COUNT_SAT, MV_MAX_UPDATE_FACTOR, - probs); + vp9_tree_merge_probs(tree, pre_probs, counts, MV_COUNT_SAT, + MV_MAX_UPDATE_FACTOR, probs); } void vp9_adapt_mv_probs(VP9_COMMON *cm, int allow_hp) { @@ -235,13 +231,6 @@ void vp9_adapt_mv_probs(VP9_COMMON *cm, int allow_hp) { } } -void vp9_entropy_mv_init() { - vp9_tokens_from_tree(vp9_mv_joint_encodings, vp9_mv_joint_tree); - vp9_tokens_from_tree(vp9_mv_class_encodings, vp9_mv_class_tree); - vp9_tokens_from_tree(vp9_mv_class0_encodings, vp9_mv_class0_tree); - vp9_tokens_from_tree(vp9_mv_fp_encodings, vp9_mv_fp_tree); -} - void vp9_init_mv_probs(VP9_COMMON *cm) { cm->fc.nmvc = default_nmv_context; } diff --git a/libvpx/vp9/common/vp9_entropymv.h b/libvpx/vp9/common/vp9_entropymv.h index d843f5b..e7033e4 100644 --- a/libvpx/vp9/common/vp9_entropymv.h +++ b/libvpx/vp9/common/vp9_entropymv.h @@ -12,19 +12,21 @@ #ifndef VP9_COMMON_VP9_ENTROPYMV_H_ #define VP9_COMMON_VP9_ENTROPYMV_H_ -#include "vp9/common/vp9_treecoder.h" #include "./vpx_config.h" #include "vp9/common/vp9_blockd.h" +#ifdef __cplusplus +extern "C" { +#endif + struct VP9Common; -void vp9_entropy_mv_init(); void vp9_init_mv_probs(struct VP9Common *cm); void vp9_adapt_mv_probs(struct VP9Common *cm, int usehp); int vp9_use_mv_hp(const MV *ref); -#define NMV_UPDATE_PROB 252 +#define MV_UPDATE_PROB 252 
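// (MV probabilities are carried over between frames and only occasionally
// rewritten in the bitstream: each candidate update is gated by a flag coded
// against MV_UPDATE_PROB, so signalling "no change" is cheap. A sketch of the
// decoder-side handling, assuming a boolean reader r and the usual 7-bit
// probability encoding:
//
//   static void update_mv_prob(vp9_reader *r, vp9_prob *p) {
//     if (vp9_read(r, MV_UPDATE_PROB))
//       *p = (vp9_read_literal(r, 7) << 1) | 1;  // replacement prob, odd
//   }
// )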
/* Symbols for coding which components are zero jointly */ #define MV_JOINTS 4 @@ -62,6 +64,7 @@ typedef enum { #define CLASS0_BITS 1 /* bits at integer precision for class 0 */ #define CLASS0_SIZE (1 << CLASS0_BITS) #define MV_OFFSET_BITS (MV_CLASSES + CLASS0_BITS - 2) +#define MV_FP_SIZE 4 #define MV_MAX_BITS (MV_CLASSES + CLASS0_BITS + 2) #define MV_MAX ((1 << MV_MAX_BITS) - 1) @@ -71,25 +74,18 @@ typedef enum { #define MV_UPP ((1 << MV_IN_USE_BITS) - 1) #define MV_LOW (-(1 << MV_IN_USE_BITS)) -extern const vp9_tree_index vp9_mv_joint_tree[TREE_SIZE(MV_JOINTS)]; -extern struct vp9_token vp9_mv_joint_encodings[MV_JOINTS]; - -extern const vp9_tree_index vp9_mv_class_tree[TREE_SIZE(MV_CLASSES)]; -extern struct vp9_token vp9_mv_class_encodings[MV_CLASSES]; - -extern const vp9_tree_index vp9_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)]; -extern struct vp9_token vp9_mv_class0_encodings[CLASS0_SIZE]; - -extern const vp9_tree_index vp9_mv_fp_tree[TREE_SIZE(4)]; -extern struct vp9_token vp9_mv_fp_encodings[4]; +extern const vp9_tree_index vp9_mv_joint_tree[]; +extern const vp9_tree_index vp9_mv_class_tree[]; +extern const vp9_tree_index vp9_mv_class0_tree[]; +extern const vp9_tree_index vp9_mv_fp_tree[]; typedef struct { vp9_prob sign; vp9_prob classes[MV_CLASSES - 1]; vp9_prob class0[CLASS0_SIZE - 1]; vp9_prob bits[MV_OFFSET_BITS]; - vp9_prob class0_fp[CLASS0_SIZE][4 - 1]; - vp9_prob fp[4 - 1]; + vp9_prob class0_fp[CLASS0_SIZE][MV_FP_SIZE - 1]; + vp9_prob fp[MV_FP_SIZE - 1]; vp9_prob class0_hp; vp9_prob hp; } nmv_component; @@ -116,8 +112,8 @@ typedef struct { unsigned int classes[MV_CLASSES]; unsigned int class0[CLASS0_SIZE]; unsigned int bits[MV_OFFSET_BITS][2]; - unsigned int class0_fp[CLASS0_SIZE][4]; - unsigned int fp[4]; + unsigned int class0_fp[CLASS0_SIZE][MV_FP_SIZE]; + unsigned int fp[MV_FP_SIZE]; unsigned int class0_hp[2]; unsigned int hp[2]; } nmv_component_counts; @@ -129,4 +125,8 @@ typedef struct { void vp9_inc_mv(const MV *mv, nmv_context_counts *mvctx); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_ENTROPYMV_H_ diff --git a/libvpx/vp9/common/vp9_enums.h b/libvpx/vp9/common/vp9_enums.h index 1651b90..e96e769 100644 --- a/libvpx/vp9/common/vp9_enums.h +++ b/libvpx/vp9/common/vp9_enums.h @@ -13,6 +13,10 @@ #include "./vpx_config.h" +#ifdef __cplusplus +extern "C" { +#endif + #define MI_SIZE_LOG2 3 #define MI_BLOCK_SIZE_LOG2 (6 - MI_SIZE_LOG2) // 64 = 2^6 @@ -52,20 +56,22 @@ typedef enum PARTITION_TYPE { #define PARTITION_PLOFFSET 4 // number of probability models per block size #define PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET) +// block transform size typedef enum { - TX_4X4 = 0, // 4x4 dct transform - TX_8X8 = 1, // 8x8 dct transform - TX_16X16 = 2, // 16x16 dct transform - TX_32X32 = 3, // 32x32 dct transform + TX_4X4 = 0, // 4x4 transform + TX_8X8 = 1, // 8x8 transform + TX_16X16 = 2, // 16x16 transform + TX_32X32 = 3, // 32x32 transform TX_SIZES } TX_SIZE; +// frame transform mode typedef enum { - ONLY_4X4 = 0, - ALLOW_8X8 = 1, - ALLOW_16X16 = 2, - ALLOW_32X32 = 3, - TX_MODE_SELECT = 4, + ONLY_4X4 = 0, // only 4x4 transform used + ALLOW_8X8 = 1, // allow block transform size up to 8x8 + ALLOW_16X16 = 2, // allow block transform size up to 16x16 + ALLOW_32X32 = 3, // allow block transform size up to 32x32 + TX_MODE_SELECT = 4, // transform specified for each block TX_MODES = 5, } TX_MODE; @@ -73,7 +79,8 @@ typedef enum { DCT_DCT = 0, // DCT in both horizontal and vertical ADST_DCT = 1, // ADST in vertical, DCT in horizontal DCT_ADST = 2, // DCT in 
vertical, ADST in horizontal - ADST_ADST = 3 // ADST in both directions + ADST_ADST = 3, // ADST in both directions + TX_TYPES = 4 } TX_TYPE; typedef enum { @@ -87,4 +94,8 @@ typedef enum { SRGB = 7 // RGB } COLOR_SPACE; +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_ENUMS_H_ diff --git a/libvpx/vp9/common/vp9_filter.c b/libvpx/vp9/common/vp9_filter.c index 79ace14..7474a88 100644 --- a/libvpx/vp9/common/vp9_filter.c +++ b/libvpx/vp9/common/vp9_filter.c @@ -10,12 +10,9 @@ #include <assert.h> -#include "vpx_ports/mem.h" - #include "vp9/common/vp9_filter.h" -DECLARE_ALIGNED(256, const subpel_kernel, - vp9_bilinear_filters[SUBPEL_SHIFTS]) = { +const InterpKernel vp9_bilinear_filters[SUBPEL_SHIFTS] = { { 0, 0, 0, 128, 0, 0, 0, 0 }, { 0, 0, 0, 120, 8, 0, 0, 0 }, { 0, 0, 0, 112, 16, 0, 0, 0 }, @@ -35,8 +32,7 @@ DECLARE_ALIGNED(256, const subpel_kernel, }; // Lagrangian interpolation filter -DECLARE_ALIGNED(256, const subpel_kernel, - vp9_sub_pel_filters_8[SUBPEL_SHIFTS]) = { +const InterpKernel vp9_sub_pel_filters_8[SUBPEL_SHIFTS] = { { 0, 0, 0, 128, 0, 0, 0, 0}, { 0, 1, -5, 126, 8, -3, 1, 0}, { -1, 3, -10, 122, 18, -6, 2, 0}, @@ -56,8 +52,7 @@ DECLARE_ALIGNED(256, const subpel_kernel, }; // DCT based filter -DECLARE_ALIGNED(256, const subpel_kernel, - vp9_sub_pel_filters_8s[SUBPEL_SHIFTS]) = { +const InterpKernel vp9_sub_pel_filters_8s[SUBPEL_SHIFTS] = { {0, 0, 0, 128, 0, 0, 0, 0}, {-1, 3, -7, 127, 8, -3, 1, 0}, {-2, 5, -13, 125, 17, -6, 3, -1}, @@ -77,8 +72,7 @@ DECLARE_ALIGNED(256, const subpel_kernel, }; // freqmultiplier = 0.5 -DECLARE_ALIGNED(256, const subpel_kernel, - vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS]) = { +const InterpKernel vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS] = { { 0, 0, 0, 128, 0, 0, 0, 0}, {-3, -1, 32, 64, 38, 1, -3, 0}, {-2, -2, 29, 63, 41, 2, -3, 0}, @@ -98,14 +92,15 @@ DECLARE_ALIGNED(256, const subpel_kernel, }; -static const subpel_kernel* vp9_filter_kernels[4] = { +static const InterpKernel* vp9_filter_kernels[4] = { vp9_sub_pel_filters_8, vp9_sub_pel_filters_8lp, vp9_sub_pel_filters_8s, vp9_bilinear_filters }; -const subpel_kernel *vp9_get_filter_kernel(INTERPOLATION_TYPE type) { - return vp9_filter_kernels[type]; +const InterpKernel *vp9_get_interp_kernel(INTERP_FILTER filter) { + assert(filter != SWITCHABLE); + return vp9_filter_kernels[filter]; } diff --git a/libvpx/vp9/common/vp9_filter.h b/libvpx/vp9/common/vp9_filter.h index b1e7e64..29d3867 100644 --- a/libvpx/vp9/common/vp9_filter.h +++ b/libvpx/vp9/common/vp9_filter.h @@ -13,6 +13,12 @@ #include "./vpx_config.h" #include "vpx/vpx_integer.h" +#include "vpx_ports/mem.h" + + +#ifdef __cplusplus +extern "C" { +#endif #define FILTER_BITS 7 @@ -27,25 +33,28 @@ typedef enum { EIGHTTAP_SHARP = 2, BILINEAR = 3, SWITCHABLE = 4 /* should be the last one */ -} INTERPOLATION_TYPE; +} INTERP_FILTER; -typedef int16_t subpel_kernel[SUBPEL_TAPS]; +typedef int16_t InterpKernel[SUBPEL_TAPS]; -struct subpix_fn_table { - const subpel_kernel *filter_x; - const subpel_kernel *filter_y; -}; +const InterpKernel *vp9_get_interp_kernel(INTERP_FILTER filter); -const subpel_kernel *vp9_get_filter_kernel(INTERPOLATION_TYPE type); - -extern const subpel_kernel vp9_bilinear_filters[SUBPEL_SHIFTS]; -extern const subpel_kernel vp9_sub_pel_filters_8[SUBPEL_SHIFTS]; -extern const subpel_kernel vp9_sub_pel_filters_8s[SUBPEL_SHIFTS]; -extern const subpel_kernel vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS]; +DECLARE_ALIGNED(256, extern const InterpKernel, + vp9_bilinear_filters[SUBPEL_SHIFTS]); +DECLARE_ALIGNED(256, extern const 
InterpKernel, + vp9_sub_pel_filters_8[SUBPEL_SHIFTS]); +DECLARE_ALIGNED(256, extern const InterpKernel, + vp9_sub_pel_filters_8s[SUBPEL_SHIFTS]); +DECLARE_ALIGNED(256, extern const InterpKernel, + vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS]); // The VP9_BILINEAR_FILTERS_2TAP macro returns a pointer to the bilinear // filter kernel as a 2 tap filter. #define BILINEAR_FILTERS_2TAP(x) \ (vp9_bilinear_filters[(x)] + SUBPEL_TAPS/2 - 1) +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_FILTER_H_ diff --git a/libvpx/vp9/common/vp9_findnearmv.c b/libvpx/vp9/common/vp9_findnearmv.c deleted file mode 100644 index b91c501..0000000 --- a/libvpx/vp9/common/vp9_findnearmv.c +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vp9/common/vp9_findnearmv.h" -#include "vp9/common/vp9_mvref_common.h" - -static void lower_mv_precision(MV *mv, int allow_hp) { - const int use_hp = allow_hp && vp9_use_mv_hp(mv); - if (!use_hp) { - if (mv->row & 1) - mv->row += (mv->row > 0 ? -1 : 1); - if (mv->col & 1) - mv->col += (mv->col > 0 ? -1 : 1); - } -} - - -void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, - int_mv *mvlist, int_mv *nearest, int_mv *near) { - int i; - // Make sure all the candidates are properly clamped etc - for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) { - lower_mv_precision(&mvlist[i].as_mv, allow_hp); - clamp_mv2(&mvlist[i].as_mv, xd); - } - *nearest = mvlist[0]; - *near = mvlist[1]; -} - -void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, - const TileInfo *const tile, - int_mv *dst_nearest, - int_mv *dst_near, - int block_idx, int ref_idx, - int mi_row, int mi_col) { - int_mv dst_list[MAX_MV_REF_CANDIDATES]; - int_mv mv_list[MAX_MV_REF_CANDIDATES]; - MODE_INFO *const mi = xd->mi_8x8[0]; - - assert(ref_idx == 0 || ref_idx == 1); - assert(MAX_MV_REF_CANDIDATES == 2); // makes code here slightly easier - - vp9_find_mv_refs_idx(cm, xd, tile, mi, xd->last_mi, - mi->mbmi.ref_frame[ref_idx], - mv_list, block_idx, mi_row, mi_col); - - dst_list[1].as_int = 0; - if (block_idx == 0) { - vpx_memcpy(dst_list, mv_list, MAX_MV_REF_CANDIDATES * sizeof(int_mv)); - } else if (block_idx == 1 || block_idx == 2) { - int dst = 0, n; - b_mode_info *bmi = mi->bmi; - - dst_list[dst++].as_int = bmi[0].as_mv[ref_idx].as_int; - for (n = 0; dst < MAX_MV_REF_CANDIDATES && - n < MAX_MV_REF_CANDIDATES; n++) - if (mv_list[n].as_int != dst_list[0].as_int) - dst_list[dst++].as_int = mv_list[n].as_int; - } else { - int dst = 0, n; - b_mode_info *bmi = mi->bmi; - - assert(block_idx == 3); - dst_list[dst++].as_int = bmi[2].as_mv[ref_idx].as_int; - if (dst_list[0].as_int != bmi[1].as_mv[ref_idx].as_int) - dst_list[dst++].as_int = bmi[1].as_mv[ref_idx].as_int; - if (dst < MAX_MV_REF_CANDIDATES && - dst_list[0].as_int != bmi[0].as_mv[ref_idx].as_int) - dst_list[dst++].as_int = bmi[0].as_mv[ref_idx].as_int; - for (n = 0; dst < MAX_MV_REF_CANDIDATES && - n < MAX_MV_REF_CANDIDATES; n++) - if (mv_list[n].as_int != dst_list[0].as_int) - dst_list[dst++].as_int = mv_list[n].as_int; - } - - dst_nearest->as_int = dst_list[0].as_int; - dst_near->as_int = dst_list[1].as_int; -} diff --git 
a/libvpx/vp9/common/vp9_findnearmv.h b/libvpx/vp9/common/vp9_findnearmv.h deleted file mode 100644 index 2362caa..0000000 --- a/libvpx/vp9/common/vp9_findnearmv.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP9_COMMON_VP9_FINDNEARMV_H_ -#define VP9_COMMON_VP9_FINDNEARMV_H_ - -#include "vp9/common/vp9_mv.h" -#include "vp9/common/vp9_blockd.h" -#include "vp9/common/vp9_treecoder.h" -#include "vp9/common/vp9_onyxc_int.h" - -#define LEFT_TOP_MARGIN ((VP9BORDERINPIXELS - VP9_INTERP_EXTEND) << 3) -#define RIGHT_BOTTOM_MARGIN ((VP9BORDERINPIXELS - VP9_INTERP_EXTEND) << 3) - -// check a list of motion vectors by sad score using a number rows of pixels -// above and a number cols of pixels in the left to select the one with best -// score to use as ref motion vector -void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, - int_mv *mvlist, int_mv *nearest, int_mv *near); - -// TODO(jingning): this mv clamping function should be block size dependent. -static void clamp_mv2(MV *mv, const MACROBLOCKD *xd) { - clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN, - xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN, - xd->mb_to_top_edge - LEFT_TOP_MARGIN, - xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN); -} - -void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, - const TileInfo *const tile, - int_mv *dst_nearest, - int_mv *dst_near, - int block_idx, int ref_idx, - int mi_row, int mi_col); - -static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mi, - const MODE_INFO *left_mi, int b) { - if (b == 0 || b == 2) { - if (!left_mi || is_inter_block(&left_mi->mbmi)) - return DC_PRED; - - return left_mi->mbmi.sb_type < BLOCK_8X8 ? left_mi->bmi[b + 1].as_mode - : left_mi->mbmi.mode; - } else { - assert(b == 1 || b == 3); - return cur_mi->bmi[b - 1].as_mode; - } -} - -static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mi, - const MODE_INFO *above_mi, int b) { - if (b == 0 || b == 1) { - if (!above_mi || is_inter_block(&above_mi->mbmi)) - return DC_PRED; - - return above_mi->mbmi.sb_type < BLOCK_8X8 ? above_mi->bmi[b + 2].as_mode - : above_mi->mbmi.mode; - } else { - assert(b == 2 || b == 3); - return cur_mi->bmi[b - 2].as_mode; - } -} - -#endif // VP9_COMMON_VP9_FINDNEARMV_H_ diff --git a/libvpx/vp9/common/vp9_frame_buffers.c b/libvpx/vp9/common/vp9_frame_buffers.c new file mode 100644 index 0000000..dffeb8a --- /dev/null +++ b/libvpx/vp9/common/vp9_frame_buffers.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include <assert.h> + +#include "vp9/common/vp9_frame_buffers.h" +#include "vpx_mem/vpx_mem.h" + +int vp9_alloc_internal_frame_buffers(InternalFrameBufferList *list) { + assert(list != NULL); + vp9_free_internal_frame_buffers(list); + + list->num_internal_frame_buffers = + VP9_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS; + list->int_fb = vpx_calloc(list->num_internal_frame_buffers, + sizeof(*list->int_fb)); + return (list->int_fb == NULL); +} + +void vp9_free_internal_frame_buffers(InternalFrameBufferList *list) { + int i; + + assert(list != NULL); + + for (i = 0; i < list->num_internal_frame_buffers; ++i) { + vpx_free(list->int_fb[i].data); + list->int_fb[i].data = NULL; + } + vpx_free(list->int_fb); + list->int_fb = NULL; +} + +int vp9_get_frame_buffer(void *cb_priv, size_t min_size, + vpx_codec_frame_buffer_t *fb) { + int i; + InternalFrameBufferList *const int_fb_list = + (InternalFrameBufferList *)cb_priv; + if (int_fb_list == NULL) + return -1; + + // Find a free frame buffer. + for (i = 0; i < int_fb_list->num_internal_frame_buffers; ++i) { + if (!int_fb_list->int_fb[i].in_use) + break; + } + + if (i == int_fb_list->num_internal_frame_buffers) + return -1; + + if (int_fb_list->int_fb[i].size < min_size) { + int_fb_list->int_fb[i].data = + (uint8_t *)vpx_realloc(int_fb_list->int_fb[i].data, min_size); + if (!int_fb_list->int_fb[i].data) + return -1; + + int_fb_list->int_fb[i].size = min_size; + } + + fb->data = int_fb_list->int_fb[i].data; + fb->size = int_fb_list->int_fb[i].size; + int_fb_list->int_fb[i].in_use = 1; + + // Set the frame buffer's private data to point at the internal frame buffer. + fb->priv = &int_fb_list->int_fb[i]; + return 0; +} + +int vp9_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb) { + InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv; + (void)cb_priv; + int_fb->in_use = 0; + return 0; +} diff --git a/libvpx/vp9/common/vp9_frame_buffers.h b/libvpx/vp9/common/vp9_frame_buffers.h new file mode 100644 index 0000000..e2cfe61 --- /dev/null +++ b/libvpx/vp9/common/vp9_frame_buffers.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VP9_COMMON_VP9_FRAME_BUFFERS_H_ +#define VP9_COMMON_VP9_FRAME_BUFFERS_H_ + +#include "vpx/vpx_frame_buffer.h" +#include "vpx/vpx_integer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct InternalFrameBuffer { + uint8_t *data; + size_t size; + int in_use; +} InternalFrameBuffer; + +typedef struct InternalFrameBufferList { + int num_internal_frame_buffers; + InternalFrameBuffer *int_fb; +} InternalFrameBufferList; + +// Initializes |list|. Returns 0 on success. +int vp9_alloc_internal_frame_buffers(InternalFrameBufferList *list); + +// Free any data allocated to the frame buffers. +void vp9_free_internal_frame_buffers(InternalFrameBufferList *list); + +// Callback used by libvpx to request an external frame buffer. |cb_priv| +// Callback private data, which points to an InternalFrameBufferList. +// |min_size| is the minimum size in bytes needed to decode the next frame. +// |fb| pointer to the frame buffer. 
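[Editor's note, inserted between the comment above and the vp9_get_frame_buffer() declaration it documents: the callbacks defined earlier in vp9_frame_buffers.c match the external frame buffer hooks in vpx/vpx_frame_buffer.h, so a decoder can adopt them through vpx_codec_set_frame_buffer_functions(). A minimal sketch, not part of this change; the helper name is ours and error handling is reduced to return codes.]

#include "vpx/vpx_decoder.h"
#include "vpx/vpx_frame_buffer.h"
#include "vp9/common/vp9_frame_buffers.h"

/* Install the internal frame buffer list on a freshly initialized decoder.
 * |list| must outlive the decoder; free it with
 * vp9_free_internal_frame_buffers() after vpx_codec_destroy(). */
static vpx_codec_err_t install_internal_fbs(vpx_codec_ctx_t *decoder,
                                            InternalFrameBufferList *list) {
  if (vp9_alloc_internal_frame_buffers(list))
    return VPX_CODEC_MEM_ERROR;  /* vpx_calloc() failed */
  /* From here on libvpx calls vp9_get_frame_buffer() before decoding each
   * frame and vp9_release_frame_buffer() once the last reference is gone. */
  return vpx_codec_set_frame_buffer_functions(decoder,
                                              vp9_get_frame_buffer,
                                              vp9_release_frame_buffer,
                                              list);
}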
+int vp9_get_frame_buffer(void *cb_priv, size_t min_size, + vpx_codec_frame_buffer_t *fb); + +// Callback used by libvpx when there are no references to the frame buffer. +// |cb_priv| is not used. |fb| pointer to the frame buffer. +int vp9_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_COMMON_VP9_FRAME_BUFFERS_H_ diff --git a/libvpx/vp9/common/vp9_idct.c b/libvpx/vp9/common/vp9_idct.c index ea8683e..20b78bf 100644 --- a/libvpx/vp9/common/vp9_idct.c +++ b/libvpx/vp9/common/vp9_idct.c @@ -96,7 +96,7 @@ void vp9_iwht4x4_1_add_c(const int16_t *in, uint8_t *dest, int dest_stride) { } } -static void idct4_1d(const int16_t *input, int16_t *output) { +static void idct4(const int16_t *input, int16_t *output) { int16_t step[4]; int temp1, temp2; // stage 1 @@ -124,7 +124,7 @@ void vp9_idct4x4_16_add_c(const int16_t *input, uint8_t *dest, int stride) { // Rows for (i = 0; i < 4; ++i) { - idct4_1d(input, outptr); + idct4(input, outptr); input += 4; outptr += 4; } @@ -133,7 +133,7 @@ void vp9_idct4x4_16_add_c(const int16_t *input, uint8_t *dest, int stride) { for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; - idct4_1d(temp_in, temp_out); + idct4(temp_in, temp_out); for (j = 0; j < 4; ++j) dest[j * stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * stride + i]); @@ -156,7 +156,7 @@ void vp9_idct4x4_1_add_c(const int16_t *input, uint8_t *dest, int dest_stride) { } } -static void idct8_1d(const int16_t *input, int16_t *output) { +static void idct8(const int16_t *input, int16_t *output) { int16_t step1[8], step2[8]; int temp1, temp2; // stage 1 @@ -174,7 +174,7 @@ static void idct8_1d(const int16_t *input, int16_t *output) { step1[6] = dct_const_round_shift(temp2); // stage 2 & stage 3 - even half - idct4_1d(step1, step1); + idct4(step1, step1); // stage 2 - odd half step2[4] = step1[4] + step1[5]; @@ -209,7 +209,7 @@ void vp9_idct8x8_64_add_c(const int16_t *input, uint8_t *dest, int stride) { // First transform rows for (i = 0; i < 8; ++i) { - idct8_1d(input, outptr); + idct8(input, outptr); input += 8; outptr += 8; } @@ -218,7 +218,7 @@ void vp9_idct8x8_64_add_c(const int16_t *input, uint8_t *dest, int stride) { for (i = 0; i < 8; ++i) { for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; - idct8_1d(temp_in, temp_out); + idct8(temp_in, temp_out); for (j = 0; j < 8; ++j) dest[j * stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * stride + i]); @@ -238,7 +238,7 @@ void vp9_idct8x8_1_add_c(const int16_t *input, uint8_t *dest, int stride) { } } -static void iadst4_1d(const int16_t *input, int16_t *output) { +static void iadst4(const int16_t *input, int16_t *output) { int s0, s1, s2, s3, s4, s5, s6, s7; int x0 = input[0]; @@ -283,10 +283,10 @@ static void iadst4_1d(const int16_t *input, int16_t *output) { void vp9_iht4x4_16_add_c(const int16_t *input, uint8_t *dest, int stride, int tx_type) { const transform_2d IHT_4[] = { - { idct4_1d, idct4_1d }, // DCT_DCT = 0 - { iadst4_1d, idct4_1d }, // ADST_DCT = 1 - { idct4_1d, iadst4_1d }, // DCT_ADST = 2 - { iadst4_1d, iadst4_1d } // ADST_ADST = 3 + { idct4, idct4 }, // DCT_DCT = 0 + { iadst4, idct4 }, // ADST_DCT = 1 + { idct4, iadst4 }, // DCT_ADST = 2 + { iadst4, iadst4 } // ADST_ADST = 3 }; int i, j; @@ -311,7 +311,7 @@ void vp9_iht4x4_16_add_c(const int16_t *input, uint8_t *dest, int stride, + dest[j * stride + i]); } } -static void iadst8_1d(const int16_t *input, int16_t *output) { +static void iadst8(const 
int16_t *input, int16_t *output) { int s0, s1, s2, s3, s4, s5, s6, s7; int x0 = input[7]; @@ -389,10 +389,10 @@ static void iadst8_1d(const int16_t *input, int16_t *output) { } static const transform_2d IHT_8[] = { - { idct8_1d, idct8_1d }, // DCT_DCT = 0 - { iadst8_1d, idct8_1d }, // ADST_DCT = 1 - { idct8_1d, iadst8_1d }, // DCT_ADST = 2 - { iadst8_1d, iadst8_1d } // ADST_ADST = 3 + { idct8, idct8 }, // DCT_DCT = 0 + { iadst8, idct8 }, // ADST_DCT = 1 + { idct8, iadst8 }, // DCT_ADST = 2 + { iadst8, iadst8 } // ADST_ADST = 3 }; void vp9_iht8x8_64_add_c(const int16_t *input, uint8_t *dest, int stride, @@ -430,7 +430,7 @@ void vp9_idct8x8_10_add_c(const int16_t *input, uint8_t *dest, int stride) { // First transform rows // only first 4 row has non-zero coefs for (i = 0; i < 4; ++i) { - idct8_1d(input, outptr); + idct8(input, outptr); input += 8; outptr += 8; } @@ -439,14 +439,14 @@ void vp9_idct8x8_10_add_c(const int16_t *input, uint8_t *dest, int stride) { for (i = 0; i < 8; ++i) { for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; - idct8_1d(temp_in, temp_out); + idct8(temp_in, temp_out); for (j = 0; j < 8; ++j) dest[j * stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * stride + i]); } } -static void idct16_1d(const int16_t *input, int16_t *output) { +static void idct16(const int16_t *input, int16_t *output) { int16_t step1[16], step2[16]; int temp1, temp2; @@ -619,7 +619,7 @@ void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *dest, int stride) { // First transform rows for (i = 0; i < 16; ++i) { - idct16_1d(input, outptr); + idct16(input, outptr); input += 16; outptr += 16; } @@ -628,14 +628,14 @@ void vp9_idct16x16_256_add_c(const int16_t *input, uint8_t *dest, int stride) { for (i = 0; i < 16; ++i) { for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; - idct16_1d(temp_in, temp_out); + idct16(temp_in, temp_out); for (j = 0; j < 16; ++j) dest[j * stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) + dest[j * stride + i]); } } -static void iadst16_1d(const int16_t *input, int16_t *output) { +static void iadst16(const int16_t *input, int16_t *output) { int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15; int x0 = input[15]; @@ -807,10 +807,10 @@ static void iadst16_1d(const int16_t *input, int16_t *output) { } static const transform_2d IHT_16[] = { - { idct16_1d, idct16_1d }, // DCT_DCT = 0 - { iadst16_1d, idct16_1d }, // ADST_DCT = 1 - { idct16_1d, iadst16_1d }, // DCT_ADST = 2 - { iadst16_1d, iadst16_1d } // ADST_ADST = 3 + { idct16, idct16 }, // DCT_DCT = 0 + { iadst16, idct16 }, // ADST_DCT = 1 + { idct16, iadst16 }, // DCT_ADST = 2 + { iadst16, iadst16 } // ADST_ADST = 3 }; void vp9_iht16x16_256_add_c(const int16_t *input, uint8_t *dest, int stride, @@ -835,7 +835,8 @@ void vp9_iht16x16_256_add_c(const int16_t *input, uint8_t *dest, int stride, ht.cols(temp_in, temp_out); for (j = 0; j < 16; ++j) dest[j * stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) - + dest[j * stride + i]); } + + dest[j * stride + i]); + } } void vp9_idct16x16_10_add_c(const int16_t *input, uint8_t *dest, int stride) { @@ -847,7 +848,7 @@ void vp9_idct16x16_10_add_c(const int16_t *input, uint8_t *dest, int stride) { // First transform rows. Since all non-zero dct coefficients are in // upper-left 4x4 area, we only need to calculate first 4 rows here. 
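// (Editorial note, not part of the patch: the callers in this file route a
// block here only when eob <= 10, i.e. at most the first ten coefficients of
// the default scan are non-zero, and those scan positions all sit in the
// upper-left 4x4 corner. The column pass below still covers all 16 columns,
// because the row pass leaves every transformed row fully populated.)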
for (i = 0; i < 4; ++i) { - idct16_1d(input, outptr); + idct16(input, outptr); input += 16; outptr += 16; } @@ -856,7 +857,7 @@ void vp9_idct16x16_10_add_c(const int16_t *input, uint8_t *dest, int stride) { for (i = 0; i < 16; ++i) { for (j = 0; j < 16; ++j) temp_in[j] = out[j*16 + i]; - idct16_1d(temp_in, temp_out); + idct16(temp_in, temp_out); for (j = 0; j < 16; ++j) dest[j * stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) + dest[j * stride + i]); @@ -876,7 +877,7 @@ void vp9_idct16x16_1_add_c(const int16_t *input, uint8_t *dest, int stride) { } } -static void idct32_1d(const int16_t *input, int16_t *output) { +static void idct32(const int16_t *input, int16_t *output) { int16_t step1[32], step2[32]; int temp1, temp2; @@ -1262,7 +1263,7 @@ void vp9_idct32x32_1024_add_c(const int16_t *input, uint8_t *dest, int stride) { zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1]; if (zero_coeff[0] | zero_coeff[1]) - idct32_1d(input, outptr); + idct32(input, outptr); else vpx_memset(outptr, 0, sizeof(int16_t) * 32); input += 32; @@ -1273,10 +1274,10 @@ void vp9_idct32x32_1024_add_c(const int16_t *input, uint8_t *dest, int stride) { for (i = 0; i < 32; ++i) { for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i]; - idct32_1d(temp_in, temp_out); + idct32(temp_in, temp_out); for (j = 0; j < 32; ++j) dest[j * stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) - + dest[j * stride + i]); + + dest[j * stride + i]); } } @@ -1289,7 +1290,7 @@ void vp9_idct32x32_34_add_c(const int16_t *input, uint8_t *dest, int stride) { // Rows // only upper-left 8x8 has non-zero coeff for (i = 0; i < 8; ++i) { - idct32_1d(input, outptr); + idct32(input, outptr); input += 32; outptr += 32; } @@ -1298,7 +1299,7 @@ void vp9_idct32x32_34_add_c(const int16_t *input, uint8_t *dest, int stride) { for (i = 0; i < 32; ++i) { for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i]; - idct32_1d(temp_in, temp_out); + idct32(temp_in, temp_out); for (j = 0; j < 32; ++j) dest[j * stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) + dest[j * stride + i]); @@ -1344,43 +1345,37 @@ void vp9_idct8x8_add(const int16_t *input, uint8_t *dest, int stride, int eob) { // coefficients. Use eobs to decide what to do. // TODO(yunqingwang): "eobs = 1" case is also handled in vp9_short_idct8x8_c. // Combine that with code here. - if (eob) { - if (eob == 1) - // DC only DCT coefficient - vp9_idct8x8_1_add(input, dest, stride); - else if (eob <= 10) - vp9_idct8x8_10_add(input, dest, stride); - else - vp9_idct8x8_64_add(input, dest, stride); - } + if (eob == 1) + // DC only DCT coefficient + vp9_idct8x8_1_add(input, dest, stride); + else if (eob <= 10) + vp9_idct8x8_10_add(input, dest, stride); + else + vp9_idct8x8_64_add(input, dest, stride); } void vp9_idct16x16_add(const int16_t *input, uint8_t *dest, int stride, int eob) { /* The calculation can be simplified if there are not many non-zero dct * coefficients. Use eobs to separate different cases. */ - if (eob) { - if (eob == 1) - /* DC only DCT coefficient. */ - vp9_idct16x16_1_add(input, dest, stride); - else if (eob <= 10) - vp9_idct16x16_10_add(input, dest, stride); - else - vp9_idct16x16_256_add(input, dest, stride); - } + if (eob == 1) + /* DC only DCT coefficient. 
*/ + vp9_idct16x16_1_add(input, dest, stride); + else if (eob <= 10) + vp9_idct16x16_10_add(input, dest, stride); + else + vp9_idct16x16_256_add(input, dest, stride); } void vp9_idct32x32_add(const int16_t *input, uint8_t *dest, int stride, int eob) { - if (eob) { - if (eob == 1) - vp9_idct32x32_1_add(input, dest, stride); - else if (eob <= 34) - // non-zero coeff only in upper-left 8x8 - vp9_idct32x32_34_add(input, dest, stride); - else - vp9_idct32x32_1024_add(input, dest, stride); - } + if (eob == 1) + vp9_idct32x32_1_add(input, dest, stride); + else if (eob <= 34) + // non-zero coeff only in upper-left 8x8 + vp9_idct32x32_34_add(input, dest, stride); + else + vp9_idct32x32_1024_add(input, dest, stride); } // iht @@ -1397,9 +1392,7 @@ void vp9_iht8x8_add(TX_TYPE tx_type, const int16_t *input, uint8_t *dest, if (tx_type == DCT_DCT) { vp9_idct8x8_add(input, dest, stride, eob); } else { - if (eob > 0) { - vp9_iht8x8_64_add(input, dest, stride, tx_type); - } + vp9_iht8x8_64_add(input, dest, stride, tx_type); } } @@ -1408,8 +1401,6 @@ void vp9_iht16x16_add(TX_TYPE tx_type, const int16_t *input, uint8_t *dest, if (tx_type == DCT_DCT) { vp9_idct16x16_add(input, dest, stride, eob); } else { - if (eob > 0) { - vp9_iht16x16_256_add(input, dest, stride, tx_type); - } + vp9_iht16x16_256_add(input, dest, stride, tx_type); } } diff --git a/libvpx/vp9/common/vp9_idct.h b/libvpx/vp9/common/vp9_idct.h index 2b3f35f..ceca795 100644 --- a/libvpx/vp9/common/vp9_idct.h +++ b/libvpx/vp9/common/vp9_idct.h @@ -18,6 +18,10 @@ #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_enums.h" +#ifdef __cplusplus +extern "C" { +#endif + // Constants and Macros used by all idct/dct functions #define DCT_CONST_BITS 14 @@ -77,8 +81,7 @@ static const int sinpi_4_9 = 15212; static INLINE int dct_const_round_shift(int input) { int rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS); - assert(INT16_MIN <= rv && rv <= INT16_MAX); - return rv; + return (int16_t)rv; } typedef void (*transform_1d)(const int16_t*, int16_t*); @@ -104,4 +107,8 @@ void vp9_iht16x16_add(TX_TYPE tx_type, const int16_t *input, uint8_t *dest, int stride, int eob); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_IDCT_H_ diff --git a/libvpx/vp9/common/vp9_loopfilter.c b/libvpx/vp9/common/vp9_loopfilter.c index ff504a1..af8afed 100644 --- a/libvpx/vp9/common/vp9_loopfilter.c +++ b/libvpx/vp9/common/vp9_loopfilter.c @@ -16,26 +16,6 @@ #include "vp9/common/vp9_seg_common.h" -// This structure holds bit masks for all 8x8 blocks in a 64x64 region. -// Each 1 bit represents a position in which we want to apply the loop filter. -// Left_ entries refer to whether we apply a filter on the border to the -// left of the block. Above_ entries refer to whether or not to apply a -// filter on the above border. Int_ entries refer to whether or not to -// apply borders on the 4x4 edges within the 8x8 block that each bit -// represents. -// Since each transform is accompanied by a potentially different type of -// loop filter there is a different entry in the array for each transform size. -typedef struct { - uint64_t left_y[TX_SIZES]; - uint64_t above_y[TX_SIZES]; - uint64_t int_4x4_y; - uint16_t left_uv[TX_SIZES]; - uint16_t above_uv[TX_SIZES]; - uint16_t int_4x4_uv; - uint8_t lfl_y[64]; - uint8_t lfl_uv[16]; -} LOOP_FILTER_MASK; - // 64 bit masks for left transform size. Each 1 represents a position where // we should apply a loop filter across the left border of an 8x8 block // boundary. 
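[Editor's note: a sketch, not part of the diff, of the bit layout behind the LOOP_FILTER_MASK fields being moved to vp9_loopfilter.h. Each Y mask carries one bit per 8x8 block of the 64x64 region, an 8x8 grid packed row-major into a uint64_t; with 4:2:0 subsampling the chroma planes reduce to a 4x4 grid packed into a uint16_t, which is consistent with left_border_uv == 0x1111 (chroma column 0) and above_border_uv == 0x000f (chroma row 0) in this file. The helper names are ours.]

#include <stdint.h>

// Bit for the luma 8x8 block at (row, col) of the 8x8 grid, 0 <= row, col < 8.
static uint64_t y_mask_bit(int row, int col) {
  return (uint64_t)1 << (row * 8 + col);
}

// Bit for the chroma block at (row, col) of the 4x4 grid, 0 <= row, col < 4.
static uint16_t uv_mask_bit(int row, int col) {
  return (uint16_t)(1u << (row * 4 + col));
}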
@@ -221,23 +201,10 @@ static const uint16_t size_mask_uv[BLOCK_SIZES] = { static const uint16_t left_border_uv = 0x1111; static const uint16_t above_border_uv = 0x000f; - -static void lf_init_lut(loop_filter_info_n *lfi) { - lfi->mode_lf_lut[DC_PRED] = 0; - lfi->mode_lf_lut[D45_PRED] = 0; - lfi->mode_lf_lut[D135_PRED] = 0; - lfi->mode_lf_lut[D117_PRED] = 0; - lfi->mode_lf_lut[D153_PRED] = 0; - lfi->mode_lf_lut[D207_PRED] = 0; - lfi->mode_lf_lut[D63_PRED] = 0; - lfi->mode_lf_lut[V_PRED] = 0; - lfi->mode_lf_lut[H_PRED] = 0; - lfi->mode_lf_lut[TM_PRED] = 0; - lfi->mode_lf_lut[ZEROMV] = 0; - lfi->mode_lf_lut[NEARESTMV] = 1; - lfi->mode_lf_lut[NEARMV] = 1; - lfi->mode_lf_lut[NEWMV] = 1; -} +static const int mode_lf_lut[MB_MODE_COUNT] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // INTRA_MODES + 1, 1, 0, 1 // INTER_MODES (ZEROMV == 0) +}; static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) { int lvl; @@ -270,9 +237,6 @@ void vp9_loop_filter_init(VP9_COMMON *cm) { update_sharpness(lfi, lf->sharpness_level); lf->last_sharpness_level = lf->sharpness_level; - // init LUT for lvl and hev thr picking - lf_init_lut(lfi); - // init hev threshold const vectors for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) vpx_memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH); @@ -283,10 +247,10 @@ void vp9_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) { // n_shift is the multiplier for lf_deltas // the multiplier is 1 when filter_lvl is between 0 and 31; // 2 when filter_lvl is between 32 and 63 - const int n_shift = default_filt_lvl >> 5; + const int scale = 1 << (default_filt_lvl >> 5); loop_filter_info_n *const lfi = &cm->lf_info; struct loopfilter *const lf = &cm->lf; - struct segmentation *const seg = &cm->seg; + const struct segmentation *const seg = &cm->seg; // update limits if sharpness has changed if (lf->last_sharpness_level != lf->sharpness_level) { @@ -295,86 +259,130 @@ for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) { - int lvl_seg = default_filt_lvl, ref, mode, intra_lvl; - - // Set the baseline filter values for each segment + int lvl_seg = default_filt_lvl; if (vp9_segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) { const int data = vp9_get_segdata(seg, seg_id, SEG_LVL_ALT_LF); - lvl_seg = seg->abs_delta == SEGMENT_ABSDATA - ? data - : clamp(default_filt_lvl + data, 0, MAX_LOOP_FILTER); + lvl_seg = clamp(seg->abs_delta == SEGMENT_ABSDATA ?
+ data : default_filt_lvl + data, + 0, MAX_LOOP_FILTER); } if (!lf->mode_ref_delta_enabled) { // we could get rid of this if we assume that deltas are set to // zero when not in use; encoder always uses deltas vpx_memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id])); - continue; - } - - intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * (1 << n_shift); - lfi->lvl[seg_id][INTRA_FRAME][0] = clamp(intra_lvl, 0, MAX_LOOP_FILTER); - - for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) - for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) { - const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * (1 << n_shift) - + lf->mode_deltas[mode] * (1 << n_shift); - lfi->lvl[seg_id][ref][mode] = clamp(inter_lvl, 0, MAX_LOOP_FILTER); + } else { + int ref, mode; + const int intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale; + lfi->lvl[seg_id][INTRA_FRAME][0] = clamp(intra_lvl, 0, MAX_LOOP_FILTER); + + for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) { + for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) { + const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale + + lf->mode_deltas[mode] * scale; + lfi->lvl[seg_id][ref][mode] = clamp(inter_lvl, 0, MAX_LOOP_FILTER); + } } + } } } -static uint8_t build_lfi(const loop_filter_info_n *lfi_n, - const MB_MODE_INFO *mbmi) { - const int seg = mbmi->segment_id; - const int ref = mbmi->ref_frame[0]; - const int mode = lfi_n->mode_lf_lut[mbmi->mode]; - const int filter_level = lfi_n->lvl[seg][ref][mode]; - - return filter_level; -} - -static void filter_selectively_vert(uint8_t *s, int pitch, - unsigned int mask_16x16, - unsigned int mask_8x8, - unsigned int mask_4x4, - unsigned int mask_4x4_int, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl) { +static void filter_selectively_vert_row2(PLANE_TYPE plane_type, + uint8_t *s, int pitch, + unsigned int mask_16x16_l, + unsigned int mask_8x8_l, + unsigned int mask_4x4_l, + unsigned int mask_4x4_int_l, + const loop_filter_info_n *lfi_n, + const uint8_t *lfl) { + const int mask_shift = plane_type ? 4 : 8; + const int mask_cutoff = plane_type ? 0xf : 0xff; + const int lfl_forward = plane_type ? 4 : 8; + + unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff; + unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff; + unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff; + unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff; + unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff; + unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff; + unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff; + unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff; unsigned int mask; - for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; - mask; mask >>= 1) { - const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; + for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 | + mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1; + mask; mask >>= 1) { + const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl; + const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward); + // TODO(yunqingwang): count in loopfilter functions should be removed. 
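// (Editorial note, not part of the patch: this routine filters two 8x8 rows
// per pass. A mask row is 8 bits wide for luma (mask_cutoff 0xff, with the
// second row's bits recovered by shifting right by mask_shift == 8) and
// 4 bits wide for chroma (0xf / 4), so the *_0 masks select the upper row
// and the *_1 masks the lower one; lfl_forward is the row stride of the
// per-block filter-level array lfl.)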
if (mask & 1) { - if (mask_16x16 & 1) { - vp9_mb_lpf_vertical_edge_w(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr); - assert(!(mask_8x8 & 1)); - assert(!(mask_4x4 & 1)); - assert(!(mask_4x4_int & 1)); - } else if (mask_8x8 & 1) { - vp9_mbloop_filter_vertical_edge(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - assert(!(mask_16x16 & 1)); - assert(!(mask_4x4 & 1)); - } else if (mask_4x4 & 1) { - vp9_loop_filter_vertical_edge(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - assert(!(mask_16x16 & 1)); - assert(!(mask_8x8 & 1)); + if ((mask_16x16_0 | mask_16x16_1) & 1) { + if ((mask_16x16_0 & mask_16x16_1) & 1) { + vp9_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim, + lfi0->hev_thr); + } else if (mask_16x16_0 & 1) { + vp9_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, + lfi0->hev_thr); + } else { + vp9_lpf_vertical_16(s + 8 *pitch, pitch, lfi1->mblim, + lfi1->lim, lfi1->hev_thr); + } + } + + if ((mask_8x8_0 | mask_8x8_1) & 1) { + if ((mask_8x8_0 & mask_8x8_1) & 1) { + vp9_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim, + lfi0->hev_thr, lfi1->mblim, lfi1->lim, + lfi1->hev_thr); + } else if (mask_8x8_0 & 1) { + vp9_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr, + 1); + } else { + vp9_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim, + lfi1->hev_thr, 1); + } + } + + if ((mask_4x4_0 | mask_4x4_1) & 1) { + if ((mask_4x4_0 & mask_4x4_1) & 1) { + vp9_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim, + lfi0->hev_thr, lfi1->mblim, lfi1->lim, + lfi1->hev_thr); + } else if (mask_4x4_0 & 1) { + vp9_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr, + 1); + } else { + vp9_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim, + lfi1->hev_thr, 1); + } + } + + if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) { + if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) { + vp9_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim, + lfi0->hev_thr, lfi1->mblim, lfi1->lim, + lfi1->hev_thr); + } else if (mask_4x4_int_0 & 1) { + vp9_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim, + lfi0->hev_thr, 1); + } else { + vp9_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim, + lfi1->hev_thr, 1); + } } } - if (mask_4x4_int & 1) - vp9_loop_filter_vertical_edge(s + 4, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + s += 8; lfl += 1; - mask_16x16 >>= 1; - mask_8x8 >>= 1; - mask_4x4 >>= 1; - mask_4x4_int >>= 1; + mask_16x16_0 >>= 1; + mask_8x8_0 >>= 1; + mask_4x4_0 >>= 1; + mask_4x4_int_0 >>= 1; + mask_16x16_1 >>= 1; + mask_8x8_1 >>= 1; + mask_4x4_1 >>= 1; + mask_4x4_int_1 >>= 1; } } @@ -396,95 +404,73 @@ static void filter_selectively_horiz(uint8_t *s, int pitch, if (mask & 1) { if (mask_16x16 & 1) { if ((mask_16x16 & 3) == 3) { - vp9_mb_lpf_horizontal_edge_w(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 2); + vp9_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, 2); count = 2; } else { - vp9_mb_lpf_horizontal_edge_w(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + vp9_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); } - assert(!(mask_8x8 & 1)); - assert(!(mask_4x4 & 1)); - assert(!(mask_4x4_int & 1)); } else if (mask_8x8 & 1) { if ((mask_8x8 & 3) == 3) { // Next block's thresholds const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); - // TODO(yunqingwang): Combine next 2 calls as 1 wide filtering. 
- vp9_mbloop_filter_horizontal_edge(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - vp9_mbloop_filter_horizontal_edge(s + 8, pitch, lfin->mblim, - lfin->lim, lfin->hev_thr, 1); + vp9_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, lfin->mblim, lfin->lim, + lfin->hev_thr); if ((mask_4x4_int & 3) == 3) { - // TODO(yunqingwang): Combine next 2 calls as 1 wide filtering. - vp9_loop_filter_horizontal_edge(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1); - vp9_loop_filter_horizontal_edge(s + 8 + 4 * pitch, pitch, - lfin->mblim, lfin->lim, - lfin->hev_thr, 1); + vp9_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim, + lfi->lim, lfi->hev_thr, lfin->mblim, + lfin->lim, lfin->hev_thr); } else { if (mask_4x4_int & 1) - vp9_loop_filter_horizontal_edge(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1); + vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); else if (mask_4x4_int & 2) - vp9_loop_filter_horizontal_edge(s + 8 + 4 * pitch, pitch, - lfin->mblim, lfin->lim, - lfin->hev_thr, 1); + vp9_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, + lfin->lim, lfin->hev_thr, 1); } count = 2; } else { - vp9_mbloop_filter_horizontal_edge(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + vp9_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); if (mask_4x4_int & 1) - vp9_loop_filter_horizontal_edge(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1); + vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); } - assert(!(mask_16x16 & 1)); - assert(!(mask_4x4 & 1)); } else if (mask_4x4 & 1) { if ((mask_4x4 & 3) == 3) { // Next block's thresholds const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); - // TODO(yunqingwang): Combine next 2 calls as 1 wide filtering. - vp9_loop_filter_horizontal_edge(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - vp9_loop_filter_horizontal_edge(s + 8, pitch, lfin->mblim, lfin->lim, - lfin->hev_thr, 1); - + vp9_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, lfin->mblim, lfin->lim, + lfin->hev_thr); if ((mask_4x4_int & 3) == 3) { - // TODO(yunqingwang): Combine next 2 calls as 1 wide filtering. 
- vp9_loop_filter_horizontal_edge(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1); - vp9_loop_filter_horizontal_edge(s + 8 + 4 * pitch, pitch, - lfin->mblim, lfin->lim, - lfin->hev_thr, 1); + vp9_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim, + lfi->lim, lfi->hev_thr, lfin->mblim, + lfin->lim, lfin->hev_thr); } else { if (mask_4x4_int & 1) - vp9_loop_filter_horizontal_edge(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1); + vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); else if (mask_4x4_int & 2) - vp9_loop_filter_horizontal_edge(s + 8 + 4 * pitch, pitch, - lfin->mblim, lfin->lim, - lfin->hev_thr, 1); + vp9_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, + lfin->lim, lfin->hev_thr, 1); } count = 2; } else { - vp9_loop_filter_horizontal_edge(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + vp9_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); - if (mask_4x4_int & 1) - vp9_loop_filter_horizontal_edge(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1); + if (mask_4x4_int & 1) + vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); } - assert(!(mask_16x16 & 1)); - assert(!(mask_8x8 & 1)); } else if (mask_4x4_int & 1) { - vp9_loop_filter_horizontal_edge(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1); + vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); } } s += 8 * count; @@ -510,11 +496,10 @@ static void build_masks(const loop_filter_info_n *const lfi_n, const BLOCK_SIZE block_size = mi->mbmi.sb_type; const TX_SIZE tx_size_y = mi->mbmi.tx_size; const TX_SIZE tx_size_uv = get_uv_tx_size(&mi->mbmi); - const int skip = mi->mbmi.skip_coeff; + const int skip = mi->mbmi.skip; const int seg = mi->mbmi.segment_id; const int ref = mi->mbmi.ref_frame[0]; - const int mode = lfi_n->mode_lf_lut[mi->mbmi.mode]; - const int filter_level = lfi_n->lvl[seg][ref][mode]; + const int filter_level = lfi_n->lvl[seg][ref][mode_lf_lut[mi->mbmi.mode]]; uint64_t *left_y = &lfm->left_y[tx_size_y]; uint64_t *above_y = &lfm->above_y[tx_size_y]; uint64_t *int_4x4_y = &lfm->int_4x4_y; @@ -592,11 +577,10 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n, LOOP_FILTER_MASK *lfm) { const BLOCK_SIZE block_size = mi->mbmi.sb_type; const TX_SIZE tx_size_y = mi->mbmi.tx_size; - const int skip = mi->mbmi.skip_coeff; + const int skip = mi->mbmi.skip; const int seg = mi->mbmi.segment_id; const int ref = mi->mbmi.ref_frame[0]; - const int mode = lfi_n->mode_lf_lut[mi->mbmi.mode]; - const int filter_level = lfi_n->lvl[seg][ref][mode]; + const int filter_level = lfi_n->lvl[seg][ref][mode_lf_lut[mi->mbmi.mode]]; uint64_t *left_y = &lfm->left_y[tx_size_y]; uint64_t *above_y = &lfm->above_y[tx_size_y]; uint64_t *int_4x4_y = &lfm->int_4x4_y; @@ -634,9 +618,9 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n, // This function sets up the bit masks for the entire 64x64 region represented // by mi_row, mi_col. // TODO(JBB): This function only works for yv12. 
-static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, - MODE_INFO **mi_8x8, const int mode_info_stride, - LOOP_FILTER_MASK *lfm) { +void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, + MODE_INFO **mi_8x8, const int mode_info_stride, + LOOP_FILTER_MASK *lfm) { int idx_32, idx_16, idx_8; const loop_filter_info_n *const lfi_n = &cm->lf_info; MODE_INFO **mip = mi_8x8; @@ -864,9 +848,66 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, lfm->left_uv[i] &= 0xeeee; } } + + // Assert if we try to apply 2 different loop filters at the same position. + assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_8X8])); + assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4])); + assert(!(lfm->left_y[TX_8X8] & lfm->left_y[TX_4X4])); + assert(!(lfm->int_4x4_y & lfm->left_y[TX_16X16])); + assert(!(lfm->left_uv[TX_16X16]&lfm->left_uv[TX_8X8])); + assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4])); + assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4])); + assert(!(lfm->int_4x4_uv & lfm->left_uv[TX_16X16])); + assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8])); + assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4])); + assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4])); + assert(!(lfm->int_4x4_y & lfm->above_y[TX_16X16])); + assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8])); + assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4])); + assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4])); + assert(!(lfm->int_4x4_uv & lfm->above_uv[TX_16X16])); +} + +static uint8_t build_lfi(const loop_filter_info_n *lfi_n, + const MB_MODE_INFO *mbmi) { + const int seg = mbmi->segment_id; + const int ref = mbmi->ref_frame[0]; + return lfi_n->lvl[seg][ref][mode_lf_lut[mbmi->mode]]; +} + +static void filter_selectively_vert(uint8_t *s, int pitch, + unsigned int mask_16x16, + unsigned int mask_8x8, + unsigned int mask_4x4, + unsigned int mask_4x4_int, + const loop_filter_info_n *lfi_n, + const uint8_t *lfl) { + unsigned int mask; + + for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; + mask; mask >>= 1) { + const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; + + if (mask & 1) { + if (mask_16x16 & 1) { + vp9_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr); + } else if (mask_8x8 & 1) { + vp9_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + } else if (mask_4x4 & 1) { + vp9_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + } + } + if (mask_4x4_int & 1) + vp9_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + s += 8; + lfl += 1; + mask_16x16 >>= 1; + mask_8x8 >>= 1; + mask_4x4 >>= 1; + mask_4x4_int >>= 1; + } } -#if CONFIG_NON420 static void filter_block_plane_non420(VP9_COMMON *cm, struct macroblockd_plane *plane, MODE_INFO **mi_8x8, @@ -894,15 +935,15 @@ static void filter_block_plane_non420(VP9_COMMON *cm, // Determine the vertical edges that need filtering for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) { const MODE_INFO *mi = mi_8x8[c]; - const int skip_this = mi[0].mbmi.skip_coeff - && is_inter_block(&mi[0].mbmi); + const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type; + const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi); // left edge of current unit is block/partition edge -> no skip - const int block_edge_left = b_width_log2(mi[0].mbmi.sb_type) ? - !(c & ((1 << (b_width_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1; + const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ? 
+ !(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1; const int skip_this_c = skip_this && !block_edge_left; // top edge of current unit is block/partition edge -> no skip - const int block_edge_above = b_height_log2(mi[0].mbmi.sb_type) ? - !(r & ((1 << (b_height_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1; + const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ? + !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1; const int skip_this_r = skip_this && !block_edge_above; const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV) ? get_uv_tx_size(&mi[0].mbmi) @@ -1004,15 +1045,13 @@ static void filter_block_plane_non420(VP9_COMMON *cm, dst->buf += 8 * dst->stride; } } -#endif -static void filter_block_plane(VP9_COMMON *const cm, - struct macroblockd_plane *const plane, - int mi_row, - LOOP_FILTER_MASK *lfm) { +void vp9_filter_block_plane(VP9_COMMON *const cm, + struct macroblockd_plane *const plane, + int mi_row, + LOOP_FILTER_MASK *lfm) { struct buf_2d *const dst = &plane->dst; uint8_t* const dst0 = dst->buf; - unsigned int mask_4x4_int_row[MI_BLOCK_SIZE] = {0}; int r, c; if (!plane->plane_type) { @@ -1021,23 +1060,27 @@ static void filter_block_plane(VP9_COMMON *const cm, uint64_t mask_4x4 = lfm->left_y[TX_4X4]; uint64_t mask_4x4_int = lfm->int_4x4_y; - // Vertical pass - for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r++) { - mask_4x4_int_row[r] = mask_4x4_int & 0xff; + // Vertical pass: do 2 rows at one time + for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) { + unsigned int mask_16x16_l = mask_16x16 & 0xffff; + unsigned int mask_8x8_l = mask_8x8 & 0xffff; + unsigned int mask_4x4_l = mask_4x4 & 0xffff; + unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff; // Disable filtering on the leftmost column - filter_selectively_vert(dst->buf, dst->stride, - mask_16x16 & 0xff, - mask_8x8 & 0xff, - mask_4x4 & 0xff, - mask_4x4_int_row[r], - &cm->lf_info, &lfm->lfl_y[r << 3]); - - dst->buf += 8 * dst->stride; - mask_16x16 >>= 8; - mask_8x8 >>= 8; - mask_4x4 >>= 8; - mask_4x4_int >>= 8; + filter_selectively_vert_row2(plane->plane_type, + dst->buf, dst->stride, + mask_16x16_l, + mask_8x8_l, + mask_4x4_l, + mask_4x4_int_l, + &cm->lf_info, &lfm->lfl_y[r << 3]); + + dst->buf += 16 * dst->stride; + mask_16x16 >>= 16; + mask_8x8 >>= 16; + mask_4x4 >>= 16; + mask_4x4_int >>= 16; } // Horizontal pass @@ -1045,6 +1088,7 @@ static void filter_block_plane(VP9_COMMON *const cm, mask_16x16 = lfm->above_y[TX_16X16]; mask_8x8 = lfm->above_y[TX_8X8]; mask_4x4 = lfm->above_y[TX_4X4]; + mask_4x4_int = lfm->int_4x4_y; for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r++) { unsigned int mask_16x16_r; @@ -1065,13 +1109,14 @@ static void filter_block_plane(VP9_COMMON *const cm, mask_16x16_r, mask_8x8_r, mask_4x4_r, - mask_4x4_int_row[r], + mask_4x4_int & 0xff, &cm->lf_info, &lfm->lfl_y[r << 3]); dst->buf += 8 * dst->stride; mask_16x16 >>= 8; mask_8x8 >>= 8; mask_4x4 >>= 8; + mask_4x4_int >>= 8; } } else { uint16_t mask_16x16 = lfm->left_uv[TX_16X16]; @@ -1079,27 +1124,37 @@ static void filter_block_plane(VP9_COMMON *const cm, uint16_t mask_4x4 = lfm->left_uv[TX_4X4]; uint16_t mask_4x4_int = lfm->int_4x4_uv; - // Vertical pass - for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) { + // Vertical pass: do 2 rows at one time + for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 4) { if (plane->plane_type == 1) { - for (c = 0; c < (MI_BLOCK_SIZE >> 1); c++) + for (c = 0; c < (MI_BLOCK_SIZE >> 1); c++) { lfm->lfl_uv[(r << 1) + c] = 
lfm->lfl_y[(r << 3) + (c << 1)]; + lfm->lfl_uv[((r + 2) << 1) + c] = lfm->lfl_y[((r + 2) << 3) + + (c << 1)]; + } } - mask_4x4_int_row[r] = mask_4x4_int & 0xf; - // Disable filtering on the leftmost column - filter_selectively_vert(dst->buf, dst->stride, - mask_16x16 & 0xf, - mask_8x8 & 0xf, - mask_4x4 & 0xf, - mask_4x4_int_row[r], - &cm->lf_info, &lfm->lfl_uv[r << 1]); - - dst->buf += 8 * dst->stride; - mask_16x16 >>= 4; - mask_8x8 >>= 4; - mask_4x4 >>= 4; - mask_4x4_int >>= 4; + { + unsigned int mask_16x16_l = mask_16x16 & 0xff; + unsigned int mask_8x8_l = mask_8x8 & 0xff; + unsigned int mask_4x4_l = mask_4x4 & 0xff; + unsigned int mask_4x4_int_l = mask_4x4_int & 0xff; + + // Disable filtering on the leftmost column + filter_selectively_vert_row2(plane->plane_type, + dst->buf, dst->stride, + mask_16x16_l, + mask_8x8_l, + mask_4x4_l, + mask_4x4_int_l, + &cm->lf_info, &lfm->lfl_uv[r << 1]); + + dst->buf += 16 * dst->stride; + mask_16x16 >>= 8; + mask_8x8 >>= 8; + mask_4x4 >>= 8; + mask_4x4_int >>= 8; + } } // Horizontal pass @@ -1107,11 +1162,12 @@ static void filter_block_plane(VP9_COMMON *const cm, mask_16x16 = lfm->above_uv[TX_16X16]; mask_8x8 = lfm->above_uv[TX_8X8]; mask_4x4 = lfm->above_uv[TX_4X4]; + mask_4x4_int = lfm->int_4x4_uv; for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) { const int skip_border_4x4_r = mi_row + r == cm->mi_rows - 1; const unsigned int mask_4x4_int_r = skip_border_4x4_r ? - 0 : (mask_4x4_int_row[r]); + 0 : (mask_4x4_int & 0xf); unsigned int mask_16x16_r; unsigned int mask_8x8_r; unsigned int mask_4x4_r; @@ -1137,6 +1193,7 @@ static void filter_block_plane(VP9_COMMON *const cm, mask_16x16 >>= 4; mask_8x8 >>= 4; mask_4x4 >>= 4; + mask_4x4_int >>= 4; } } } @@ -1147,10 +1204,8 @@ void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer, const int num_planes = y_only ? 1 : MAX_MB_PLANE; int mi_row, mi_col; LOOP_FILTER_MASK lfm; -#if CONFIG_NON420 int use_420 = y_only || (xd->plane[1].subsampling_y == 1 && xd->plane[1].subsampling_x == 1); -#endif for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) { MODE_INFO **mi_8x8 = cm->mi_grid_visible + mi_row * cm->mode_info_stride; @@ -1158,25 +1213,19 @@ void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer, for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) { int plane; - setup_dst_planes(xd, frame_buffer, mi_row, mi_col); + vp9_setup_dst_planes(xd, frame_buffer, mi_row, mi_col); // TODO(JBB): Make setup_mask work for non 420. 
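// (Editorial note, not part of the patch: use_420, computed above, gates the
// fast mask-based path; it requires luma-only filtering or true 4:2:0
// subsampling (subsampling_x == subsampling_y == 1). Anything else falls
// back to filter_block_plane_non420() below, which derives its masks per
// block on the fly instead of per 64x64 region.)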
-#if CONFIG_NON420 if (use_420) -#endif - setup_mask(cm, mi_row, mi_col, mi_8x8 + mi_col, cm->mode_info_stride, - &lfm); + vp9_setup_mask(cm, mi_row, mi_col, mi_8x8 + mi_col, + cm->mode_info_stride, &lfm); for (plane = 0; plane < num_planes; ++plane) { -#if CONFIG_NON420 if (use_420) -#endif - filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm); -#if CONFIG_NON420 + vp9_filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm); else filter_block_plane_non420(cm, &xd->plane[plane], mi_8x8 + mi_col, mi_row, mi_col); -#endif } } } @@ -1184,12 +1233,12 @@ void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer, void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd, int frame_filter_level, - int y_only, int partial) { + int y_only, int partial_frame) { int start_mi_row, end_mi_row, mi_rows_to_filter; if (!frame_filter_level) return; start_mi_row = 0; mi_rows_to_filter = cm->mi_rows; - if (partial && cm->mi_rows > 8) { + if (partial_frame && cm->mi_rows > 8) { start_mi_row = cm->mi_rows >> 1; start_mi_row &= 0xfffffff8; mi_rows_to_filter = MAX(cm->mi_rows / 8, 8); diff --git a/libvpx/vp9/common/vp9_loopfilter.h b/libvpx/vp9/common/vp9_loopfilter.h index 62389ea..97ae9d2 100644 --- a/libvpx/vp9/common/vp9_loopfilter.h +++ b/libvpx/vp9/common/vp9_loopfilter.h @@ -17,6 +17,10 @@ #include "vp9/common/vp9_blockd.h" #include "vp9/common/vp9_seg_common.h" +#ifdef __cplusplus +extern "C" { +#endif + #define MAX_LOOP_FILTER 63 #define MAX_SHARPNESS 7 @@ -54,12 +58,44 @@ typedef struct { typedef struct { loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1]; uint8_t lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS]; - uint8_t mode_lf_lut[MB_MODE_COUNT]; } loop_filter_info_n; +// This structure holds bit masks for all 8x8 blocks in a 64x64 region. +// Each 1 bit represents a position in which we want to apply the loop filter. +// Left_ entries refer to whether we apply a filter on the border to the +// left of the block. Above_ entries refer to whether or not to apply a +// filter on the above border. Int_ entries refer to whether or not to +// apply borders on the 4x4 edges within the 8x8 block that each bit +// represents. +// Since each transform is accompanied by a potentially different type of +// loop filter there is a different entry in the array for each transform size. +typedef struct { + uint64_t left_y[TX_SIZES]; + uint64_t above_y[TX_SIZES]; + uint64_t int_4x4_y; + uint16_t left_uv[TX_SIZES]; + uint16_t above_uv[TX_SIZES]; + uint16_t int_4x4_uv; + uint8_t lfl_y[64]; + uint8_t lfl_uv[16]; +} LOOP_FILTER_MASK; + /* assorted loopfilter functions which get used elsewhere */ struct VP9Common; struct macroblockd; +struct VP9LfSyncData; + +// This function sets up the bit masks for the entire 64x64 region represented +// by mi_row, mi_col. +void vp9_setup_mask(struct VP9Common *const cm, + const int mi_row, const int mi_col, + MODE_INFO **mi_8x8, const int mode_info_stride, + LOOP_FILTER_MASK *lfm); + +void vp9_filter_block_plane(struct VP9Common *const cm, + struct macroblockd_plane *const plane, + int mi_row, + LOOP_FILTER_MASK *lfm); void vp9_loop_filter_init(struct VP9Common *cm); @@ -71,7 +107,7 @@ void vp9_loop_filter_frame_init(struct VP9Common *cm, int default_filt_lvl); void vp9_loop_filter_frame(struct VP9Common *cm, struct macroblockd *mbd, int filter_level, - int y_only, int partial); + int y_only, int partial_frame); // Apply the loop filter to [start, stop) macro block rows in frame_buffer. 
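[Editor's note: the declaration that follows takes a half-open [start, stop) row range, which is what the new lf_sync / num_lf_workers fields in LFWorkerData (further down in this header) build on for threading. A minimal sketch, not part of this change, of carving a frame into per-worker ranges aligned to superblock rows; the helper name is ours, and the real threaded path synchronizes more finely through VP9LfSyncData.]

/* Give worker i the mi rows [starts[i], stops[i]), each range starting on a
 * superblock boundary (MI_BLOCK_SIZE == 8 mode-info units). */
static void split_lf_rows(int mi_rows, int num_workers,
                          int *starts, int *stops) {
  const int sb_rows = (mi_rows + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE;
  int i;
  for (i = 0; i < num_workers; ++i) {
    starts[i] = (sb_rows * i / num_workers) * MI_BLOCK_SIZE;
    stops[i] = (sb_rows * (i + 1) / num_workers) * MI_BLOCK_SIZE;
  }
  stops[num_workers - 1] = mi_rows;  /* clamp the last range to the frame */
}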
void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer, @@ -87,8 +123,15 @@ typedef struct LoopFilterWorkerData { int start; int stop; int y_only; + + struct VP9LfSyncData *lf_sync; + int num_lf_workers; } LFWorkerData; // Operates on the rows described by LFWorkerData passed as 'arg1'. int vp9_loop_filter_worker(void *arg1, void *arg2); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_LOOPFILTER_H_ diff --git a/libvpx/vp9/common/vp9_loopfilter_filters.c b/libvpx/vp9/common/vp9_loopfilter_filters.c index 2c4bf6c..25d3311 100644 --- a/libvpx/vp9/common/vp9_loopfilter_filters.c +++ b/libvpx/vp9/common/vp9_loopfilter_filters.c @@ -70,7 +70,7 @@ static INLINE int8_t hev_mask(uint8_t thresh, uint8_t p1, uint8_t p0, return hev; } -static INLINE void filter4(int8_t mask, uint8_t hev, uint8_t *op1, +static INLINE void filter4(int8_t mask, uint8_t thresh, uint8_t *op1, uint8_t *op0, uint8_t *oq0, uint8_t *oq1) { int8_t filter1, filter2; @@ -78,6 +78,7 @@ static INLINE void filter4(int8_t mask, uint8_t hev, uint8_t *op1, const int8_t ps0 = (int8_t) *op0 ^ 0x80; const int8_t qs0 = (int8_t) *oq0 ^ 0x80; const int8_t qs1 = (int8_t) *oq1 ^ 0x80; + const uint8_t hev = hev_mask(thresh, *op1, *op0, *oq0, *oq1); // add outer taps if we have high edge variance int8_t filter = signed_char_clamp(ps1 - qs1) & hev; @@ -101,11 +102,9 @@ static INLINE void filter4(int8_t mask, uint8_t hev, uint8_t *op1, *op1 = signed_char_clamp(ps1 + filter) ^ 0x80; } -void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int p /* pitch */, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */, + const uint8_t *blimit, const uint8_t *limit, + const uint8_t *thresh, int count) { int i; // loop filter designed to work using chars so that we can make maximum use @@ -115,17 +114,22 @@ void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int p /* pitch */, const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; const int8_t mask = filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); - const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1); - filter4(mask, hev, s - 2 * p, s - 1 * p, s, s + 1 * p); + filter4(mask, *thresh, s - 2 * p, s - 1 * p, s, s + 1 * p); ++s; } } -void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, 1); + vp9_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, 1); +} + +void vp9_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh, + int count) { int i; // loop filter designed to work using chars so that we can make maximum use @@ -135,13 +139,21 @@ void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; const int8_t mask = filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); - const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1); - filter4(mask, hev, s - 2, s - 1, s, s + 1); + filter4(mask, *thresh, s - 2, s - 1, s, s + 1); s += pitch; } } -static INLINE void filter8(int8_t mask, uint8_t hev, uint8_t flat, +void vp9_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, + const 
uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1); + vp9_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, + thresh1, 1); +} + +static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat, uint8_t *op3, uint8_t *op2, uint8_t *op1, uint8_t *op0, uint8_t *oq0, uint8_t *oq1, @@ -158,15 +170,13 @@ static INLINE void filter8(int8_t mask, uint8_t hev, uint8_t flat, *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3, 3); *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3, 3); } else { - filter4(mask, hev, op1, op0, oq0, oq1); + filter4(mask, thresh, op1, op0, oq0, oq1); } } -void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int p, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh, + int count) { int i; // loop filter designed to work using chars so that we can make maximum use @@ -177,19 +187,24 @@ void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int p, const int8_t mask = filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); - const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1); const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3); - filter8(mask, hev, flat, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, - s, s + 1 * p, s + 2 * p, s + 3 * p); + filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, + s, s + 1 * p, s + 2 * p, s + 3 * p); ++s; } } -void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, 1); + vp9_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, 1); +} + +void vp9_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh, + int count) { int i; for (i = 0; i < 8 * count; ++i) { @@ -197,15 +212,23 @@ void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; const int8_t mask = filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); - const int8_t hev = hev_mask(thresh[0], p1, p0, q0, q1); const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3); - filter8(mask, hev, flat, s - 4, s - 3, s - 2, s - 1, - s, s + 1, s + 2, s + 3); + filter8(mask, *thresh, flat, s - 4, s - 3, s - 2, s - 1, + s, s + 1, s + 2, s + 3); s += pitch; } } -static INLINE void filter16(int8_t mask, uint8_t hev, +void vp9_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1) { + vp9_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1); + vp9_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, + thresh1, 1); +} + +static INLINE void filter16(int8_t mask, uint8_t thresh, uint8_t flat, uint8_t flat2, uint8_t *op7, uint8_t *op6, uint8_t *op5, uint8_t *op4, @@ -252,15 +275,13 @@ static INLINE void filter16(int8_t mask, uint8_t hev, *oq6 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4); } else { - filter8(mask, 
hev, flat, op3, op2, op1, op0, oq0, oq1, oq2, oq3); + filter8(mask, thresh, flat, op3, op2, op1, op0, oq0, oq1, oq2, oq3); } } -void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int p, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { +void vp9_lpf_horizontal_16_c(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh, + int count) { int i; // loop filter designed to work using chars so that we can make maximum use @@ -270,13 +291,12 @@ void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int p, const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; const int8_t mask = filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); - const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1); const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3); const int8_t flat2 = flat_mask5(1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0, q0, s[4 * p], s[5 * p], s[6 * p], s[7 * p]); - filter16(mask, hev, flat, flat2, + filter16(mask, *thresh, flat, flat2, s - 8 * p, s - 7 * p, s - 6 * p, s - 5 * p, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s, s + 1 * p, s + 2 * p, s + 3 * p, @@ -285,25 +305,35 @@ void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int p, } } -void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int p, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh) { +static void mb_lpf_vertical_edge_w(uint8_t *s, int p, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, + int count) { int i; - for (i = 0; i < 8; ++i) { + for (i = 0; i < count; ++i) { const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1]; const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; const int8_t mask = filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); - const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1); const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3); const int8_t flat2 = flat_mask5(1, s[-8], s[-7], s[-6], s[-5], p0, q0, s[4], s[5], s[6], s[7]); - filter16(mask, hev, flat, flat2, + filter16(mask, *thresh, flat, flat2, s - 8, s - 7, s - 6, s - 5, s - 4, s - 3, s - 2, s - 1, s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6, s + 7); s += p; } } + +void vp9_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8); +} + +void vp9_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16); +} diff --git a/libvpx/vp9/common/vp9_mv.h b/libvpx/vp9/common/vp9_mv.h index 31a79b9..3eb7f9d 100644 --- a/libvpx/vp9/common/vp9_mv.h +++ b/libvpx/vp9/common/vp9_mv.h @@ -15,7 +15,11 @@ #include "vp9/common/vp9_common.h" -typedef struct { +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct mv { int16_t row; int16_t col; } MV; @@ -25,15 +29,19 @@ typedef union int_mv { MV as_mv; } int_mv; /* facilitates faster equality tests and copies */ -typedef struct { +typedef struct mv32 { int32_t row; int32_t col; } MV32; -static void clamp_mv(MV *mv, int min_col, int max_col, - int min_row, int max_row) { +static INLINE void clamp_mv(MV *mv, int min_col, int max_col, + int min_row, int max_row) { mv->col = clamp(mv->col, min_col, max_col); mv->row = clamp(mv->row, min_row, max_row); } +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_MV_H_ diff --git a/libvpx/vp9/common/vp9_mvref_common.c b/libvpx/vp9/common/vp9_mvref_common.c index 
8df8aec..d179f42 100644 --- a/libvpx/vp9/common/vp9_mvref_common.c +++ b/libvpx/vp9/common/vp9_mvref_common.c @@ -13,6 +13,11 @@ #define MVREF_NEIGHBOURS 8 +typedef struct position { + int row; + int col; +} POSITION; + typedef enum { BOTH_ZERO = 0, ZERO_PLUS_PREDICTED = 1, @@ -71,7 +76,7 @@ static const int counter_to_context[19] = { BOTH_INTRA // 18 }; -static const MV mv_ref_blocks[BLOCK_SIZES][MVREF_NEIGHBOURS] = { +static const POSITION mv_ref_blocks[BLOCK_SIZES][MVREF_NEIGHBOURS] = { // 4X4 {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}}, // 4X8 @@ -172,26 +177,26 @@ static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref, // are inside the borders of the tile. static INLINE int is_inside(const TileInfo *const tile, int mi_col, int mi_row, int mi_rows, - const MV *mv) { - return !(mi_row + mv->row < 0 || - mi_col + mv->col < tile->mi_col_start || - mi_row + mv->row >= mi_rows || - mi_col + mv->col >= tile->mi_col_end); + const POSITION *mi_pos) { + return !(mi_row + mi_pos->row < 0 || + mi_col + mi_pos->col < tile->mi_col_start || + mi_row + mi_pos->row >= mi_rows || + mi_col + mi_pos->col >= tile->mi_col_end); } // This function searches the neighbourhood of a given MB/SB // to try and find candidate reference vectors. -void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, - const TileInfo *const tile, - MODE_INFO *mi, const MODE_INFO *prev_mi, - MV_REFERENCE_FRAME ref_frame, - int_mv *mv_ref_list, - int block_idx, - int mi_row, int mi_col) { +static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, + const TileInfo *const tile, + MODE_INFO *mi, const MODE_INFO *prev_mi, + MV_REFERENCE_FRAME ref_frame, + int_mv *mv_ref_list, + int block, int mi_row, int mi_col) { const int *ref_sign_bias = cm->ref_frame_sign_bias; int i, refmv_count = 0; - const MV *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type]; - const MB_MODE_INFO *const prev_mbmi = prev_mi ? &prev_mi->mbmi : NULL; + const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type]; + const MB_MODE_INFO *const prev_mbmi = cm->coding_use_prev_mi && prev_mi ? + &prev_mi->mbmi : NULL; int different_ref_found = 0; int context_counter = 0; @@ -202,26 +207,19 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, // if the size < 8x8 we get the mv from the bmi substructure, // and we also need to keep a mode count. for (i = 0; i < 2; ++i) { - const MV *const mv_ref = &mv_ref_search[i]; + const POSITION *const mv_ref = &mv_ref_search[i]; if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { const MODE_INFO *const candidate_mi = xd->mi_8x8[mv_ref->col + mv_ref->row * xd->mode_info_stride]; const MB_MODE_INFO *const candidate = &candidate_mi->mbmi; // Keep counts for entropy encoding. context_counter += mode_2_counter[candidate->mode]; + different_ref_found = 1; - // Check if the candidate comes from the same reference frame. - if (candidate->ref_frame[0] == ref_frame) { - ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, - mv_ref->col, block_idx)); - different_ref_found = candidate->ref_frame[1] != ref_frame; - } else { - if (candidate->ref_frame[1] == ref_frame) - // Add second motion vector if it has the same ref_frame. 
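The MV → POSITION rename in this hunk makes it explicit that mv_ref_blocks holds mi-grid offsets rather than motion vectors, and is_inside() clips rows against the frame but columns against the current tile. A minimal, self-contained sketch of that bounds test (TileInfo reduced to the two columns it actually reads; all names here are illustrative, not the library's real types):

#include <stdio.h>

typedef struct { int row, col; } POSITION;
typedef struct { int mi_col_start, mi_col_end; } TILE;

static int is_inside(const TILE *tile, int mi_col, int mi_row, int mi_rows,
                     const POSITION *p) {
  /* rows are clipped against the frame, columns against the tile */
  return !(mi_row + p->row < 0 ||
           mi_col + p->col < tile->mi_col_start ||
           mi_row + p->row >= mi_rows ||
           mi_col + p->col >= tile->mi_col_end);
}

int main(void) {
  const TILE tile = { 0, 8 };        /* tile spans mi columns [0, 8) */
  const POSITION above = { -1, 0 };  /* neighbour one row up */
  printf("%d\n", is_inside(&tile, 0, 0, 16, &above));  /* 0: off the top */
  printf("%d\n", is_inside(&tile, 0, 1, 16, &above));  /* 1: in bounds */
  return 0;
}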
- ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 1, - mv_ref->col, block_idx)); - different_ref_found = 1; - } + if (candidate->ref_frame[0] == ref_frame) + ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, block)); + else if (candidate->ref_frame[1] == ref_frame) + ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 1, mv_ref->col, block)); } } @@ -229,20 +227,17 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, // as before except we don't need to keep track of sub blocks or // mode counts. for (; i < MVREF_NEIGHBOURS; ++i) { - const MV *const mv_ref = &mv_ref_search[i]; + const POSITION *const mv_ref = &mv_ref_search[i]; if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col + mv_ref->row * xd->mode_info_stride]->mbmi; + different_ref_found = 1; - if (candidate->ref_frame[0] == ref_frame) { + if (candidate->ref_frame[0] == ref_frame) ADD_MV_REF_LIST(candidate->mv[0]); - different_ref_found = candidate->ref_frame[1] != ref_frame; - } else { - if (candidate->ref_frame[1] == ref_frame) - ADD_MV_REF_LIST(candidate->mv[1]); - different_ref_found = 1; - } + else if (candidate->ref_frame[1] == ref_frame) + ADD_MV_REF_LIST(candidate->mv[1]); } } @@ -259,7 +254,7 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, // different reference frames. if (different_ref_found) { for (i = 0; i < MVREF_NEIGHBOURS; ++i) { - const MV *mv_ref = &mv_ref_search[i]; + const POSITION *mv_ref = &mv_ref_search[i]; if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col + mv_ref->row @@ -284,3 +279,85 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) clamp_mv_ref(&mv_ref_list[i].as_mv, xd); } + +void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd, + const TileInfo *const tile, + MODE_INFO *mi, const MODE_INFO *prev_mi, + MV_REFERENCE_FRAME ref_frame, + int_mv *mv_ref_list, + int mi_row, int mi_col) { + find_mv_refs_idx(cm, xd, tile, mi, prev_mi, ref_frame, mv_ref_list, -1, + mi_row, mi_col); +} + +static void lower_mv_precision(MV *mv, int allow_hp) { + const int use_hp = allow_hp && vp9_use_mv_hp(mv); + if (!use_hp) { + if (mv->row & 1) + mv->row += (mv->row > 0 ? -1 : 1); + if (mv->col & 1) + mv->col += (mv->col > 0 ? 
-1 : 1); + } +} + + +void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, + int_mv *mvlist, int_mv *nearest, int_mv *near) { + int i; + // Make sure all the candidates are properly clamped etc + for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) { + lower_mv_precision(&mvlist[i].as_mv, allow_hp); + clamp_mv2(&mvlist[i].as_mv, xd); + } + *nearest = mvlist[0]; + *near = mvlist[1]; +} + +void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, + const TileInfo *const tile, + int block, int ref, int mi_row, int mi_col, + int_mv *nearest, int_mv *near) { + int_mv mv_list[MAX_MV_REF_CANDIDATES]; + MODE_INFO *const mi = xd->mi_8x8[0]; + b_mode_info *bmi = mi->bmi; + int n; + + assert(MAX_MV_REF_CANDIDATES == 2); + + find_mv_refs_idx(cm, xd, tile, mi, xd->last_mi, mi->mbmi.ref_frame[ref], + mv_list, block, mi_row, mi_col); + + near->as_int = 0; + switch (block) { + case 0: + nearest->as_int = mv_list[0].as_int; + near->as_int = mv_list[1].as_int; + break; + case 1: + case 2: + nearest->as_int = bmi[0].as_mv[ref].as_int; + for (n = 0; n < MAX_MV_REF_CANDIDATES; ++n) + if (nearest->as_int != mv_list[n].as_int) { + near->as_int = mv_list[n].as_int; + break; + } + break; + case 3: { + int_mv candidates[2 + MAX_MV_REF_CANDIDATES]; + candidates[0] = bmi[1].as_mv[ref]; + candidates[1] = bmi[0].as_mv[ref]; + candidates[2] = mv_list[0]; + candidates[3] = mv_list[1]; + + nearest->as_int = bmi[2].as_mv[ref].as_int; + for (n = 0; n < 2 + MAX_MV_REF_CANDIDATES; ++n) + if (nearest->as_int != candidates[n].as_int) { + near->as_int = candidates[n].as_int; + break; + } + break; + } + default: + assert("Invalid block index."); + } +} diff --git a/libvpx/vp9/common/vp9_mvref_common.h b/libvpx/vp9/common/vp9_mvref_common.h index ce4c559..04cb000 100644 --- a/libvpx/vp9/common/vp9_mvref_common.h +++ b/libvpx/vp9/common/vp9_mvref_common.h @@ -7,29 +7,48 @@ * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ +#ifndef VP9_COMMON_VP9_MVREF_COMMON_H_ +#define VP9_COMMON_VP9_MVREF_COMMON_H_ #include "vp9/common/vp9_onyxc_int.h" #include "vp9/common/vp9_blockd.h" -#ifndef VP9_COMMON_VP9_MVREF_COMMON_H_ -#define VP9_COMMON_VP9_MVREF_COMMON_H_ +#ifdef __cplusplus +extern "C" { +#endif + +#define LEFT_TOP_MARGIN ((VP9_ENC_BORDER_IN_PIXELS - VP9_INTERP_EXTEND) << 3) +#define RIGHT_BOTTOM_MARGIN ((VP9_ENC_BORDER_IN_PIXELS -\ + VP9_INTERP_EXTEND) << 3) -void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, - const TileInfo *const tile, - MODE_INFO *mi, const MODE_INFO *prev_mi, - MV_REFERENCE_FRAME ref_frame, - int_mv *mv_ref_list, - int block_idx, - int mi_row, int mi_col); - -static INLINE void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd, - const TileInfo *const tile, - MODE_INFO *mi, const MODE_INFO *prev_mi, - MV_REFERENCE_FRAME ref_frame, - int_mv *mv_ref_list, - int mi_row, int mi_col) { - vp9_find_mv_refs_idx(cm, xd, tile, mi, prev_mi, ref_frame, - mv_ref_list, -1, mi_row, mi_col); +// TODO(jingning): this mv clamping function should be block size dependent. 
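lower_mv_precision() above drops sub-pel precision by nudging odd components one unit toward zero when high-precision motion vectors are not in use. A standalone check of that rounding (vp9_use_mv_hp() is elided, so this sketch only covers the allow_hp == 0 path):

#include <stdio.h>
#include <stdint.h>

typedef struct { int16_t row, col; } MV;

static void lower_mv_precision(MV *mv, int allow_hp) {
  /* mirrors the patch: without high precision, odd components
     move one unit toward zero */
  if (!allow_hp) {
    if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1);
    if (mv->col & 1) mv->col += (mv->col > 0 ? -1 : 1);
  }
}

int main(void) {
  MV mv = { 5, -3 };
  lower_mv_precision(&mv, 0);
  printf("%d %d\n", mv.row, mv.col);  /* prints "4 -2" */
  return 0;
}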
+static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) { + clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN, + xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN, + xd->mb_to_top_edge - LEFT_TOP_MARGIN, + xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN); } +void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd, + const TileInfo *const tile, + MODE_INFO *mi, const MODE_INFO *prev_mi, + MV_REFERENCE_FRAME ref_frame, + int_mv *mv_ref_list, + int mi_row, int mi_col); + +// check a list of motion vectors by sad score using a number rows of pixels +// above and a number cols of pixels in the left to select the one with best +// score to use as ref motion vector +void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, + int_mv *mvlist, int_mv *nearest, int_mv *near); + +void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, + const TileInfo *const tile, + int block, int ref, int mi_row, int mi_col, + int_mv *nearest, int_mv *near); + +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_MVREF_COMMON_H_ diff --git a/libvpx/vp9/common/vp9_onyx.h b/libvpx/vp9/common/vp9_onyx.h deleted file mode 100644 index 452dd6b..0000000 --- a/libvpx/vp9/common/vp9_onyx.h +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP9_COMMON_VP9_ONYX_H_ -#define VP9_COMMON_VP9_ONYX_H_ - -#ifdef __cplusplus -extern "C" -{ // NOLINT -#endif - -#include "./vpx_config.h" -#include "vpx/internal/vpx_codec_internal.h" -#include "vpx/vp8cx.h" -#include "vpx_scale/yv12config.h" -#include "vp9/common/vp9_ppflags.h" - -#define MAX_SEGMENTS 8 - - typedef int *VP9_PTR; - - /* Create/destroy static data structures. */ - - typedef enum { - NORMAL = 0, - FOURFIVE = 1, - THREEFIVE = 2, - ONETWO = 3 - } VPX_SCALING; - - typedef enum { - VP9_LAST_FLAG = 1, - VP9_GOLD_FLAG = 2, - VP9_ALT_FLAG = 4 - } VP9_REFFRAME; - - - typedef enum { - USAGE_STREAM_FROM_SERVER = 0x0, - USAGE_LOCAL_FILE_PLAYBACK = 0x1, - USAGE_CONSTRAINED_QUALITY = 0x2, - USAGE_CONSTANT_QUALITY = 0x3, - } END_USAGE; - - - typedef enum { - MODE_GOODQUALITY = 0x1, - MODE_BESTQUALITY = 0x2, - MODE_FIRSTPASS = 0x3, - MODE_SECONDPASS = 0x4, - MODE_SECONDPASS_BEST = 0x5, - } MODE; - - typedef enum { - FRAMEFLAGS_KEY = 1, - FRAMEFLAGS_GOLDEN = 2, - FRAMEFLAGS_ALTREF = 4, - } FRAMETYPE_FLAGS; - - typedef struct { - int version; // 4 versions of bitstream defined: - // 0 - best quality/slowest decode, - // 3 - lowest quality/fastest decode - int width; // width of data passed to the compressor - int height; // height of data passed to the compressor - double framerate; // set to passed in framerate - int64_t target_bandwidth; // bandwidth to be used in kilobits per second - - int noise_sensitivity; // pre processing blur: recommendation 0 - int Sharpness; // sharpening output: recommendation 0: - int cpu_used; - unsigned int rc_max_intra_bitrate_pct; - - // mode -> - // (0)=Realtime/Live Encoding. This mode is optimized for realtime - // encoding (for example, capturing a television signal or feed from - // a live camera). ( speed setting controls how fast ) - // (1)=Good Quality Fast Encoding. 
The encoder balances quality with the - // amount of time it takes to encode the output. ( speed setting - // controls how fast ) - // (2)=One Pass - Best Quality. The encoder places priority on the - // quality of the output over encoding speed. The output is compressed - // at the highest possible quality. This option takes the longest - // amount of time to encode. ( speed setting ignored ) - // (3)=Two Pass - First Pass. The encoder generates a file of statistics - // for use in the second encoding pass. ( speed setting controls how - // fast ) - // (4)=Two Pass - Second Pass. The encoder uses the statistics that were - // generated in the first encoding pass to create the compressed - // output. ( speed setting controls how fast ) - // (5)=Two Pass - Second Pass Best. The encoder uses the statistics that - // were generated in the first encoding pass to create the compressed - // output using the highest possible quality, and taking a - // longer amount of time to encode.. ( speed setting ignored ) - int Mode; - - // Key Framing Operations - int auto_key; // autodetect cut scenes and set the keyframes - int key_freq; // maximum distance to key frame. - - int allow_lag; // allow lagged compression (if 0 lagin frames is ignored) - int lag_in_frames; // how many frames lag before we start encoding - - // ---------------------------------------------------------------- - // DATARATE CONTROL OPTIONS - - int end_usage; // vbr or cbr - - // buffer targeting aggressiveness - int under_shoot_pct; - int over_shoot_pct; - - // buffering parameters - int64_t starting_buffer_level; // in seconds - int64_t optimal_buffer_level; - int64_t maximum_buffer_size; - - // controlling quality - int fixed_q; - int worst_allowed_q; - int best_allowed_q; - int cq_level; - int lossless; - - // two pass datarate control - int two_pass_vbrbias; // two pass datarate control tweaks - int two_pass_vbrmin_section; - int two_pass_vbrmax_section; - // END DATARATE CONTROL OPTIONS - // ---------------------------------------------------------------- - - // Spatial scalability - int ss_number_layers; - - // these parameters aren't to be used in final build don't use!!! - int play_alternate; - int alt_freq; - - int encode_breakout; // early breakout : for video conf recommend 800 - - /* Bitfield defining the error resiliency features to enable. - * Can provide decodable frames after losses in previous - * frames and decodable partitions after losses in the same frame. - */ - unsigned int error_resilient_mode; - - /* Bitfield defining the parallel decoding mode where the - * decoding in successive frames may be conducted in parallel - * just by decoding the frame headers. - */ - unsigned int frame_parallel_decoding_mode; - - int arnr_max_frames; - int arnr_strength; - int arnr_type; - - int tile_columns; - int tile_rows; - - struct vpx_fixed_buf two_pass_stats_in; - struct vpx_codec_pkt_list *output_pkt_list; - - vp8e_tuning tuning; - } VP9_CONFIG; - - - void vp9_initialize_enc(); - - VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf); - void vp9_remove_compressor(VP9_PTR *comp); - - void vp9_change_config(VP9_PTR onyx, VP9_CONFIG *oxcf); - - // receive a frames worth of data. caller can assume that a copy of this - // frame is made and not just a copy of the pointer.. 
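The Mode documentation above maps directly onto a two-pass driver. The following sketch exercises the API exactly as the comment describes, using the entry points declared in this (now deleted) header; "stats" is a hypothetical buffer gathered during the first pass, and frame I/O and error handling are elided:

/* Illustrative only: the removed vp9_onyx.h API, driven per the comment. */
#include "vp9/common/vp9_onyx.h"

static void two_pass_encode(VP9_CONFIG *oxcf, struct vpx_fixed_buf stats) {
  VP9_PTR enc;

  oxcf->Mode = MODE_FIRSTPASS;        /* pass 1: only emits statistics */
  enc = vp9_create_compressor(oxcf);
  /* ... vp9_receive_raw_frame() per input frame, gather stats packets ... */
  vp9_remove_compressor(&enc);

  oxcf->Mode = MODE_SECONDPASS_BEST;  /* pass 2: best quality, slowest */
  oxcf->two_pass_stats_in = stats;    /* feed pass-1 statistics back in */
  enc = vp9_create_compressor(oxcf);
  /* ... vp9_receive_raw_frame() + vp9_get_compressed_data() per frame ... */
  vp9_remove_compressor(&enc);
}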
- int vp9_receive_raw_frame(VP9_PTR comp, unsigned int frame_flags, - YV12_BUFFER_CONFIG *sd, int64_t time_stamp, - int64_t end_time_stamp); - - int vp9_get_compressed_data(VP9_PTR comp, unsigned int *frame_flags, - unsigned long *size, unsigned char *dest, - int64_t *time_stamp, int64_t *time_end, - int flush); - - int vp9_get_preview_raw_frame(VP9_PTR comp, YV12_BUFFER_CONFIG *dest, - vp9_ppflags_t *flags); - - int vp9_use_as_reference(VP9_PTR comp, int ref_frame_flags); - - int vp9_update_reference(VP9_PTR comp, int ref_frame_flags); - - int vp9_copy_reference_enc(VP9_PTR comp, VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd); - - int vp9_get_reference_enc(VP9_PTR ptr, int index, YV12_BUFFER_CONFIG **fb); - - int vp9_set_reference_enc(VP9_PTR comp, VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd); - - int vp9_update_entropy(VP9_PTR comp, int update); - - int vp9_set_roimap(VP9_PTR comp, unsigned char *map, - unsigned int rows, unsigned int cols, - int delta_q[MAX_SEGMENTS], - int delta_lf[MAX_SEGMENTS], - unsigned int threshold[MAX_SEGMENTS]); - - int vp9_set_active_map(VP9_PTR comp, unsigned char *map, - unsigned int rows, unsigned int cols); - - int vp9_set_internal_size(VP9_PTR comp, - VPX_SCALING horiz_mode, VPX_SCALING vert_mode); - - int vp9_set_size_literal(VP9_PTR comp, unsigned int width, - unsigned int height); - - void vp9_set_svc(VP9_PTR comp, int use_svc); - - int vp9_get_quantizer(VP9_PTR c); - -#ifdef __cplusplus -} -#endif - -#endif // VP9_COMMON_VP9_ONYX_H_ diff --git a/libvpx/vp9/common/vp9_onyxc_int.h b/libvpx/vp9/common/vp9_onyxc_int.h index a2af57a..52889f7 100644 --- a/libvpx/vp9/common/vp9_onyxc_int.h +++ b/libvpx/vp9/common/vp9_onyxc_int.h @@ -18,6 +18,7 @@ #include "vp9/common/vp9_entropymv.h" #include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_entropymode.h" +#include "vp9/common/vp9_frame_buffers.h" #include "vp9/common/vp9_quant_common.h" #include "vp9/common/vp9_tile_common.h" @@ -25,62 +26,42 @@ #include "vp9/common/vp9_postproc.h" #endif -#define ALLOWED_REFS_PER_FRAME 3 +#ifdef __cplusplus +extern "C" { +#endif + +#define REFS_PER_FRAME 3 -#define NUM_REF_FRAMES_LOG2 3 -#define NUM_REF_FRAMES (1 << NUM_REF_FRAMES_LOG2) +#define REF_FRAMES_LOG2 3 +#define REF_FRAMES (1 << REF_FRAMES_LOG2) // 1 scratch frame for the new frame, 3 for scaled references on the encoder // TODO(jkoleszar): These 3 extra references could probably come from the // normal reference pool. 
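Spelling out the renamed constants: REF_FRAMES_LOG2 = 3 gives a pool of 8 reference slots, and the one scratch frame plus three scaled-reference buffers described above bring the total (named FRAME_BUFFERS in the next hunk) to 12. A trivially checkable restatement:

#include <assert.h>

#define REF_FRAMES_LOG2 3
#define REF_FRAMES (1 << REF_FRAMES_LOG2)
#define FRAME_BUFFERS (REF_FRAMES + 4)  /* 1 new frame + 3 scaled refs */

int main(void) {
  assert(REF_FRAMES == 8 && FRAME_BUFFERS == 12);
  return 0;
}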
-#define NUM_YV12_BUFFERS (NUM_REF_FRAMES + 4) - -#define NUM_FRAME_CONTEXTS_LOG2 2 -#define NUM_FRAME_CONTEXTS (1 << NUM_FRAME_CONTEXTS_LOG2) - -typedef struct frame_contexts { - vp9_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1]; - vp9_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1]; - vp9_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1]; - vp9_coeff_probs_model coef_probs[TX_SIZES][BLOCK_TYPES]; - vp9_prob switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS] - [SWITCHABLE_FILTERS - 1]; - vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1]; - vp9_prob intra_inter_prob[INTRA_INTER_CONTEXTS]; - vp9_prob comp_inter_prob[COMP_INTER_CONTEXTS]; - vp9_prob single_ref_prob[REF_CONTEXTS][2]; - vp9_prob comp_ref_prob[REF_CONTEXTS]; - struct tx_probs tx_probs; - vp9_prob mbskip_probs[MBSKIP_CONTEXTS]; - nmv_context nmvc; -} FRAME_CONTEXT; +#define FRAME_BUFFERS (REF_FRAMES + 4) -typedef struct { - unsigned int y_mode[BLOCK_SIZE_GROUPS][INTRA_MODES]; - unsigned int uv_mode[INTRA_MODES][INTRA_MODES]; - unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES]; - vp9_coeff_count_model coef[TX_SIZES][BLOCK_TYPES]; - unsigned int eob_branch[TX_SIZES][BLOCK_TYPES][REF_TYPES] - [COEF_BANDS][PREV_COEF_CONTEXTS]; - unsigned int switchable_interp[SWITCHABLE_FILTER_CONTEXTS] - [SWITCHABLE_FILTERS]; - unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES]; - unsigned int intra_inter[INTRA_INTER_CONTEXTS][2]; - unsigned int comp_inter[COMP_INTER_CONTEXTS][2]; - unsigned int single_ref[REF_CONTEXTS][2][2]; - unsigned int comp_ref[REF_CONTEXTS][2]; - struct tx_counts tx; - unsigned int mbskip[MBSKIP_CONTEXTS][2]; - nmv_context_counts mv; -} FRAME_COUNTS; +#define FRAME_CONTEXTS_LOG2 2 +#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2) + +extern const struct { + PARTITION_CONTEXT above; + PARTITION_CONTEXT left; +} partition_context_lookup[BLOCK_SIZES]; typedef enum { - SINGLE_PREDICTION_ONLY = 0, - COMP_PREDICTION_ONLY = 1, - HYBRID_PREDICTION = 2, - NB_PREDICTION_TYPES = 3, -} COMPPREDMODE_TYPE; + SINGLE_REFERENCE = 0, + COMPOUND_REFERENCE = 1, + REFERENCE_MODE_SELECT = 2, + REFERENCE_MODES = 3, +} REFERENCE_MODE; + + +typedef struct { + int ref_count; + vpx_codec_frame_buffer_t raw_frame_buffer; + YV12_BUFFER_CONFIG buf; +} RefCntBuffer; typedef struct VP9Common { struct vpx_internal_error_info error; @@ -108,17 +89,16 @@ typedef struct VP9Common { YV12_BUFFER_CONFIG *frame_to_show; - YV12_BUFFER_CONFIG yv12_fb[NUM_YV12_BUFFERS]; - int fb_idx_ref_cnt[NUM_YV12_BUFFERS]; /* reference counts */ - int ref_frame_map[NUM_REF_FRAMES]; /* maps fb_idx to reference slot */ + RefCntBuffer frame_bufs[FRAME_BUFFERS]; + + int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */ // TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and // roll new_fb_idx into it. - // Each frame can reference ALLOWED_REFS_PER_FRAME buffers - int active_ref_idx[ALLOWED_REFS_PER_FRAME]; - struct scale_factors active_ref_scale[ALLOWED_REFS_PER_FRAME]; - struct scale_factors_common active_ref_scale_comm[ALLOWED_REFS_PER_FRAME]; + // Each frame can reference REFS_PER_FRAME buffers + RefBuffer frame_refs[REFS_PER_FRAME]; + int new_fb_idx; YV12_BUFFER_CONFIG post_proc_buffer; @@ -128,6 +108,7 @@ typedef struct VP9Common { int show_frame; int last_show_frame; + int show_existing_frame; // Flag signaling that the frame is encoded using only INTRA modes. int intra_only; @@ -175,7 +156,7 @@ typedef struct VP9Common { // Persistent mb segment id map used in prediction. 
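The RefCntBuffer introduced above folds the old fb_idx_ref_cnt array into the buffer itself, so frame lifetime reduces to a per-slot reference count. A reduced sketch of that ownership model (the patch's own get_free_fb()/ref_cnt_fb() helpers follow below; this version strips them down to just the counting):

#include <assert.h>

#define FRAME_BUFFERS 12

typedef struct { int ref_count; } RefCntBuffer;  /* buf fields elided */

static int get_free_fb(RefCntBuffer *bufs) {
  int i;
  for (i = 0; i < FRAME_BUFFERS; ++i)
    if (bufs[i].ref_count == 0) break;
  assert(i < FRAME_BUFFERS);
  bufs[i].ref_count = 1;            /* caller owns one reference */
  return i;
}

static void ref_cnt_fb(RefCntBuffer *bufs, int *idx, int new_idx) {
  if (*idx >= 0 && bufs[*idx].ref_count > 0)
    bufs[*idx].ref_count--;         /* release the old reference */
  *idx = new_idx;
  bufs[new_idx].ref_count++;        /* take the new one */
}

int main(void) {
  RefCntBuffer bufs[FRAME_BUFFERS] = { { 0 } };
  int last_ref = -1;
  const int fb = get_free_fb(bufs); /* new frame: count == 1 */
  ref_cnt_fb(bufs, &last_ref, fb);  /* retained as a reference: count == 2 */
  assert(bufs[fb].ref_count == 2);
  return 0;
}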
unsigned char *last_frame_seg_map; - INTERPOLATION_TYPE mcomp_filter_type; + INTERP_FILTER interp_filter; loop_filter_info_n lf_info; @@ -190,10 +171,10 @@ typedef struct VP9Common { int allow_comp_inter_inter; MV_REFERENCE_FRAME comp_fixed_ref; MV_REFERENCE_FRAME comp_var_ref[2]; - COMPPREDMODE_TYPE comp_pred_mode; + REFERENCE_MODE reference_mode; FRAME_CONTEXT fc; /* this frame entropy */ - FRAME_CONTEXT frame_contexts[NUM_FRAME_CONTEXTS]; + FRAME_CONTEXT frame_contexts[FRAME_CONTEXTS]; unsigned int frame_context_idx; /* Context to use/update */ FRAME_COUNTS counts; @@ -207,45 +188,54 @@ typedef struct VP9Common { int error_resilient_mode; int frame_parallel_decoding_mode; + // Flag indicates if prev_mi can be used in coding: + // 0: encoder assumes decoder does not have prev_mi + // 1: encoder assumes decoder has and uses prev_mi + unsigned int coding_use_prev_mi; + int log2_tile_cols, log2_tile_rows; -} VP9_COMMON; -// ref == 0 => LAST_FRAME -// ref == 1 => GOLDEN_FRAME -// ref == 2 => ALTREF_FRAME -static YV12_BUFFER_CONFIG *get_frame_ref_buffer(VP9_COMMON *cm, int ref) { - return &cm->yv12_fb[cm->active_ref_idx[ref]]; -} + // Private data associated with the frame buffer callbacks. + void *cb_priv; + vpx_get_frame_buffer_cb_fn_t get_fb_cb; + vpx_release_frame_buffer_cb_fn_t release_fb_cb; -static YV12_BUFFER_CONFIG *get_frame_new_buffer(VP9_COMMON *cm) { - return &cm->yv12_fb[cm->new_fb_idx]; + // Handles memory for the codec. + InternalFrameBufferList int_frame_buffers; +} VP9_COMMON; + +static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP9_COMMON *cm) { + return &cm->frame_bufs[cm->new_fb_idx].buf; } -static int get_free_fb(VP9_COMMON *cm) { +static INLINE int get_free_fb(VP9_COMMON *cm) { int i; - for (i = 0; i < NUM_YV12_BUFFERS; i++) - if (cm->fb_idx_ref_cnt[i] == 0) + for (i = 0; i < FRAME_BUFFERS; i++) + if (cm->frame_bufs[i].ref_count == 0) break; - assert(i < NUM_YV12_BUFFERS); - cm->fb_idx_ref_cnt[i] = 1; + assert(i < FRAME_BUFFERS); + cm->frame_bufs[i].ref_count = 1; return i; } -static void ref_cnt_fb(int *buf, int *idx, int new_idx) { - if (buf[*idx] > 0) - buf[*idx]--; +static INLINE void ref_cnt_fb(RefCntBuffer *bufs, int *idx, int new_idx) { + const int ref_index = *idx; + + if (ref_index >= 0 && bufs[ref_index].ref_count > 0) + bufs[ref_index].ref_count--; *idx = new_idx; - buf[new_idx]++; + bufs[new_idx].ref_count++; } -static int mi_cols_aligned_to_sb(int n_mis) { +static INLINE int mi_cols_aligned_to_sb(int n_mis) { return ALIGN_POWER_OF_TWO(n_mis, MI_BLOCK_SIZE_LOG2); } -static INLINE const vp9_prob* get_partition_probs(VP9_COMMON *cm, int ctx) { +static INLINE const vp9_prob* get_partition_probs(const VP9_COMMON *cm, + int ctx) { return cm->frame_type == KEY_FRAME ? 
vp9_kf_partition_probs[ctx] : cm->fc.partition_prob[ctx]; } @@ -265,10 +255,10 @@ static INLINE void set_skip_context( } } -static void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile, - int mi_row, int bh, - int mi_col, int bw, - int mi_rows, int mi_cols) { +static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile, + int mi_row, int bh, + int mi_col, int bw, + int mi_rows, int mi_cols) { xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8); xd->mb_to_bottom_edge = ((mi_rows - bh - mi_row) * MI_SIZE) * 8; xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8); @@ -279,10 +269,9 @@ static void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile, xd->left_available = (mi_col > tile->mi_col_start); } -static void set_prev_mi(VP9_COMMON *cm) { +static INLINE void set_prev_mi(VP9_COMMON *cm) { const int use_prev_in_find_mv_refs = cm->width == cm->last_width && cm->height == cm->last_height && - !cm->error_resilient_mode && !cm->intra_only && cm->last_show_frame; // Special case: set prev_mi to NULL when the previous mode info @@ -298,54 +287,46 @@ static INLINE int frame_is_intra_only(const VP9_COMMON *const cm) { static INLINE void update_partition_context( PARTITION_CONTEXT *above_seg_context, PARTITION_CONTEXT left_seg_context[8], - int mi_row, int mi_col, - BLOCK_SIZE sb_type, - BLOCK_SIZE sb_size) { - PARTITION_CONTEXT *above_ctx = above_seg_context + mi_col; - PARTITION_CONTEXT *left_ctx = left_seg_context + (mi_row & MI_MASK); - - const int bsl = b_width_log2(sb_size), bs = (1 << bsl) / 2; - const int bwl = b_width_log2(sb_type); - const int bhl = b_height_log2(sb_type); - const int boffset = b_width_log2(BLOCK_64X64) - bsl; - const char pcval0 = ~(0xe << boffset); - const char pcval1 = ~(0xf << boffset); - const char pcvalue[2] = {pcval0, pcval1}; - - assert(MAX(bwl, bhl) <= bsl); + int mi_row, int mi_col, BLOCK_SIZE subsize, BLOCK_SIZE bsize) { + PARTITION_CONTEXT *const above_ctx = above_seg_context + mi_col; + PARTITION_CONTEXT *const left_ctx = left_seg_context + (mi_row & MI_MASK); + + // num_4x4_blocks_wide_lookup[bsize] / 2 + const int bs = num_8x8_blocks_wide_lookup[bsize]; // update the partition context at the end notes. set partition bits // of block sizes larger than the current one to be one, and partition // bits of smaller block sizes to be zero. 
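The bs value noted above ("num_4x4_blocks_wide_lookup[bsize] / 2") is simply the block width in 8-pixel mi units, i.e. the number of above/left context entries the two memsets that follow rewrite. In numbers, a 32x32 block covers 8 / 2 = 4 entries (a toy check, with BLOCK_SIZES cut down to the square sizes; the real list also has the rectangular ones):

#include <assert.h>

enum { BLOCK_8X8, BLOCK_16X16, BLOCK_32X32, BLOCK_64X64, BLOCK_SIZES };
static const int num_8x8_blocks_wide_lookup[BLOCK_SIZES] = { 1, 2, 4, 8 };

int main(void) {
  /* 32x32: num_4x4_blocks_wide == 8, so bs == 8 / 2 == 4 context entries */
  assert(num_8x8_blocks_wide_lookup[BLOCK_32X32] == 8 / 2);
  return 0;
}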
- vpx_memset(above_ctx, pcvalue[bwl == bsl], bs); - vpx_memset(left_ctx, pcvalue[bhl == bsl], bs); + vpx_memset(above_ctx, partition_context_lookup[subsize].above, bs); + vpx_memset(left_ctx, partition_context_lookup[subsize].left, bs); } static INLINE int partition_plane_context( const PARTITION_CONTEXT *above_seg_context, const PARTITION_CONTEXT left_seg_context[8], - int mi_row, int mi_col, - BLOCK_SIZE sb_type) { + int mi_row, int mi_col, BLOCK_SIZE bsize) { const PARTITION_CONTEXT *above_ctx = above_seg_context + mi_col; const PARTITION_CONTEXT *left_ctx = left_seg_context + (mi_row & MI_MASK); - int bsl = mi_width_log2(sb_type), bs = 1 << bsl; + const int bsl = mi_width_log2(bsize); + const int bs = 1 << bsl; int above = 0, left = 0, i; - int boffset = mi_width_log2(BLOCK_64X64) - bsl; - assert(mi_width_log2(sb_type) == mi_height_log2(sb_type)); + assert(b_width_log2(bsize) == b_height_log2(bsize)); assert(bsl >= 0); - assert(boffset >= 0); - - for (i = 0; i < bs; i++) - above |= (above_ctx[i] & (1 << boffset)); - for (i = 0; i < bs; i++) - left |= (left_ctx[i] & (1 << boffset)); - above = (above > 0); - left = (left > 0); + for (i = 0; i < bs; i++) { + above |= above_ctx[i]; + left |= left_ctx[i]; + } + above = (above & bs) > 0; + left = (left & bs) > 0; return (left * 2 + above) + bsl * PARTITION_PLOFFSET; } +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_ONYXC_INT_H_ diff --git a/libvpx/vp9/common/vp9_postproc.c b/libvpx/vp9/common/vp9_postproc.c index 212a28a..7baa9ee 100644 --- a/libvpx/vp9/common/vp9_postproc.c +++ b/libvpx/vp9/common/vp9_postproc.c @@ -13,13 +13,16 @@ #include <stdio.h> #include "./vpx_config.h" +#include "./vpx_scale_rtcd.h" +#include "./vp9_rtcd.h" + +#include "vpx_scale/vpx_scale.h" #include "vpx_scale/yv12config.h" + +#include "vp9/common/vp9_onyxc_int.h" #include "vp9/common/vp9_postproc.h" -#include "vp9/common/vp9_textblit.h" -#include "vpx_scale/vpx_scale.h" #include "vp9/common/vp9_systemdependent.h" -#include "./vp9_rtcd.h" -#include "./vpx_scale_rtcd.h" +#include "vp9/common/vp9_textblit.h" #define RGB_TO_YUV(t) \ ( (0.257*(float)(t >> 16)) + (0.504*(float)(t >> 8 & 0xff)) + \ @@ -127,9 +130,6 @@ const short vp9_rv[] = { 0, 9, 5, 5, 11, 10, 13, 9, 10, 13, }; - -/**************************************************************************** - */ void vp9_post_proc_down_and_across_c(const uint8_t *src_ptr, uint8_t *dst_ptr, int src_pixels_per_line, @@ -371,7 +371,7 @@ void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, } } -double vp9_gaussian(double sigma, double mu, double x) { +static double gaussian(double sigma, double mu, double x) { return 1 / (sigma * sqrt(2.0 * 3.14159265)) * (exp(-(x - mu) * (x - mu) / (2 * sigma * sigma))); } @@ -396,7 +396,7 @@ static void fillrd(struct postproc_state *state, int q, int a) { next = 0; for (i = -32; i < 32; i++) { - int a = (int)(.5 + 256 * vp9_gaussian(sigma, 0, i)); + int a = (int)(0.5 + 256 * gaussian(sigma, 0, i)); if (a) { for (j = 0; j < a; j++) { @@ -425,27 +425,6 @@ static void fillrd(struct postproc_state *state, int q, int a) { state->last_noise = a; } -/**************************************************************************** - * - * ROUTINE : plane_add_noise_c - * - * INPUTS : unsigned char *Start starting address of buffer to - * add gaussian noise to - * unsigned int width width of plane - * unsigned int height height of plane - * int pitch distance between subsequent lines of frame - * int q quantizer used to determine amount of noise - * to add - 
* - * OUTPUTS : None. - * - * RETURNS : void. - * - * FUNCTION : adds gaussian noise to a plane of pixels - * - * SPECIAL NOTES : None. - * - ****************************************************************************/ void vp9_plane_add_noise_c(uint8_t *start, char *noise, char blackclamp[16], char whiteclamp[16], @@ -628,49 +607,40 @@ static void constrain_line(int x0, int *x1, int y0, int *y1, int vp9_post_proc_frame(struct VP9Common *cm, YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *ppflags) { - int q = cm->lf.filter_level * 10 / 6; - int flags = ppflags->post_proc_flag; - int deblock_level = ppflags->deblocking_level; - int noise_level = ppflags->noise_level; + const int q = MIN(63, cm->lf.filter_level * 10 / 6); + const int flags = ppflags->post_proc_flag; + YV12_BUFFER_CONFIG *const ppbuf = &cm->post_proc_buffer; + struct postproc_state *const ppstate = &cm->postproc_state; if (!cm->frame_to_show) return -1; - if (q > 63) - q = 63; - if (!flags) { *dest = *cm->frame_to_show; return 0; } -#if ARCH_X86||ARCH_X86_64 - vpx_reset_mmx_state(); -#endif + vp9_clear_system_state(); if (flags & VP9D_DEMACROBLOCK) { - deblock_and_de_macro_block(cm->frame_to_show, &cm->post_proc_buffer, - q + (deblock_level - 5) * 10, 1, 0); + deblock_and_de_macro_block(cm->frame_to_show, ppbuf, + q + (ppflags->deblocking_level - 5) * 10, 1, 0); } else if (flags & VP9D_DEBLOCK) { - vp9_deblock(cm->frame_to_show, &cm->post_proc_buffer, q); + vp9_deblock(cm->frame_to_show, ppbuf, q); } else { - vp8_yv12_copy_frame(cm->frame_to_show, &cm->post_proc_buffer); + vp8_yv12_copy_frame(cm->frame_to_show, ppbuf); } if (flags & VP9D_ADDNOISE) { - if (cm->postproc_state.last_q != q - || cm->postproc_state.last_noise != noise_level) { - fillrd(&cm->postproc_state, 63 - q, noise_level); + const int noise_level = ppflags->noise_level; + if (ppstate->last_q != q || + ppstate->last_noise != noise_level) { + fillrd(ppstate, 63 - q, noise_level); } - vp9_plane_add_noise(cm->post_proc_buffer.y_buffer, - cm->postproc_state.noise, - cm->postproc_state.blackclamp, - cm->postproc_state.whiteclamp, - cm->postproc_state.bothclamp, - cm->post_proc_buffer.y_width, - cm->post_proc_buffer.y_height, - cm->post_proc_buffer.y_stride); + vp9_plane_add_noise(ppbuf->y_buffer, ppstate->noise, ppstate->blackclamp, + ppstate->whiteclamp, ppstate->bothclamp, + ppbuf->y_width, ppbuf->y_height, ppbuf->y_stride); } #if 0 && CONFIG_POSTPROC_VISUALIZER @@ -684,16 +654,14 @@ int vp9_post_proc_frame(struct VP9Common *cm, cm->filter_level, flags, cm->mb_cols, cm->mb_rows); - vp9_blit_text(message, cm->post_proc_buffer.y_buffer, - cm->post_proc_buffer.y_stride); + vp9_blit_text(message, ppbuf->y_buffer, ppbuf->y_stride); } if (flags & VP9D_DEBUG_TXT_MBLK_MODES) { int i, j; uint8_t *y_ptr; - YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer; - int mb_rows = post->y_height >> 4; - int mb_cols = post->y_width >> 4; + int mb_rows = ppbuf->y_height >> 4; + int mb_cols = ppbuf->y_width >> 4; int mb_index = 0; MODE_INFO *mi = cm->mi; @@ -719,9 +687,8 @@ int vp9_post_proc_frame(struct VP9Common *cm, if (flags & VP9D_DEBUG_TXT_DC_DIFF) { int i, j; uint8_t *y_ptr; - YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer; - int mb_rows = post->y_height >> 4; - int mb_cols = post->y_width >> 4; + int mb_rows = ppbuf->y_height >> 4; + int mb_cols = ppbuf->y_width >> 4; int mb_index = 0; MODE_INFO *mi = cm->mi; @@ -733,7 +700,7 @@ int vp9_post_proc_frame(struct VP9Common *cm, char zz[4]; int dc_diff = !(mi[mb_index].mbmi.mode != I4X4_PRED && mi[mb_index].mbmi.mode != SPLITMV && - 
mi[mb_index].mbmi.skip_coeff); + mi[mb_index].mbmi.skip); if (cm->frame_type == KEY_FRAME) snprintf(zz, sizeof(zz) - 1, "a"); @@ -755,17 +722,15 @@ int vp9_post_proc_frame(struct VP9Common *cm, snprintf(message, sizeof(message), "Bitrate: %10.2f framerate: %10.2f ", cm->bitrate, cm->framerate); - vp9_blit_text(message, cm->post_proc_buffer.y_buffer, - cm->post_proc_buffer.y_stride); + vp9_blit_text(message, ppbuf->y_buffer, ppbuf->y_stride); } /* Draw motion vectors */ if ((flags & VP9D_DEBUG_DRAW_MV) && ppflags->display_mv_flag) { - YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer; - int width = post->y_width; - int height = post->y_height; - uint8_t *y_buffer = cm->post_proc_buffer.y_buffer; - int y_stride = cm->post_proc_buffer.y_stride; + int width = ppbuf->y_width; + int height = ppbuf->y_height; + uint8_t *y_buffer = ppbuf->y_buffer; + int y_stride = ppbuf->y_stride; MODE_INFO *mi = cm->mi; int x0, y0; @@ -904,13 +869,12 @@ int vp9_post_proc_frame(struct VP9Common *cm, if ((flags & VP9D_DEBUG_CLR_BLK_MODES) && (ppflags->display_mb_modes_flag || ppflags->display_b_modes_flag)) { int y, x; - YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer; - int width = post->y_width; - int height = post->y_height; - uint8_t *y_ptr = cm->post_proc_buffer.y_buffer; - uint8_t *u_ptr = cm->post_proc_buffer.u_buffer; - uint8_t *v_ptr = cm->post_proc_buffer.v_buffer; - int y_stride = cm->post_proc_buffer.y_stride; + int width = ppbuf->y_width; + int height = ppbuf->y_height; + uint8_t *y_ptr = ppbuf->y_buffer; + uint8_t *u_ptr = ppbuf->u_buffer; + uint8_t *v_ptr = ppbuf->v_buffer; + int y_stride = ppbuf->y_stride; MODE_INFO *mi = cm->mi; for (y = 0; y < height; y += 16) { @@ -969,13 +933,12 @@ int vp9_post_proc_frame(struct VP9Common *cm, if ((flags & VP9D_DEBUG_CLR_FRM_REF_BLKS) && ppflags->display_ref_frame_flag) { int y, x; - YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer; - int width = post->y_width; - int height = post->y_height; - uint8_t *y_ptr = cm->post_proc_buffer.y_buffer; - uint8_t *u_ptr = cm->post_proc_buffer.u_buffer; - uint8_t *v_ptr = cm->post_proc_buffer.v_buffer; - int y_stride = cm->post_proc_buffer.y_stride; + int width = ppbuf->y_width; + int height = ppbuf->y_height; + uint8_t *y_ptr = ppbuf->y_buffer; + uint8_t *u_ptr = ppbuf->u_buffer; + uint8_t *v_ptr = ppbuf->v_buffer; + int y_stride = ppbuf->y_stride; MODE_INFO *mi = cm->mi; for (y = 0; y < height; y += 16) { @@ -1002,7 +965,7 @@ int vp9_post_proc_frame(struct VP9Common *cm, } #endif - *dest = cm->post_proc_buffer; + *dest = *ppbuf; /* handle problem with extending borders */ dest->y_width = cm->width; diff --git a/libvpx/vp9/common/vp9_postproc.h b/libvpx/vp9/common/vp9_postproc.h index c63beae..b07d5d0 100644 --- a/libvpx/vp9/common/vp9_postproc.h +++ b/libvpx/vp9/common/vp9_postproc.h @@ -13,6 +13,11 @@ #define VP9_COMMON_VP9_POSTPROC_H_ #include "vpx_ports/mem.h" +#include "vp9/common/vp9_ppflags.h" + +#ifdef __cplusplus +extern "C" { +#endif struct postproc_state { int last_q; @@ -23,8 +28,7 @@ struct postproc_state { DECLARE_ALIGNED(16, char, bothclamp[16]); }; -#include "vp9/common/vp9_onyxc_int.h" -#include "vp9/common/vp9_ppflags.h" +struct VP9Common; int vp9_post_proc_frame(struct VP9Common *cm, YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *flags); @@ -33,4 +37,8 @@ void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q); void vp9_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_POSTPROC_H_ diff --git 
a/libvpx/vp9/common/vp9_ppflags.h b/libvpx/vp9/common/vp9_ppflags.h index 561c930..8168935 100644 --- a/libvpx/vp9/common/vp9_ppflags.h +++ b/libvpx/vp9/common/vp9_ppflags.h @@ -11,6 +11,10 @@ #ifndef VP9_COMMON_VP9_PPFLAGS_H_ #define VP9_COMMON_VP9_PPFLAGS_H_ +#ifdef __cplusplus +extern "C" { +#endif + enum { VP9D_NOFILTERING = 0, VP9D_DEBLOCK = 1 << 0, @@ -35,4 +39,8 @@ typedef struct { int display_mv_flag; } vp9_ppflags_t; +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_PPFLAGS_H_ diff --git a/libvpx/vp9/common/vp9_pragmas.h b/libvpx/vp9/common/vp9_pragmas.h index f079161..0efc713 100644 --- a/libvpx/vp9/common/vp9_pragmas.h +++ b/libvpx/vp9/common/vp9_pragmas.h @@ -11,6 +11,10 @@ #ifndef VP9_COMMON_VP9_PRAGMAS_H_ #define VP9_COMMON_VP9_PRAGMAS_H_ +#ifdef __cplusplus +extern "C" { +#endif + #ifdef __INTEL_COMPILER #pragma warning(disable:997 1011 170) #endif @@ -19,4 +23,8 @@ #pragma warning(disable:4799) #endif +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_PRAGMAS_H_ diff --git a/libvpx/vp9/common/vp9_pred_common.c b/libvpx/vp9/common/vp9_pred_common.c index 6018e17..197bcb6 100644 --- a/libvpx/vp9/common/vp9_pred_common.c +++ b/libvpx/vp9/common/vp9_pred_common.c @@ -14,134 +14,110 @@ #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_pred_common.h" #include "vp9/common/vp9_seg_common.h" -#include "vp9/common/vp9_treecoder.h" -static INLINE const MB_MODE_INFO *get_above_mbmi(const MODE_INFO *const above) { - return (above != NULL) ? &above->mbmi : NULL; -} - -static INLINE const MB_MODE_INFO *get_left_mbmi(const MODE_INFO *const left) { - return (left != NULL) ? &left->mbmi : NULL; +static INLINE const MB_MODE_INFO *get_mbmi(const MODE_INFO *const mi) { + return (mi != NULL) ? &mi->mbmi : NULL; } // Returns a context number for the given MB prediction signal -unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) { - const MODE_INFO *const above_mi = get_above_mi(xd); - const MODE_INFO *const left_mi = get_left_mi(xd); - const int above_in_image = above_mi != NULL; - const int left_in_image = left_mi != NULL; +int vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) { // Note: // The mode info data structure has a one element border above and to the // left of the entries correpsonding to real macroblocks. // The prediction flags in these dummy entries are initialised to 0. - // left - const int left_mv_pred = left_in_image ? is_inter_block(&left_mi->mbmi) - : 0; - const int left_interp = left_in_image && left_mv_pred - ? left_mi->mbmi.interp_filter - : SWITCHABLE_FILTERS; - - // above - const int above_mv_pred = above_in_image ? is_inter_block(&above_mi->mbmi) - : 0; - const int above_interp = above_in_image && above_mv_pred - ? above_mi->mbmi.interp_filter - : SWITCHABLE_FILTERS; - - if (left_interp == above_interp) - return left_interp; - else if (left_interp == SWITCHABLE_FILTERS && - above_interp != SWITCHABLE_FILTERS) - return above_interp; - else if (left_interp != SWITCHABLE_FILTERS && - above_interp == SWITCHABLE_FILTERS) - return left_interp; + const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd)); + const int left_type = left_mbmi != NULL && is_inter_block(left_mbmi) ? + left_mbmi->interp_filter : SWITCHABLE_FILTERS; + const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd)); + const int above_type = above_mbmi != NULL && is_inter_block(above_mbmi) ? 
+ above_mbmi->interp_filter : SWITCHABLE_FILTERS; + + if (left_type == above_type) + return left_type; + else if (left_type == SWITCHABLE_FILTERS && above_type != SWITCHABLE_FILTERS) + return above_type; + else if (left_type != SWITCHABLE_FILTERS && above_type == SWITCHABLE_FILTERS) + return left_type; else return SWITCHABLE_FILTERS; } -// Returns a context number for the given MB prediction signal -unsigned char vp9_get_pred_context_intra_inter(const MACROBLOCKD *xd) { - const MODE_INFO *const above_mi = get_above_mi(xd); - const MODE_INFO *const left_mi = get_left_mi(xd); - const MB_MODE_INFO *const above_mbmi = get_above_mbmi(above_mi); - const MB_MODE_INFO *const left_mbmi = get_left_mbmi(left_mi); - const int above_in_image = above_mi != NULL; - const int left_in_image = left_mi != NULL; - const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1; - const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1; - // The mode info data structure has a one element border above and to the - // left of the entries corresponding to real macroblocks. - // The prediction flags in these dummy entries are initialized to 0. - // 0 - inter/inter, inter/--, --/inter, --/-- - // 1 - intra/inter, inter/intra - // 2 - intra/--, --/intra - // 3 - intra/intra - if (above_in_image && left_in_image) // both edges available +// The mode info data structure has a one element border above and to the +// left of the entries corresponding to real macroblocks. +// The prediction flags in these dummy entries are initialized to 0. +// 0 - inter/inter, inter/--, --/inter, --/-- +// 1 - intra/inter, inter/intra +// 2 - intra/--, --/intra +// 3 - intra/intra +int vp9_get_intra_inter_context(const MACROBLOCKD *xd) { + const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd)); + const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd)); + const int has_above = above_mbmi != NULL; + const int has_left = left_mbmi != NULL; + + if (has_above && has_left) { // both edges available + const int above_intra = !is_inter_block(above_mbmi); + const int left_intra = !is_inter_block(left_mbmi); return left_intra && above_intra ? 3 : left_intra || above_intra; - else if (above_in_image || left_in_image) // one edge available - return 2 * (above_in_image ? above_intra : left_intra); - else + } else if (has_above || has_left) { // one edge available + return 2 * !is_inter_block(has_above ? above_mbmi : left_mbmi); + } else { return 0; + } } -// Returns a context number for the given MB prediction signal -unsigned char vp9_get_pred_context_comp_inter_inter(const VP9_COMMON *cm, - const MACROBLOCKD *xd) { - int pred_context; - const MODE_INFO *const above_mi = get_above_mi(xd); - const MODE_INFO *const left_mi = get_left_mi(xd); - const MB_MODE_INFO *const above_mbmi = get_above_mbmi(above_mi); - const MB_MODE_INFO *const left_mbmi = get_left_mbmi(left_mi); - const int above_in_image = above_mi != NULL; - const int left_in_image = left_mi != NULL; + +int vp9_get_reference_mode_context(const VP9_COMMON *cm, + const MACROBLOCKD *xd) { + int ctx; + const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd)); + const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd)); + const int has_above = above_mbmi != NULL; + const int has_left = left_mbmi != NULL; // Note: // The mode info data structure has a one element border above and to the // left of the entries correpsonding to real macroblocks. // The prediction flags in these dummy entries are initialised to 0. 
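The 0-3 mapping spelled out in the comment above reduces to two booleans. A self-contained restatement of vp9_get_intra_inter_context(), with MB_MODE_INFO shrunk to the single flag the context actually needs:

#include <assert.h>

typedef struct { int is_inter; } MB_MODE_INFO;  /* reduced for the example */

static int intra_inter_context(const MB_MODE_INFO *above,
                               const MB_MODE_INFO *left) {
  if (above && left) {          /* both edges available */
    const int above_intra = !above->is_inter;
    const int left_intra = !left->is_inter;
    return left_intra && above_intra ? 3 : left_intra || above_intra;
  } else if (above || left) {   /* one edge available */
    return 2 * !(above ? above : left)->is_inter;
  }
  return 0;                     /* no edges available */
}

int main(void) {
  const MB_MODE_INFO intra = { 0 }, inter = { 1 };
  assert(intra_inter_context(&inter, &inter) == 0);  /* inter/inter */
  assert(intra_inter_context(&intra, &inter) == 1);  /* intra/inter */
  assert(intra_inter_context(&intra, NULL) == 2);    /* intra/--   */
  assert(intra_inter_context(&intra, &intra) == 3);  /* intra/intra */
  return 0;
}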
- if (above_in_image && left_in_image) { // both edges available + if (has_above && has_left) { // both edges available if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi)) // neither edge uses comp pred (0/1) - pred_context = (above_mbmi->ref_frame[0] == cm->comp_fixed_ref) ^ - (left_mbmi->ref_frame[0] == cm->comp_fixed_ref); + ctx = (above_mbmi->ref_frame[0] == cm->comp_fixed_ref) ^ + (left_mbmi->ref_frame[0] == cm->comp_fixed_ref); else if (!has_second_ref(above_mbmi)) // one of two edges uses comp pred (2/3) - pred_context = 2 + (above_mbmi->ref_frame[0] == cm->comp_fixed_ref || - !is_inter_block(above_mbmi)); + ctx = 2 + (above_mbmi->ref_frame[0] == cm->comp_fixed_ref || + !is_inter_block(above_mbmi)); else if (!has_second_ref(left_mbmi)) // one of two edges uses comp pred (2/3) - pred_context = 2 + (left_mbmi->ref_frame[0] == cm->comp_fixed_ref || - !is_inter_block(left_mbmi)); + ctx = 2 + (left_mbmi->ref_frame[0] == cm->comp_fixed_ref || + !is_inter_block(left_mbmi)); else // both edges use comp pred (4) - pred_context = 4; - } else if (above_in_image || left_in_image) { // one edge available - const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi; + ctx = 4; + } else if (has_above || has_left) { // one edge available + const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi; if (!has_second_ref(edge_mbmi)) // edge does not use comp pred (0/1) - pred_context = edge_mbmi->ref_frame[0] == cm->comp_fixed_ref; + ctx = edge_mbmi->ref_frame[0] == cm->comp_fixed_ref; else // edge uses comp pred (3) - pred_context = 3; + ctx = 3; } else { // no edges available (1) - pred_context = 1; + ctx = 1; } - assert(pred_context >= 0 && pred_context < COMP_INTER_CONTEXTS); - return pred_context; + assert(ctx >= 0 && ctx < COMP_INTER_CONTEXTS); + return ctx; } // Returns a context number for the given MB prediction signal -unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, - const MACROBLOCKD *xd) { +int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, + const MACROBLOCKD *xd) { int pred_context; - const MODE_INFO *const above_mi = get_above_mi(xd); - const MODE_INFO *const left_mi = get_left_mi(xd); - const MB_MODE_INFO *const above_mbmi = get_above_mbmi(above_mi); - const MB_MODE_INFO *const left_mbmi = get_left_mbmi(left_mi); - const int above_in_image = above_mi != NULL; - const int left_in_image = left_mi != NULL; - const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1; - const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1; + const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd)); + const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd)); + const int above_in_image = above_mbmi != NULL; + const int left_in_image = left_mbmi != NULL; + // Note: // The mode info data structure has a one element border above and to the // left of the entries correpsonding to real macroblocks. 
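In the common case above where neither neighbour uses compound prediction, the context is just the XOR of two "points at the fixed compound reference" tests. A small check (reference-frame constants follow the usual VP9 ordering, and comp_fixed_ref is assumed to be ALTREF_FRAME purely for the example):

#include <assert.h>

enum { LAST_FRAME = 1, GOLDEN_FRAME = 2, ALTREF_FRAME = 3 };

int main(void) {
  const int comp_fixed_ref = ALTREF_FRAME;  /* assumed fixed compound ref */
  /* Neighbours agree about the fixed ref: context 0. */
  assert(((LAST_FRAME == comp_fixed_ref) ^
          (LAST_FRAME == comp_fixed_ref)) == 0);
  /* Neighbours disagree: context 1. */
  assert(((ALTREF_FRAME == comp_fixed_ref) ^
          (LAST_FRAME == comp_fixed_ref)) == 1);
  return 0;
}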
@@ -150,6 +126,9 @@ unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, const int var_ref_idx = !fix_ref_idx; if (above_in_image && left_in_image) { // both edges available + const int above_intra = !is_inter_block(above_mbmi); + const int left_intra = !is_inter_block(left_mbmi); + if (above_intra && left_intra) { // intra/intra (2) pred_context = 2; } else if (above_intra || left_intra) { // intra/inter @@ -163,10 +142,10 @@ unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, } else { // inter/inter const int l_sg = !has_second_ref(left_mbmi); const int a_sg = !has_second_ref(above_mbmi); - MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0] - : above_mbmi->ref_frame[var_ref_idx]; - MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0] - : left_mbmi->ref_frame[var_ref_idx]; + const MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0] + : above_mbmi->ref_frame[var_ref_idx]; + const MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0] + : left_mbmi->ref_frame[var_ref_idx]; if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) { pred_context = 0; @@ -179,8 +158,8 @@ unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, else pred_context = 1; } else if (l_sg || a_sg) { // single/comp - MV_REFERENCE_FRAME vrfc = l_sg ? vrfa : vrfl; - MV_REFERENCE_FRAME rfs = a_sg ? vrfa : vrfl; + const MV_REFERENCE_FRAME vrfc = l_sg ? vrfa : vrfl; + const MV_REFERENCE_FRAME rfs = a_sg ? vrfa : vrfl; if (vrfc == cm->comp_var_ref[1] && rfs != cm->comp_var_ref[1]) pred_context = 1; else if (rfs == cm->comp_var_ref[1] && vrfc != cm->comp_var_ref[1]) @@ -212,21 +191,21 @@ unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, return pred_context; } -unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) { + +int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) { int pred_context; - const MODE_INFO *const above_mi = get_above_mi(xd); - const MODE_INFO *const left_mi = get_left_mi(xd); - const MB_MODE_INFO *const above_mbmi = get_above_mbmi(above_mi); - const MB_MODE_INFO *const left_mbmi = get_left_mbmi(left_mi); - const int above_in_image = above_mi != NULL; - const int left_in_image = left_mi != NULL; - const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1; - const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1; + const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd)); + const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd)); + const int has_above = above_mbmi != NULL; + const int has_left = left_mbmi != NULL; // Note: // The mode info data structure has a one element border above and to the // left of the entries correpsonding to real macroblocks. // The prediction flags in these dummy entries are initialised to 0. 
- if (above_in_image && left_in_image) { // both edges available + if (has_above && has_left) { // both edges available + const int above_intra = !is_inter_block(above_mbmi); + const int left_intra = !is_inter_block(left_mbmi); + if (above_intra && left_intra) { // intra/intra pred_context = 2; } else if (above_intra || left_intra) { // intra/inter or inter/intra @@ -237,30 +216,31 @@ unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) { pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME || edge_mbmi->ref_frame[1] == LAST_FRAME); } else { // inter/inter - if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi)) { - pred_context = 2 * (above_mbmi->ref_frame[0] == LAST_FRAME) + - 2 * (left_mbmi->ref_frame[0] == LAST_FRAME); - } else if (has_second_ref(above_mbmi) && has_second_ref(left_mbmi)) { - pred_context = 1 + (above_mbmi->ref_frame[0] == LAST_FRAME || - above_mbmi->ref_frame[1] == LAST_FRAME || - left_mbmi->ref_frame[0] == LAST_FRAME || - left_mbmi->ref_frame[1] == LAST_FRAME); - } else { - const MV_REFERENCE_FRAME rfs = !has_second_ref(above_mbmi) ? - above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0]; - const MV_REFERENCE_FRAME crf1 = has_second_ref(above_mbmi) ? - above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0]; - const MV_REFERENCE_FRAME crf2 = has_second_ref(above_mbmi) ? - above_mbmi->ref_frame[1] : left_mbmi->ref_frame[1]; + const int above_has_second = has_second_ref(above_mbmi); + const int left_has_second = has_second_ref(left_mbmi); + const MV_REFERENCE_FRAME above0 = above_mbmi->ref_frame[0]; + const MV_REFERENCE_FRAME above1 = above_mbmi->ref_frame[1]; + const MV_REFERENCE_FRAME left0 = left_mbmi->ref_frame[0]; + const MV_REFERENCE_FRAME left1 = left_mbmi->ref_frame[1]; + + if (above_has_second && left_has_second) { + pred_context = 1 + (above0 == LAST_FRAME || above1 == LAST_FRAME || + left0 == LAST_FRAME || left1 == LAST_FRAME); + } else if (above_has_second || left_has_second) { + const MV_REFERENCE_FRAME rfs = !above_has_second ? above0 : left0; + const MV_REFERENCE_FRAME crf1 = above_has_second ? above0 : left0; + const MV_REFERENCE_FRAME crf2 = above_has_second ? above1 : left1; if (rfs == LAST_FRAME) pred_context = 3 + (crf1 == LAST_FRAME || crf2 == LAST_FRAME); else - pred_context = crf1 == LAST_FRAME || crf2 == LAST_FRAME; + pred_context = (crf1 == LAST_FRAME || crf2 == LAST_FRAME); + } else { + pred_context = 2 * (above0 == LAST_FRAME) + 2 * (left0 == LAST_FRAME); } } - } else if (above_in_image || left_in_image) { // one edge available - const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi; + } else if (has_above || has_left) { // one edge available + const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi; if (!is_inter_block(edge_mbmi)) { // intra pred_context = 2; } else { // inter @@ -278,22 +258,21 @@ unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) { return pred_context; } -unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { +int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { int pred_context; - const MODE_INFO *const above_mi = get_above_mi(xd); - const MODE_INFO *const left_mi = get_left_mi(xd); - const MB_MODE_INFO *const above_mbmi = get_above_mbmi(above_mi); - const MB_MODE_INFO *const left_mbmi = get_left_mbmi(left_mi); - const int above_in_image = above_mi != NULL; - const int left_in_image = left_mi != NULL; - const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1; - const int left_intra = left_in_image ? 
!is_inter_block(left_mbmi) : 1; + const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd)); + const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd)); + const int has_above = above_mbmi != NULL; + const int has_left = left_mbmi != NULL; // Note: // The mode info data structure has a one element border above and to the // left of the entries correpsonding to real macroblocks. // The prediction flags in these dummy entries are initialised to 0. - if (above_in_image && left_in_image) { // both edges available + if (has_above && has_left) { // both edges available + const int above_intra = !is_inter_block(above_mbmi); + const int left_intra = !is_inter_block(left_mbmi); + if (above_intra && left_intra) { // intra/intra pred_context = 2; } else if (above_intra || left_intra) { // intra/inter or inter/intra @@ -308,36 +287,25 @@ unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { edge_mbmi->ref_frame[1] == GOLDEN_FRAME); } } else { // inter/inter - if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi)) { - if (above_mbmi->ref_frame[0] == LAST_FRAME && - left_mbmi->ref_frame[0] == LAST_FRAME) { - pred_context = 3; - } else if (above_mbmi->ref_frame[0] == LAST_FRAME || - left_mbmi->ref_frame[0] == LAST_FRAME) { - const MB_MODE_INFO *edge_mbmi = - above_mbmi->ref_frame[0] == LAST_FRAME ? left_mbmi : above_mbmi; - - pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME); - } else { - pred_context = 2 * (above_mbmi->ref_frame[0] == GOLDEN_FRAME) + - 2 * (left_mbmi->ref_frame[0] == GOLDEN_FRAME); - } - } else if (has_second_ref(above_mbmi) && has_second_ref(left_mbmi)) { - if (above_mbmi->ref_frame[0] == left_mbmi->ref_frame[0] && - above_mbmi->ref_frame[1] == left_mbmi->ref_frame[1]) - pred_context = 3 * (above_mbmi->ref_frame[0] == GOLDEN_FRAME || - above_mbmi->ref_frame[1] == GOLDEN_FRAME || - left_mbmi->ref_frame[0] == GOLDEN_FRAME || - left_mbmi->ref_frame[1] == GOLDEN_FRAME); + const int above_has_second = has_second_ref(above_mbmi); + const int left_has_second = has_second_ref(left_mbmi); + const MV_REFERENCE_FRAME above0 = above_mbmi->ref_frame[0]; + const MV_REFERENCE_FRAME above1 = above_mbmi->ref_frame[1]; + const MV_REFERENCE_FRAME left0 = left_mbmi->ref_frame[0]; + const MV_REFERENCE_FRAME left1 = left_mbmi->ref_frame[1]; + + if (above_has_second && left_has_second) { + if (above0 == left0 && above1 == left1) + pred_context = 3 * (above0 == GOLDEN_FRAME || + above1 == GOLDEN_FRAME || + left0 == GOLDEN_FRAME || + left1 == GOLDEN_FRAME); else pred_context = 2; - } else { - const MV_REFERENCE_FRAME rfs = !has_second_ref(above_mbmi) ? - above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0]; - const MV_REFERENCE_FRAME crf1 = has_second_ref(above_mbmi) ? - above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0]; - const MV_REFERENCE_FRAME crf2 = has_second_ref(above_mbmi) ? - above_mbmi->ref_frame[1] : left_mbmi->ref_frame[1]; + } else if (above_has_second || left_has_second) { + const MV_REFERENCE_FRAME rfs = !above_has_second ? above0 : left0; + const MV_REFERENCE_FRAME crf1 = above_has_second ? above0 : left0; + const MV_REFERENCE_FRAME crf2 = above_has_second ? 
above1 : left1; if (rfs == GOLDEN_FRAME) pred_context = 3 + (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME); @@ -345,10 +313,21 @@ unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { pred_context = crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME; else pred_context = 1 + 2 * (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME); + } else { + if (above0 == LAST_FRAME && left0 == LAST_FRAME) { + pred_context = 3; + } else if (above0 == LAST_FRAME || left0 == LAST_FRAME) { + const MV_REFERENCE_FRAME edge0 = (above0 == LAST_FRAME) ? left0 + : above0; + pred_context = 4 * (edge0 == GOLDEN_FRAME); + } else { + pred_context = 2 * (above0 == GOLDEN_FRAME) + + 2 * (left0 == GOLDEN_FRAME); + } } } - } else if (above_in_image || left_in_image) { // one edge available - const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi; + } else if (has_above || has_left) { // one edge available + const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi; if (!is_inter_block(edge_mbmi) || (edge_mbmi->ref_frame[0] == LAST_FRAME && !has_second_ref(edge_mbmi))) @@ -368,36 +347,23 @@ unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { // The mode info data structure has a one element border above and to the // left of the entries corresponding to real blocks. // The prediction flags in these dummy entries are initialized to 0. -unsigned char vp9_get_pred_context_tx_size(const MACROBLOCKD *xd) { - const MODE_INFO *const above_mi = get_above_mi(xd); - const MODE_INFO *const left_mi = get_left_mi(xd); - const MB_MODE_INFO *const above_mbmi = get_above_mbmi(above_mi); - const MB_MODE_INFO *const left_mbmi = get_left_mbmi(left_mi); - const int above_in_image = above_mi != NULL; - const int left_in_image = left_mi != NULL; +int vp9_get_tx_size_context(const MACROBLOCKD *xd) { const int max_tx_size = max_txsize_lookup[xd->mi_8x8[0]->mbmi.sb_type]; - int above_context = max_tx_size; - int left_context = max_tx_size; - - if (above_in_image) - above_context = above_mbmi->skip_coeff ? max_tx_size - : above_mbmi->tx_size; - - if (left_in_image) - left_context = left_mbmi->skip_coeff ? max_tx_size - : left_mbmi->tx_size; - - if (!left_in_image) - left_context = above_context; - - if (!above_in_image) - above_context = left_context; - - return above_context + left_context > max_tx_size; -} - -void vp9_set_pred_flag_seg_id(MACROBLOCKD *xd, uint8_t pred_flag) { - xd->mi_8x8[0]->mbmi.seg_id_predicted = pred_flag; + const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd)); + const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd)); + const int has_above = above_mbmi != NULL; + const int has_left = left_mbmi != NULL; + int above_ctx = (has_above && !above_mbmi->skip) ? above_mbmi->tx_size + : max_tx_size; + int left_ctx = (has_left && !left_mbmi->skip) ? left_mbmi->tx_size + : max_tx_size; + if (!has_left) + left_ctx = above_ctx; + + if (!has_above) + above_ctx = left_ctx; + + return (above_ctx + left_ctx) > max_tx_size; } int vp9_get_segment_id(VP9_COMMON *cm, const uint8_t *segment_ids, diff --git a/libvpx/vp9/common/vp9_pred_common.h b/libvpx/vp9/common/vp9_pred_common.h index 9190930..6c7a0d3 100644 --- a/libvpx/vp9/common/vp9_pred_common.h +++ b/libvpx/vp9/common/vp9_pred_common.h @@ -14,6 +14,10 @@ #include "vp9/common/vp9_blockd.h" #include "vp9/common/vp9_onyxc_int.h" +#ifdef __cplusplus +extern "C" { +#endif + static INLINE const MODE_INFO *get_above_mi(const MACROBLOCKD *const xd) { return xd->up_available ? 
xd->mi_8x8[-xd->mode_info_stride] : NULL; } @@ -35,55 +39,42 @@ static INLINE int vp9_get_pred_context_seg_id(const MACROBLOCKD *xd) { return above_sip + left_sip; } -static INLINE vp9_prob vp9_get_pred_prob_seg_id(struct segmentation *seg, +static INLINE vp9_prob vp9_get_pred_prob_seg_id(const struct segmentation *seg, const MACROBLOCKD *xd) { return seg->pred_probs[vp9_get_pred_context_seg_id(xd)]; } -void vp9_set_pred_flag_seg_id(MACROBLOCKD *xd, uint8_t pred_flag); - -static INLINE int vp9_get_pred_context_mbskip(const MACROBLOCKD *xd) { +static INLINE int vp9_get_skip_context(const MACROBLOCKD *xd) { const MODE_INFO *const above_mi = get_above_mi(xd); const MODE_INFO *const left_mi = get_left_mi(xd); - const int above_skip_coeff = (above_mi != NULL) ? - above_mi->mbmi.skip_coeff : 0; - const int left_skip_coeff = (left_mi != NULL) ? left_mi->mbmi.skip_coeff : 0; - - return above_skip_coeff + left_skip_coeff; + const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0; + const int left_skip = (left_mi != NULL) ? left_mi->mbmi.skip : 0; + return above_skip + left_skip; } -static INLINE vp9_prob vp9_get_pred_prob_mbskip(const VP9_COMMON *cm, - const MACROBLOCKD *xd) { - return cm->fc.mbskip_probs[vp9_get_pred_context_mbskip(xd)]; +static INLINE vp9_prob vp9_get_skip_prob(const VP9_COMMON *cm, + const MACROBLOCKD *xd) { + return cm->fc.skip_probs[vp9_get_skip_context(xd)]; } -static INLINE unsigned char vp9_get_pred_flag_mbskip(const MACROBLOCKD *xd) { - return xd->mi_8x8[0]->mbmi.skip_coeff; -} +int vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd); -unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd); +int vp9_get_intra_inter_context(const MACROBLOCKD *xd); -unsigned char vp9_get_pred_context_intra_inter(const MACROBLOCKD *xd); - -static INLINE vp9_prob vp9_get_pred_prob_intra_inter(const VP9_COMMON *cm, - const MACROBLOCKD *xd) { - const int pred_context = vp9_get_pred_context_intra_inter(xd); - return cm->fc.intra_inter_prob[pred_context]; +static INLINE vp9_prob vp9_get_intra_inter_prob(const VP9_COMMON *cm, + const MACROBLOCKD *xd) { + return cm->fc.intra_inter_prob[vp9_get_intra_inter_context(xd)]; } -unsigned char vp9_get_pred_context_comp_inter_inter(const VP9_COMMON *cm, - const MACROBLOCKD *xd); +int vp9_get_reference_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd); - -static INLINE -vp9_prob vp9_get_pred_prob_comp_inter_inter(const VP9_COMMON *cm, - const MACROBLOCKD *xd) { - const int pred_context = vp9_get_pred_context_comp_inter_inter(cm, xd); - return cm->fc.comp_inter_prob[pred_context]; +static INLINE vp9_prob vp9_get_reference_mode_prob(const VP9_COMMON *cm, + const MACROBLOCKD *xd) { + return cm->fc.comp_inter_prob[vp9_get_reference_mode_context(cm, xd)]; } -unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, - const MACROBLOCKD *xd); +int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, + const MACROBLOCKD *xd); static INLINE vp9_prob vp9_get_pred_prob_comp_ref_p(const VP9_COMMON *cm, const MACROBLOCKD *xd) { @@ -91,26 +82,24 @@ static INLINE vp9_prob vp9_get_pred_prob_comp_ref_p(const VP9_COMMON *cm, return cm->fc.comp_ref_prob[pred_context]; } -unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd); +int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd); static INLINE vp9_prob vp9_get_pred_prob_single_ref_p1(const VP9_COMMON *cm, const MACROBLOCKD *xd) { - const int pred_context = vp9_get_pred_context_single_ref_p1(xd); - return 
cm->fc.single_ref_prob[pred_context][0]; + return cm->fc.single_ref_prob[vp9_get_pred_context_single_ref_p1(xd)][0]; } -unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd); +int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd); static INLINE vp9_prob vp9_get_pred_prob_single_ref_p2(const VP9_COMMON *cm, const MACROBLOCKD *xd) { - const int pred_context = vp9_get_pred_context_single_ref_p2(xd); - return cm->fc.single_ref_prob[pred_context][1]; + return cm->fc.single_ref_prob[vp9_get_pred_context_single_ref_p2(xd)][1]; } -unsigned char vp9_get_pred_context_tx_size(const MACROBLOCKD *xd); +int vp9_get_tx_size_context(const MACROBLOCKD *xd); -static const vp9_prob *get_tx_probs(TX_SIZE max_tx_size, int ctx, - const struct tx_probs *tx_probs) { +static INLINE const vp9_prob *get_tx_probs(TX_SIZE max_tx_size, int ctx, + const struct tx_probs *tx_probs) { switch (max_tx_size) { case TX_8X8: return tx_probs->p8x8[ctx]; @@ -119,19 +108,19 @@ static const vp9_prob *get_tx_probs(TX_SIZE max_tx_size, int ctx, case TX_32X32: return tx_probs->p32x32[ctx]; default: - assert(!"Invalid max_tx_size."); + assert(0 && "Invalid max_tx_size."); return NULL; } } -static const vp9_prob *get_tx_probs2(TX_SIZE max_tx_size, const MACROBLOCKD *xd, - const struct tx_probs *tx_probs) { - const int ctx = vp9_get_pred_context_tx_size(xd); - return get_tx_probs(max_tx_size, ctx, tx_probs); +static INLINE const vp9_prob *get_tx_probs2(TX_SIZE max_tx_size, + const MACROBLOCKD *xd, + const struct tx_probs *tx_probs) { + return get_tx_probs(max_tx_size, vp9_get_tx_size_context(xd), tx_probs); } -static unsigned int *get_tx_counts(TX_SIZE max_tx_size, int ctx, - struct tx_counts *tx_counts) { +static INLINE unsigned int *get_tx_counts(TX_SIZE max_tx_size, int ctx, + struct tx_counts *tx_counts) { switch (max_tx_size) { case TX_8X8: return tx_counts->p8x8[ctx]; @@ -140,9 +129,13 @@ static unsigned int *get_tx_counts(TX_SIZE max_tx_size, int ctx, case TX_32X32: return tx_counts->p32x32[ctx]; default: - assert(!"Invalid max_tx_size."); + assert(0 && "Invalid max_tx_size."); return NULL; } } +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_PRED_COMMON_H_ diff --git a/libvpx/vp9/common/vp9_prob.c b/libvpx/vp9/common/vp9_prob.c new file mode 100644 index 0000000..a1befc6 --- /dev/null +++ b/libvpx/vp9/common/vp9_prob.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2013 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "vp9/common/vp9_prob.h" + +const uint8_t vp9_norm[256] = { + 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + + +static unsigned int tree_merge_probs_impl(unsigned int i, + const vp9_tree_index *tree, + const vp9_prob *pre_probs, + const unsigned int *counts, + unsigned int count_sat, + unsigned int max_update, + vp9_prob *probs) { + const int l = tree[i]; + const unsigned int left_count = (l <= 0) + ? counts[-l] + : tree_merge_probs_impl(l, tree, pre_probs, counts, + count_sat, max_update, probs); + const int r = tree[i + 1]; + const unsigned int right_count = (r <= 0) + ? counts[-r] + : tree_merge_probs_impl(r, tree, pre_probs, counts, + count_sat, max_update, probs); + const unsigned int ct[2] = { left_count, right_count }; + probs[i >> 1] = merge_probs(pre_probs[i >> 1], ct, + count_sat, max_update); + return left_count + right_count; +} + +void vp9_tree_merge_probs(const vp9_tree_index *tree, const vp9_prob *pre_probs, + const unsigned int *counts, unsigned int count_sat, + unsigned int max_update_factor, vp9_prob *probs) { + tree_merge_probs_impl(0, tree, pre_probs, counts, count_sat, + max_update_factor, probs); +} diff --git a/libvpx/vp9/common/vp9_prob.h b/libvpx/vp9/common/vp9_prob.h new file mode 100644 index 0000000..f361480 --- /dev/null +++ b/libvpx/vp9/common/vp9_prob.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2013 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VP9_COMMON_VP9_PROB_H_ +#define VP9_COMMON_VP9_PROB_H_ + +#include "./vpx_config.h" + +#include "vpx_ports/mem.h" +#include "vpx/vpx_integer.h" + +#include "vp9/common/vp9_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef uint8_t vp9_prob; + +#define MAX_PROB 255 + +#define vp9_prob_half ((vp9_prob) 128) + +typedef int8_t vp9_tree_index; + +#define TREE_SIZE(leaf_count) (2 * (leaf_count) - 2) + +#define vp9_complement(x) (255 - x) + +/* We build coding trees compactly in arrays. + Each node of the tree is a pair of vp9_tree_indices. + Array index often references a corresponding probability table. + Index <= 0 means done encoding/decoding and value = -Index, + Index > 0 means need another bit, specification at index. + Nonnegative indices are always even; processing begins at node 0. */ + +typedef const vp9_tree_index vp9_tree[]; + +static INLINE vp9_prob clip_prob(int p) { + return (p > 255) ? 255u : (p < 1) ? 1u : p; +} + +// int64 is not needed for normal frame level calculations. 
+// However, when outputting entropy stats accumulated over many frames +// or even clips, we can overflow int math. +#ifdef ENTROPY_STATS +static INLINE vp9_prob get_prob(int num, int den) { + return (den == 0) ? 128u : clip_prob(((int64_t)num * 256 + (den >> 1)) / den); +} +#else +static INLINE vp9_prob get_prob(int num, int den) { + return (den == 0) ? 128u : clip_prob((num * 256 + (den >> 1)) / den); +} +#endif + +static INLINE vp9_prob get_binary_prob(int n0, int n1) { + return get_prob(n0, n0 + n1); +} + +/* This function assumes prob1 and prob2 are already within [1,255] range. */ +static INLINE vp9_prob weighted_prob(int prob1, int prob2, int factor) { + return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8); +} + +static INLINE vp9_prob merge_probs(vp9_prob pre_prob, + const unsigned int ct[2], + unsigned int count_sat, + unsigned int max_update_factor) { + const vp9_prob prob = get_binary_prob(ct[0], ct[1]); + const unsigned int count = MIN(ct[0] + ct[1], count_sat); + const unsigned int factor = max_update_factor * count / count_sat; + return weighted_prob(pre_prob, prob, factor); +} + +void vp9_tree_merge_probs(const vp9_tree_index *tree, const vp9_prob *pre_probs, + const unsigned int *counts, unsigned int count_sat, + unsigned int max_update_factor, vp9_prob *probs); + + +DECLARE_ALIGNED(16, extern const uint8_t, vp9_norm[256]); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_COMMON_VP9_PROB_H_ diff --git a/libvpx/vp9/common/vp9_quant_common.c b/libvpx/vp9/common/vp9_quant_common.c index 6dbdb42..def1255 100644 --- a/libvpx/vp9/common/vp9_quant_common.c +++ b/libvpx/vp9/common/vp9_quant_common.c @@ -130,12 +130,13 @@ int16_t vp9_ac_quant(int qindex, int delta) { } -int vp9_get_qindex(struct segmentation *seg, int segment_id, int base_qindex) { +int vp9_get_qindex(const struct segmentation *seg, int segment_id, + int base_qindex) { if (vp9_segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) { const int data = vp9_get_segdata(seg, segment_id, SEG_LVL_ALT_Q); - return seg->abs_delta == SEGMENT_ABSDATA ? - data : // Abs value - clamp(base_qindex + data, 0, MAXQ); // Delta value + const int seg_qindex = seg->abs_delta == SEGMENT_ABSDATA ?
+ data : base_qindex + data; + return clamp(seg_qindex, 0, MAXQ); } else { return base_qindex; } diff --git a/libvpx/vp9/common/vp9_quant_common.h b/libvpx/vp9/common/vp9_quant_common.h index 83f2fb6..5811040 100644 --- a/libvpx/vp9/common/vp9_quant_common.h +++ b/libvpx/vp9/common/vp9_quant_common.h @@ -13,6 +13,10 @@ #include "vp9/common/vp9_blockd.h" +#ifdef __cplusplus +extern "C" { +#endif + #define MINQ 0 #define MAXQ 255 #define QINDEX_RANGE (MAXQ - MINQ + 1) @@ -23,6 +27,11 @@ void vp9_init_quant_tables(); int16_t vp9_dc_quant(int qindex, int delta); int16_t vp9_ac_quant(int qindex, int delta); -int vp9_get_qindex(struct segmentation *seg, int segment_id, int base_qindex); +int vp9_get_qindex(const struct segmentation *seg, int segment_id, + int base_qindex); + +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_COMMON_VP9_QUANT_COMMON_H_ diff --git a/libvpx/vp9/common/vp9_reconinter.c b/libvpx/vp9/common/vp9_reconinter.c index 7cc66c8..e357b36 100644 --- a/libvpx/vp9/common/vp9_reconinter.c +++ b/libvpx/vp9/common/vp9_reconinter.c @@ -20,59 +20,81 @@ #include "vp9/common/vp9_reconinter.h" #include "vp9/common/vp9_reconintra.h" -void vp9_setup_interp_filters(MACROBLOCKD *xd, - INTERPOLATION_TYPE mcomp_filter_type, - VP9_COMMON *cm) { - if (xd->mi_8x8 && xd->mi_8x8[0]) { - MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; - - set_scale_factors(xd, mbmi->ref_frame[0] - LAST_FRAME, - mbmi->ref_frame[1] - LAST_FRAME, - cm->active_ref_scale); - } else { - set_scale_factors(xd, -1, -1, cm->active_ref_scale); - } +static void build_mc_border(const uint8_t *src, int src_stride, + uint8_t *dst, int dst_stride, + int x, int y, int b_w, int b_h, int w, int h) { + // Get a pointer to the start of the real data for this row. + const uint8_t *ref_row = src - x - y * src_stride; + + if (y >= h) + ref_row += (h - 1) * src_stride; + else if (y > 0) + ref_row += y * src_stride; + + do { + int right = 0, copy; + int left = x < 0 ? -x : 0; + + if (left > b_w) + left = b_w; + + if (x + b_w > w) + right = x + b_w - w; + + if (right > b_w) + right = b_w; + + copy = b_w - left - right; - xd->subpix.filter_x = xd->subpix.filter_y = - vp9_get_filter_kernel(mcomp_filter_type == SWITCHABLE ? 
- EIGHTTAP : mcomp_filter_type); + if (left) + memset(dst, ref_row[0], left); - assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0); + if (copy) + memcpy(dst + left, ref_row + x + left, copy); + + if (right) + memset(dst + left + copy, ref_row[w - 1], right); + + dst += dst_stride; + ++y; + + if (y > 0 && y < h) + ref_row += src_stride; + } while (--b_h); } static void inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, - const MV32 *mv, - const struct scale_factors *scale, + const int subpel_x, + const int subpel_y, + const struct scale_factors *sf, int w, int h, int ref, - const struct subpix_fn_table *subpix, + const InterpKernel *kernel, int xs, int ys) { - const int subpel_x = mv->col & SUBPEL_MASK; - const int subpel_y = mv->row & SUBPEL_MASK; - - src += (mv->row >> SUBPEL_BITS) * src_stride + (mv->col >> SUBPEL_BITS); - scale->sfc->predict[subpel_x != 0][subpel_y != 0][ref]( + sf->predict[subpel_x != 0][subpel_y != 0][ref]( src, src_stride, dst, dst_stride, - subpix->filter_x[subpel_x], xs, - subpix->filter_y[subpel_y], ys, - w, h); + kernel[subpel_x], xs, kernel[subpel_y], ys, w, h); } void vp9_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const MV *src_mv, - const struct scale_factors *scale, + const struct scale_factors *sf, int w, int h, int ref, - const struct subpix_fn_table *subpix, - enum mv_precision precision) { + const InterpKernel *kernel, + enum mv_precision precision, + int x, int y) { const int is_q4 = precision == MV_PRECISION_Q4; const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2, is_q4 ? src_mv->col : src_mv->col * 2 }; - const struct scale_factors_common *sfc = scale->sfc; - const MV32 mv = sfc->scale_mv(&mv_q4, scale); + MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf); + const int subpel_x = mv.col & SUBPEL_MASK; + const int subpel_y = mv.row & SUBPEL_MASK; - inter_predictor(src, src_stride, dst, dst_stride, &mv, scale, - w, h, ref, subpix, sfc->x_step_q4, sfc->y_step_q4); + src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); + + inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, + sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4); } static INLINE int round_mv_comp_q4(int value) { @@ -117,30 +139,17 @@ MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, return clamped_mv; } - -// TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could -// calculate the subsampled BLOCK_SIZE, but that type isn't defined for -// sizes smaller than 16x16 yet. 
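The new build_mc_border() routine above replicates frame-edge pixels one row at a time. A minimal standalone sketch of that per-row rule, using a hypothetical pad_row() helper and made-up sizes (editorial illustration, not libvpx API):

#include <stdint.h>
#include <string.h>

/* Clamp the requested window [x, x + b_w) against the frame width w,
 * copy the in-frame span, and replicate the edge pixel on either side,
 * mirroring the loop body of build_mc_border(). */
static void pad_row(const uint8_t *ref_row, uint8_t *dst,
                    int x, int b_w, int w) {
  int left = x < 0 ? -x : 0;
  int right = x + b_w > w ? x + b_w - w : 0;
  int copy;
  if (left > b_w) left = b_w;
  if (right > b_w) right = b_w;
  copy = b_w - left - right;
  if (left) memset(dst, ref_row[0], left);                      /* left edge */
  if (copy) memcpy(dst + left, ref_row + x + left, copy);       /* in frame */
  if (right) memset(dst + left + copy, ref_row[w - 1], right);  /* right edge */
}

For x = -3, b_w = 8, w = 100 this writes three copies of the leftmost pixel followed by the first five in-frame pixels, which is what the loop above produces for each row before advancing ref_row.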
static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, - BLOCK_SIZE bsize, int pred_w, int pred_h, + int bw, int bh, + int x, int y, int w, int h, int mi_x, int mi_y) { struct macroblockd_plane *const pd = &xd->plane[plane]; - const int bwl = b_width_log2(bsize) - pd->subsampling_x; - const int bw = 4 << bwl; - const int bh = plane_block_height(bsize, pd); - const int x = 4 * (block & ((1 << bwl) - 1)); - const int y = 4 * (block >> bwl); const MODE_INFO *mi = xd->mi_8x8[0]; const int is_compound = has_second_ref(&mi->mbmi); int ref; - assert(x < bw); - assert(y < bh); - assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_w == bw); - assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_h == bh); - for (ref = 0; ref < 1 + is_compound; ++ref) { - struct scale_factors *const scale = &xd->scale_factor[ref]; + const struct scale_factors *const sf = &xd->block_refs[ref]->sf; struct buf_2d *const pre_buf = &pd->pre[ref]; struct buf_2d *const dst_buf = &pd->dst; uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; @@ -165,25 +174,27 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, uint8_t *pre; MV32 scaled_mv; - int xs, ys; - - if (vp9_is_scaled(scale->sfc)) { - pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, scale); - scale->sfc->set_scaled_offsets(scale, mi_y + y, mi_x + x); - scaled_mv = scale->sfc->scale_mv(&mv_q4, scale); - xs = scale->sfc->x_step_q4; - ys = scale->sfc->y_step_q4; + int xs, ys, subpel_x, subpel_y; + + if (vp9_is_scaled(sf)) { + pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf); + scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf); + xs = sf->x_step_q4; + ys = sf->y_step_q4; } else { pre = pre_buf->buf + (y * pre_buf->stride + x); scaled_mv.row = mv_q4.row; scaled_mv.col = mv_q4.col; xs = ys = 16; } + subpel_x = scaled_mv.col & SUBPEL_MASK; + subpel_y = scaled_mv.row & SUBPEL_MASK; + pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride + + (scaled_mv.col >> SUBPEL_BITS); inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, - &scaled_mv, scale, - 4 << pred_w, 4 << pred_h, ref, - &xd->subpix, xs, ys); + subpel_x, subpel_y, sf, w, h, ref, xd->interp_kernel, + xs, ys); } } @@ -191,20 +202,26 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, int mi_row, int mi_col, int plane_from, int plane_to) { int plane; + const int mi_x = mi_col * MI_SIZE; + const int mi_y = mi_row * MI_SIZE; for (plane = plane_from; plane <= plane_to; ++plane) { - const int mi_x = mi_col * MI_SIZE; - const int mi_y = mi_row * MI_SIZE; - const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x; - const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y; + const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, + &xd->plane[plane]); + const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; + const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; + const int bw = 4 * num_4x4_w; + const int bh = 4 * num_4x4_h; if (xd->mi_8x8[0]->mbmi.sb_type < BLOCK_8X8) { int i = 0, x, y; assert(bsize == BLOCK_8X8); - for (y = 0; y < 1 << bhl; ++y) - for (x = 0; x < 1 << bwl; ++x) - build_inter_predictors(xd, plane, i++, bsize, 0, 0, mi_x, mi_y); + for (y = 0; y < num_4x4_h; ++y) + for (x = 0; x < num_4x4_w; ++x) + build_inter_predictors(xd, plane, i++, bw, bh, + 4 * x, 4 * y, 4, 4, mi_x, mi_y); } else { - build_inter_predictors(xd, plane, 0, bsize, bwl, bhl, mi_x, mi_y); + build_inter_predictors(xd, plane, 0, bw, bh, + 0, 0, bw, bh, mi_x, mi_y); } } } @@ 
-224,22 +241,182 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, MAX_MB_PLANE - 1); } -// TODO(dkovalev: find better place for this function) -void vp9_setup_scale_factors(VP9_COMMON *cm, int i) { - const int ref = cm->active_ref_idx[i]; - struct scale_factors *const sf = &cm->active_ref_scale[i]; - struct scale_factors_common *const sfc = &cm->active_ref_scale_comm[i]; - if (ref >= NUM_YV12_BUFFERS) { - vp9_zero(*sf); - vp9_zero(*sfc); - } else { - YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref]; - vp9_setup_scale_factors_for_frame(sf, sfc, - fb->y_crop_width, fb->y_crop_height, - cm->width, cm->height); - - if (vp9_is_scaled(sfc)) - vp9_extend_frame_borders(fb, cm->subsampling_x, cm->subsampling_y); +// TODO(jingning): This function serves as a placeholder for decoder prediction +// using on demand border extension. It should be moved to /decoder/ directory. +static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block, + int x, int y, int w, int h, + int mi_x, int mi_y) { + struct macroblockd_plane *const pd = &xd->plane[plane]; + const MODE_INFO *mi = xd->mi_8x8[0]; + const int is_compound = has_second_ref(&mi->mbmi); + int ref; + + for (ref = 0; ref < 1 + is_compound; ++ref) { + const struct scale_factors *const sf = &xd->block_refs[ref]->sf; + struct buf_2d *const pre_buf = &pd->pre[ref]; + struct buf_2d *const dst_buf = &pd->dst; + uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; + + // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the + // same MV (the average of the 4 luma MVs) but we could do something + // smarter for non-4:2:0. Just punt for now, pending the changes to get + // rid of SPLITMV mode entirely. + const MV mv = mi->mbmi.sb_type < BLOCK_8X8 + ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv + : mi_mv_pred_q4(mi, ref)) + : mi->mbmi.mv[ref].as_mv; + MV32 scaled_mv; + int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride, + subpel_x, subpel_y; + uint8_t *ref_frame, *buf_ptr; + const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf; + const MV mv_q4 = { + mv.row * (1 << (1 - pd->subsampling_y)), + mv.col * (1 << (1 - pd->subsampling_x)) + }; + + // Get reference frame pointer, width and height. + if (plane == 0) { + frame_width = ref_buf->y_crop_width; + frame_height = ref_buf->y_crop_height; + ref_frame = ref_buf->y_buffer; + } else { + frame_width = ref_buf->uv_crop_width; + frame_height = ref_buf->uv_crop_height; + ref_frame = plane == 1 ? ref_buf->u_buffer : ref_buf->v_buffer; + } + + // Get block position in current frame. + x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x; + y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y; + + // Precision of x0_16 and y0_16 is 1/16th pixel. + x0_16 = x0 << SUBPEL_BITS; + y0_16 = y0 << SUBPEL_BITS; + + if (vp9_is_scaled(sf)) { + scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf); + xs = sf->x_step_q4; + ys = sf->y_step_q4; + // Map the top left corner of the block into the reference frame. + x0 = sf->scale_value_x(x0, sf); + y0 = sf->scale_value_y(y0, sf); + x0_16 = sf->scale_value_x(x0_16, sf); + y0_16 = sf->scale_value_y(y0_16, sf); + } else { + scaled_mv.row = mv_q4.row; + scaled_mv.col = mv_q4.col; + xs = ys = 16; + } + subpel_x = scaled_mv.col & SUBPEL_MASK; + subpel_y = scaled_mv.row & SUBPEL_MASK; + + // Calculate the top left corner of the best matching block in the reference frame. 
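/* Editorial worked example for the coordinate updates below (not part
 * of the diff): at 1/16-pel precision SUBPEL_BITS is 4 and SUBPEL_MASK
 * is 15, so scaled_mv.col = 37 carries 37 >> 4 = 2 whole pixels and
 * phase 37 & 15 = 5. Starting from x0 = 64, x0 becomes 66, and x0_16
 * becomes 64 * 16 + 37 = 1061, the same position kept at 1/16-pel
 * resolution; subpel_x = 5 then selects the interpolation kernel phase. */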
+ x0 += scaled_mv.col >> SUBPEL_BITS; + y0 += scaled_mv.row >> SUBPEL_BITS; + x0_16 += scaled_mv.col; + y0_16 += scaled_mv.row; + + // Get reference block pointer. + buf_ptr = ref_frame + y0 * pre_buf->stride + x0; + buf_stride = pre_buf->stride; + + // Do border extension if there is motion or the + // width/height is not a multiple of 8 pixels. + if (scaled_mv.col || scaled_mv.row || + (frame_width & 0x7) || (frame_height & 0x7)) { + // Get reference block bottom right coordinate. + int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1; + int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1; + int x_pad = 0, y_pad = 0; + + if (subpel_x || (sf->x_step_q4 & SUBPEL_MASK)) { + x0 -= VP9_INTERP_EXTEND - 1; + x1 += VP9_INTERP_EXTEND; + x_pad = 1; + } + + if (subpel_y || (sf->y_step_q4 & SUBPEL_MASK)) { + y0 -= VP9_INTERP_EXTEND - 1; + y1 += VP9_INTERP_EXTEND; + y_pad = 1; + } + + // Skip border extension if block is inside the frame. + if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width || + y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) { + uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0; + // Extend the border. + build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf, x1 - x0, + x0, y0, x1 - x0, y1 - y0, frame_width, frame_height); + buf_stride = x1 - x0; + buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3; + } + } + + inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, + subpel_y, sf, w, h, ref, xd->interp_kernel, xs, ys); + } +} + +void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, + BLOCK_SIZE bsize) { + int plane; + const int mi_x = mi_col * MI_SIZE; + const int mi_y = mi_row * MI_SIZE; + for (plane = 0; plane < MAX_MB_PLANE; ++plane) { + const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, + &xd->plane[plane]); + const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; + const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; + const int bw = 4 * num_4x4_w; + const int bh = 4 * num_4x4_h; + + if (xd->mi_8x8[0]->mbmi.sb_type < BLOCK_8X8) { + int i = 0, x, y; + assert(bsize == BLOCK_8X8); + for (y = 0; y < num_4x4_h; ++y) + for (x = 0; x < num_4x4_w; ++x) + dec_build_inter_predictors(xd, plane, i++, + 4 * x, 4 * y, 4, 4, mi_x, mi_y); + } else { + dec_build_inter_predictors(xd, plane, 0, + 0, 0, bw, bh, mi_x, mi_y); + } + } +} + +void vp9_setup_dst_planes(MACROBLOCKD *xd, + const YV12_BUFFER_CONFIG *src, + int mi_row, int mi_col) { + uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, + src->alpha_buffer}; + const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, + src->alpha_stride}; + int i; + + for (i = 0; i < MAX_MB_PLANE; ++i) { + struct macroblockd_plane *const pd = &xd->plane[i]; + setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL, + pd->subsampling_x, pd->subsampling_y); } } +void vp9_setup_pre_planes(MACROBLOCKD *xd, int idx, + const YV12_BUFFER_CONFIG *src, + int mi_row, int mi_col, + const struct scale_factors *sf) { + if (src != NULL) { + int i; + uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, + src->alpha_buffer}; + const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, + src->alpha_stride}; + + for (i = 0; i < MAX_MB_PLANE; ++i) { + struct macroblockd_plane *const pd = &xd->plane[i]; + setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col, + sf, pd->subsampling_x, pd->subsampling_y); + } + } +} diff --git a/libvpx/vp9/common/vp9_reconinter.h 
b/libvpx/vp9/common/vp9_reconinter.h index 2c8a6e4..86f3158 100644 --- a/libvpx/vp9/common/vp9_reconinter.h +++ b/libvpx/vp9/common/vp9_reconinter.h @@ -14,7 +14,10 @@ #include "vpx/vpx_integer.h" #include "vp9/common/vp9_onyxc_int.h" -struct subpix_fn_table; +#ifdef __cplusplus +extern "C" { +#endif + void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col, BLOCK_SIZE bsize); @@ -24,80 +27,45 @@ void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col, void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, BLOCK_SIZE bsize); -void vp9_setup_interp_filters(MACROBLOCKD *xd, - INTERPOLATION_TYPE filter, - VP9_COMMON *cm); +void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, + BLOCK_SIZE bsize); void vp9_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const MV *mv_q3, - const struct scale_factors *scale, + const struct scale_factors *sf, int w, int h, int do_avg, - const struct subpix_fn_table *subpix, - enum mv_precision precision); - -static int scaled_buffer_offset(int x_offset, int y_offset, int stride, - const struct scale_factors *scale) { - const int x = scale ? scale->sfc->scale_value_x(x_offset, scale->sfc) : - x_offset; - const int y = scale ? scale->sfc->scale_value_y(y_offset, scale->sfc) : - y_offset; + const InterpKernel *kernel, + enum mv_precision precision, + int x, int y); + +static INLINE int scaled_buffer_offset(int x_offset, int y_offset, int stride, + const struct scale_factors *sf) { + const int x = sf ? sf->scale_value_x(x_offset, sf) : x_offset; + const int y = sf ? sf->scale_value_y(y_offset, sf) : y_offset; return y * stride + x; } -static void setup_pred_plane(struct buf_2d *dst, - uint8_t *src, int stride, - int mi_row, int mi_col, - const struct scale_factors *scale, - int subsampling_x, int subsampling_y) { +static INLINE void setup_pred_plane(struct buf_2d *dst, + uint8_t *src, int stride, + int mi_row, int mi_col, + const struct scale_factors *scale, + int subsampling_x, int subsampling_y) { const int x = (MI_SIZE * mi_col) >> subsampling_x; const int y = (MI_SIZE * mi_row) >> subsampling_y; dst->buf = src + scaled_buffer_offset(x, y, stride, scale); dst->stride = stride; } -// TODO(jkoleszar): audit all uses of this that don't set mb_row, mb_col -static void setup_dst_planes(MACROBLOCKD *xd, - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col) { - uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, - src->alpha_buffer}; - int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, - src->alpha_stride}; - int i; - - for (i = 0; i < MAX_MB_PLANE; ++i) { - struct macroblockd_plane *pd = &xd->plane[i]; - setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL, - pd->subsampling_x, pd->subsampling_y); - } -} - -static void setup_pre_planes(MACROBLOCKD *xd, int i, - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col, - const struct scale_factors *sf) { - if (src) { - int j; - uint8_t* buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, - src->alpha_buffer}; - int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, - src->alpha_stride}; - - for (j = 0; j < MAX_MB_PLANE; ++j) { - struct macroblockd_plane *pd = &xd->plane[j]; - setup_pred_plane(&pd->pre[i], buffers[j], strides[j], - mi_row, mi_col, sf, pd->subsampling_x, pd->subsampling_y); - } - } -} +void vp9_setup_dst_planes(MACROBLOCKD *xd, const YV12_BUFFER_CONFIG *src, + int mi_row, int mi_col); -static void 
set_scale_factors(MACROBLOCKD *xd, int ref0, int ref1, - struct scale_factors sf[MAX_REF_FRAMES]) { - xd->scale_factor[0] = sf[ref0 >= 0 ? ref0 : 0]; - xd->scale_factor[1] = sf[ref1 >= 0 ? ref1 : 0]; -} +void vp9_setup_pre_planes(MACROBLOCKD *xd, int idx, + const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col, + const struct scale_factors *sf); -void vp9_setup_scale_factors(VP9_COMMON *cm, int i); +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_COMMON_VP9_RECONINTER_H_ diff --git a/libvpx/vp9/common/vp9_reconintra.c b/libvpx/vp9/common/vp9_reconintra.c index eb643b0..86f4f35 100644 --- a/libvpx/vp9/common/vp9_reconintra.c +++ b/libvpx/vp9/common/vp9_reconintra.c @@ -18,21 +18,17 @@ #include "vp9/common/vp9_reconintra.h" #include "vp9/common/vp9_onyxc_int.h" -const TX_TYPE mode2txfm_map[MB_MODE_COUNT] = { - DCT_DCT, // DC - ADST_DCT, // V - DCT_ADST, // H - DCT_DCT, // D45 - ADST_ADST, // D135 - ADST_DCT, // D117 - DCT_ADST, // D153 - DCT_ADST, // D207 - ADST_DCT, // D63 - ADST_ADST, // TM - DCT_DCT, // NEARESTMV - DCT_DCT, // NEARMV - DCT_DCT, // ZEROMV - DCT_DCT // NEWMV +const TX_TYPE mode2txfm_map[INTRA_MODES] = { + DCT_DCT, // DC + ADST_DCT, // V + DCT_ADST, // H + DCT_DCT, // D45 + ADST_ADST, // D135 + ADST_DCT, // D117 + DCT_ADST, // D153 + DCT_ADST, // D207 + ADST_DCT, // D63 + ADST_ADST, // TM }; #define intra_pred_sized(type, size) \ @@ -313,17 +309,21 @@ static void init_intra_pred_fn_ptrs(void) { #undef intra_pred_allsizes } -static void build_intra_predictors(const uint8_t *ref, int ref_stride, - uint8_t *dst, int dst_stride, +static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref, + int ref_stride, uint8_t *dst, int dst_stride, MB_PREDICTION_MODE mode, TX_SIZE tx_size, int up_available, int left_available, - int right_available) { + int right_available, int x, int y, + int plane) { int i; DECLARE_ALIGNED_ARRAY(16, uint8_t, left_col, 64); DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 128 + 16); uint8_t *above_row = above_data + 16; const uint8_t *const_above_row = above_row; const int bs = 4 << tx_size; + int frame_width, frame_height; + int x0, y0; + const struct macroblockd_plane *const pd = &xd->plane[plane]; // 127 127 127 .. 127 127 127 127 127 127 // 129 A B .. Y Z @@ -334,26 +334,90 @@ static void build_intra_predictors(const uint8_t *ref, int ref_stride, once(init_intra_pred_fn_ptrs); + // Get current frame pointer, width and height. + if (plane == 0) { + frame_width = xd->cur_buf->y_width; + frame_height = xd->cur_buf->y_height; + } else { + frame_width = xd->cur_buf->uv_width; + frame_height = xd->cur_buf->uv_height; + } + + // Get block position in current frame. + x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x; + y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y; + // left if (left_available) { - for (i = 0; i < bs; i++) - left_col[i] = ref[i * ref_stride - 1]; + if (xd->mb_to_bottom_edge < 0) { + /* slower path if the block needs border extension */ + if (y0 + bs <= frame_height) { + for (i = 0; i < bs; ++i) + left_col[i] = ref[i * ref_stride - 1]; + } else { + const int extend_bottom = frame_height - y0; + for (i = 0; i < extend_bottom; ++i) + left_col[i] = ref[i * ref_stride - 1]; + for (; i < bs; ++i) + left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1]; + } + } else { + /* faster path if the block does not need extension */ + for (i = 0; i < bs; ++i) + left_col[i] = ref[i * ref_stride - 1]; + } } else { vpx_memset(left_col, 129, bs); } + // TODO(hkuang) do not extend 2*bs pixels for all modes. 
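Before the above-row handling that follows, a minimal sketch of the left-column rule just implemented (fill_left_col and rows_in_frame are hypothetical illustration names, not libvpx API): copy the pixels that exist, replicate the last in-frame row when the block overhangs the bottom edge, and fall back to the constant 129 when there is no left neighbour at all.

#include <stdint.h>
#include <string.h>

static void fill_left_col(uint8_t *left_col, const uint8_t *ref,
                          int ref_stride, int bs,
                          int left_available, int rows_in_frame) {
  int i;
  if (!left_available) {
    memset(left_col, 129, bs);                /* no neighbour: constant fill */
  } else {
    const int n = rows_in_frame < bs ? rows_in_frame : bs;
    for (i = 0; i < n; ++i)                   /* copy the available pixels */
      left_col[i] = ref[i * ref_stride - 1];
    for (; i < bs; ++i)                       /* replicate the last one */
      left_col[i] = ref[(n - 1) * ref_stride - 1];
  }
}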
// above if (up_available) { const uint8_t *above_ref = ref - ref_stride; - if (bs == 4 && right_available && left_available) { - const_above_row = above_ref; - } else { - vpx_memcpy(above_row, above_ref, bs); - if (bs == 4 && right_available) - vpx_memcpy(above_row + bs, above_ref + bs, bs); - else - vpx_memset(above_row + bs, above_row[bs - 1], bs); + if (xd->mb_to_right_edge < 0) { + /* slower path if the block needs border extension */ + if (x0 + 2 * bs <= frame_width) { + if (right_available && bs == 4) { + vpx_memcpy(above_row, above_ref, 2 * bs); + } else { + vpx_memcpy(above_row, above_ref, bs); + vpx_memset(above_row + bs, above_row[bs - 1], bs); + } + } else if (x0 + bs <= frame_width) { + const int r = frame_width - x0; + if (right_available && bs == 4) { + vpx_memcpy(above_row, above_ref, r); + vpx_memset(above_row + r, above_row[r - 1], + x0 + 2 * bs - frame_width); + } else { + vpx_memcpy(above_row, above_ref, bs); + vpx_memset(above_row + bs, above_row[bs - 1], bs); + } + } else if (x0 <= frame_width) { + const int r = frame_width - x0; + if (right_available && bs == 4) { + vpx_memcpy(above_row, above_ref, r); + vpx_memset(above_row + r, above_row[r - 1], + x0 + 2 * bs - frame_width); + } else { + vpx_memcpy(above_row, above_ref, r); + vpx_memset(above_row + r, above_row[r - 1], + x0 + 2 * bs - frame_width); + } + } above_row[-1] = left_available ? above_ref[-1] : 129; + } else { + /* faster path if the block does not need extension */ + if (bs == 4 && right_available && left_available) { + const_above_row = above_ref; + } else { + vpx_memcpy(above_row, above_ref, bs); + if (bs == 4 && right_available) + vpx_memcpy(above_row + bs, above_ref + bs, bs); + else + vpx_memset(above_row + bs, above_row[bs - 1], bs); + above_row[-1] = left_available ? 
above_ref[-1] : 129; + } } } else { vpx_memset(above_row, 127, bs * 2); @@ -370,16 +434,19 @@ static void build_intra_predictors(const uint8_t *ref, int ref_stride, } void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in, - TX_SIZE tx_size, int mode, - const uint8_t *ref, int ref_stride, - uint8_t *dst, int dst_stride) { + TX_SIZE tx_size, int mode, + const uint8_t *ref, int ref_stride, + uint8_t *dst, int dst_stride, + int aoff, int loff, int plane) { const int bwl = bwl_in - tx_size; const int wmask = (1 << bwl) - 1; const int have_top = (block_idx >> bwl) || xd->up_available; const int have_left = (block_idx & wmask) || xd->left_available; const int have_right = ((block_idx & wmask) != wmask); + const int x = aoff * 4; + const int y = loff * 4; assert(bwl >= 0); - build_intra_predictors(ref, ref_stride, dst, dst_stride, mode, tx_size, - have_top, have_left, have_right); + build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size, + have_top, have_left, have_right, x, y, plane); } diff --git a/libvpx/vp9/common/vp9_reconintra.h b/libvpx/vp9/common/vp9_reconintra.h index 6e3f55c..800736d 100644 --- a/libvpx/vp9/common/vp9_reconintra.h +++ b/libvpx/vp9/common/vp9_reconintra.h @@ -14,8 +14,17 @@ #include "vpx/vpx_integer.h" #include "vp9/common/vp9_blockd.h" +#ifdef __cplusplus +extern "C" { +#endif + void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in, TX_SIZE tx_size, int mode, const uint8_t *ref, int ref_stride, - uint8_t *dst, int dst_stride); + uint8_t *dst, int dst_stride, + int aoff, int loff, int plane); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_RECONINTRA_H_ diff --git a/libvpx/vp9/common/vp9_rtcd_defs.pl b/libvpx/vp9/common/vp9_rtcd_defs.pl new file mode 100644 index 0000000..e4cd9d4 --- /dev/null +++ b/libvpx/vp9/common/vp9_rtcd_defs.pl @@ -0,0 +1,778 @@ +sub vp9_common_forward_decls() { +print <<EOF +/* + * VP9 + */ + +#include "vpx/vpx_integer.h" +#include "vp9/common/vp9_enums.h" + +struct macroblockd; + +/* Encoder forward decls */ +struct macroblock; +struct vp9_variance_vtable; + +#define DEC_MVCOSTS int *mvjcost, int *mvcost[2] +struct mv; +union int_mv; +struct yv12_buffer_config; +EOF +} +forward_decls qw/vp9_common_forward_decls/; + +# x86inc.asm doesn't work if pic is enabled on 32 bit platforms so no assembly. +if (vpx_config("CONFIG_USE_X86INC") eq "yes") { + $mmx_x86inc = 'mmx'; + $sse_x86inc = 'sse'; + $sse2_x86inc = 'sse2'; + $ssse3_x86inc = 'ssse3'; + $avx_x86inc = 'avx'; + $avx2_x86inc = 'avx2'; +} else { + $mmx_x86inc = $sse_x86inc = $sse2_x86inc = $ssse3_x86inc = + $avx_x86inc = $avx2_x86inc = ''; +} + +# this variable is for functions that are 64 bit only. 
+if ($opts{arch} eq "x86_64") { + $mmx_x86_64 = 'mmx'; + $sse2_x86_64 = 'sse2'; + $ssse3_x86_64 = 'ssse3'; + $avx_x86_64 = 'avx'; + $avx2_x86_64 = 'avx2'; +} else { + $mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = + $avx_x86_64 = $avx2_x86_64 = ''; +} + +# +# RECON +# +add_proto qw/void vp9_d207_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d207_predictor_4x4/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d45_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d45_predictor_4x4/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d63_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d63_predictor_4x4/, "$ssse3_x86inc"; + +add_proto qw/void vp9_h_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_h_predictor_4x4 neon dspr2/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d117_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d117_predictor_4x4/; + +add_proto qw/void vp9_d135_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d135_predictor_4x4/; + +add_proto qw/void vp9_d153_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d153_predictor_4x4/, "$ssse3_x86inc"; + +add_proto qw/void vp9_v_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_v_predictor_4x4 neon/, "$sse_x86inc"; + +add_proto qw/void vp9_tm_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_tm_predictor_4x4 neon dspr2/, "$sse_x86inc"; + +add_proto qw/void vp9_dc_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_predictor_4x4 dspr2/, "$sse_x86inc"; + +add_proto qw/void vp9_dc_top_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_top_predictor_4x4/; + +add_proto qw/void vp9_dc_left_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_left_predictor_4x4/; + +add_proto qw/void vp9_dc_128_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_128_predictor_4x4/; + +add_proto qw/void vp9_d207_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d207_predictor_8x8/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d45_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d45_predictor_8x8/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d63_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d63_predictor_8x8/, "$ssse3_x86inc"; + +add_proto qw/void vp9_h_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_h_predictor_8x8 neon dspr2/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d117_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d117_predictor_8x8/; + +add_proto qw/void vp9_d135_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t 
*above, const uint8_t *left"; +specialize qw/vp9_d135_predictor_8x8/; + +add_proto qw/void vp9_d153_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d153_predictor_8x8/, "$ssse3_x86inc"; + +add_proto qw/void vp9_v_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_v_predictor_8x8 neon/, "$sse_x86inc"; + +add_proto qw/void vp9_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_tm_predictor_8x8 neon dspr2/, "$sse2_x86inc"; + +add_proto qw/void vp9_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_predictor_8x8 dspr2/, "$sse_x86inc"; + +add_proto qw/void vp9_dc_top_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_top_predictor_8x8/; + +add_proto qw/void vp9_dc_left_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_left_predictor_8x8/; + +add_proto qw/void vp9_dc_128_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_128_predictor_8x8/; + +add_proto qw/void vp9_d207_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d207_predictor_16x16/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d45_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d45_predictor_16x16/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d63_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d63_predictor_16x16/, "$ssse3_x86inc"; + +add_proto qw/void vp9_h_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_h_predictor_16x16 neon dspr2/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d117_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d117_predictor_16x16/; + +add_proto qw/void vp9_d135_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d135_predictor_16x16/; + +add_proto qw/void vp9_d153_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d153_predictor_16x16/, "$ssse3_x86inc"; + +add_proto qw/void vp9_v_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_v_predictor_16x16 neon/, "$sse2_x86inc"; + +add_proto qw/void vp9_tm_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_tm_predictor_16x16 neon/, "$sse2_x86inc"; + +add_proto qw/void vp9_dc_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_predictor_16x16 dspr2/, "$sse2_x86inc"; + +add_proto qw/void vp9_dc_top_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_top_predictor_16x16/; + +add_proto qw/void vp9_dc_left_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_left_predictor_16x16/; + +add_proto qw/void vp9_dc_128_predictor_16x16/, "uint8_t *dst, 
ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_128_predictor_16x16/; + +add_proto qw/void vp9_d207_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d207_predictor_32x32/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d45_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d45_predictor_32x32/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d63_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d63_predictor_32x32/, "$ssse3_x86inc"; + +add_proto qw/void vp9_h_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_h_predictor_32x32 neon/, "$ssse3_x86inc"; + +add_proto qw/void vp9_d117_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d117_predictor_32x32/; + +add_proto qw/void vp9_d135_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d135_predictor_32x32/; + +add_proto qw/void vp9_d153_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_d153_predictor_32x32/; + +add_proto qw/void vp9_v_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_v_predictor_32x32 neon/, "$sse2_x86inc"; + +add_proto qw/void vp9_tm_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_tm_predictor_32x32 neon/, "$sse2_x86_64"; + +add_proto qw/void vp9_dc_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_predictor_32x32/, "$sse2_x86inc"; + +add_proto qw/void vp9_dc_top_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_top_predictor_32x32/; + +add_proto qw/void vp9_dc_left_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_left_predictor_32x32/; + +add_proto qw/void vp9_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; +specialize qw/vp9_dc_128_predictor_32x32/; + +# +# Loopfilter +# +add_proto qw/void vp9_lpf_vertical_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; +specialize qw/vp9_lpf_vertical_16 sse2 neon dspr2/; + +add_proto qw/void vp9_lpf_vertical_16_dual/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; +specialize qw/vp9_lpf_vertical_16_dual sse2 neon dspr2/; + +add_proto qw/void vp9_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; +specialize qw/vp9_lpf_vertical_8 sse2 neon dspr2/; + +add_proto qw/void vp9_lpf_vertical_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1"; +specialize qw/vp9_lpf_vertical_8_dual sse2 neon dspr2/; + +add_proto qw/void vp9_lpf_vertical_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; +specialize qw/vp9_lpf_vertical_4 mmx neon dspr2/; + +add_proto qw/void vp9_lpf_vertical_4_dual/, 
"uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1"; +specialize qw/vp9_lpf_vertical_4_dual sse2 neon dspr2/; + +add_proto qw/void vp9_lpf_horizontal_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; +specialize qw/vp9_lpf_horizontal_16 sse2 avx2 neon dspr2/; + +add_proto qw/void vp9_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; +specialize qw/vp9_lpf_horizontal_8 sse2 neon dspr2/; + +add_proto qw/void vp9_lpf_horizontal_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1"; +specialize qw/vp9_lpf_horizontal_8_dual sse2 neon dspr2/; + +add_proto qw/void vp9_lpf_horizontal_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; +specialize qw/vp9_lpf_horizontal_4 mmx neon dspr2/; + +add_proto qw/void vp9_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1"; +specialize qw/vp9_lpf_horizontal_4_dual sse2 neon dspr2/; + +# +# post proc +# +if (vpx_config("CONFIG_VP9_POSTPROC") eq "yes") { +add_proto qw/void vp9_mbpost_proc_down/, "uint8_t *dst, int pitch, int rows, int cols, int flimit"; +specialize qw/vp9_mbpost_proc_down mmx sse2/; +$vp9_mbpost_proc_down_sse2=vp9_mbpost_proc_down_xmm; + +add_proto qw/void vp9_mbpost_proc_across_ip/, "uint8_t *src, int pitch, int rows, int cols, int flimit"; +specialize qw/vp9_mbpost_proc_across_ip sse2/; +$vp9_mbpost_proc_across_ip_sse2=vp9_mbpost_proc_across_ip_xmm; + +add_proto qw/void vp9_post_proc_down_and_across/, "const uint8_t *src_ptr, uint8_t *dst_ptr, int src_pixels_per_line, int dst_pixels_per_line, int rows, int cols, int flimit"; +specialize qw/vp9_post_proc_down_and_across mmx sse2/; +$vp9_post_proc_down_and_across_sse2=vp9_post_proc_down_and_across_xmm; + +add_proto qw/void vp9_plane_add_noise/, "uint8_t *Start, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int Width, unsigned int Height, int Pitch"; +specialize qw/vp9_plane_add_noise mmx sse2/; +$vp9_plane_add_noise_sse2=vp9_plane_add_noise_wmt; +} + +add_proto qw/void vp9_blend_mb_inner/, "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride"; +specialize qw/vp9_blend_mb_inner/; + +add_proto qw/void vp9_blend_mb_outer/, "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride"; +specialize qw/vp9_blend_mb_outer/; + +add_proto qw/void vp9_blend_b/, "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride"; +specialize qw/vp9_blend_b/; + +# +# Sub Pixel Filters +# +add_proto qw/void vp9_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; +specialize qw/vp9_convolve_copy neon dspr2/, "$sse2_x86inc"; + +add_proto qw/void vp9_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; +specialize qw/vp9_convolve_avg neon dspr2/, "$sse2_x86inc"; + +add_proto qw/void 
vp9_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; +specialize qw/vp9_convolve8 sse2 ssse3 avx2 neon dspr2/; + +add_proto qw/void vp9_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; +specialize qw/vp9_convolve8_horiz sse2 ssse3 avx2 neon dspr2/; + +add_proto qw/void vp9_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; +specialize qw/vp9_convolve8_vert sse2 ssse3 avx2 neon dspr2/; + +add_proto qw/void vp9_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; +specialize qw/vp9_convolve8_avg sse2 ssse3 neon dspr2/; + +add_proto qw/void vp9_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; +specialize qw/vp9_convolve8_avg_horiz sse2 ssse3 neon dspr2/; + +add_proto qw/void vp9_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; +specialize qw/vp9_convolve8_avg_vert sse2 ssse3 neon dspr2/; + +# +# dct +# +add_proto qw/void vp9_idct4x4_1_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct4x4_1_add sse2 neon dspr2/; + +add_proto qw/void vp9_idct4x4_16_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct4x4_16_add sse2 neon dspr2/; + +add_proto qw/void vp9_idct8x8_1_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct8x8_1_add sse2 neon dspr2/; + +add_proto qw/void vp9_idct8x8_64_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct8x8_64_add sse2 neon dspr2/; + +add_proto qw/void vp9_idct8x8_10_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct8x8_10_add sse2 neon dspr2/; + +add_proto qw/void vp9_idct16x16_1_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct16x16_1_add sse2 neon dspr2/; + +add_proto qw/void vp9_idct16x16_256_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct16x16_256_add sse2 neon dspr2/; + +add_proto qw/void vp9_idct16x16_10_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct16x16_10_add sse2 neon dspr2/; + +add_proto qw/void vp9_idct32x32_1024_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct32x32_1024_add sse2 neon dspr2/; + +add_proto qw/void vp9_idct32x32_34_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct32x32_34_add sse2 neon dspr2/; +$vp9_idct32x32_34_add_neon=vp9_idct32x32_1024_add_neon; + +add_proto qw/void vp9_idct32x32_1_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_idct32x32_1_add sse2 neon dspr2/; + +add_proto qw/void vp9_iht4x4_16_add/, "const int16_t *input, uint8_t *dest, int dest_stride, int tx_type"; +specialize qw/vp9_iht4x4_16_add sse2 neon dspr2/; + 
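Each add_proto/specialize pair in this file is input to rtcd.pl, which generates the vp9_rtcd.h run-time dispatch header. A hedged sketch of what that dispatch roughly amounts to for one of the entries above (simplified; the generated header differs in detail, and the HAS_SSE2 value here is only illustrative of the vpx_ports CPU-feature flags):

#include <stdint.h>

#define HAS_SSE2 0x04  /* illustrative CPU-feature bit */

/* Per-ISA implementations declared by the build system. */
void vp9_iht4x4_16_add_c(const int16_t *input, uint8_t *dest,
                         int dest_stride, int tx_type);
void vp9_iht4x4_16_add_sse2(const int16_t *input, uint8_t *dest,
                            int dest_stride, int tx_type);

/* The generated header exposes one pointer per prototype; callers
 * invoke it as if it were the function itself. */
void (*vp9_iht4x4_16_add)(const int16_t *input, uint8_t *dest,
                          int dest_stride, int tx_type);

static void setup_rtcd_internal(int flags) {
  vp9_iht4x4_16_add = vp9_iht4x4_16_add_c;        /* portable fallback */
  if (flags & HAS_SSE2)                           /* runtime CPU probe */
    vp9_iht4x4_16_add = vp9_iht4x4_16_add_sse2;
}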
+add_proto qw/void vp9_iht8x8_64_add/, "const int16_t *input, uint8_t *dest, int dest_stride, int tx_type"; +specialize qw/vp9_iht8x8_64_add sse2 neon dspr2/; + +add_proto qw/void vp9_iht16x16_256_add/, "const int16_t *input, uint8_t *output, int pitch, int tx_type"; +specialize qw/vp9_iht16x16_256_add sse2 dspr2/; + +# dct and add + +add_proto qw/void vp9_iwht4x4_1_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_iwht4x4_1_add/; + +add_proto qw/void vp9_iwht4x4_16_add/, "const int16_t *input, uint8_t *dest, int dest_stride"; +specialize qw/vp9_iwht4x4_16_add/; + +# +# Encoder functions below this point. +# +if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") { + + +# variance +add_proto qw/unsigned int vp9_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance32x16/, "$sse2_x86inc", "$avx2_x86inc"; + +add_proto qw/unsigned int vp9_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance16x32/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance64x32/, "$sse2_x86inc", "$avx2_x86inc"; + +add_proto qw/unsigned int vp9_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance32x64/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance32x32/, "$sse2_x86inc", "$avx2_x86inc"; + +add_proto qw/unsigned int vp9_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance64x64/, "$sse2_x86inc", "$avx2_x86inc"; + +add_proto qw/unsigned int vp9_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance16x16 mmx/, "$sse2_x86inc", "$avx2_x86inc"; + +add_proto qw/unsigned int vp9_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance16x8 mmx/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance8x16 mmx/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance8x8 mmx/, "$sse2_x86inc"; + +add_proto qw/void vp9_get_sse_sum_8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum"; +specialize qw/vp9_get_sse_sum_8x8 sse2/; +$vp9_get_sse_sum_8x8_sse2=vp9_get8x8var_sse2; + +add_proto qw/unsigned int vp9_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance8x4/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance4x8/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_variance4x4/, "const uint8_t 
*src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance4x4 mmx/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance64x64 avx2/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance64x64 avx2/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance32x64/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance32x64/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance64x32/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance64x32/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance32x16/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance32x16/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance16x32/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance16x32/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance32x32 avx2/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance32x32 avx2/, 
"$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance16x16/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance16x16/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance8x16/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance8x16/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance16x8/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance16x8/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance8x8/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance8x8/, "$sse2_x86inc", "$ssse3_x86inc"; + +# TODO(jingning): need to convert 8x4/4x8 functions into mmx/sse form +add_proto qw/unsigned int vp9_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance8x4/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance8x4/, "$sse2_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance4x8/, "$sse_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance4x8/, "$sse_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sub_pixel_variance4x4/, "const 
uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_variance4x4/, "$sse_x86inc", "$ssse3_x86inc"; +#vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt + +add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"; +specialize qw/vp9_sub_pixel_avg_variance4x4/, "$sse_x86inc", "$ssse3_x86inc"; + +add_proto qw/unsigned int vp9_sad64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad64x64/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad32x64/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad64x32/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad32x16/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad16x32/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad32x32/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad16x16 mmx/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad16x8 mmx/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad8x16 mmx/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad8x8 mmx/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad8x4/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad4x8/, "$sse_x86inc"; + +add_proto qw/unsigned int vp9_sad4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"; +specialize qw/vp9_sad4x4 mmx/, "$sse_x86inc"; + +add_proto qw/unsigned int vp9_sad64x64_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad64x64_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad32x64_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize 
qw/vp9_sad32x64_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad64x32_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad64x32_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad32x16_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad32x16_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad16x32_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad16x32_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad32x32_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad32x32_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad16x16_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad16x16_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad16x8_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad16x8_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad8x16_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad8x16_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad8x8_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad8x8_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad8x4_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad8x4_avg/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_sad4x8_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad4x8_avg/, "$sse_x86inc"; + +add_proto qw/unsigned int vp9_sad4x4_avg/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"; +specialize qw/vp9_sad4x4_avg/, "$sse_x86inc"; + +add_proto qw/unsigned int vp9_variance_halfpixvar16x16_h/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance_halfpixvar16x16_h/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_variance_halfpixvar16x16_v/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance_halfpixvar16x16_v/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_variance_halfpixvar16x16_hv/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance_halfpixvar16x16_hv/, "$sse2_x86inc"; + +add_proto qw/unsigned int vp9_variance_halfpixvar64x64_h/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance_halfpixvar64x64_h/; + +add_proto qw/unsigned int vp9_variance_halfpixvar64x64_v/, "const 
uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance_halfpixvar64x64_v/; + +add_proto qw/unsigned int vp9_variance_halfpixvar64x64_hv/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance_halfpixvar64x64_hv/; + +add_proto qw/unsigned int vp9_variance_halfpixvar32x32_h/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance_halfpixvar32x32_h/; + +add_proto qw/unsigned int vp9_variance_halfpixvar32x32_v/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance_halfpixvar32x32_v/; + +add_proto qw/unsigned int vp9_variance_halfpixvar32x32_hv/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_variance_halfpixvar32x32_hv/; + +add_proto qw/void vp9_sad64x64x3/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad64x64x3/; + +add_proto qw/void vp9_sad32x32x3/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad32x32x3/; + +add_proto qw/void vp9_sad16x16x3/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad16x16x3 sse3 ssse3/; + +add_proto qw/void vp9_sad16x8x3/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad16x8x3 sse3 ssse3/; + +add_proto qw/void vp9_sad8x16x3/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad8x16x3 sse3/; + +add_proto qw/void vp9_sad8x8x3/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad8x8x3 sse3/; + +add_proto qw/void vp9_sad4x4x3/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad4x4x3 sse3/; + +add_proto qw/void vp9_sad64x64x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"; +specialize qw/vp9_sad64x64x8/; + +add_proto qw/void vp9_sad32x32x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"; +specialize qw/vp9_sad32x32x8/; + +add_proto qw/void vp9_sad16x16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"; +specialize qw/vp9_sad16x16x8 sse4/; + +add_proto qw/void vp9_sad16x8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"; +specialize qw/vp9_sad16x8x8 sse4/; + +add_proto qw/void vp9_sad8x16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"; +specialize qw/vp9_sad8x16x8 sse4/; + +add_proto qw/void vp9_sad8x8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"; +specialize qw/vp9_sad8x8x8 sse4/; + +add_proto qw/void vp9_sad8x4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"; +specialize qw/vp9_sad8x4x8/; + +add_proto qw/void vp9_sad4x8x8/, "const 
uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"; +specialize qw/vp9_sad4x8x8/; + +add_proto qw/void vp9_sad4x4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"; +specialize qw/vp9_sad4x4x8 sse4/; + +add_proto qw/void vp9_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad64x64x4d sse2/; + +add_proto qw/void vp9_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad32x64x4d sse2/; + +add_proto qw/void vp9_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad64x32x4d sse2/; + +add_proto qw/void vp9_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad32x16x4d sse2/; + +add_proto qw/void vp9_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad16x32x4d sse2/; + +add_proto qw/void vp9_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad32x32x4d sse2/; + +add_proto qw/void vp9_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad16x16x4d sse2/; + +add_proto qw/void vp9_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad16x8x4d sse2/; + +add_proto qw/void vp9_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad8x16x4d sse2/; + +add_proto qw/void vp9_sad8x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad8x8x4d sse2/; + +# TODO(jingning): need to convert these 4x8/8x4 functions into sse2 form +add_proto qw/void vp9_sad8x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad8x4x4d sse2/; + +add_proto qw/void vp9_sad4x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad4x8x4d sse/; + +add_proto qw/void vp9_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"; +specialize qw/vp9_sad4x4x4d sse/; + +#add_proto qw/unsigned int vp9_sub_pixel_mse16x16/, "const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse"; +#specialize qw/vp9_sub_pixel_mse16x16 sse2 mmx/; + +add_proto qw/unsigned int vp9_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"; +specialize qw/vp9_mse16x16 mmx/, "$sse2_x86inc", "$avx2_x86inc"; + +add_proto qw/unsigned int vp9_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"; +specialize qw/vp9_mse8x16/; + +add_proto qw/unsigned int vp9_mse16x8/, "const uint8_t 
*src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"; +specialize qw/vp9_mse16x8/; + +add_proto qw/unsigned int vp9_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"; +specialize qw/vp9_mse8x8/; + +add_proto qw/unsigned int vp9_sub_pixel_mse64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_mse64x64/; + +add_proto qw/unsigned int vp9_sub_pixel_mse32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; +specialize qw/vp9_sub_pixel_mse32x32/; + +add_proto qw/unsigned int vp9_get_mb_ss/, "const int16_t *"; +specialize qw/vp9_get_mb_ss mmx sse2/; +# ENCODEMB INVOKE + +add_proto qw/int64_t vp9_block_error/, "const int16_t *coeff, const int16_t *dqcoeff, intptr_t block_size, int64_t *ssz"; +specialize qw/vp9_block_error/, "$sse2_x86inc"; + +add_proto qw/void vp9_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride"; +specialize qw/vp9_subtract_block/, "$sse2_x86inc"; + +add_proto qw/void vp9_quantize_b/, "const int16_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; +specialize qw/vp9_quantize_b/, "$ssse3_x86_64"; + +add_proto qw/void vp9_quantize_b_32x32/, "const int16_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; +specialize qw/vp9_quantize_b_32x32/, "$ssse3_x86_64"; + +# +# Structured Similarity (SSIM) +# +if (vpx_config("CONFIG_INTERNAL_STATS") eq "yes") { + add_proto qw/void vp9_ssim_parms_8x8/, "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"; + specialize qw/vp9_ssim_parms_8x8/, "$sse2_x86_64"; + + add_proto qw/void vp9_ssim_parms_16x16/, "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"; + specialize qw/vp9_ssim_parms_16x16/, "$sse2_x86_64"; +} + +# fdct functions +add_proto qw/void vp9_fht4x4/, "const int16_t *input, int16_t *output, int stride, int tx_type"; +specialize qw/vp9_fht4x4 sse2 avx2/; + +add_proto qw/void vp9_fht8x8/, "const int16_t *input, int16_t *output, int stride, int tx_type"; +specialize qw/vp9_fht8x8 sse2 avx2/; + +add_proto qw/void vp9_fht16x16/, "const int16_t *input, int16_t *output, int stride, int tx_type"; +specialize qw/vp9_fht16x16 sse2 avx2/; + +add_proto qw/void vp9_fwht4x4/, "const int16_t *input, int16_t *output, int stride"; +specialize qw/vp9_fwht4x4/; + +add_proto qw/void vp9_fdct4x4/, "const int16_t *input, int16_t *output, int stride"; +specialize qw/vp9_fdct4x4 sse2 avx2/; + +add_proto qw/void vp9_fdct8x8/, "const int16_t *input, int16_t *output, int stride"; +specialize qw/vp9_fdct8x8 sse2 avx2/; + 
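+# The "$sse2_x86inc" / "$ssse3_x86_64" gates quoted above are ordinary Perl
+# variables: configuration checks either set them to an ISA name or leave
+# them empty, so a specialization silently drops out where x86inc.asm cannot
+# be used (32-bit PIC builds) or where the function is 64-bit only. A hedged
+# sketch of that gating, assuming the Perl port mirrors the shell logic being
+# deleted from vp9_rtcd_defs.sh below:
+#
+#   if (vpx_config("CONFIG_USE_X86INC") eq "yes") {
+#     $sse2_x86inc = "sse2";  # otherwise stays empty and adds no variant
+#   }
+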
+add_proto qw/void vp9_fdct16x16/, "const int16_t *input, int16_t *output, int stride"; +specialize qw/vp9_fdct16x16 sse2 avx2/; + +add_proto qw/void vp9_fdct32x32/, "const int16_t *input, int16_t *output, int stride"; +specialize qw/vp9_fdct32x32 sse2 avx2/; + +add_proto qw/void vp9_fdct32x32_rd/, "const int16_t *input, int16_t *output, int stride"; +specialize qw/vp9_fdct32x32_rd sse2 avx2/; + +# +# Motion search +# +add_proto qw/int vp9_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, const struct mv *center_mv, struct mv *best_mv"; +specialize qw/vp9_full_search_sad sse3 sse4_1/; +$vp9_full_search_sad_sse3=vp9_full_search_sadx3; +$vp9_full_search_sad_sse4_1=vp9_full_search_sadx8; + +add_proto qw/int vp9_refining_search_sad/, "const struct macroblock *x, struct mv *ref_mv, int sad_per_bit, int distance, const struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, const struct mv *center_mv"; +specialize qw/vp9_refining_search_sad sse3/; +$vp9_refining_search_sad_sse3=vp9_refining_search_sadx4; + +add_proto qw/int vp9_diamond_search_sad/, "const struct macroblock *x, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, const struct mv *center_mv"; +specialize qw/vp9_diamond_search_sad sse3/; +$vp9_diamond_search_sad_sse3=vp9_diamond_search_sadx4; + +add_proto qw/int vp9_full_range_search/, "const struct macroblock *x, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, const struct mv *center_mv"; +specialize qw/vp9_full_range_search/; + +add_proto qw/void vp9_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_size, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count"; +specialize qw/vp9_temporal_filter_apply sse2/; + +} +# end encoder functions +1; diff --git a/libvpx/vp9/common/vp9_rtcd_defs.sh b/libvpx/vp9/common/vp9_rtcd_defs.sh deleted file mode 100644 index 2c0864e..0000000 --- a/libvpx/vp9/common/vp9_rtcd_defs.sh +++ /dev/null @@ -1,744 +0,0 @@ -vp9_common_forward_decls() { -cat <<EOF -/* - * VP9 - */ - -#include "vpx/vpx_integer.h" -#include "vp9/common/vp9_enums.h" - -struct macroblockd; - -/* Encoder forward decls */ -struct macroblock; -struct vp9_variance_vtable; - -#define DEC_MVCOSTS int *mvjcost, int *mvcost[2] -union int_mv; -struct yv12_buffer_config; -EOF -} -forward_decls vp9_common_forward_decls - -# x86inc.asm doesn't work if pic is enabled on 32 bit platforms so no assembly. -[ "$CONFIG_USE_X86INC" = "yes" ] && mmx_x86inc=mmx && sse_x86inc=sse && - sse2_x86inc=sse2 && ssse3_x86inc=ssse3 && avx_x86inc=avx && avx2_x86inc=avx2 - -# this variable is for functions that are 64 bit only. 
-[ $arch = "x86_64" ] && mmx_x86_64=mmx && sse2_x86_64=sse2 && - ssse3_x86_64=ssse3 && avx_x86_64=avx && avx2_x86_64=avx2 - -# -# RECON -# -prototype void vp9_d207_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d207_predictor_4x4 $ssse3_x86inc - -prototype void vp9_d45_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d45_predictor_4x4 $ssse3_x86inc - -prototype void vp9_d63_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d63_predictor_4x4 $ssse3_x86inc - -prototype void vp9_h_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_h_predictor_4x4 $ssse3_x86inc dspr2 - -prototype void vp9_d117_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d117_predictor_4x4 - -prototype void vp9_d135_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d135_predictor_4x4 - -prototype void vp9_d153_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d153_predictor_4x4 $ssse3_x86inc - -prototype void vp9_v_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_v_predictor_4x4 $sse_x86inc - -prototype void vp9_tm_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_tm_predictor_4x4 $sse_x86inc dspr2 - -prototype void vp9_dc_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_predictor_4x4 $sse_x86inc dspr2 - -prototype void vp9_dc_top_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_top_predictor_4x4 - -prototype void vp9_dc_left_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_left_predictor_4x4 - -prototype void vp9_dc_128_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_128_predictor_4x4 - -prototype void vp9_d207_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d207_predictor_8x8 $ssse3_x86inc - -prototype void vp9_d45_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d45_predictor_8x8 $ssse3_x86inc - -prototype void vp9_d63_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d63_predictor_8x8 $ssse3_x86inc - -prototype void vp9_h_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_h_predictor_8x8 $ssse3_x86inc dspr2 - -prototype void vp9_d117_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d117_predictor_8x8 - -prototype void vp9_d135_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d135_predictor_8x8 - -prototype void vp9_d153_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d153_predictor_8x8 $ssse3_x86inc - -prototype void vp9_v_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_v_predictor_8x8 
$sse_x86inc - -prototype void vp9_tm_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_tm_predictor_8x8 $sse2_x86inc dspr2 - -prototype void vp9_dc_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_predictor_8x8 $sse_x86inc dspr2 - -prototype void vp9_dc_top_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_top_predictor_8x8 - -prototype void vp9_dc_left_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_left_predictor_8x8 - -prototype void vp9_dc_128_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_128_predictor_8x8 - -prototype void vp9_d207_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d207_predictor_16x16 $ssse3_x86inc - -prototype void vp9_d45_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d45_predictor_16x16 $ssse3_x86inc - -prototype void vp9_d63_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d63_predictor_16x16 $ssse3_x86inc - -prototype void vp9_h_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_h_predictor_16x16 $ssse3_x86inc dspr2 - -prototype void vp9_d117_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d117_predictor_16x16 - -prototype void vp9_d135_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d135_predictor_16x16 - -prototype void vp9_d153_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d153_predictor_16x16 $ssse3_x86inc - -prototype void vp9_v_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_v_predictor_16x16 $sse2_x86inc - -prototype void vp9_tm_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_tm_predictor_16x16 $sse2_x86inc - -prototype void vp9_dc_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_predictor_16x16 $sse2_x86inc dspr2 - -prototype void vp9_dc_top_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_top_predictor_16x16 - -prototype void vp9_dc_left_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_left_predictor_16x16 - -prototype void vp9_dc_128_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_128_predictor_16x16 - -prototype void vp9_d207_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d207_predictor_32x32 $ssse3_x86inc - -prototype void vp9_d45_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d45_predictor_32x32 $ssse3_x86inc - -prototype void vp9_d63_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d63_predictor_32x32 $ssse3_x86inc - -prototype void vp9_h_predictor_32x32 "uint8_t 
*dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_h_predictor_32x32 $ssse3_x86inc - -prototype void vp9_d117_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d117_predictor_32x32 - -prototype void vp9_d135_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d135_predictor_32x32 - -prototype void vp9_d153_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_d153_predictor_32x32 - -prototype void vp9_v_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_v_predictor_32x32 $sse2_x86inc - -prototype void vp9_tm_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_tm_predictor_32x32 $sse2_x86_64 - -prototype void vp9_dc_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_predictor_32x32 $sse2_x86inc - -prototype void vp9_dc_top_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_top_predictor_32x32 - -prototype void vp9_dc_left_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_left_predictor_32x32 - -prototype void vp9_dc_128_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left" -specialize vp9_dc_128_predictor_32x32 - -# -# Loopfilter -# -prototype void vp9_mb_lpf_vertical_edge_w "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh" -specialize vp9_mb_lpf_vertical_edge_w sse2 neon dspr2 - -prototype void vp9_mbloop_filter_vertical_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count" -specialize vp9_mbloop_filter_vertical_edge sse2 neon dspr2 - -prototype void vp9_loop_filter_vertical_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count" -specialize vp9_loop_filter_vertical_edge mmx neon dspr2 - -prototype void vp9_mb_lpf_horizontal_edge_w "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count" -specialize vp9_mb_lpf_horizontal_edge_w sse2 avx2 neon dspr2 - -prototype void vp9_mbloop_filter_horizontal_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count" -specialize vp9_mbloop_filter_horizontal_edge sse2 neon dspr2 - -prototype void vp9_loop_filter_horizontal_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count" -specialize vp9_loop_filter_horizontal_edge mmx neon dspr2 - -# -# post proc -# -if [ "$CONFIG_VP9_POSTPROC" = "yes" ]; then -prototype void vp9_mbpost_proc_down "uint8_t *dst, int pitch, int rows, int cols, int flimit" -specialize vp9_mbpost_proc_down mmx sse2 -vp9_mbpost_proc_down_sse2=vp9_mbpost_proc_down_xmm - -prototype void vp9_mbpost_proc_across_ip "uint8_t *src, int pitch, int rows, int cols, int flimit" -specialize vp9_mbpost_proc_across_ip sse2 -vp9_mbpost_proc_across_ip_sse2=vp9_mbpost_proc_across_ip_xmm - -prototype void vp9_post_proc_down_and_across "const uint8_t *src_ptr, uint8_t *dst_ptr, int src_pixels_per_line, int dst_pixels_per_line, int rows, int cols, int flimit" -specialize vp9_post_proc_down_and_across mmx sse2 
-vp9_post_proc_down_and_across_sse2=vp9_post_proc_down_and_across_xmm - -prototype void vp9_plane_add_noise "uint8_t *Start, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int Width, unsigned int Height, int Pitch" -specialize vp9_plane_add_noise mmx sse2 -vp9_plane_add_noise_sse2=vp9_plane_add_noise_wmt -fi - -prototype void vp9_blend_mb_inner "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride" -specialize vp9_blend_mb_inner - -prototype void vp9_blend_mb_outer "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride" -specialize vp9_blend_mb_outer - -prototype void vp9_blend_b "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride" -specialize vp9_blend_b - -# -# Sub Pixel Filters -# -prototype void vp9_convolve_copy "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h" -specialize vp9_convolve_copy $sse2_x86inc neon dspr2 - -prototype void vp9_convolve_avg "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h" -specialize vp9_convolve_avg $sse2_x86inc neon dspr2 - -prototype void vp9_convolve8 "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h" -specialize vp9_convolve8 sse2 ssse3 neon dspr2 - -prototype void vp9_convolve8_horiz "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h" -specialize vp9_convolve8_horiz sse2 ssse3 neon dspr2 - -prototype void vp9_convolve8_vert "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h" -specialize vp9_convolve8_vert sse2 ssse3 neon dspr2 - -prototype void vp9_convolve8_avg "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h" -specialize vp9_convolve8_avg sse2 ssse3 neon dspr2 - -prototype void vp9_convolve8_avg_horiz "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h" -specialize vp9_convolve8_avg_horiz sse2 ssse3 neon dspr2 - -prototype void vp9_convolve8_avg_vert "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h" -specialize vp9_convolve8_avg_vert sse2 ssse3 neon dspr2 - -# -# dct -# -prototype void vp9_idct4x4_1_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct4x4_1_add sse2 neon dspr2 - -prototype void vp9_idct4x4_16_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct4x4_16_add sse2 neon dspr2 - -prototype void vp9_idct8x8_1_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct8x8_1_add sse2 neon dspr2 - -prototype void vp9_idct8x8_64_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct8x8_64_add sse2 neon dspr2 - -prototype void vp9_idct8x8_10_add "const int16_t *input, 
uint8_t *dest, int dest_stride" -specialize vp9_idct8x8_10_add sse2 neon dspr2 - -prototype void vp9_idct16x16_1_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct16x16_1_add sse2 neon dspr2 - -prototype void vp9_idct16x16_256_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct16x16_256_add sse2 neon dspr2 - -prototype void vp9_idct16x16_10_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct16x16_10_add sse2 neon dspr2 - -prototype void vp9_idct32x32_1024_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct32x32_1024_add sse2 neon dspr2 - -prototype void vp9_idct32x32_34_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct32x32_34_add sse2 dspr2 - -prototype void vp9_idct32x32_1_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_idct32x32_1_add sse2 neon dspr2 - -prototype void vp9_iht4x4_16_add "const int16_t *input, uint8_t *dest, int dest_stride, int tx_type" -specialize vp9_iht4x4_16_add sse2 neon dspr2 - -prototype void vp9_iht8x8_64_add "const int16_t *input, uint8_t *dest, int dest_stride, int tx_type" -specialize vp9_iht8x8_64_add sse2 neon dspr2 - -prototype void vp9_iht16x16_256_add "const int16_t *input, uint8_t *output, int pitch, int tx_type" -specialize vp9_iht16x16_256_add sse2 dspr2 - -# dct and add - -prototype void vp9_iwht4x4_1_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_iwht4x4_1_add - -prototype void vp9_iwht4x4_16_add "const int16_t *input, uint8_t *dest, int dest_stride" -specialize vp9_iwht4x4_16_add - -# -# Encoder functions below this point. -# -if [ "$CONFIG_VP9_ENCODER" = "yes" ]; then - - -# variance -prototype unsigned int vp9_variance32x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance32x16 $sse2_x86inc - -prototype unsigned int vp9_variance16x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance16x32 $sse2_x86inc - -prototype unsigned int vp9_variance64x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance64x32 $sse2_x86inc - -prototype unsigned int vp9_variance32x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance32x64 $sse2_x86inc - -prototype unsigned int vp9_variance32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance32x32 $sse2_x86inc - -prototype unsigned int vp9_variance64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance64x64 $sse2_x86inc - -prototype unsigned int vp9_variance16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance16x16 mmx $sse2_x86inc - -prototype unsigned int vp9_variance16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance16x8 mmx $sse2_x86inc - -prototype unsigned int vp9_variance8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance8x16 mmx $sse2_x86inc - -prototype unsigned int vp9_variance8x8 "const uint8_t *src_ptr, int source_stride, 
const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance8x8 mmx $sse2_x86inc - -prototype void vp9_get_sse_sum_8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum" -specialize vp9_get_sse_sum_8x8 sse2 -vp9_get_sse_sum_8x8_sse2=vp9_get8x8var_sse2 - -prototype unsigned int vp9_variance8x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance8x4 $sse2_x86inc - -prototype unsigned int vp9_variance4x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance4x8 $sse2_x86inc - -prototype unsigned int vp9_variance4x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance4x4 mmx $sse2_x86inc - -prototype unsigned int vp9_sub_pixel_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance64x64 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance64x64 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance32x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance32x64 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance32x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance32x64 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance64x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance64x32 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance64x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance64x32 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance32x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance32x16 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance32x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance32x16 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance16x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance16x32 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance16x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance16x32 
$sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance32x32 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance32x32 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance16x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance16x16 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance16x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance16x16 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance8x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance8x16 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance8x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance8x16 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance16x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance16x8 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance16x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance16x8 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance8x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance8x8 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance8x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance8x8 $sse2_x86inc $ssse3_x86inc - -# TODO(jingning): need to convert 8x4/4x8 functions into mmx/sse form -prototype unsigned int vp9_sub_pixel_variance8x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance8x4 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_avg_variance8x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance8x4 $sse2_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance4x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance4x8 $sse_x86inc $ssse3_x86inc - -prototype 
unsigned int vp9_sub_pixel_avg_variance4x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance4x8 $sse_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sub_pixel_variance4x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_variance4x4 $sse_x86inc $ssse3_x86inc -#vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt - -prototype unsigned int vp9_sub_pixel_avg_variance4x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred" -specialize vp9_sub_pixel_avg_variance4x4 $sse_x86inc $ssse3_x86inc - -prototype unsigned int vp9_sad64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad64x64 $sse2_x86inc - -prototype unsigned int vp9_sad32x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad32x64 $sse2_x86inc - -prototype unsigned int vp9_sad64x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad64x32 $sse2_x86inc - -prototype unsigned int vp9_sad32x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad32x16 $sse2_x86inc - -prototype unsigned int vp9_sad16x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad16x32 $sse2_x86inc - -prototype unsigned int vp9_sad32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad32x32 $sse2_x86inc - -prototype unsigned int vp9_sad16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad16x16 mmx $sse2_x86inc - -prototype unsigned int vp9_sad16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad16x8 mmx $sse2_x86inc - -prototype unsigned int vp9_sad8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad8x16 mmx $sse2_x86inc - -prototype unsigned int vp9_sad8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad8x8 mmx $sse2_x86inc - -prototype unsigned int vp9_sad8x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad8x4 $sse2_x86inc - -prototype unsigned int vp9_sad4x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad4x8 $sse_x86inc - -prototype unsigned int vp9_sad4x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" -specialize vp9_sad4x4 mmx $sse_x86inc - -prototype unsigned int vp9_sad64x64_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad64x64_avg $sse2_x86inc - -prototype unsigned int vp9_sad32x64_avg "const uint8_t *src_ptr, int source_stride, const uint8_t 
*ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad32x64_avg $sse2_x86inc - -prototype unsigned int vp9_sad64x32_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad64x32_avg $sse2_x86inc - -prototype unsigned int vp9_sad32x16_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad32x16_avg $sse2_x86inc - -prototype unsigned int vp9_sad16x32_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad16x32_avg $sse2_x86inc - -prototype unsigned int vp9_sad32x32_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad32x32_avg $sse2_x86inc - -prototype unsigned int vp9_sad16x16_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad16x16_avg $sse2_x86inc - -prototype unsigned int vp9_sad16x8_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad16x8_avg $sse2_x86inc - -prototype unsigned int vp9_sad8x16_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad8x16_avg $sse2_x86inc - -prototype unsigned int vp9_sad8x8_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad8x8_avg $sse2_x86inc - -prototype unsigned int vp9_sad8x4_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad8x4_avg $sse2_x86inc - -prototype unsigned int vp9_sad4x8_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad4x8_avg $sse_x86inc - -prototype unsigned int vp9_sad4x4_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad" -specialize vp9_sad4x4_avg $sse_x86inc - -prototype unsigned int vp9_variance_halfpixvar16x16_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance_halfpixvar16x16_h $sse2_x86inc - -prototype unsigned int vp9_variance_halfpixvar16x16_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance_halfpixvar16x16_v $sse2_x86inc - -prototype unsigned int vp9_variance_halfpixvar16x16_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance_halfpixvar16x16_hv $sse2_x86inc - -prototype unsigned int vp9_variance_halfpixvar64x64_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance_halfpixvar64x64_h - -prototype unsigned int vp9_variance_halfpixvar64x64_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize 
vp9_variance_halfpixvar64x64_v - -prototype unsigned int vp9_variance_halfpixvar64x64_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance_halfpixvar64x64_hv - -prototype unsigned int vp9_variance_halfpixvar32x32_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance_halfpixvar32x32_h - -prototype unsigned int vp9_variance_halfpixvar32x32_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance_halfpixvar32x32_v - -prototype unsigned int vp9_variance_halfpixvar32x32_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_variance_halfpixvar32x32_hv - -prototype void vp9_sad64x64x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array" -specialize vp9_sad64x64x3 - -prototype void vp9_sad32x32x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array" -specialize vp9_sad32x32x3 - -prototype void vp9_sad16x16x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array" -specialize vp9_sad16x16x3 sse3 ssse3 - -prototype void vp9_sad16x8x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array" -specialize vp9_sad16x8x3 sse3 ssse3 - -prototype void vp9_sad8x16x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array" -specialize vp9_sad8x16x3 sse3 - -prototype void vp9_sad8x8x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array" -specialize vp9_sad8x8x3 sse3 - -prototype void vp9_sad4x4x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array" -specialize vp9_sad4x4x3 sse3 - -prototype void vp9_sad64x64x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array" -specialize vp9_sad64x64x8 - -prototype void vp9_sad32x32x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array" -specialize vp9_sad32x32x8 - -prototype void vp9_sad16x16x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array" -specialize vp9_sad16x16x8 sse4 - -prototype void vp9_sad16x8x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array" -specialize vp9_sad16x8x8 sse4 - -prototype void vp9_sad8x16x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array" -specialize vp9_sad8x16x8 sse4 - -prototype void vp9_sad8x8x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array" -specialize vp9_sad8x8x8 sse4 - -prototype void vp9_sad8x4x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array" -specialize vp9_sad8x4x8 - -prototype void vp9_sad4x8x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array" -specialize vp9_sad4x8x8 - -prototype void vp9_sad4x4x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array" -specialize vp9_sad4x4x8 sse4 - -prototype void vp9_sad64x64x4d 
"const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad64x64x4d sse2 - -prototype void vp9_sad32x64x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad32x64x4d sse2 - -prototype void vp9_sad64x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad64x32x4d sse2 - -prototype void vp9_sad32x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad32x16x4d sse2 - -prototype void vp9_sad16x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad16x32x4d sse2 - -prototype void vp9_sad32x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad32x32x4d sse2 - -prototype void vp9_sad16x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad16x16x4d sse2 - -prototype void vp9_sad16x8x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad16x8x4d sse2 - -prototype void vp9_sad8x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad8x16x4d sse2 - -prototype void vp9_sad8x8x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad8x8x4d sse2 - -# TODO(jingning): need to convert these 4x8/8x4 functions into sse2 form -prototype void vp9_sad8x4x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad8x4x4d sse2 - -prototype void vp9_sad4x8x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad4x8x4d sse - -prototype void vp9_sad4x4x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array" -specialize vp9_sad4x4x4d sse - -#prototype unsigned int vp9_sub_pixel_mse16x16 "const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse" -#specialize vp9_sub_pixel_mse16x16 sse2 mmx - -prototype unsigned int vp9_mse16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse" -specialize vp9_mse16x16 mmx $sse2_x86inc - -prototype unsigned int vp9_mse8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse" -specialize vp9_mse8x16 - -prototype unsigned int vp9_mse16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse" -specialize vp9_mse16x8 - -prototype unsigned int vp9_mse8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse" -specialize vp9_mse8x8 - -prototype unsigned int vp9_sub_pixel_mse64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_mse64x64 - -prototype unsigned int 
vp9_sub_pixel_mse32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" -specialize vp9_sub_pixel_mse32x32 - -prototype unsigned int vp9_get_mb_ss "const int16_t *" -specialize vp9_get_mb_ss mmx sse2 -# ENCODEMB INVOKE - -prototype int64_t vp9_block_error "int16_t *coeff, int16_t *dqcoeff, intptr_t block_size, int64_t *ssz" -specialize vp9_block_error $sse2_x86inc - -prototype void vp9_subtract_block "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride" -specialize vp9_subtract_block $sse2_x86inc - -prototype void vp9_quantize_b "const int16_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan" -specialize vp9_quantize_b $ssse3_x86_64 - -prototype void vp9_quantize_b_32x32 "const int16_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan" -specialize vp9_quantize_b_32x32 $ssse3_x86_64 - -# -# Structured Similarity (SSIM) -# -if [ "$CONFIG_INTERNAL_STATS" = "yes" ]; then - prototype void vp9_ssim_parms_8x8 "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr" - specialize vp9_ssim_parms_8x8 $sse2_x86_64 - - prototype void vp9_ssim_parms_16x16 "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr" - specialize vp9_ssim_parms_16x16 $sse2_x86_64 -fi - -# fdct functions -prototype void vp9_short_fht4x4 "const int16_t *input, int16_t *output, int stride, int tx_type" -specialize vp9_short_fht4x4 sse2 - -prototype void vp9_short_fht8x8 "const int16_t *input, int16_t *output, int stride, int tx_type" -specialize vp9_short_fht8x8 sse2 - -prototype void vp9_short_fht16x16 "const int16_t *input, int16_t *output, int stride, int tx_type" -specialize vp9_short_fht16x16 sse2 - -prototype void vp9_fwht4x4 "const int16_t *input, int16_t *output, int stride" -specialize vp9_fwht4x4 - -prototype void vp9_fdct4x4 "const int16_t *input, int16_t *output, int stride" -specialize vp9_fdct4x4 sse2 - -prototype void vp9_fdct8x8 "const int16_t *input, int16_t *output, int stride" -specialize vp9_fdct8x8 sse2 - -prototype void vp9_fdct16x16 "const int16_t *input, int16_t *output, int stride" -specialize vp9_fdct16x16 sse2 - -prototype void vp9_fdct32x32 "const int16_t *input, int16_t *output, int stride" -specialize vp9_fdct32x32 sse2 - -prototype void vp9_fdct32x32_rd "const int16_t *input, int16_t *output, int stride" -specialize vp9_fdct32x32_rd sse2 - -# -# Motion search -# -prototype int vp9_full_search_sad "struct macroblock *x, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv, int n" -specialize vp9_full_search_sad sse3 sse4_1 -vp9_full_search_sad_sse3=vp9_full_search_sadx3 -vp9_full_search_sad_sse4_1=vp9_full_search_sadx8 - -prototype int 
vp9_refining_search_sad "struct macroblock *x, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv" -specialize vp9_refining_search_sad sse3 -vp9_refining_search_sad_sse3=vp9_refining_search_sadx4 - -prototype int vp9_diamond_search_sad "struct macroblock *x, union int_mv *ref_mv, union int_mv *best_mv, int search_param, int sad_per_bit, int *num00, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv" -specialize vp9_diamond_search_sad sse3 -vp9_diamond_search_sad_sse3=vp9_diamond_search_sadx4 - -prototype void vp9_temporal_filter_apply "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_size, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count" -specialize vp9_temporal_filter_apply sse2 - -prototype void vp9_yv12_copy_partial_frame "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc, int fraction" -specialize vp9_yv12_copy_partial_frame - - -fi -# end encoder functions diff --git a/libvpx/vp9/common/vp9_sadmxn.h b/libvpx/vp9/common/vp9_sadmxn.h deleted file mode 100644 index b2dfd63..0000000 --- a/libvpx/vp9/common/vp9_sadmxn.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP9_COMMON_VP9_SADMXN_H_ -#define VP9_COMMON_VP9_SADMXN_H_ - -#include "./vpx_config.h" -#include "vpx/vpx_integer.h" - -static INLINE unsigned int sad_mx_n_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - int m, - int n) { - int r, c; - unsigned int sad = 0; - - for (r = 0; r < n; r++) { - for (c = 0; c < m; c++) { - sad += abs(src_ptr[c] - ref_ptr[c]); - } - - src_ptr += src_stride; - ref_ptr += ref_stride; - } - - return sad; -} - -#endif // VP9_COMMON_VP9_SADMXN_H_ diff --git a/libvpx/vp9/common/vp9_scale.c b/libvpx/vp9/common/vp9_scale.c index 3f0994f..e0f1e34 100644 --- a/libvpx/vp9/common/vp9_scale.c +++ b/libvpx/vp9/common/vp9_scale.c @@ -12,47 +12,19 @@ #include "vp9/common/vp9_filter.h" #include "vp9/common/vp9_scale.h" -static INLINE int scaled_x(int val, const struct scale_factors_common *sfc) { - return val * sfc->x_scale_fp >> REF_SCALE_SHIFT; +static INLINE int scaled_x(int val, const struct scale_factors *sf) { + return val * sf->x_scale_fp >> REF_SCALE_SHIFT; } -static INLINE int scaled_y(int val, const struct scale_factors_common *sfc) { - return val * sfc->y_scale_fp >> REF_SCALE_SHIFT; +static INLINE int scaled_y(int val, const struct scale_factors *sf) { + return val * sf->y_scale_fp >> REF_SCALE_SHIFT; } -static int unscaled_value(int val, const struct scale_factors_common *sfc) { - (void) sfc; +static int unscaled_value(int val, const struct scale_factors *sf) { + (void) sf; return val; } -static MV32 scaled_mv(const MV *mv, const struct scale_factors *scale) { - const MV32 res = { - scaled_y(mv->row, scale->sfc) + scale->y_offset_q4, - scaled_x(mv->col, scale->sfc) + scale->x_offset_q4 - }; - return res; -} - -static MV32 unscaled_mv(const MV *mv, const struct scale_factors *scale) { - const MV32 res = { - mv->row, - mv->col - }; - return res; -} - -static void set_offsets_with_scaling(struct 
scale_factors *scale, - int row, int col) { - scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK; - scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK; -} - -static void set_offsets_without_scaling(struct scale_factors *scale, - int row, int col) { - scale->x_offset_q4 = 0; - scale->y_offset_q4 = 0; -} - static int get_fixed_point_scale_factor(int other_size, int this_size) { // Calculate scaling factor once for each reference frame // and use fixed point scaling factors in decoding and encoding routines. @@ -69,31 +41,36 @@ static int check_scale_factors(int other_w, int other_h, this_h <= 16 * other_h; } -void vp9_setup_scale_factors_for_frame(struct scale_factors *scale, - struct scale_factors_common *scale_comm, +MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) { + const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK; + const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK; + const MV32 res = { + scaled_y(mv->row, sf) + y_off_q4, + scaled_x(mv->col, sf) + x_off_q4 + }; + return res; +} + +void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w, int other_h, int this_w, int this_h) { if (!check_scale_factors(other_w, other_h, this_w, this_h)) { - scale_comm->x_scale_fp = REF_INVALID_SCALE; - scale_comm->y_scale_fp = REF_INVALID_SCALE; + sf->x_scale_fp = REF_INVALID_SCALE; + sf->y_scale_fp = REF_INVALID_SCALE; return; } - scale_comm->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w); - scale_comm->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h); - scale_comm->x_step_q4 = scaled_x(16, scale_comm); - scale_comm->y_step_q4 = scaled_y(16, scale_comm); + sf->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w); + sf->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h); + sf->x_step_q4 = scaled_x(16, sf); + sf->y_step_q4 = scaled_y(16, sf); - if (vp9_is_scaled(scale_comm)) { - scale_comm->scale_value_x = scaled_x; - scale_comm->scale_value_y = scaled_y; - scale_comm->set_scaled_offsets = set_offsets_with_scaling; - scale_comm->scale_mv = scaled_mv; + if (vp9_is_scaled(sf)) { + sf->scale_value_x = scaled_x; + sf->scale_value_y = scaled_y; } else { - scale_comm->scale_value_x = unscaled_value; - scale_comm->scale_value_y = unscaled_value; - scale_comm->set_scaled_offsets = set_offsets_without_scaling; - scale_comm->scale_mv = unscaled_mv; + sf->scale_value_x = unscaled_value; + sf->scale_value_y = unscaled_value; } // TODO(agrange): Investigate the best choice of functions to use here @@ -102,48 +79,44 @@ void vp9_setup_scale_factors_for_frame(struct scale_factors *scale, // applied in one direction only, and not at all for 0,0, seems to give the // best quality, but it may be worth trying an additional mode that does // do the filtering on full-pel. - if (scale_comm->x_step_q4 == 16) { - if (scale_comm->y_step_q4 == 16) { + if (sf->x_step_q4 == 16) { + if (sf->y_step_q4 == 16) { // No scaling in either direction. 
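A note on the SAD family registered in the vp9_rtcd.sh block above: every vp9_sadMxN entry shares one contract (sum of absolute differences over an MxN block); the _avg forms difference against the rounded average of the reference and a second predictor, which is what compound-prediction search needs; the xN forms fill sad_array with SADs at N consecutive horizontal offsets of ref_ptr; and the x4d forms take four independent reference pointers, one SAD each (the vp9_refining_search_sadx4 binding above consumes that shape). A minimal sketch of the first two contracts, assuming the usual libvpx semantics; sad_mx_n and sad_mx_n_avg are illustrative names, not identifiers from this diff:

#include <stdint.h>
#include <stdlib.h>

static unsigned int sad_mx_n(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             int m, int n) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < n; ++r, src += src_stride, ref += ref_stride)
    for (c = 0; c < m; ++c)
      sad += abs(src[c] - ref[c]);
  return sad;
}

/* _avg variant: second_pred is assumed to be a contiguous m-wide block. */
static unsigned int sad_mx_n_avg(const uint8_t *src, int src_stride,
                                 const uint8_t *ref, int ref_stride,
                                 const uint8_t *second_pred, int m, int n) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < n;
       ++r, src += src_stride, ref += ref_stride, second_pred += m)
    for (c = 0; c < m; ++c)
      sad += abs(src[c] - ((ref[c] + second_pred[c] + 1) >> 1));
  return sad;
}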
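Also from the vp9_scale.c hunk above: vp9_scale_mv replaces the old set_scaled_offsets/scale_mv function-pointer pair, deriving the q4 offsets on the fly from the block position (x, y). A self-contained transcription for readers following the refactor (MV/MV32 reduced from vp9_mv.h, SUBPEL_BITS == 4 as in vp9_filter.h), with a worked Q14 check; the (other_size << 14) / this_size form of get_fixed_point_scale_factor is an assumption carried over from upstream libvpx, since the body is elided by the hunk context:

#include <assert.h>
#include <stdint.h>

#define REF_SCALE_SHIFT 14
#define SUBPEL_BITS 4
#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

typedef struct { int16_t row, col; } MV;
typedef struct { int32_t row, col; } MV32;

static MV32 scale_mv_sketch(const MV *mv, int x, int y,
                            int x_scale_fp, int y_scale_fp) {
  /* sub-pel phase of the scaled block origin, folded into the result */
  const int x_off_q4 =
      (((x << SUBPEL_BITS) * x_scale_fp) >> REF_SCALE_SHIFT) & SUBPEL_MASK;
  const int y_off_q4 =
      (((y << SUBPEL_BITS) * y_scale_fp) >> REF_SCALE_SHIFT) & SUBPEL_MASK;
  const MV32 res = {
    ((mv->row * y_scale_fp) >> REF_SCALE_SHIFT) + y_off_q4,
    ((mv->col * x_scale_fp) >> REF_SCALE_SHIFT) + x_off_q4
  };
  return res;
}

int main(void) {
  const int fp = (1280 << REF_SCALE_SHIFT) / 640;  /* 2.0 in Q14 */
  const MV mv = { 8, -16 };
  const MV32 smv = scale_mv_sketch(&mv, 0, 0, fp, fp);
  assert(fp == (2 << REF_SCALE_SHIFT));     /* 32768 */
  assert(smv.row == 16 && smv.col == -32);  /* MV doubles, zero phase */
  return 0;
}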
- scale_comm->predict[0][0][0] = vp9_convolve_copy; - scale_comm->predict[0][0][1] = vp9_convolve_avg; - scale_comm->predict[0][1][0] = vp9_convolve8_vert; - scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert; - scale_comm->predict[1][0][0] = vp9_convolve8_horiz; - scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz; + sf->predict[0][0][0] = vp9_convolve_copy; + sf->predict[0][0][1] = vp9_convolve_avg; + sf->predict[0][1][0] = vp9_convolve8_vert; + sf->predict[0][1][1] = vp9_convolve8_avg_vert; + sf->predict[1][0][0] = vp9_convolve8_horiz; + sf->predict[1][0][1] = vp9_convolve8_avg_horiz; } else { // No scaling in x direction. Must always scale in the y direction. - scale_comm->predict[0][0][0] = vp9_convolve8_vert; - scale_comm->predict[0][0][1] = vp9_convolve8_avg_vert; - scale_comm->predict[0][1][0] = vp9_convolve8_vert; - scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert; - scale_comm->predict[1][0][0] = vp9_convolve8; - scale_comm->predict[1][0][1] = vp9_convolve8_avg; + sf->predict[0][0][0] = vp9_convolve8_vert; + sf->predict[0][0][1] = vp9_convolve8_avg_vert; + sf->predict[0][1][0] = vp9_convolve8_vert; + sf->predict[0][1][1] = vp9_convolve8_avg_vert; + sf->predict[1][0][0] = vp9_convolve8; + sf->predict[1][0][1] = vp9_convolve8_avg; } } else { - if (scale_comm->y_step_q4 == 16) { + if (sf->y_step_q4 == 16) { // No scaling in the y direction. Must always scale in the x direction. - scale_comm->predict[0][0][0] = vp9_convolve8_horiz; - scale_comm->predict[0][0][1] = vp9_convolve8_avg_horiz; - scale_comm->predict[0][1][0] = vp9_convolve8; - scale_comm->predict[0][1][1] = vp9_convolve8_avg; - scale_comm->predict[1][0][0] = vp9_convolve8_horiz; - scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz; + sf->predict[0][0][0] = vp9_convolve8_horiz; + sf->predict[0][0][1] = vp9_convolve8_avg_horiz; + sf->predict[0][1][0] = vp9_convolve8; + sf->predict[0][1][1] = vp9_convolve8_avg; + sf->predict[1][0][0] = vp9_convolve8_horiz; + sf->predict[1][0][1] = vp9_convolve8_avg_horiz; } else { // Must always scale in both directions. 
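The predict table being filled in here uses the indexing documented in the vp9_scale.h hunk below ("horiz, vert, avg"): predict[x has sub-pel][y has sub-pel][average into dst]. A runnable toy model of that dispatch, dropping the avg dimension and reducing the convolve signature; the function names in the strings mirror the assignments above, everything else is illustrative:

#include <stdio.h>

typedef void (*convolve_fn_t)(void);  /* reduced signature for the sketch */

static void copy_fn(void)  { puts("vp9_convolve_copy path"); }
static void horiz_fn(void) { puts("vp9_convolve8_horiz path"); }
static void vert_fn(void)  { puts("vp9_convolve8_vert path"); }
static void both_fn(void)  { puts("vp9_convolve8 path"); }

int main(void) {
  convolve_fn_t predict[2][2];  /* [x has sub-pel][y has sub-pel] */
  predict[0][0] = copy_fn;      /* full-pel both ways: plain copy */
  predict[1][0] = horiz_fn;
  predict[0][1] = vert_fn;
  predict[1][1] = both_fn;      /* 2D sub-pel: filter both directions */
  {
    const int subpel_x = 5 & 15, subpel_y = 0;  /* q4 MV fractions */
    predict[subpel_x != 0][subpel_y != 0]();    /* selects the horiz path */
  }
  return 0;
}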
- scale_comm->predict[0][0][0] = vp9_convolve8; - scale_comm->predict[0][0][1] = vp9_convolve8_avg; - scale_comm->predict[0][1][0] = vp9_convolve8; - scale_comm->predict[0][1][1] = vp9_convolve8_avg; - scale_comm->predict[1][0][0] = vp9_convolve8; - scale_comm->predict[1][0][1] = vp9_convolve8_avg; + sf->predict[0][0][0] = vp9_convolve8; + sf->predict[0][0][1] = vp9_convolve8_avg; + sf->predict[0][1][0] = vp9_convolve8; + sf->predict[0][1][1] = vp9_convolve8_avg; + sf->predict[1][0][0] = vp9_convolve8; + sf->predict[1][0][1] = vp9_convolve8_avg; } } // 2D subpel motion always gets filtered in both directions - scale_comm->predict[1][1][0] = vp9_convolve8; - scale_comm->predict[1][1][1] = vp9_convolve8_avg; - - scale->sfc = scale_comm; - scale->x_offset_q4 = 0; // calculated per block - scale->y_offset_q4 = 0; // calculated per block + sf->predict[1][1][0] = vp9_convolve8; + sf->predict[1][1][1] = vp9_convolve8_avg; } diff --git a/libvpx/vp9/common/vp9_scale.h b/libvpx/vp9/common/vp9_scale.h index 1437fcd..a9dda18 100644 --- a/libvpx/vp9/common/vp9_scale.h +++ b/libvpx/vp9/common/vp9_scale.h @@ -14,44 +14,44 @@ #include "vp9/common/vp9_mv.h" #include "vp9/common/vp9_convolve.h" +#ifdef __cplusplus +extern "C" { +#endif + #define REF_SCALE_SHIFT 14 #define REF_NO_SCALE (1 << REF_SCALE_SHIFT) #define REF_INVALID_SCALE -1 -struct scale_factors; -struct scale_factors_common { +struct scale_factors { int x_scale_fp; // horizontal fixed point scale factor int y_scale_fp; // vertical fixed point scale factor int x_step_q4; int y_step_q4; - int (*scale_value_x)(int val, const struct scale_factors_common *sfc); - int (*scale_value_y)(int val, const struct scale_factors_common *sfc); - void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col); - MV32 (*scale_mv)(const MV *mv, const struct scale_factors *scale); + int (*scale_value_x)(int val, const struct scale_factors *sf); + int (*scale_value_y)(int val, const struct scale_factors *sf); convolve_fn_t predict[2][2][2]; // horiz, vert, avg }; -struct scale_factors { - int x_offset_q4; - int y_offset_q4; - const struct scale_factors_common *sfc; -}; +MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf); -void vp9_setup_scale_factors_for_frame(struct scale_factors *scale, - struct scale_factors_common *scale_comm, +void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w, int other_h, int this_w, int this_h); -static int vp9_is_valid_scale(const struct scale_factors_common *sfc) { - return sfc->x_scale_fp != REF_INVALID_SCALE && - sfc->y_scale_fp != REF_INVALID_SCALE; +static INLINE int vp9_is_valid_scale(const struct scale_factors *sf) { + return sf->x_scale_fp != REF_INVALID_SCALE && + sf->y_scale_fp != REF_INVALID_SCALE; } -static int vp9_is_scaled(const struct scale_factors_common *sfc) { - return sfc->x_scale_fp != REF_NO_SCALE || - sfc->y_scale_fp != REF_NO_SCALE; +static INLINE int vp9_is_scaled(const struct scale_factors *sf) { + return sf->x_scale_fp != REF_NO_SCALE || + sf->y_scale_fp != REF_NO_SCALE; } +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_SCALE_H_ diff --git a/libvpx/vp9/common/vp9_scan.c b/libvpx/vp9/common/vp9_scan.c index f17da91..1ec5a0c 100644 --- a/libvpx/vp9/common/vp9_scan.c +++ b/libvpx/vp9/common/vp9_scan.c @@ -12,28 +12,28 @@ #include "vp9/common/vp9_scan.h" -DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_4x4[16]) = { +DECLARE_ALIGNED(16, static const int16_t, default_scan_4x4[16]) = { 0, 4, 1, 5, 8, 2, 12, 9, 3, 6, 13, 10, 7, 14, 
11, 15, }; -DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_4x4[16]) = { +DECLARE_ALIGNED(16, static const int16_t, col_scan_4x4[16]) = { 0, 4, 8, 1, 12, 5, 9, 2, 13, 6, 10, 3, 7, 14, 11, 15, }; -DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_4x4[16]) = { +DECLARE_ALIGNED(16, static const int16_t, row_scan_4x4[16]) = { 0, 1, 4, 2, 5, 3, 6, 8, 9, 7, 12, 10, 13, 11, 14, 15, }; -DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_8x8[64]) = { +DECLARE_ALIGNED(16, static const int16_t, default_scan_8x8[64]) = { 0, 8, 1, 16, 9, 2, 17, 24, 10, 3, 18, 25, 32, 11, 4, 26, 33, 19, 40, 12, 34, 27, 5, 41, @@ -44,7 +44,7 @@ DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_8x8[64]) = { 46, 39, 61, 54, 47, 62, 55, 63, }; -DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_8x8[64]) = { +DECLARE_ALIGNED(16, static const int16_t, col_scan_8x8[64]) = { 0, 8, 16, 1, 24, 9, 32, 17, 2, 40, 25, 10, 33, 18, 48, 3, 26, 41, 11, 56, 19, 34, 4, 49, @@ -55,7 +55,7 @@ DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_8x8[64]) = { 31, 61, 39, 54, 47, 62, 55, 63, }; -DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_8x8[64]) = { +DECLARE_ALIGNED(16, static const int16_t, row_scan_8x8[64]) = { 0, 1, 2, 8, 9, 3, 16, 10, 4, 17, 11, 24, 5, 18, 25, 12, 19, 26, 32, 6, 13, 20, 33, 27, @@ -66,7 +66,7 @@ DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_8x8[64]) = { 60, 39, 61, 47, 54, 55, 62, 63, }; -DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_16x16[256]) = { +DECLARE_ALIGNED(16, static const int16_t, default_scan_16x16[256]) = { 0, 16, 1, 32, 17, 2, 48, 33, 18, 3, 64, 34, 49, 19, 65, 80, 50, 4, 35, 66, 20, 81, 96, 51, 5, 36, 82, 97, 67, 112, 21, 52, 98, 37, 83, 113, 6, 68, 128, 53, 22, 99, 114, 84, 7, 129, 38, 69, @@ -87,7 +87,7 @@ DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_16x16[256]) = { 255, }; -DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_16x16[256]) = { +DECLARE_ALIGNED(16, static const int16_t, col_scan_16x16[256]) = { 0, 16, 32, 48, 1, 64, 17, 80, 33, 96, 49, 2, 65, 112, 18, 81, 34, 128, 50, 97, 3, 66, 144, 19, 113, 35, 82, 160, 98, 51, 129, 4, 67, 176, 20, 114, 145, 83, 36, 99, 130, 52, 192, 5, 161, 68, 115, 21, @@ -108,7 +108,7 @@ DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_16x16[256]) = { 255, }; -DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_16x16[256]) = { +DECLARE_ALIGNED(16, static const int16_t, row_scan_16x16[256]) = { 0, 1, 2, 16, 3, 17, 4, 18, 32, 5, 33, 19, 6, 34, 48, 20, 49, 7, 35, 21, 50, 64, 8, 36, 65, 22, 51, 37, 80, 9, 66, 52, 23, 38, 81, 67, 10, 53, 24, 82, 68, 96, 39, 11, 54, 83, 97, 69, @@ -130,7 +130,7 @@ DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_16x16[256]) = { 255, }; -DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_32x32[1024]) = { +DECLARE_ALIGNED(16, static const int16_t, default_scan_32x32[1024]) = { 0, 32, 1, 64, 33, 2, 96, 65, 34, 128, 3, 97, 66, 160, 129, 35, 98, 4, 67, 130, 161, 192, 36, 99, 224, 5, 162, 193, 68, 131, 37, 100, @@ -233,38 +233,68 @@ DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_32x32[1024]) = { // in {top, left, topleft, topright, bottomleft} order // for each position in raster scan order. // -1 indicates the neighbor does not exist. 
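Two invariants make these scan tables and the neighbor arrays declared just below easier to read. First, each iscan table is the inverse permutation of its scan table (init_scan_neighbors fills them in together). Second, each coded position records two scan-order neighbors, and get_coef_context (kept at the bottom of the new vp9_scan.h, further down) takes the rounded average of their token_cache values, which degenerates to the single value on edges where both entries hold the same index. A small check of both; the cache values in the trailing comment are assumed for illustration:

#include <assert.h>
#include <stdint.h>

#define MAX_NEIGHBORS 2

static void check_scan_inverse(const int16_t *scan, const int16_t *iscan,
                               int n) {
  int i;  /* e.g. default_scan_4x4 vs. vp9_default_iscan_4x4 with n == 16 */
  for (i = 0; i < n; ++i)
    assert(iscan[scan[i]] == i);
}

static int coef_context(const int16_t *neighbors, const uint8_t *token_cache,
                        int c) {
  return (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] +
          token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> 1;
}
/* With neighbor cache values 2 and 1: (1 + 2 + 1) >> 1 == 2. With both
 * equal to a: (1 + 2a) >> 1 == a, the branchless edge case described by
 * the init_scan_neighbors comment below. */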
-DECLARE_ALIGNED(16, int16_t, - vp9_default_scan_4x4_neighbors[17 * MAX_NEIGHBORS]); -DECLARE_ALIGNED(16, int16_t, - vp9_col_scan_4x4_neighbors[17 * MAX_NEIGHBORS]); -DECLARE_ALIGNED(16, int16_t, - vp9_row_scan_4x4_neighbors[17 * MAX_NEIGHBORS]); -DECLARE_ALIGNED(16, int16_t, - vp9_col_scan_8x8_neighbors[65 * MAX_NEIGHBORS]); -DECLARE_ALIGNED(16, int16_t, - vp9_row_scan_8x8_neighbors[65 * MAX_NEIGHBORS]); -DECLARE_ALIGNED(16, int16_t, - vp9_default_scan_8x8_neighbors[65 * MAX_NEIGHBORS]); -DECLARE_ALIGNED(16, int16_t, - vp9_col_scan_16x16_neighbors[257 * MAX_NEIGHBORS]); -DECLARE_ALIGNED(16, int16_t, - vp9_row_scan_16x16_neighbors[257 * MAX_NEIGHBORS]); -DECLARE_ALIGNED(16, int16_t, - vp9_default_scan_16x16_neighbors[257 * MAX_NEIGHBORS]); -DECLARE_ALIGNED(16, int16_t, - vp9_default_scan_32x32_neighbors[1025 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + default_scan_4x4_neighbors[17 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + col_scan_4x4_neighbors[17 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + row_scan_4x4_neighbors[17 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + col_scan_8x8_neighbors[65 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + row_scan_8x8_neighbors[65 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + default_scan_8x8_neighbors[65 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + col_scan_16x16_neighbors[257 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + row_scan_16x16_neighbors[257 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + default_scan_16x16_neighbors[257 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, + default_scan_32x32_neighbors[1025 * MAX_NEIGHBORS]); +DECLARE_ALIGNED(16, static int16_t, vp9_default_iscan_4x4[16]); +DECLARE_ALIGNED(16, static int16_t, vp9_col_iscan_4x4[16]); +DECLARE_ALIGNED(16, static int16_t, vp9_row_iscan_4x4[16]); +DECLARE_ALIGNED(16, static int16_t, vp9_col_iscan_8x8[64]); +DECLARE_ALIGNED(16, static int16_t, vp9_row_iscan_8x8[64]); +DECLARE_ALIGNED(16, static int16_t, vp9_default_iscan_8x8[64]); +DECLARE_ALIGNED(16, static int16_t, vp9_col_iscan_16x16[256]); +DECLARE_ALIGNED(16, static int16_t, vp9_row_iscan_16x16[256]); +DECLARE_ALIGNED(16, static int16_t, vp9_default_iscan_16x16[256]); +DECLARE_ALIGNED(16, static int16_t, vp9_default_iscan_32x32[1024]); -DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_4x4[16]); -DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_4x4[16]); -DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_4x4[16]); -DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_8x8[64]); -DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_8x8[64]); -DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_8x8[64]); -DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_16x16[256]); -DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_16x16[256]); -DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_16x16[256]); -DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_32x32[1024]); +const scan_order vp9_default_scan_orders[TX_SIZES] = { + {default_scan_4x4, vp9_default_iscan_4x4, default_scan_4x4_neighbors}, + {default_scan_8x8, vp9_default_iscan_8x8, default_scan_8x8_neighbors}, + {default_scan_16x16, vp9_default_iscan_16x16, default_scan_16x16_neighbors}, + {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, +}; + +const scan_order vp9_scan_orders[TX_SIZES][TX_TYPES] = { + { // TX_4X4 + {default_scan_4x4, vp9_default_iscan_4x4, default_scan_4x4_neighbors}, + {row_scan_4x4, vp9_row_iscan_4x4, row_scan_4x4_neighbors}, + {col_scan_4x4, vp9_col_iscan_4x4, col_scan_4x4_neighbors}, + 
{default_scan_4x4, vp9_default_iscan_4x4, default_scan_4x4_neighbors} + }, { // TX_8X8 + {default_scan_8x8, vp9_default_iscan_8x8, default_scan_8x8_neighbors}, + {row_scan_8x8, vp9_row_iscan_8x8, row_scan_8x8_neighbors}, + {col_scan_8x8, vp9_col_iscan_8x8, col_scan_8x8_neighbors}, + {default_scan_8x8, vp9_default_iscan_8x8, default_scan_8x8_neighbors} + }, { // TX_16X16 + {default_scan_16x16, vp9_default_iscan_16x16, default_scan_16x16_neighbors}, + {row_scan_16x16, vp9_row_iscan_16x16, row_scan_16x16_neighbors}, + {col_scan_16x16, vp9_col_iscan_16x16, col_scan_16x16_neighbors}, + {default_scan_16x16, vp9_default_iscan_16x16, default_scan_16x16_neighbors} + }, { // TX_32X32 + {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, + {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, + {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, + {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, + } +}; static int find_in_scan(const int16_t *scan, int l, int idx) { int n, l2 = l * l; @@ -276,9 +306,9 @@ static int find_in_scan(const int16_t *scan, int l, int idx) { assert(0); return -1; } -static void init_scan_neighbors(const int16_t *scan, - int16_t *iscan, - int l, int16_t *neighbors) { + +static void init_scan_neighbors(const int16_t *scan, int16_t *iscan, int l, + int16_t *neighbors) { int l2 = l * l; int n, i, j; @@ -302,15 +332,15 @@ static void init_scan_neighbors(const int16_t *scan, // use the combination of the two as a context. int a = (i - 1) * l + j; int b = i * l + j - 1; - if (scan == vp9_col_scan_4x4 || scan == vp9_col_scan_8x8 || - scan == vp9_col_scan_16x16) { + if (scan == col_scan_4x4 || scan == col_scan_8x8 || + scan == col_scan_16x16) { // in the col/row scan cases (as well as left/top edge cases), we set // both contexts to the same value, so we can branchlessly do a+b+1>>1 // which automatically becomes a if a == b neighbors[MAX_NEIGHBORS * n + 0] = neighbors[MAX_NEIGHBORS * n + 1] = a; - } else if (scan == vp9_row_scan_4x4 || scan == vp9_row_scan_8x8 || - scan == vp9_row_scan_16x16) { + } else if (scan == row_scan_4x4 || scan == row_scan_8x8 || + scan == row_scan_16x16) { neighbors[MAX_NEIGHBORS * n + 0] = neighbors[MAX_NEIGHBORS * n + 1] = b; } else { @@ -334,24 +364,24 @@ static void init_scan_neighbors(const int16_t *scan, } void vp9_init_neighbors() { - init_scan_neighbors(vp9_default_scan_4x4, vp9_default_iscan_4x4, 4, - vp9_default_scan_4x4_neighbors); - init_scan_neighbors(vp9_row_scan_4x4, vp9_row_iscan_4x4, 4, - vp9_row_scan_4x4_neighbors); - init_scan_neighbors(vp9_col_scan_4x4, vp9_col_iscan_4x4, 4, - vp9_col_scan_4x4_neighbors); - init_scan_neighbors(vp9_default_scan_8x8, vp9_default_iscan_8x8, 8, - vp9_default_scan_8x8_neighbors); - init_scan_neighbors(vp9_row_scan_8x8, vp9_row_iscan_8x8, 8, - vp9_row_scan_8x8_neighbors); - init_scan_neighbors(vp9_col_scan_8x8, vp9_col_iscan_8x8, 8, - vp9_col_scan_8x8_neighbors); - init_scan_neighbors(vp9_default_scan_16x16, vp9_default_iscan_16x16, 16, - vp9_default_scan_16x16_neighbors); - init_scan_neighbors(vp9_row_scan_16x16, vp9_row_iscan_16x16, 16, - vp9_row_scan_16x16_neighbors); - init_scan_neighbors(vp9_col_scan_16x16, vp9_col_iscan_16x16, 16, - vp9_col_scan_16x16_neighbors); - init_scan_neighbors(vp9_default_scan_32x32, vp9_default_iscan_32x32, 32, - vp9_default_scan_32x32_neighbors); + init_scan_neighbors(default_scan_4x4, vp9_default_iscan_4x4, 4, + default_scan_4x4_neighbors); + init_scan_neighbors(row_scan_4x4, 
vp9_row_iscan_4x4, 4, + row_scan_4x4_neighbors); + init_scan_neighbors(col_scan_4x4, vp9_col_iscan_4x4, 4, + col_scan_4x4_neighbors); + init_scan_neighbors(default_scan_8x8, vp9_default_iscan_8x8, 8, + default_scan_8x8_neighbors); + init_scan_neighbors(row_scan_8x8, vp9_row_iscan_8x8, 8, + row_scan_8x8_neighbors); + init_scan_neighbors(col_scan_8x8, vp9_col_iscan_8x8, 8, + col_scan_8x8_neighbors); + init_scan_neighbors(default_scan_16x16, vp9_default_iscan_16x16, 16, + default_scan_16x16_neighbors); + init_scan_neighbors(row_scan_16x16, vp9_row_iscan_16x16, 16, + row_scan_16x16_neighbors); + init_scan_neighbors(col_scan_16x16, vp9_col_iscan_16x16, 16, + col_scan_16x16_neighbors); + init_scan_neighbors(default_scan_32x32, vp9_default_iscan_32x32, 32, + default_scan_32x32_neighbors); } diff --git a/libvpx/vp9/common/vp9_scan.h b/libvpx/vp9/common/vp9_scan.h index 14a1a7e..9613b67 100644 --- a/libvpx/vp9/common/vp9_scan.h +++ b/libvpx/vp9/common/vp9_scan.h @@ -15,180 +15,24 @@ #include "vpx_ports/mem.h" #include "vp9/common/vp9_enums.h" +#include "vp9/common/vp9_blockd.h" -#define MAX_NEIGHBORS 2 - -extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_4x4[16]); -extern DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_4x4[16]); -extern DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_4x4[16]); - -extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_8x8[64]); -extern DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_8x8[64]); -extern DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_8x8[64]); - -extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_16x16[256]); -extern DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_16x16[256]); -extern DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_16x16[256]); - -extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_32x32[1024]); - -extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_4x4[16]); -extern DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_4x4[16]); -extern DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_4x4[16]); - -extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_8x8[64]); -extern DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_8x8[64]); -extern DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_8x8[64]); - -extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_16x16[256]); -extern DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_16x16[256]); -extern DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_16x16[256]); - -extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_32x32[1024]); - -extern DECLARE_ALIGNED(16, int16_t, - vp9_default_scan_4x4_neighbors[17 * MAX_NEIGHBORS]); -extern DECLARE_ALIGNED(16, int16_t, - vp9_col_scan_4x4_neighbors[17 * MAX_NEIGHBORS]); -extern DECLARE_ALIGNED(16, int16_t, - vp9_row_scan_4x4_neighbors[17 * MAX_NEIGHBORS]); -extern DECLARE_ALIGNED(16, int16_t, - vp9_col_scan_8x8_neighbors[65 * MAX_NEIGHBORS]); -extern DECLARE_ALIGNED(16, int16_t, - vp9_row_scan_8x8_neighbors[65 * MAX_NEIGHBORS]); -extern DECLARE_ALIGNED(16, int16_t, - vp9_default_scan_8x8_neighbors[65 * MAX_NEIGHBORS]); -extern DECLARE_ALIGNED(16, int16_t, - vp9_col_scan_16x16_neighbors[257 * MAX_NEIGHBORS]); -extern DECLARE_ALIGNED(16, int16_t, - vp9_row_scan_16x16_neighbors[257 * MAX_NEIGHBORS]); -extern DECLARE_ALIGNED(16, int16_t, - vp9_default_scan_16x16_neighbors[257 * MAX_NEIGHBORS]); -extern DECLARE_ALIGNED(16, int16_t, - vp9_default_scan_32x32_neighbors[1025 * MAX_NEIGHBORS]); +#ifdef __cplusplus +extern "C" { +#endif +#define MAX_NEIGHBORS 2 void vp9_init_neighbors(); -static INLINE const int16_t* get_scan_4x4(TX_TYPE tx_type) { - switch (tx_type) { - case 
ADST_DCT: - return vp9_row_scan_4x4; - case DCT_ADST: - return vp9_col_scan_4x4; - default: - return vp9_default_scan_4x4; - } -} - -static INLINE void get_scan_nb_4x4(TX_TYPE tx_type, - const int16_t **scan, const int16_t **nb) { - switch (tx_type) { - case ADST_DCT: - *scan = vp9_row_scan_4x4; - *nb = vp9_row_scan_4x4_neighbors; - break; - case DCT_ADST: - *scan = vp9_col_scan_4x4; - *nb = vp9_col_scan_4x4_neighbors; - break; - default: - *scan = vp9_default_scan_4x4; - *nb = vp9_default_scan_4x4_neighbors; - break; - } -} - -static INLINE const int16_t* get_iscan_4x4(TX_TYPE tx_type) { - switch (tx_type) { - case ADST_DCT: - return vp9_row_iscan_4x4; - case DCT_ADST: - return vp9_col_iscan_4x4; - default: - return vp9_default_iscan_4x4; - } -} - -static INLINE const int16_t* get_scan_8x8(TX_TYPE tx_type) { - switch (tx_type) { - case ADST_DCT: - return vp9_row_scan_8x8; - case DCT_ADST: - return vp9_col_scan_8x8; - default: - return vp9_default_scan_8x8; - } -} - -static INLINE void get_scan_nb_8x8(TX_TYPE tx_type, - const int16_t **scan, const int16_t **nb) { - switch (tx_type) { - case ADST_DCT: - *scan = vp9_row_scan_8x8; - *nb = vp9_row_scan_8x8_neighbors; - break; - case DCT_ADST: - *scan = vp9_col_scan_8x8; - *nb = vp9_col_scan_8x8_neighbors; - break; - default: - *scan = vp9_default_scan_8x8; - *nb = vp9_default_scan_8x8_neighbors; - break; - } -} - -static INLINE const int16_t* get_iscan_8x8(TX_TYPE tx_type) { - switch (tx_type) { - case ADST_DCT: - return vp9_row_iscan_8x8; - case DCT_ADST: - return vp9_col_iscan_8x8; - default: - return vp9_default_iscan_8x8; - } -} - -static INLINE const int16_t* get_scan_16x16(TX_TYPE tx_type) { - switch (tx_type) { - case ADST_DCT: - return vp9_row_scan_16x16; - case DCT_ADST: - return vp9_col_scan_16x16; - default: - return vp9_default_scan_16x16; - } -} +typedef struct { + const int16_t *scan; + const int16_t *iscan; + const int16_t *neighbors; +} scan_order; -static INLINE void get_scan_nb_16x16(TX_TYPE tx_type, - const int16_t **scan, const int16_t **nb) { - switch (tx_type) { - case ADST_DCT: - *scan = vp9_row_scan_16x16; - *nb = vp9_row_scan_16x16_neighbors; - break; - case DCT_ADST: - *scan = vp9_col_scan_16x16; - *nb = vp9_col_scan_16x16_neighbors; - break; - default: - *scan = vp9_default_scan_16x16; - *nb = vp9_default_scan_16x16_neighbors; - break; - } -} - -static INLINE const int16_t* get_iscan_16x16(TX_TYPE tx_type) { - switch (tx_type) { - case ADST_DCT: - return vp9_row_iscan_16x16; - case DCT_ADST: - return vp9_col_iscan_16x16; - default: - return vp9_default_iscan_16x16; - } -} +extern const scan_order vp9_default_scan_orders[TX_SIZES]; +extern const scan_order vp9_scan_orders[TX_SIZES][TX_TYPES]; static INLINE int get_coef_context(const int16_t *neighbors, const uint8_t *token_cache, int c) { @@ -196,4 +40,8 @@ static INLINE int get_coef_context(const int16_t *neighbors, token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> 1; } +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_SCAN_H_ diff --git a/libvpx/vp9/common/vp9_seg_common.c b/libvpx/vp9/common/vp9_seg_common.c index ef30404..910200e 100644 --- a/libvpx/vp9/common/vp9_seg_common.c +++ b/libvpx/vp9/common/vp9_seg_common.c @@ -41,11 +41,6 @@ void vp9_enable_segfeature(struct segmentation *seg, int segment_id, seg->feature_mask[segment_id] |= 1 << feature_id; } -void vp9_disable_segfeature(struct segmentation *seg, int segment_id, - SEG_LVL_FEATURES feature_id) { - seg->feature_mask[segment_id] &= ~(1 << feature_id); -} - int 
vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id) { return seg_feature_data_max[feature_id]; } @@ -54,11 +49,6 @@ int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id) { return seg_feature_data_signed[feature_id]; } -void vp9_clear_segdata(struct segmentation *seg, int segment_id, - SEG_LVL_FEATURES feature_id) { - seg->feature_data[segment_id][feature_id] = 0; -} - void vp9_set_segdata(struct segmentation *seg, int segment_id, SEG_LVL_FEATURES feature_id, int seg_data) { assert(seg_data <= seg_feature_data_max[feature_id]); diff --git a/libvpx/vp9/common/vp9_seg_common.h b/libvpx/vp9/common/vp9_seg_common.h index eb38c06..ff2d66a 100644 --- a/libvpx/vp9/common/vp9_seg_common.h +++ b/libvpx/vp9/common/vp9_seg_common.h @@ -11,7 +11,11 @@ #ifndef VP9_COMMON_VP9_SEG_COMMON_H_ #define VP9_COMMON_VP9_SEG_COMMON_H_ -#include "vp9/common/vp9_treecoder.h" +#include "vp9/common/vp9_prob.h" + +#ifdef __cplusplus +extern "C" { +#endif #define SEGMENT_DELTADATA 0 #define SEGMENT_ABSDATA 1 @@ -55,18 +59,10 @@ void vp9_enable_segfeature(struct segmentation *seg, int segment_id, SEG_LVL_FEATURES feature_id); -void vp9_disable_segfeature(struct segmentation *seg, - int segment_id, - SEG_LVL_FEATURES feature_id); - int vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id); int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id); -void vp9_clear_segdata(struct segmentation *seg, - int segment_id, - SEG_LVL_FEATURES feature_id); - void vp9_set_segdata(struct segmentation *seg, int segment_id, SEG_LVL_FEATURES feature_id, @@ -78,5 +74,9 @@ int vp9_get_segdata(const struct segmentation *seg, extern const vp9_tree_index vp9_segment_tree[TREE_SIZE(MAX_SEGMENTS)]; +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_SEG_COMMON_H_ diff --git a/libvpx/vp9/common/vp9_systemdependent.h b/libvpx/vp9/common/vp9_systemdependent.h index 254a431..e971158 100644 --- a/libvpx/vp9/common/vp9_systemdependent.h +++ b/libvpx/vp9/common/vp9_systemdependent.h @@ -12,8 +12,16 @@ #define VP9_COMMON_VP9_SYSTEMDEPENDENT_H_ #ifdef _MSC_VER -#include <math.h> -#define snprintf _snprintf +# include <math.h> // the ceil() definition must precede intrin.h +# if _MSC_VER > 1310 && (defined(_M_X64) || defined(_M_IX86)) +# include <intrin.h> +# define USE_MSC_INTRIN +# endif +# define snprintf _snprintf +#endif + +#ifdef __cplusplus +extern "C" { #endif #include "./vpx_config.h" @@ -26,7 +34,7 @@ void vpx_reset_mmx_state(void); #if defined(_MSC_VER) && _MSC_VER < 1800 // round is not defined in MSVC before VS2013. -static int round(double x) { +static INLINE int round(double x) { if (x < 0) return (int)ceil(x - 0.5); else @@ -34,7 +42,42 @@ static int round(double x) { } #endif -struct VP9Common; -void vp9_machine_specific_config(struct VP9Common *cm); +// use GNU builtins where available. +#if defined(__GNUC__) && \ + ((__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || __GNUC__ >= 4) +static INLINE int get_msb(unsigned int n) { + return 31 ^ __builtin_clz(n); +} +#elif defined(USE_MSC_INTRIN) +#pragma intrinsic(_BitScanReverse) + +static INLINE int get_msb(unsigned int n) { + unsigned long first_set_bit; + _BitScanReverse(&first_set_bit, n); + return first_set_bit; +} +#undef USE_MSC_INTRIN +#else +// Returns (int)floor(log2(n)). n must be > 0. 
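All three get_msb variants in this vp9_systemdependent.h hunk honor the contract just stated: get_msb(n) == (int)floor(log2(n)) for n > 0. With GCC, 31 ^ __builtin_clz(n) equals 31 - __builtin_clz(n), since clz(n) <= 31 for nonzero n and the XOR form is cheaper; _BitScanReverse writes the highest set bit index directly. A quick contract check, linking against any of the three definitions:

#include <assert.h>

int get_msb(unsigned int n);  /* any of the definitions in this hunk */

int main(void) {
  assert(get_msb(1) == 0);
  assert(get_msb(2) == 1);
  assert(get_msb(255) == 7);           /* floor(log2(255)) */
  assert(get_msb(256) == 8);
  assert(get_msb(0x80000000u) == 31);
  return 0;
}

The portable fallback that follows reaches the same result with a five-step binary search over shift widths 16, 8, 4, 2 and 1.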
+static INLINE int get_msb(unsigned int n) { + int log = 0; + unsigned int value = n; + int i; + + for (i = 4; i >= 0; --i) { + const int shift = (1 << i); + const unsigned int x = value >> shift; + if (x != 0) { + value = x; + log += shift; + } + } + return log; +} +#endif + +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_COMMON_VP9_SYSTEMDEPENDENT_H_ diff --git a/libvpx/vp9/common/vp9_textblit.h b/libvpx/vp9/common/vp9_textblit.h index c968628..158ec1b 100644 --- a/libvpx/vp9/common/vp9_textblit.h +++ b/libvpx/vp9/common/vp9_textblit.h @@ -11,9 +11,17 @@ #ifndef VP9_COMMON_VP9_TEXTBLIT_H_ #define VP9_COMMON_VP9_TEXTBLIT_H_ +#ifdef __cplusplus +extern "C" { +#endif + void vp9_blit_text(const char *msg, unsigned char *address, int pitch); void vp9_blit_line(int x0, int x1, int y0, int y1, unsigned char *image, int pitch); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_TEXTBLIT_H_ diff --git a/libvpx/vp9/common/vp9_tile_common.c b/libvpx/vp9/common/vp9_tile_common.c index e3035d0..78909dd 100644 --- a/libvpx/vp9/common/vp9_tile_common.c +++ b/libvpx/vp9/common/vp9_tile_common.c @@ -15,46 +15,37 @@ #define MIN_TILE_WIDTH_B64 4 #define MAX_TILE_WIDTH_B64 64 -static int to_sbs(n_mis) { - return mi_cols_aligned_to_sb(n_mis) >> MI_BLOCK_SIZE_LOG2; +static int get_tile_offset(int idx, int mis, int log2) { + const int sb_cols = mi_cols_aligned_to_sb(mis) >> MI_BLOCK_SIZE_LOG2; + const int offset = ((idx * sb_cols) >> log2) << MI_BLOCK_SIZE_LOG2; + return MIN(offset, mis); } -static void get_tile_offsets(int *min_tile_off, int *max_tile_off, - int tile_idx, int log2_n_tiles, int n_mis) { - const int n_sbs = to_sbs(n_mis); - const int sb_off1 = (tile_idx * n_sbs) >> log2_n_tiles; - const int sb_off2 = ((tile_idx + 1) * n_sbs) >> log2_n_tiles; - - *min_tile_off = MIN(sb_off1 << 3, n_mis); - *max_tile_off = MIN(sb_off2 << 3, n_mis); -} - -void vp9_tile_init(TileInfo *tile, const VP9_COMMON *cm, - int row_idx, int col_idx) { - get_tile_offsets(&tile->mi_row_start, &tile->mi_row_end, - row_idx, cm->log2_tile_rows, cm->mi_rows); - get_tile_offsets(&tile->mi_col_start, &tile->mi_col_end, - col_idx, cm->log2_tile_cols, cm->mi_cols); +void vp9_tile_init(TileInfo *tile, const VP9_COMMON *cm, int row, int col) { + tile->mi_row_start = get_tile_offset(row, cm->mi_rows, cm->log2_tile_rows); + tile->mi_row_end = get_tile_offset(row + 1, cm->mi_rows, cm->log2_tile_rows); + tile->mi_col_start = get_tile_offset(col, cm->mi_cols, cm->log2_tile_cols); + tile->mi_col_end = get_tile_offset(col + 1, cm->mi_cols, cm->log2_tile_cols); } void vp9_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols, int *max_log2_tile_cols) { - const int sb_cols = to_sbs(mi_cols); - int min_log2_n_tiles, max_log2_n_tiles; + const int sb_cols = mi_cols_aligned_to_sb(mi_cols) >> MI_BLOCK_SIZE_LOG2; + int min_log2 = 0, max_log2 = 0; - for (max_log2_n_tiles = 0; - (sb_cols >> max_log2_n_tiles) >= MIN_TILE_WIDTH_B64; - max_log2_n_tiles++) {} - max_log2_n_tiles--; - if (max_log2_n_tiles < 0) - max_log2_n_tiles = 0; + // max + while ((sb_cols >> max_log2) >= MIN_TILE_WIDTH_B64) + ++max_log2; + --max_log2; + if (max_log2 < 0) + max_log2 = 0; - for (min_log2_n_tiles = 0; - (MAX_TILE_WIDTH_B64 << min_log2_n_tiles) < sb_cols; - min_log2_n_tiles++) {} + // min + while ((MAX_TILE_WIDTH_B64 << min_log2) < sb_cols) + ++min_log2; - assert(min_log2_n_tiles <= max_log2_n_tiles); + assert(min_log2 <= max_log2); - *min_log2_tile_cols = min_log2_n_tiles; - *max_log2_tile_cols = max_log2_n_tiles; + *min_log2_tile_cols = 
min_log2; + *max_log2_tile_cols = max_log2; } diff --git a/libvpx/vp9/common/vp9_tile_common.h b/libvpx/vp9/common/vp9_tile_common.h index a110abb..a97719e 100644 --- a/libvpx/vp9/common/vp9_tile_common.h +++ b/libvpx/vp9/common/vp9_tile_common.h @@ -11,6 +11,10 @@ #ifndef VP9_COMMON_VP9_TILE_COMMON_H_ #define VP9_COMMON_VP9_TILE_COMMON_H_ +#ifdef __cplusplus +extern "C" { +#endif + struct VP9Common; typedef struct TileInfo { @@ -18,12 +22,16 @@ typedef struct TileInfo { int mi_col_start, mi_col_end; } TileInfo; -// initializes 'tile->mi_(row|col)_(start|end)' for (row_idx, col_idx) based on +// initializes 'tile->mi_(row|col)_(start|end)' for (row, col) based on // 'cm->log2_tile_(rows|cols)' & 'cm->mi_(rows|cols)' void vp9_tile_init(TileInfo *tile, const struct VP9Common *cm, - int row_idx, int col_idx); + int row, int col); void vp9_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols, int *max_log2_tile_cols); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_VP9_TILE_COMMON_H_ diff --git a/libvpx/vp9/common/vp9_treecoder.c b/libvpx/vp9/common/vp9_treecoder.c deleted file mode 100644 index e2a5b9f..0000000 --- a/libvpx/vp9/common/vp9_treecoder.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#include <assert.h> - -#include "./vpx_config.h" -#include "vp9/common/vp9_treecoder.h" - -static void tree2tok(struct vp9_token *const p, vp9_tree t, - int i, int v, int l) { - v += v; - ++l; - - do { - const vp9_tree_index j = t[i++]; - - if (j <= 0) { - p[-j].value = v; - p[-j].len = l; - } else { - tree2tok(p, t, j, v, l); - } - } while (++v & 1); -} - -void vp9_tokens_from_tree(struct vp9_token *p, vp9_tree t) { - tree2tok(p, t, 0, 0, 0); -} - -static unsigned int convert_distribution(unsigned int i, vp9_tree tree, - unsigned int branch_ct[][2], - const unsigned int num_events[]) { - unsigned int left, right; - - if (tree[i] <= 0) - left = num_events[-tree[i]]; - else - left = convert_distribution(tree[i], tree, branch_ct, num_events); - - if (tree[i + 1] <= 0) - right = num_events[-tree[i + 1]]; - else - right = convert_distribution(tree[i + 1], tree, branch_ct, num_events); - - branch_ct[i >> 1][0] = left; - branch_ct[i >> 1][1] = right; - return left + right; -} - -void vp9_tree_probs_from_distribution(vp9_tree tree, - unsigned int branch_ct[/* n-1 */][2], - const unsigned int num_events[/* n */]) { - convert_distribution(0, tree, branch_ct, num_events); -} - - diff --git a/libvpx/vp9/common/vp9_treecoder.h b/libvpx/vp9/common/vp9_treecoder.h deleted file mode 100644 index a79b156..0000000 --- a/libvpx/vp9/common/vp9_treecoder.h +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
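Back to the vp9_tile_common.c rewrite above: the new get_tile_offset and vp9_get_tile_n_bits are easiest to sanity-check with concrete numbers. A self-contained sketch, where mi_cols_aligned_to_sb is assumed to round up to a multiple of 8 MI units (one 64x64 superblock):

#include <assert.h>

#define MI_BLOCK_SIZE_LOG2 3

static int get_tile_offset(int idx, int mis, int log2) {
  const int sb_cols = ((mis + 7) & ~7) >> MI_BLOCK_SIZE_LOG2;
  const int offset = ((idx * sb_cols) >> log2) << MI_BLOCK_SIZE_LOG2;
  return offset < mis ? offset : mis;  /* MIN(offset, mis) */
}

int main(void) {
  /* 1080p: 1920 / 8 = 240 MI columns = 30 superblock columns. For 30,
   * vp9_get_tile_n_bits gives min_log2 = 0 (64 << 0 >= 30) and
   * max_log2 = 2 (30 >> 3 drops below MIN_TILE_WIDTH_B64). */
  assert(get_tile_offset(0, 240, 1) == 0);
  assert(get_tile_offset(1, 240, 1) == 120);  /* split at superblock 15 */
  assert(get_tile_offset(2, 240, 1) == 240);  /* clamped to mi_cols */
  return 0;
}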
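The vp9_treecoder.{c,h} deletion that follows moves the tree and probability helpers to vp9_prob.h (hence the #include swap in the vp9_seg_common.h hunk earlier). The probability arithmetic deserves one worked example, transcribed from the deleted header: with branch counts {3, 1}, get_binary_prob estimates 192/256 for the zero branch, and merge_probs blends the previous frame's probability toward that estimate by a factor that grows with the sample count, saturating at count_sat:

#include <assert.h>
#include <stdint.h>

typedef uint8_t vp9_prob;
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

static vp9_prob clip_prob(int p) {
  return (p > 255) ? 255u : (p < 1) ? 1u : p;
}

static vp9_prob get_prob(int num, int den) {
  return (den == 0) ? 128u : clip_prob((num * 256 + (den >> 1)) / den);
}

static vp9_prob weighted_prob(int prob1, int prob2, int factor) {
  return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8);
}

int main(void) {
  assert(get_prob(3, 3 + 1) == 192);  /* (3 * 256 + 2) / 4 */
  /* count 4 of count_sat 20, max_update_factor 128 -> factor 25;
   * a previous prob of 128 blended toward 192 yields 134. */
  assert(weighted_prob(128, 192, 128 * 4 / 20) == 134);
  return 0;
}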
- */ - -#ifndef VP9_COMMON_VP9_TREECODER_H_ -#define VP9_COMMON_VP9_TREECODER_H_ - -#include "./vpx_config.h" -#include "vpx/vpx_integer.h" -#include "vp9/common/vp9_common.h" - -typedef uint8_t vp9_prob; - -#define vp9_prob_half ((vp9_prob) 128) - -typedef int8_t vp9_tree_index; - -#define TREE_SIZE(leaf_count) (2 * (leaf_count) - 2) - -#define vp9_complement(x) (255 - x) - -/* We build coding trees compactly in arrays. - Each node of the tree is a pair of vp9_tree_indices. - Array index often references a corresponding probability table. - Index <= 0 means done encoding/decoding and value = -Index, - Index > 0 means need another bit, specification at index. - Nonnegative indices are always even; processing begins at node 0. */ - -typedef const vp9_tree_index vp9_tree[]; - -struct vp9_token { - int value; - int len; -}; - -/* Construct encoding array from tree. */ - -void vp9_tokens_from_tree(struct vp9_token*, vp9_tree); - -/* Convert array of token occurrence counts into a table of probabilities - for the associated binary encoding tree. Also writes count of branches - taken for each node on the tree; this facilitiates decisions as to - probability updates. */ - -void vp9_tree_probs_from_distribution(vp9_tree tree, - unsigned int branch_ct[ /* n - 1 */ ][2], - const unsigned int num_events[ /* n */ ]); - - -static INLINE vp9_prob clip_prob(int p) { - return (p > 255) ? 255u : (p < 1) ? 1u : p; -} - -// int64 is not needed for normal frame level calculations. -// However when outputing entropy stats accumulated over many frames -// or even clips we can overflow int math. -#ifdef ENTROPY_STATS -static INLINE vp9_prob get_prob(int num, int den) { - return (den == 0) ? 128u : clip_prob(((int64_t)num * 256 + (den >> 1)) / den); -} -#else -static INLINE vp9_prob get_prob(int num, int den) { - return (den == 0) ? 128u : clip_prob((num * 256 + (den >> 1)) / den); -} -#endif - -static INLINE vp9_prob get_binary_prob(int n0, int n1) { - return get_prob(n0, n0 + n1); -} - -/* this function assumes prob1 and prob2 are already within [1,255] range */ -static INLINE vp9_prob weighted_prob(int prob1, int prob2, int factor) { - return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8); -} - -static INLINE vp9_prob merge_probs(vp9_prob pre_prob, - const unsigned int ct[2], - unsigned int count_sat, - unsigned int max_update_factor) { - const vp9_prob prob = get_binary_prob(ct[0], ct[1]); - const unsigned int count = MIN(ct[0] + ct[1], count_sat); - const unsigned int factor = max_update_factor * count / count_sat; - return weighted_prob(pre_prob, prob, factor); -} - -static unsigned int tree_merge_probs_impl(unsigned int i, - const vp9_tree_index *tree, - const vp9_prob *pre_probs, - const unsigned int *counts, - unsigned int count_sat, - unsigned int max_update_factor, - vp9_prob *probs) { - const int l = tree[i]; - const unsigned int left_count = (l <= 0) - ? counts[-l] - : tree_merge_probs_impl(l, tree, pre_probs, counts, - count_sat, max_update_factor, probs); - const int r = tree[i + 1]; - const unsigned int right_count = (r <= 0) - ? 
counts[-r] - : tree_merge_probs_impl(r, tree, pre_probs, counts, - count_sat, max_update_factor, probs); - const unsigned int ct[2] = { left_count, right_count }; - probs[i >> 1] = merge_probs(pre_probs[i >> 1], ct, - count_sat, max_update_factor); - return left_count + right_count; -} - -static void tree_merge_probs(const vp9_tree_index *tree, - const vp9_prob *pre_probs, - const unsigned int *counts, - unsigned int count_sat, - unsigned int max_update_factor, vp9_prob *probs) { - tree_merge_probs_impl(0, tree, pre_probs, counts, - count_sat, max_update_factor, probs); -} - - -#endif // VP9_COMMON_VP9_TREECODER_H_ diff --git a/libvpx/vp9/common/x86/vp9_asm_stubs.c b/libvpx/vp9/common/x86/vp9_asm_stubs.c index 106e6d4..1b4904c 100644 --- a/libvpx/vp9/common/x86/vp9_asm_stubs.c +++ b/libvpx/vp9/common/x86/vp9_asm_stubs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -13,45 +13,205 @@ #include "./vpx_config.h" #include "./vp9_rtcd.h" #include "vpx_ports/mem.h" -/////////////////////////////////////////////////////////////////////////// -// the mmx function that does the bilinear filtering and var calculation // -// int one pass // -/////////////////////////////////////////////////////////////////////////// -DECLARE_ALIGNED(16, const short, vp9_bilinear_filters_mmx[16][8]) = { - { 128, 128, 128, 128, 0, 0, 0, 0 }, - { 120, 120, 120, 120, 8, 8, 8, 8 }, - { 112, 112, 112, 112, 16, 16, 16, 16 }, - { 104, 104, 104, 104, 24, 24, 24, 24 }, - { 96, 96, 96, 96, 32, 32, 32, 32 }, - { 88, 88, 88, 88, 40, 40, 40, 40 }, - { 80, 80, 80, 80, 48, 48, 48, 48 }, - { 72, 72, 72, 72, 56, 56, 56, 56 }, - { 64, 64, 64, 64, 64, 64, 64, 64 }, - { 56, 56, 56, 56, 72, 72, 72, 72 }, - { 48, 48, 48, 48, 80, 80, 80, 80 }, - { 40, 40, 40, 40, 88, 88, 88, 88 }, - { 32, 32, 32, 32, 96, 96, 96, 96 }, - { 24, 24, 24, 24, 104, 104, 104, 104 }, - { 16, 16, 16, 16, 112, 112, 112, 112 }, - { 8, 8, 8, 8, 120, 120, 120, 120 } -}; typedef void filter8_1dfunction ( const unsigned char *src_ptr, - const unsigned int src_pitch, + const ptrdiff_t src_pitch, unsigned char *output_ptr, - unsigned int out_pitch, + ptrdiff_t out_pitch, unsigned int output_height, const short *filter ); +#define FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \ + void vp9_convolve8_##name##_##opt(const uint8_t *src, ptrdiff_t src_stride, \ + uint8_t *dst, ptrdiff_t dst_stride, \ + const int16_t *filter_x, int x_step_q4, \ + const int16_t *filter_y, int y_step_q4, \ + int w, int h) { \ + if (step_q4 == 16 && filter[3] != 128) { \ + if (filter[0] || filter[1] || filter[2]) { \ + while (w >= 16) { \ + vp9_filter_block1d16_##dir##8_##avg##opt(src_start, \ + src_stride, \ + dst, \ + dst_stride, \ + h, \ + filter); \ + src += 16; \ + dst += 16; \ + w -= 16; \ + } \ + while (w >= 8) { \ + vp9_filter_block1d8_##dir##8_##avg##opt(src_start, \ + src_stride, \ + dst, \ + dst_stride, \ + h, \ + filter); \ + src += 8; \ + dst += 8; \ + w -= 8; \ + } \ + while (w >= 4) { \ + vp9_filter_block1d4_##dir##8_##avg##opt(src_start, \ + src_stride, \ + dst, \ + dst_stride, \ + h, \ + filter); \ + src += 4; \ + dst += 4; \ + w -= 4; \ + } \ + } else { \ + while (w >= 16) { \ + vp9_filter_block1d16_##dir##2_##avg##opt(src, \ + src_stride, \ + dst, \ + dst_stride, \ + h, \ + filter); \ + src += 16; \ + dst += 16; \ + w -= 16; \ 
+ } \ + while (w >= 8) { \ + vp9_filter_block1d8_##dir##2_##avg##opt(src, \ + src_stride, \ + dst, \ + dst_stride, \ + h, \ + filter); \ + src += 8; \ + dst += 8; \ + w -= 8; \ + } \ + while (w >= 4) { \ + vp9_filter_block1d4_##dir##2_##avg##opt(src, \ + src_stride, \ + dst, \ + dst_stride, \ + h, \ + filter); \ + src += 4; \ + dst += 4; \ + w -= 4; \ + } \ + } \ + } \ + if (w) { \ + vp9_convolve8_##name##_c(src, src_stride, dst, dst_stride, \ + filter_x, x_step_q4, filter_y, y_step_q4, \ + w, h); \ + } \ +} + +#define FUN_CONV_2D(avg, opt) \ +void vp9_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \ + uint8_t *dst, ptrdiff_t dst_stride, \ + const int16_t *filter_x, int x_step_q4, \ + const int16_t *filter_y, int y_step_q4, \ + int w, int h) { \ + assert(w <= 64); \ + assert(h <= 64); \ + if (x_step_q4 == 16 && y_step_q4 == 16) { \ + if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 || \ + filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \ + DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 71); \ + vp9_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \ + filter_x, x_step_q4, filter_y, y_step_q4, \ + w, h + 7); \ + vp9_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride, \ + filter_x, x_step_q4, filter_y, \ + y_step_q4, w, h); \ + } else { \ + DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 65); \ + vp9_convolve8_horiz_##opt(src, src_stride, fdata2, 64, \ + filter_x, x_step_q4, filter_y, y_step_q4, \ + w, h + 1); \ + vp9_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, \ + filter_x, x_step_q4, filter_y, \ + y_step_q4, w, h); \ + } \ + } else { \ + vp9_convolve8_##avg##c(src, src_stride, dst, dst_stride, \ + filter_x, x_step_q4, filter_y, y_step_q4, w, h); \ + } \ +} +#if HAVE_AVX2 +filter8_1dfunction vp9_filter_block1d16_v8_avx2; +filter8_1dfunction vp9_filter_block1d16_h8_avx2; +filter8_1dfunction vp9_filter_block1d4_v8_ssse3; +#if (ARCH_X86_64) +filter8_1dfunction vp9_filter_block1d8_v8_intrin_ssse3; +filter8_1dfunction vp9_filter_block1d8_h8_intrin_ssse3; +filter8_1dfunction vp9_filter_block1d4_h8_intrin_ssse3; +#define vp9_filter_block1d8_v8_avx2 vp9_filter_block1d8_v8_intrin_ssse3 +#define vp9_filter_block1d8_h8_avx2 vp9_filter_block1d8_h8_intrin_ssse3 +#define vp9_filter_block1d4_h8_avx2 vp9_filter_block1d4_h8_intrin_ssse3 +#else +filter8_1dfunction vp9_filter_block1d8_v8_ssse3; +filter8_1dfunction vp9_filter_block1d8_h8_ssse3; +filter8_1dfunction vp9_filter_block1d4_h8_ssse3; +#define vp9_filter_block1d8_v8_avx2 vp9_filter_block1d8_v8_ssse3 +#define vp9_filter_block1d8_h8_avx2 vp9_filter_block1d8_h8_ssse3 +#define vp9_filter_block1d4_h8_avx2 vp9_filter_block1d4_h8_ssse3 +#endif +filter8_1dfunction vp9_filter_block1d16_v2_ssse3; +filter8_1dfunction vp9_filter_block1d16_h2_ssse3; +filter8_1dfunction vp9_filter_block1d8_v2_ssse3; +filter8_1dfunction vp9_filter_block1d8_h2_ssse3; +filter8_1dfunction vp9_filter_block1d4_v2_ssse3; +filter8_1dfunction vp9_filter_block1d4_h2_ssse3; +#define vp9_filter_block1d4_v8_avx2 vp9_filter_block1d4_v8_ssse3 +#define vp9_filter_block1d16_v2_avx2 vp9_filter_block1d16_v2_ssse3 +#define vp9_filter_block1d16_h2_avx2 vp9_filter_block1d16_h2_ssse3 +#define vp9_filter_block1d8_v2_avx2 vp9_filter_block1d8_v2_ssse3 +#define vp9_filter_block1d8_h2_avx2 vp9_filter_block1d8_h2_ssse3 +#define vp9_filter_block1d4_v2_avx2 vp9_filter_block1d4_v2_ssse3 +#define vp9_filter_block1d4_h2_avx2 vp9_filter_block1d4_h2_ssse3 +// void vp9_convolve8_horiz_avx2(const uint8_t 
*src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +// void vp9_convolve8_vert_avx2(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , avx2); +FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , avx2); + +// void vp9_convolve8_avx2(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +FUN_CONV_2D(, avx2); +#endif #if HAVE_SSSE3 +#if (ARCH_X86_64) +filter8_1dfunction vp9_filter_block1d16_v8_intrin_ssse3; +filter8_1dfunction vp9_filter_block1d16_h8_intrin_ssse3; +filter8_1dfunction vp9_filter_block1d8_v8_intrin_ssse3; +filter8_1dfunction vp9_filter_block1d8_h8_intrin_ssse3; +filter8_1dfunction vp9_filter_block1d4_v8_ssse3; +filter8_1dfunction vp9_filter_block1d4_h8_intrin_ssse3; +#define vp9_filter_block1d16_v8_ssse3 vp9_filter_block1d16_v8_intrin_ssse3 +#define vp9_filter_block1d16_h8_ssse3 vp9_filter_block1d16_h8_intrin_ssse3 +#define vp9_filter_block1d8_v8_ssse3 vp9_filter_block1d8_v8_intrin_ssse3 +#define vp9_filter_block1d8_h8_ssse3 vp9_filter_block1d8_h8_intrin_ssse3 +#define vp9_filter_block1d4_h8_ssse3 vp9_filter_block1d4_h8_intrin_ssse3 +#else filter8_1dfunction vp9_filter_block1d16_v8_ssse3; filter8_1dfunction vp9_filter_block1d16_h8_ssse3; filter8_1dfunction vp9_filter_block1d8_v8_ssse3; filter8_1dfunction vp9_filter_block1d8_h8_ssse3; filter8_1dfunction vp9_filter_block1d4_v8_ssse3; filter8_1dfunction vp9_filter_block1d4_h8_ssse3; +#endif filter8_1dfunction vp9_filter_block1d16_v8_avg_ssse3; filter8_1dfunction vp9_filter_block1d16_h8_avg_ssse3; filter8_1dfunction vp9_filter_block1d8_v8_avg_ssse3; @@ -59,201 +219,57 @@ filter8_1dfunction vp9_filter_block1d8_h8_avg_ssse3; filter8_1dfunction vp9_filter_block1d4_v8_avg_ssse3; filter8_1dfunction vp9_filter_block1d4_h8_avg_ssse3; -void vp9_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - /* Ensure the filter can be compressed to int16_t. 
*/ - if (x_step_q4 == 16 && filter_x[3] != 128) { - while (w >= 16) { - vp9_filter_block1d16_h8_ssse3(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 16; - dst += 16; - w -= 16; - } - while (w >= 8) { - vp9_filter_block1d8_h8_ssse3(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 8; - dst += 8; - w -= 8; - } - while (w >= 4) { - vp9_filter_block1d4_h8_ssse3(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 4; - dst += 4; - w -= 4; - } - } - if (w) { - vp9_convolve8_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } -} - -void vp9_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - if (y_step_q4 == 16 && filter_y[3] != 128) { - while (w >= 16) { - vp9_filter_block1d16_v8_ssse3(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 16; - dst += 16; - w -= 16; - } - while (w >= 8) { - vp9_filter_block1d8_v8_ssse3(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 8; - dst += 8; - w -= 8; - } - while (w >= 4) { - vp9_filter_block1d4_v8_ssse3(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 4; - dst += 4; - w -= 4; - } - } - if (w) { - vp9_convolve8_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } -} - -void vp9_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - if (x_step_q4 == 16 && filter_x[3] != 128) { - while (w >= 16) { - vp9_filter_block1d16_h8_avg_ssse3(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 16; - dst += 16; - w -= 16; - } - while (w >= 8) { - vp9_filter_block1d8_h8_avg_ssse3(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 8; - dst += 8; - w -= 8; - } - while (w >= 4) { - vp9_filter_block1d4_h8_avg_ssse3(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 4; - dst += 4; - w -= 4; - } - } - if (w) { - vp9_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } -} - -void vp9_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - if (y_step_q4 == 16 && filter_y[3] != 128) { - while (w >= 16) { - vp9_filter_block1d16_v8_avg_ssse3(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 16; - dst += 16; - w -= 16; - } - while (w >= 8) { - vp9_filter_block1d8_v8_avg_ssse3(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 8; - dst += 8; - w -= 8; - } - while (w >= 4) { - vp9_filter_block1d4_v8_avg_ssse3(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 4; - dst += 4; - w -= 4; - } - } - if (w) { - vp9_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } -} - -void vp9_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 71); - - assert(w <= 64); - assert(h <= 64); - if (x_step_q4 == 16 && y_step_q4 
== 16) { - vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h + 7); - vp9_convolve8_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h); - } else { - vp9_convolve8_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h); - } -} +filter8_1dfunction vp9_filter_block1d16_v2_ssse3; +filter8_1dfunction vp9_filter_block1d16_h2_ssse3; +filter8_1dfunction vp9_filter_block1d8_v2_ssse3; +filter8_1dfunction vp9_filter_block1d8_h2_ssse3; +filter8_1dfunction vp9_filter_block1d4_v2_ssse3; +filter8_1dfunction vp9_filter_block1d4_h2_ssse3; +filter8_1dfunction vp9_filter_block1d16_v2_avg_ssse3; +filter8_1dfunction vp9_filter_block1d16_h2_avg_ssse3; +filter8_1dfunction vp9_filter_block1d8_v2_avg_ssse3; +filter8_1dfunction vp9_filter_block1d8_h2_avg_ssse3; +filter8_1dfunction vp9_filter_block1d4_v2_avg_ssse3; +filter8_1dfunction vp9_filter_block1d4_h2_avg_ssse3; -void vp9_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 71); +// void vp9_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +// void vp9_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +// void vp9_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +// void vp9_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , ssse3); +FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , ssse3); +FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, ssse3); +FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_, + ssse3); - assert(w <= 64); - assert(h <= 64); - if (x_step_q4 == 16 && y_step_q4 == 16) { - vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h + 7); - vp9_convolve8_avg_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } else { - vp9_convolve8_avg_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h); - } -} +// void vp9_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +// void vp9_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +FUN_CONV_2D(, ssse3); +FUN_CONV_2D(avg_ , ssse3); #endif #if HAVE_SSE2 @@ -270,199 +286,54 @@ filter8_1dfunction vp9_filter_block1d8_h8_avg_sse2; filter8_1dfunction 
vp9_filter_block1d4_v8_avg_sse2; filter8_1dfunction vp9_filter_block1d4_h8_avg_sse2; -void vp9_convolve8_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - /* Ensure the filter can be compressed to int16_t. */ - if (x_step_q4 == 16 && filter_x[3] != 128) { - while (w >= 16) { - vp9_filter_block1d16_h8_sse2(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 16; - dst += 16; - w -= 16; - } - while (w >= 8) { - vp9_filter_block1d8_h8_sse2(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 8; - dst += 8; - w -= 8; - } - while (w >= 4) { - vp9_filter_block1d4_h8_sse2(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 4; - dst += 4; - w -= 4; - } - } - if (w) { - vp9_convolve8_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } -} +filter8_1dfunction vp9_filter_block1d16_v2_sse2; +filter8_1dfunction vp9_filter_block1d16_h2_sse2; +filter8_1dfunction vp9_filter_block1d8_v2_sse2; +filter8_1dfunction vp9_filter_block1d8_h2_sse2; +filter8_1dfunction vp9_filter_block1d4_v2_sse2; +filter8_1dfunction vp9_filter_block1d4_h2_sse2; +filter8_1dfunction vp9_filter_block1d16_v2_avg_sse2; +filter8_1dfunction vp9_filter_block1d16_h2_avg_sse2; +filter8_1dfunction vp9_filter_block1d8_v2_avg_sse2; +filter8_1dfunction vp9_filter_block1d8_h2_avg_sse2; +filter8_1dfunction vp9_filter_block1d4_v2_avg_sse2; +filter8_1dfunction vp9_filter_block1d4_h2_avg_sse2; -void vp9_convolve8_vert_sse2(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - if (y_step_q4 == 16 && filter_y[3] != 128) { - while (w >= 16) { - vp9_filter_block1d16_v8_sse2(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 16; - dst += 16; - w -= 16; - } - while (w >= 8) { - vp9_filter_block1d8_v8_sse2(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 8; - dst += 8; - w -= 8; - } - while (w >= 4) { - vp9_filter_block1d4_v8_sse2(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 4; - dst += 4; - w -= 4; - } - } - if (w) { - vp9_convolve8_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } -} +// void vp9_convolve8_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +// void vp9_convolve8_vert_sse2(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +// void vp9_convolve8_avg_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +// void vp9_convolve8_avg_vert_sse2(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , sse2); +FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , sse2); +FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, sse2); +FUN_CONV_1D(avg_vert, 
y_step_q4, filter_y, v, src - src_stride * 3, avg_, sse2); -void vp9_convolve8_avg_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - if (x_step_q4 == 16 && filter_x[3] != 128) { - while (w >= 16) { - vp9_filter_block1d16_h8_avg_sse2(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 16; - dst += 16; - w -= 16; - } - while (w >= 8) { - vp9_filter_block1d8_h8_avg_sse2(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 8; - dst += 8; - w -= 8; - } - while (w >= 4) { - vp9_filter_block1d4_h8_avg_sse2(src, src_stride, - dst, dst_stride, - h, filter_x); - src += 4; - dst += 4; - w -= 4; - } - } - if (w) { - vp9_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } -} - -void vp9_convolve8_avg_vert_sse2(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - if (y_step_q4 == 16 && filter_y[3] != 128) { - while (w >= 16) { - vp9_filter_block1d16_v8_avg_sse2(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 16; - dst += 16; - w -= 16; - } - while (w >= 8) { - vp9_filter_block1d8_v8_avg_sse2(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 8; - dst += 8; - w -= 8; - } - while (w >= 4) { - vp9_filter_block1d4_v8_avg_sse2(src - src_stride * 3, src_stride, - dst, dst_stride, - h, filter_y); - src += 4; - dst += 4; - w -= 4; - } - } - if (w) { - vp9_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } -} - -void vp9_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 71); - - assert(w <= 64); - assert(h <= 64); - if (x_step_q4 == 16 && y_step_q4 == 16) { - vp9_convolve8_horiz_sse2(src - 3 * src_stride, src_stride, fdata2, 64, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h + 7); - vp9_convolve8_vert_sse2(fdata2 + 3 * 64, 64, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h); - } else { - vp9_convolve8_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h); - } -} - -void vp9_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { - DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 71); - - assert(w <= 64); - assert(h <= 64); - if (x_step_q4 == 16 && y_step_q4 == 16) { - vp9_convolve8_horiz_sse2(src - 3 * src_stride, src_stride, fdata2, 64, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h + 7); - vp9_convolve8_avg_vert_sse2(fdata2 + 3 * 64, 64, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); - } else { - vp9_convolve8_avg_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, w, h); - } -} +// void vp9_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +// void vp9_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride, +// uint8_t *dst, ptrdiff_t 
dst_stride, +// const int16_t *filter_x, int x_step_q4, +// const int16_t *filter_y, int y_step_q4, +// int w, int h); +FUN_CONV_2D(, sse2); +FUN_CONV_2D(avg_ , sse2); #endif diff --git a/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c b/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c index 2a33844..13a5b5a 100644 --- a/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c +++ b/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c @@ -174,15 +174,13 @@ void vp9_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) { static INLINE void transpose_4x4(__m128i *res) { const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]); - const __m128i tr0_1 = _mm_unpacklo_epi16(res[2], res[3]); - res[0] = _mm_unpacklo_epi32(tr0_0, tr0_1); - res[2] = _mm_unpackhi_epi32(tr0_0, tr0_1); + const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]); - res[1] = _mm_unpackhi_epi64(res[0], res[0]); - res[3] = _mm_unpackhi_epi64(res[2], res[2]); + res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1); + res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1); } -static void idct4_1d_sse2(__m128i *in) { +static void idct4_sse2(__m128i *in) { const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64); const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); @@ -192,8 +190,8 @@ static void idct4_1d_sse2(__m128i *in) { transpose_4x4(in); // stage 1 - u[0] = _mm_unpacklo_epi16(in[0], in[2]); - u[1] = _mm_unpacklo_epi16(in[1], in[3]); + u[0] = _mm_unpacklo_epi16(in[0], in[1]); + u[1] = _mm_unpackhi_epi16(in[0], in[1]); v[0] = _mm_madd_epi16(u[0], k__cospi_p16_p16); v[1] = _mm_madd_epi16(u[0], k__cospi_p16_m16); v[2] = _mm_madd_epi16(u[1], k__cospi_p24_m08); @@ -209,19 +207,16 @@ static void idct4_1d_sse2(__m128i *in) { v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - u[0] = _mm_packs_epi32(v[0], v[2]); - u[1] = _mm_packs_epi32(v[1], v[3]); - u[2] = _mm_unpackhi_epi64(u[0], u[0]); - u[3] = _mm_unpackhi_epi64(u[1], u[1]); + u[0] = _mm_packs_epi32(v[0], v[1]); + u[1] = _mm_packs_epi32(v[3], v[2]); // stage 2 - in[0] = _mm_add_epi16(u[0], u[3]); - in[1] = _mm_add_epi16(u[1], u[2]); - in[2] = _mm_sub_epi16(u[1], u[2]); - in[3] = _mm_sub_epi16(u[0], u[3]); + in[0] = _mm_add_epi16(u[0], u[1]); + in[1] = _mm_sub_epi16(u[0], u[1]); + in[1] = _mm_shuffle_epi32(in[1], 0x4E); } -static void iadst4_1d_sse2(__m128i *in) { +static void iadst4_sse2(__m128i *in) { const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9); const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9); const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9); @@ -232,13 +227,14 @@ static void iadst4_1d_sse2(__m128i *in) { __m128i u[8], v[8], in7; transpose_4x4(in); - in7 = _mm_add_epi16(in[0], in[3]); - in7 = _mm_sub_epi16(in7, in[2]); + in7 = _mm_srli_si128(in[1], 8); + in7 = _mm_add_epi16(in7, in[0]); + in7 = _mm_sub_epi16(in7, in[1]); - u[0] = _mm_unpacklo_epi16(in[0], in[2]); - u[1] = _mm_unpacklo_epi16(in[1], in[3]); + u[0] = _mm_unpacklo_epi16(in[0], in[1]); + u[1] = _mm_unpackhi_epi16(in[0], in[1]); u[2] = _mm_unpacklo_epi16(in7, kZero); - u[3] = _mm_unpacklo_epi16(in[1], kZero); + u[3] = _mm_unpackhi_epi16(in[0], kZero); v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p04); // s0 + s3 v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p02); // s2 + s5 @@ -265,39 +261,35 @@ static void iadst4_1d_sse2(__m128i *in) { u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - in[0] = 
_mm_packs_epi32(u[0], u[2]); - in[1] = _mm_packs_epi32(u[1], u[3]); - in[2] = _mm_unpackhi_epi64(in[0], in[0]); - in[3] = _mm_unpackhi_epi64(in[1], in[1]); + in[0] = _mm_packs_epi32(u[0], u[1]); + in[1] = _mm_packs_epi32(u[2], u[3]); } void vp9_iht4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride, int tx_type) { - __m128i in[4]; + __m128i in[2]; const __m128i zero = _mm_setzero_si128(); const __m128i eight = _mm_set1_epi16(8); - in[0] = _mm_loadl_epi64((const __m128i *)input); - in[1] = _mm_loadl_epi64((const __m128i *)(input + 4)); - in[2] = _mm_loadl_epi64((const __m128i *)(input + 8)); - in[3] = _mm_loadl_epi64((const __m128i *)(input + 12)); + in[0]= _mm_loadu_si128((const __m128i *)(input)); + in[1]= _mm_loadu_si128((const __m128i *)(input + 8)); switch (tx_type) { case 0: // DCT_DCT - idct4_1d_sse2(in); - idct4_1d_sse2(in); + idct4_sse2(in); + idct4_sse2(in); break; case 1: // ADST_DCT - idct4_1d_sse2(in); - iadst4_1d_sse2(in); + idct4_sse2(in); + iadst4_sse2(in); break; case 2: // DCT_ADST - iadst4_1d_sse2(in); - idct4_1d_sse2(in); + iadst4_sse2(in); + idct4_sse2(in); break; case 3: // ADST_ADST - iadst4_1d_sse2(in); - iadst4_1d_sse2(in); + iadst4_sse2(in); + iadst4_sse2(in); break; default: assert(0); @@ -307,18 +299,35 @@ void vp9_iht4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride, // Final round and shift in[0] = _mm_add_epi16(in[0], eight); in[1] = _mm_add_epi16(in[1], eight); - in[2] = _mm_add_epi16(in[2], eight); - in[3] = _mm_add_epi16(in[3], eight); in[0] = _mm_srai_epi16(in[0], 4); in[1] = _mm_srai_epi16(in[1], 4); - in[2] = _mm_srai_epi16(in[2], 4); - in[3] = _mm_srai_epi16(in[3], 4); - RECON_AND_STORE4X4(dest, in[0]); - RECON_AND_STORE4X4(dest, in[1]); - RECON_AND_STORE4X4(dest, in[2]); - RECON_AND_STORE4X4(dest, in[3]); + // Reconstruction and Store + { + __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); + __m128i d2 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 2)); + d0 = _mm_unpacklo_epi32(d0, + _mm_cvtsi32_si128(*(const int *) (dest + stride))); + d2 = _mm_unpacklo_epi32(d2, _mm_cvtsi32_si128( + *(const int *) (dest + stride * 3))); + d0 = _mm_unpacklo_epi8(d0, zero); + d2 = _mm_unpacklo_epi8(d2, zero); + d0 = _mm_add_epi16(d0, in[0]); + d2 = _mm_add_epi16(d2, in[1]); + d0 = _mm_packus_epi16(d0, d2); + // store result[0] + *(int *)dest = _mm_cvtsi128_si32(d0); + // store result[1] + d0 = _mm_srli_si128(d0, 4); + *(int *)(dest + stride) = _mm_cvtsi128_si32(d0); + // store result[2] + d0 = _mm_srli_si128(d0, 4); + *(int *)(dest + stride * 2) = _mm_cvtsi128_si32(d0); + // store result[3] + d0 = _mm_srli_si128(d0, 4); + *(int *)(dest + stride * 3) = _mm_cvtsi128_si32(d0); + } } #define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, \ @@ -352,37 +361,40 @@ void vp9_iht4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride, out7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \ } -#define TRANSPOSE_4X8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ - { \ - const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ - const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ - const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \ - const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \ - \ +#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, \ + out0, out1, out2, out3) \ + { \ + const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1); \ + const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0); \ + const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3); \ + const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, 
tmp2); \ + \ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \ - \ + \ out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \ out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \ out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \ out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \ - out4 = out5 = out6 = out7 = zero; \ } -#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1, out2, out3) \ +#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1) \ { \ const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ - const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \ - const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \ \ in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \ in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \ - in2 = _mm_unpacklo_epi32(tr0_2, tr0_3); /* i5 i4 */ \ - in3 = _mm_unpackhi_epi32(tr0_2, tr0_3); /* i7 i6 */ \ + } + +#define TRANSPOSE_8X8_10(in0, in1, in2, in3, out0, out1) \ + { \ + const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ + const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ + out0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ + out1 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ } // Define Macro for multiplying elements by constants and adding them together. @@ -422,7 +434,30 @@ void vp9_iht4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride, res3 = _mm_packs_epi32(tmp6, tmp7); \ } -#define IDCT8_1D \ +#define MULTIPLICATION_AND_ADD_2(lo_0, hi_0, cst0, cst1, res0, res1) \ + { \ + tmp0 = _mm_madd_epi16(lo_0, cst0); \ + tmp1 = _mm_madd_epi16(hi_0, cst0); \ + tmp2 = _mm_madd_epi16(lo_0, cst1); \ + tmp3 = _mm_madd_epi16(hi_0, cst1); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + \ + res0 = _mm_packs_epi32(tmp0, tmp1); \ + res1 = _mm_packs_epi32(tmp2, tmp3); \ + } + +#define IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3, out4, out5, out6, out7) \ + { \ /* Stage1 */ \ { \ const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7); \ @@ -482,14 +517,15 @@ void vp9_iht4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride, } \ \ /* Stage4 */ \ - in0 = _mm_adds_epi16(stp1_0, stp2_7); \ - in1 = _mm_adds_epi16(stp1_1, stp1_6); \ - in2 = _mm_adds_epi16(stp1_2, stp1_5); \ - in3 = _mm_adds_epi16(stp1_3, stp2_4); \ - in4 = _mm_subs_epi16(stp1_3, stp2_4); \ - in5 = _mm_subs_epi16(stp1_2, stp1_5); \ - in6 = _mm_subs_epi16(stp1_1, stp1_6); \ - in7 = _mm_subs_epi16(stp1_0, stp2_7); + out0 = _mm_adds_epi16(stp1_0, stp2_7); \ + out1 = _mm_adds_epi16(stp1_1, stp1_6); \ + out2 = _mm_adds_epi16(stp1_2, stp1_5); \ + out3 = _mm_adds_epi16(stp1_3, stp2_4); \ + out4 = _mm_subs_epi16(stp1_3, stp2_4); \ + out5 = _mm_subs_epi16(stp1_2, stp1_5); \ + out6 = _mm_subs_epi16(stp1_1, stp1_6); \ + out7 = _mm_subs_epi16(stp1_0, stp2_7); \ + } #define RECON_AND_STORE(dest, in_x) \ { \ @@ -533,11 +569,12 @@ void vp9_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) { // 2-D for (i = 0; i < 2; i++) { // 8x8 Transpose is copied from vp9_fdct8x8_sse2() - TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, - in4, in5, 
in6, in7); + TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, + in0, in1, in2, in3, in4, in5, in6, in7); // 4-stage 1D idct8x8 - IDCT8_1D + IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, + in0, in1, in2, in3, in4, in5, in6, in7); } // Final rounding and shift @@ -620,7 +657,24 @@ static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) { res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7); } -static void idct8_1d_sse2(__m128i *in) { +static INLINE void array_transpose_4X8(__m128i *in, __m128i * out) { + const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]); + const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]); + const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]); + const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]); + + const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); + const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); + const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); + const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); + + out[0] = _mm_unpacklo_epi64(tr1_0, tr1_4); + out[1] = _mm_unpackhi_epi64(tr1_0, tr1_4); + out[2] = _mm_unpacklo_epi64(tr1_2, tr1_6); + out[3] = _mm_unpackhi_epi64(tr1_2, tr1_6); +} + +static void idct8_sse2(__m128i *in) { const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64); @@ -636,32 +690,16 @@ static void idct8_1d_sse2(__m128i *in) { __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7; __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - in0 = in[0]; - in1 = in[1]; - in2 = in[2]; - in3 = in[3]; - in4 = in[4]; - in5 = in[5]; - in6 = in[6]; - in7 = in[7]; - // 8x8 Transpose is copied from vp9_fdct8x8_sse2() - TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, - in4, in5, in6, in7); + TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], + in0, in1, in2, in3, in4, in5, in6, in7); // 4-stage 1D idct8x8 - IDCT8_1D - in[0] = in0; - in[1] = in1; - in[2] = in2; - in[3] = in3; - in[4] = in4; - in[5] = in5; - in[6] = in6; - in[7] = in7; + IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, + in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7]); } -static void iadst8_1d_sse2(__m128i *in) { +static void iadst8_sse2(__m128i *in) { const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64); const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64); const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64); @@ -908,20 +946,20 @@ void vp9_iht8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride, switch (tx_type) { case 0: // DCT_DCT - idct8_1d_sse2(in); - idct8_1d_sse2(in); + idct8_sse2(in); + idct8_sse2(in); break; case 1: // ADST_DCT - idct8_1d_sse2(in); - iadst8_1d_sse2(in); + idct8_sse2(in); + iadst8_sse2(in); break; case 2: // DCT_ADST - iadst8_1d_sse2(in); - idct8_1d_sse2(in); + iadst8_sse2(in); + idct8_sse2(in); break; case 3: // ADST_ADST - iadst8_1d_sse2(in); - iadst8_1d_sse2(in); + iadst8_sse2(in); + iadst8_sse2(in); break; default: assert(0); @@ -983,12 +1021,11 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) { in3 = _mm_load_si128((const __m128i *)(input + 8 * 3)); // 8x4 Transpose - TRANSPOSE_8X4(in0, in1, in2, in3, in0, in1, in2, in3) - + TRANSPOSE_8X8_10(in0, in1, in2, in3, in0, in1); // Stage1 { //NOLINT - const __m128i lo_17 = _mm_unpackhi_epi16(in0, in3); - const __m128i lo_35 = _mm_unpackhi_epi16(in1, in2); + const __m128i lo_17 = 
_mm_unpackhi_epi16(in0, zero); + const __m128i lo_35 = _mm_unpackhi_epi16(in1, zero); tmp0 = _mm_madd_epi16(lo_17, stg1_0); tmp2 = _mm_madd_epi16(lo_17, stg1_1); @@ -1004,16 +1041,14 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) { tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); - stp1_4 = _mm_packs_epi32(tmp0, zero); - stp1_7 = _mm_packs_epi32(tmp2, zero); - stp1_5 = _mm_packs_epi32(tmp4, zero); - stp1_6 = _mm_packs_epi32(tmp6, zero); + stp1_4 = _mm_packs_epi32(tmp0, tmp2); + stp1_5 = _mm_packs_epi32(tmp4, tmp6); } // Stage2 { //NOLINT - const __m128i lo_04 = _mm_unpacklo_epi16(in0, in2); - const __m128i lo_26 = _mm_unpacklo_epi16(in1, in3); + const __m128i lo_04 = _mm_unpacklo_epi16(in0, zero); + const __m128i lo_26 = _mm_unpacklo_epi16(in1, zero); tmp0 = _mm_madd_epi16(lo_04, stg2_0); tmp2 = _mm_madd_epi16(lo_04, stg2_1); @@ -1029,24 +1064,26 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) { tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); - stp2_0 = _mm_packs_epi32(tmp0, zero); - stp2_1 = _mm_packs_epi32(tmp2, zero); - stp2_2 = _mm_packs_epi32(tmp4, zero); - stp2_3 = _mm_packs_epi32(tmp6, zero); + stp2_0 = _mm_packs_epi32(tmp0, tmp2); + stp2_2 = _mm_packs_epi32(tmp6, tmp4); - stp2_4 = _mm_adds_epi16(stp1_4, stp1_5); - stp2_5 = _mm_subs_epi16(stp1_4, stp1_5); - stp2_6 = _mm_subs_epi16(stp1_7, stp1_6); - stp2_7 = _mm_adds_epi16(stp1_7, stp1_6); + tmp0 = _mm_adds_epi16(stp1_4, stp1_5); + tmp1 = _mm_subs_epi16(stp1_4, stp1_5); + + stp2_4 = tmp0; + stp2_5 = _mm_unpacklo_epi64(tmp1, zero); + stp2_6 = _mm_unpackhi_epi64(tmp1, zero); } // Stage3 { //NOLINT const __m128i lo_56 = _mm_unpacklo_epi16(stp2_5, stp2_6); - stp1_0 = _mm_adds_epi16(stp2_0, stp2_3); - stp1_1 = _mm_adds_epi16(stp2_1, stp2_2); - stp1_2 = _mm_subs_epi16(stp2_1, stp2_2); - stp1_3 = _mm_subs_epi16(stp2_0, stp2_3); + + tmp4 = _mm_adds_epi16(stp2_0, stp2_2); + tmp6 = _mm_subs_epi16(stp2_0, stp2_2); + + stp1_2 = _mm_unpackhi_epi64(tmp6, tmp4); + stp1_3 = _mm_unpacklo_epi64(tmp6, tmp4); tmp0 = _mm_madd_epi16(lo_56, stg3_0); tmp2 = _mm_madd_epi16(lo_56, stg2_0); // stg3_1 = stg2_0 @@ -1056,27 +1093,19 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) { tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - stp1_5 = _mm_packs_epi32(tmp0, zero); - stp1_6 = _mm_packs_epi32(tmp2, zero); + stp1_5 = _mm_packs_epi32(tmp0, tmp2); } // Stage4 - in0 = _mm_adds_epi16(stp1_0, stp2_7); - in1 = _mm_adds_epi16(stp1_1, stp1_6); - in2 = _mm_adds_epi16(stp1_2, stp1_5); - in3 = _mm_adds_epi16(stp1_3, stp2_4); - in4 = _mm_subs_epi16(stp1_3, stp2_4); - in5 = _mm_subs_epi16(stp1_2, stp1_5); - in6 = _mm_subs_epi16(stp1_1, stp1_6); - in7 = _mm_subs_epi16(stp1_0, stp2_7); - - // Columns. 
4x8 Transpose - TRANSPOSE_4X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, - in4, in5, in6, in7) - - // 1D idct8x8 - IDCT8_1D + tmp0 = _mm_adds_epi16(stp1_3, stp2_4); + tmp1 = _mm_adds_epi16(stp1_2, stp1_5); + tmp2 = _mm_subs_epi16(stp1_3, stp2_4); + tmp3 = _mm_subs_epi16(stp1_2, stp1_5); + TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, in0, in1, in2, in3) + + IDCT8(in0, in1, in2, in3, zero, zero, zero, zero, + in0, in1, in2, in3, in4, in5, in6, in7); // Final rounding and shift in0 = _mm_adds_epi16(in0, final_rounding); in1 = _mm_adds_epi16(in1, final_rounding); @@ -1106,17 +1135,17 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) { RECON_AND_STORE(dest, in7); } -#define IDCT16_1D \ +#define IDCT16 \ /* Stage2 */ \ { \ - const __m128i lo_1_15 = _mm_unpacklo_epi16(in1, in15); \ - const __m128i hi_1_15 = _mm_unpackhi_epi16(in1, in15); \ - const __m128i lo_9_7 = _mm_unpacklo_epi16(in9, in7); \ - const __m128i hi_9_7 = _mm_unpackhi_epi16(in9, in7); \ - const __m128i lo_5_11 = _mm_unpacklo_epi16(in5, in11); \ - const __m128i hi_5_11 = _mm_unpackhi_epi16(in5, in11); \ - const __m128i lo_13_3 = _mm_unpacklo_epi16(in13, in3); \ - const __m128i hi_13_3 = _mm_unpackhi_epi16(in13, in3); \ + const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], in[15]); \ + const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], in[15]); \ + const __m128i lo_9_7 = _mm_unpacklo_epi16(in[9], in[7]); \ + const __m128i hi_9_7 = _mm_unpackhi_epi16(in[9], in[7]); \ + const __m128i lo_5_11 = _mm_unpacklo_epi16(in[5], in[11]); \ + const __m128i hi_5_11 = _mm_unpackhi_epi16(in[5], in[11]); \ + const __m128i lo_13_3 = _mm_unpacklo_epi16(in[13], in[3]); \ + const __m128i hi_13_3 = _mm_unpackhi_epi16(in[13], in[3]); \ \ MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, \ stg2_0, stg2_1, stg2_2, stg2_3, \ @@ -1129,10 +1158,10 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) { \ /* Stage3 */ \ { \ - const __m128i lo_2_14 = _mm_unpacklo_epi16(in2, in14); \ - const __m128i hi_2_14 = _mm_unpackhi_epi16(in2, in14); \ - const __m128i lo_10_6 = _mm_unpacklo_epi16(in10, in6); \ - const __m128i hi_10_6 = _mm_unpackhi_epi16(in10, in6); \ + const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], in[14]); \ + const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], in[14]); \ + const __m128i lo_10_6 = _mm_unpacklo_epi16(in[10], in[6]); \ + const __m128i hi_10_6 = _mm_unpackhi_epi16(in[10], in[6]); \ \ MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, \ stg3_0, stg3_1, stg3_2, stg3_3, \ @@ -1151,10 +1180,10 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) { \ /* Stage4 */ \ { \ - const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8); \ - const __m128i hi_0_8 = _mm_unpackhi_epi16(in0, in8); \ - const __m128i lo_4_12 = _mm_unpacklo_epi16(in4, in12); \ - const __m128i hi_4_12 = _mm_unpackhi_epi16(in4, in12); \ + const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]); \ + const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], in[8]); \ + const __m128i lo_4_12 = _mm_unpacklo_epi16(in[4], in[12]); \ + const __m128i hi_4_12 = _mm_unpackhi_epi16(in[4], in[12]); \ \ const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ @@ -1235,6 +1264,114 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) { stp2_10, stp2_13, stp2_11, stp2_12) \ } +#define IDCT16_10 \ + /* Stage2 */ \ + { \ + const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], zero); \ + const __m128i hi_1_15 = 
_mm_unpackhi_epi16(in[1], zero); \ + const __m128i lo_13_3 = _mm_unpacklo_epi16(zero, in[3]); \ + const __m128i hi_13_3 = _mm_unpackhi_epi16(zero, in[3]); \ + \ + MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_13_3, hi_13_3, \ + stg2_0, stg2_1, stg2_6, stg2_7, \ + stp1_8_0, stp1_15, stp1_11, stp1_12_0) \ + } \ + \ + /* Stage3 */ \ + { \ + const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], zero); \ + const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], zero); \ + \ + MULTIPLICATION_AND_ADD_2(lo_2_14, hi_2_14, \ + stg3_0, stg3_1, \ + stp2_4, stp2_7) \ + \ + stp1_9 = stp1_8_0; \ + stp1_10 = stp1_11; \ + \ + stp1_13 = stp1_12_0; \ + stp1_14 = stp1_15; \ + } \ + \ + /* Stage4 */ \ + { \ + const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero); \ + const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], zero); \ + \ + const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ + const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + \ + MULTIPLICATION_AND_ADD_2(lo_0_8, hi_0_8, \ + stg4_0, stg4_1, \ + stp1_0, stp1_1) \ + stp2_5 = stp2_4; \ + stp2_6 = stp2_7; \ + \ + MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \ + stg4_4, stg4_5, stg4_6, stg4_7, \ + stp2_9, stp2_14, stp2_10, stp2_13) \ + } \ + \ + /* Stage5 */ \ + { \ + const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ + const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ + \ + stp1_2 = stp1_1; \ + stp1_3 = stp1_0; \ + \ + tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ + tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ + tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ + tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + \ + stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ + stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ + \ + stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); \ + stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ + stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ + stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \ + \ + stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \ + stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ + stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ + stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \ + } \ + \ + /* Stage6 */ \ + { \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ + const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ + \ + stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \ + stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ + stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ + stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \ + stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \ + stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ + stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ + stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \ + \ + MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ + stg6_0, stg4_0, stg6_0, stg4_0, \ + stp2_10, stp2_13, stp2_11, stp2_12) \ + } + void vp9_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest, int stride) { const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); @@ -1266,16 +1403,7 @@ 
void vp9_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest, const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); - __m128i in0 = zero, in1 = zero, in2 = zero, in3 = zero, in4 = zero, - in5 = zero, in6 = zero, in7 = zero, in8 = zero, in9 = zero, - in10 = zero, in11 = zero, in12 = zero, in13 = zero, - in14 = zero, in15 = zero; - __m128i l0 = zero, l1 = zero, l2 = zero, l3 = zero, l4 = zero, l5 = zero, - l6 = zero, l7 = zero, l8 = zero, l9 = zero, l10 = zero, l11 = zero, - l12 = zero, l13 = zero, l14 = zero, l15 = zero; - __m128i r0 = zero, r1 = zero, r2 = zero, r3 = zero, r4 = zero, r5 = zero, - r6 = zero, r7 = zero, r8 = zero, r9 = zero, r10 = zero, r11 = zero, - r12 = zero, r13 = zero, r14 = zero, r15 = zero; + __m128i in[16], l[16], r[16], *curr1; __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, stp1_8_0, stp1_12_0; @@ -1284,162 +1412,132 @@ void vp9_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest, __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int i; - // We work on a 8x16 block each time, and loop 4 times for 2-D 16x16 idct. - for (i = 0; i < 4; i++) { - // 1-D idct - if (i < 2) { - if (i == 1) input += 128; + curr1 = l; + for (i = 0; i < 2; i++) { + // 1-D idct // Load input data. - in0 = _mm_load_si128((const __m128i *)input); - in8 = _mm_load_si128((const __m128i *)(input + 8 * 1)); - in1 = _mm_load_si128((const __m128i *)(input + 8 * 2)); - in9 = _mm_load_si128((const __m128i *)(input + 8 * 3)); - in2 = _mm_load_si128((const __m128i *)(input + 8 * 4)); - in10 = _mm_load_si128((const __m128i *)(input + 8 * 5)); - in3 = _mm_load_si128((const __m128i *)(input + 8 * 6)); - in11 = _mm_load_si128((const __m128i *)(input + 8 * 7)); - in4 = _mm_load_si128((const __m128i *)(input + 8 * 8)); - in12 = _mm_load_si128((const __m128i *)(input + 8 * 9)); - in5 = _mm_load_si128((const __m128i *)(input + 8 * 10)); - in13 = _mm_load_si128((const __m128i *)(input + 8 * 11)); - in6 = _mm_load_si128((const __m128i *)(input + 8 * 12)); - in14 = _mm_load_si128((const __m128i *)(input + 8 * 13)); - in7 = _mm_load_si128((const __m128i *)(input + 8 * 14)); - in15 = _mm_load_si128((const __m128i *)(input + 8 * 15)); - - TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, - in4, in5, in6, in7); - TRANSPOSE_8X8(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, - in10, in11, in12, in13, in14, in15); - } - - if (i == 2) { - TRANSPOSE_8X8(l0, l1, l2, l3, l4, l5, l6, l7, in0, in1, in2, in3, in4, - in5, in6, in7); - TRANSPOSE_8X8(r0, r1, r2, r3, r4, r5, r6, r7, in8, in9, in10, in11, in12, - in13, in14, in15); - } - - if (i == 3) { - TRANSPOSE_8X8(l8, l9, l10, l11, l12, l13, l14, l15, in0, in1, in2, in3, - in4, in5, in6, in7); - TRANSPOSE_8X8(r8, r9, r10, r11, r12, r13, r14, r15, in8, in9, in10, in11, - in12, in13, in14, in15); - } + in[0] = _mm_load_si128((const __m128i *)input); + in[8] = _mm_load_si128((const __m128i *)(input + 8 * 1)); + in[1] = _mm_load_si128((const __m128i *)(input + 8 * 2)); + in[9] = _mm_load_si128((const __m128i *)(input + 8 * 3)); + in[2] = _mm_load_si128((const __m128i *)(input + 8 * 4)); + in[10] = _mm_load_si128((const __m128i *)(input + 8 * 5)); + in[3] = _mm_load_si128((const __m128i *)(input + 8 * 6)); + in[11] = _mm_load_si128((const __m128i *)(input + 8 * 7)); + in[4] = _mm_load_si128((const __m128i *)(input + 8 * 8)); + in[12] = _mm_load_si128((const __m128i *)(input + 8 * 9)); + in[5] = _mm_load_si128((const __m128i 
*)(input + 8 * 10)); + in[13] = _mm_load_si128((const __m128i *)(input + 8 * 11)); + in[6] = _mm_load_si128((const __m128i *)(input + 8 * 12)); + in[14] = _mm_load_si128((const __m128i *)(input + 8 * 13)); + in[7] = _mm_load_si128((const __m128i *)(input + 8 * 14)); + in[15] = _mm_load_si128((const __m128i *)(input + 8 * 15)); + + array_transpose_8x8(in, in); + array_transpose_8x8(in+8, in+8); + + IDCT16 + + // Stage7 + curr1[0] = _mm_add_epi16(stp2_0, stp1_15); + curr1[1] = _mm_add_epi16(stp2_1, stp1_14); + curr1[2] = _mm_add_epi16(stp2_2, stp2_13); + curr1[3] = _mm_add_epi16(stp2_3, stp2_12); + curr1[4] = _mm_add_epi16(stp2_4, stp2_11); + curr1[5] = _mm_add_epi16(stp2_5, stp2_10); + curr1[6] = _mm_add_epi16(stp2_6, stp1_9); + curr1[7] = _mm_add_epi16(stp2_7, stp1_8); + curr1[8] = _mm_sub_epi16(stp2_7, stp1_8); + curr1[9] = _mm_sub_epi16(stp2_6, stp1_9); + curr1[10] = _mm_sub_epi16(stp2_5, stp2_10); + curr1[11] = _mm_sub_epi16(stp2_4, stp2_11); + curr1[12] = _mm_sub_epi16(stp2_3, stp2_12); + curr1[13] = _mm_sub_epi16(stp2_2, stp2_13); + curr1[14] = _mm_sub_epi16(stp2_1, stp1_14); + curr1[15] = _mm_sub_epi16(stp2_0, stp1_15); + + curr1 = r; + input += 128; + } + for (i = 0; i < 2; i++) { + // 1-D idct + array_transpose_8x8(l+i*8, in); + array_transpose_8x8(r+i*8, in+8); - IDCT16_1D + IDCT16 - // Stage7 - if (i == 0) { - // Left 8x16 - l0 = _mm_add_epi16(stp2_0, stp1_15); - l1 = _mm_add_epi16(stp2_1, stp1_14); - l2 = _mm_add_epi16(stp2_2, stp2_13); - l3 = _mm_add_epi16(stp2_3, stp2_12); - l4 = _mm_add_epi16(stp2_4, stp2_11); - l5 = _mm_add_epi16(stp2_5, stp2_10); - l6 = _mm_add_epi16(stp2_6, stp1_9); - l7 = _mm_add_epi16(stp2_7, stp1_8); - l8 = _mm_sub_epi16(stp2_7, stp1_8); - l9 = _mm_sub_epi16(stp2_6, stp1_9); - l10 = _mm_sub_epi16(stp2_5, stp2_10); - l11 = _mm_sub_epi16(stp2_4, stp2_11); - l12 = _mm_sub_epi16(stp2_3, stp2_12); - l13 = _mm_sub_epi16(stp2_2, stp2_13); - l14 = _mm_sub_epi16(stp2_1, stp1_14); - l15 = _mm_sub_epi16(stp2_0, stp1_15); - } else if (i == 1) { - // Right 8x16 - r0 = _mm_add_epi16(stp2_0, stp1_15); - r1 = _mm_add_epi16(stp2_1, stp1_14); - r2 = _mm_add_epi16(stp2_2, stp2_13); - r3 = _mm_add_epi16(stp2_3, stp2_12); - r4 = _mm_add_epi16(stp2_4, stp2_11); - r5 = _mm_add_epi16(stp2_5, stp2_10); - r6 = _mm_add_epi16(stp2_6, stp1_9); - r7 = _mm_add_epi16(stp2_7, stp1_8); - r8 = _mm_sub_epi16(stp2_7, stp1_8); - r9 = _mm_sub_epi16(stp2_6, stp1_9); - r10 = _mm_sub_epi16(stp2_5, stp2_10); - r11 = _mm_sub_epi16(stp2_4, stp2_11); - r12 = _mm_sub_epi16(stp2_3, stp2_12); - r13 = _mm_sub_epi16(stp2_2, stp2_13); - r14 = _mm_sub_epi16(stp2_1, stp1_14); - r15 = _mm_sub_epi16(stp2_0, stp1_15); - } else { // 2-D - in0 = _mm_add_epi16(stp2_0, stp1_15); - in1 = _mm_add_epi16(stp2_1, stp1_14); - in2 = _mm_add_epi16(stp2_2, stp2_13); - in3 = _mm_add_epi16(stp2_3, stp2_12); - in4 = _mm_add_epi16(stp2_4, stp2_11); - in5 = _mm_add_epi16(stp2_5, stp2_10); - in6 = _mm_add_epi16(stp2_6, stp1_9); - in7 = _mm_add_epi16(stp2_7, stp1_8); - in8 = _mm_sub_epi16(stp2_7, stp1_8); - in9 = _mm_sub_epi16(stp2_6, stp1_9); - in10 = _mm_sub_epi16(stp2_5, stp2_10); - in11 = _mm_sub_epi16(stp2_4, stp2_11); - in12 = _mm_sub_epi16(stp2_3, stp2_12); - in13 = _mm_sub_epi16(stp2_2, stp2_13); - in14 = _mm_sub_epi16(stp2_1, stp1_14); - in15 = _mm_sub_epi16(stp2_0, stp1_15); + in[0] = _mm_add_epi16(stp2_0, stp1_15); + in[1] = _mm_add_epi16(stp2_1, stp1_14); + in[2] = _mm_add_epi16(stp2_2, stp2_13); + in[3] = _mm_add_epi16(stp2_3, stp2_12); + in[4] = _mm_add_epi16(stp2_4, stp2_11); + in[5] = _mm_add_epi16(stp2_5, 
stp2_10); + in[6] = _mm_add_epi16(stp2_6, stp1_9); + in[7] = _mm_add_epi16(stp2_7, stp1_8); + in[8] = _mm_sub_epi16(stp2_7, stp1_8); + in[9] = _mm_sub_epi16(stp2_6, stp1_9); + in[10] = _mm_sub_epi16(stp2_5, stp2_10); + in[11] = _mm_sub_epi16(stp2_4, stp2_11); + in[12] = _mm_sub_epi16(stp2_3, stp2_12); + in[13] = _mm_sub_epi16(stp2_2, stp2_13); + in[14] = _mm_sub_epi16(stp2_1, stp1_14); + in[15] = _mm_sub_epi16(stp2_0, stp1_15); // Final rounding and shift - in0 = _mm_adds_epi16(in0, final_rounding); - in1 = _mm_adds_epi16(in1, final_rounding); - in2 = _mm_adds_epi16(in2, final_rounding); - in3 = _mm_adds_epi16(in3, final_rounding); - in4 = _mm_adds_epi16(in4, final_rounding); - in5 = _mm_adds_epi16(in5, final_rounding); - in6 = _mm_adds_epi16(in6, final_rounding); - in7 = _mm_adds_epi16(in7, final_rounding); - in8 = _mm_adds_epi16(in8, final_rounding); - in9 = _mm_adds_epi16(in9, final_rounding); - in10 = _mm_adds_epi16(in10, final_rounding); - in11 = _mm_adds_epi16(in11, final_rounding); - in12 = _mm_adds_epi16(in12, final_rounding); - in13 = _mm_adds_epi16(in13, final_rounding); - in14 = _mm_adds_epi16(in14, final_rounding); - in15 = _mm_adds_epi16(in15, final_rounding); - - in0 = _mm_srai_epi16(in0, 6); - in1 = _mm_srai_epi16(in1, 6); - in2 = _mm_srai_epi16(in2, 6); - in3 = _mm_srai_epi16(in3, 6); - in4 = _mm_srai_epi16(in4, 6); - in5 = _mm_srai_epi16(in5, 6); - in6 = _mm_srai_epi16(in6, 6); - in7 = _mm_srai_epi16(in7, 6); - in8 = _mm_srai_epi16(in8, 6); - in9 = _mm_srai_epi16(in9, 6); - in10 = _mm_srai_epi16(in10, 6); - in11 = _mm_srai_epi16(in11, 6); - in12 = _mm_srai_epi16(in12, 6); - in13 = _mm_srai_epi16(in13, 6); - in14 = _mm_srai_epi16(in14, 6); - in15 = _mm_srai_epi16(in15, 6); - - RECON_AND_STORE(dest, in0); - RECON_AND_STORE(dest, in1); - RECON_AND_STORE(dest, in2); - RECON_AND_STORE(dest, in3); - RECON_AND_STORE(dest, in4); - RECON_AND_STORE(dest, in5); - RECON_AND_STORE(dest, in6); - RECON_AND_STORE(dest, in7); - RECON_AND_STORE(dest, in8); - RECON_AND_STORE(dest, in9); - RECON_AND_STORE(dest, in10); - RECON_AND_STORE(dest, in11); - RECON_AND_STORE(dest, in12); - RECON_AND_STORE(dest, in13); - RECON_AND_STORE(dest, in14); - RECON_AND_STORE(dest, in15); + in[0] = _mm_adds_epi16(in[0], final_rounding); + in[1] = _mm_adds_epi16(in[1], final_rounding); + in[2] = _mm_adds_epi16(in[2], final_rounding); + in[3] = _mm_adds_epi16(in[3], final_rounding); + in[4] = _mm_adds_epi16(in[4], final_rounding); + in[5] = _mm_adds_epi16(in[5], final_rounding); + in[6] = _mm_adds_epi16(in[6], final_rounding); + in[7] = _mm_adds_epi16(in[7], final_rounding); + in[8] = _mm_adds_epi16(in[8], final_rounding); + in[9] = _mm_adds_epi16(in[9], final_rounding); + in[10] = _mm_adds_epi16(in[10], final_rounding); + in[11] = _mm_adds_epi16(in[11], final_rounding); + in[12] = _mm_adds_epi16(in[12], final_rounding); + in[13] = _mm_adds_epi16(in[13], final_rounding); + in[14] = _mm_adds_epi16(in[14], final_rounding); + in[15] = _mm_adds_epi16(in[15], final_rounding); + + in[0] = _mm_srai_epi16(in[0], 6); + in[1] = _mm_srai_epi16(in[1], 6); + in[2] = _mm_srai_epi16(in[2], 6); + in[3] = _mm_srai_epi16(in[3], 6); + in[4] = _mm_srai_epi16(in[4], 6); + in[5] = _mm_srai_epi16(in[5], 6); + in[6] = _mm_srai_epi16(in[6], 6); + in[7] = _mm_srai_epi16(in[7], 6); + in[8] = _mm_srai_epi16(in[8], 6); + in[9] = _mm_srai_epi16(in[9], 6); + in[10] = _mm_srai_epi16(in[10], 6); + in[11] = _mm_srai_epi16(in[11], 6); + in[12] = _mm_srai_epi16(in[12], 6); + in[13] = _mm_srai_epi16(in[13], 6); + in[14] = _mm_srai_epi16(in[14], 6); 
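/* The saturating adds of final_rounding above and the arithmetic shifts
   right by 6 below together compute ROUND_POWER_OF_TWO(x, 6) per lane,
   after which RECON_AND_STORE adds the prediction back and clips to 8 bits.
   A minimal scalar sketch of one output pixel, assuming final_rounding was
   set to 1 << 5 earlier in this function, and ignoring the lane-wise
   saturation of _mm_adds_epi16/_mm_packus_epi16 (recon_pixel_sketch is a
   hypothetical helper, not part of libvpx): */
static unsigned char recon_pixel_sketch(int16_t residual, unsigned char pred) {
  const int rounded = (residual + (1 << 5)) >> 6;  /* ROUND_POWER_OF_TWO(x, 6) */
  const int pixel = pred + rounded;                /* add back the prediction */
  return (unsigned char)(pixel < 0 ? 0 : (pixel > 255 ? 255 : pixel)); /* clip */
}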
+ in[15] = _mm_srai_epi16(in[15], 6); + + RECON_AND_STORE(dest, in[0]); + RECON_AND_STORE(dest, in[1]); + RECON_AND_STORE(dest, in[2]); + RECON_AND_STORE(dest, in[3]); + RECON_AND_STORE(dest, in[4]); + RECON_AND_STORE(dest, in[5]); + RECON_AND_STORE(dest, in[6]); + RECON_AND_STORE(dest, in[7]); + RECON_AND_STORE(dest, in[8]); + RECON_AND_STORE(dest, in[9]); + RECON_AND_STORE(dest, in[10]); + RECON_AND_STORE(dest, in[11]); + RECON_AND_STORE(dest, in[12]); + RECON_AND_STORE(dest, in[13]); + RECON_AND_STORE(dest, in[14]); + RECON_AND_STORE(dest, in[15]); dest += 8 - (stride * 16); - } } } @@ -1492,7 +1590,7 @@ static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) { res0[15] = tbuf[7]; } -static void iadst16_1d_8col(__m128i *in) { +static void iadst16_8col(__m128i *in) { // perform 16x16 1-D ADST for 8 columns __m128i s[16], x[16], u[32], v[32]; const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64); @@ -1962,7 +2060,7 @@ static void iadst16_1d_8col(__m128i *in) { in[15] = _mm_sub_epi16(kZero, s[1]); } -static void idct16_1d_8col(__m128i *in) { +static void idct16_8col(__m128i *in) { const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64); const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64); const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64); @@ -2306,16 +2404,16 @@ static void idct16_1d_8col(__m128i *in) { in[15] = _mm_sub_epi16(s[0], s[15]); } -static void idct16_1d_sse2(__m128i *in0, __m128i *in1) { +static void idct16_sse2(__m128i *in0, __m128i *in1) { array_transpose_16x16(in0, in1); - idct16_1d_8col(in0); - idct16_1d_8col(in1); + idct16_8col(in0); + idct16_8col(in1); } -static void iadst16_1d_sse2(__m128i *in0, __m128i *in1) { +static void iadst16_sse2(__m128i *in0, __m128i *in1) { array_transpose_16x16(in0, in1); - iadst16_1d_8col(in0); - iadst16_1d_8col(in1); + iadst16_8col(in0); + iadst16_8col(in1); } static INLINE void load_buffer_8x16(const int16_t *input, __m128i *in) { @@ -2404,20 +2502,20 @@ void vp9_iht16x16_256_add_sse2(const int16_t *input, uint8_t *dest, int stride, switch (tx_type) { case 0: // DCT_DCT - idct16_1d_sse2(in0, in1); - idct16_1d_sse2(in0, in1); + idct16_sse2(in0, in1); + idct16_sse2(in0, in1); break; case 1: // ADST_DCT - idct16_1d_sse2(in0, in1); - iadst16_1d_sse2(in0, in1); + idct16_sse2(in0, in1); + iadst16_sse2(in0, in1); break; case 2: // DCT_ADST - iadst16_1d_sse2(in0, in1); - idct16_1d_sse2(in0, in1); + iadst16_sse2(in0, in1); + idct16_sse2(in0, in1); break; case 3: // ADST_ADST - iadst16_1d_sse2(in0, in1); - iadst16_1d_sse2(in0, in1); + iadst16_sse2(in0, in1); + iadst16_sse2(in0, in1); break; default: assert(0); @@ -2437,149 +2535,87 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest, const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64); const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64); - const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64); - const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64); - const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64); - const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64); const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64); const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64); const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64); - const __m128i stg3_3 = 
pair_set_epi16(cospi_20_64, cospi_12_64); const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64); const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64); const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64); const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64); const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64); const __m128i stg4_7 = pair_set_epi16(-cospi_8_64, cospi_24_64); const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); - - __m128i in0 = zero, in1 = zero, in2 = zero, in3 = zero, in4 = zero, - in5 = zero, in6 = zero, in7 = zero, in8 = zero, in9 = zero, - in10 = zero, in11 = zero, in12 = zero, in13 = zero, - in14 = zero, in15 = zero; - __m128i l0 = zero, l1 = zero, l2 = zero, l3 = zero, l4 = zero, l5 = zero, - l6 = zero, l7 = zero, l8 = zero, l9 = zero, l10 = zero, l11 = zero, - l12 = zero, l13 = zero, l14 = zero, l15 = zero; - - __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, + __m128i in[16], l[16]; + __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, stp1_8_0, stp1_12_0; __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7, - stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15; + stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14; __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int i; - // 1-D idct. Load input data. - in0 = _mm_load_si128((const __m128i *)input); - in8 = _mm_load_si128((const __m128i *)(input + 8 * 1)); - in1 = _mm_load_si128((const __m128i *)(input + 8 * 2)); - in9 = _mm_load_si128((const __m128i *)(input + 8 * 3)); - in2 = _mm_load_si128((const __m128i *)(input + 8 * 4)); - in10 = _mm_load_si128((const __m128i *)(input + 8 * 5)); - in3 = _mm_load_si128((const __m128i *)(input + 8 * 6)); - in11 = _mm_load_si128((const __m128i *)(input + 8 * 7)); + // First 1-D inverse DCT + // Load input data. 
+ in[0] = _mm_load_si128((const __m128i *)input); + in[1] = _mm_load_si128((const __m128i *)(input + 8 * 2)); + in[2] = _mm_load_si128((const __m128i *)(input + 8 * 4)); + in[3] = _mm_load_si128((const __m128i *)(input + 8 * 6)); - TRANSPOSE_8X4(in0, in1, in2, in3, in0, in1, in2, in3); - TRANSPOSE_8X4(in8, in9, in10, in11, in8, in9, in10, in11); + TRANSPOSE_8X4(in[0], in[1], in[2], in[3], in[0], in[1]); // Stage2 { - const __m128i lo_1_15 = _mm_unpackhi_epi16(in0, in11); - const __m128i lo_9_7 = _mm_unpackhi_epi16(in8, in3); - const __m128i lo_5_11 = _mm_unpackhi_epi16(in2, in9); - const __m128i lo_13_3 = _mm_unpackhi_epi16(in10, in1); + const __m128i lo_1_15 = _mm_unpackhi_epi16(in[0], zero); + const __m128i lo_13_3 = _mm_unpackhi_epi16(zero, in[1]); tmp0 = _mm_madd_epi16(lo_1_15, stg2_0); tmp2 = _mm_madd_epi16(lo_1_15, stg2_1); - tmp4 = _mm_madd_epi16(lo_9_7, stg2_2); - tmp6 = _mm_madd_epi16(lo_9_7, stg2_3); - tmp1 = _mm_madd_epi16(lo_5_11, stg2_4); - tmp3 = _mm_madd_epi16(lo_5_11, stg2_5); tmp5 = _mm_madd_epi16(lo_13_3, stg2_6); tmp7 = _mm_madd_epi16(lo_13_3, stg2_7); tmp0 = _mm_add_epi32(tmp0, rounding); tmp2 = _mm_add_epi32(tmp2, rounding); - tmp4 = _mm_add_epi32(tmp4, rounding); - tmp6 = _mm_add_epi32(tmp6, rounding); - tmp1 = _mm_add_epi32(tmp1, rounding); - tmp3 = _mm_add_epi32(tmp3, rounding); tmp5 = _mm_add_epi32(tmp5, rounding); tmp7 = _mm_add_epi32(tmp7, rounding); tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); - tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); - stp2_8 = _mm_packs_epi32(tmp0, zero); - stp2_15 = _mm_packs_epi32(tmp2, zero); - stp2_9 = _mm_packs_epi32(tmp4, zero); - stp2_14 = _mm_packs_epi32(tmp6, zero); - - stp2_10 = _mm_packs_epi32(tmp1, zero); - stp2_13 = _mm_packs_epi32(tmp3, zero); - stp2_11 = _mm_packs_epi32(tmp5, zero); - stp2_12 = _mm_packs_epi32(tmp7, zero); + stp2_8 = _mm_packs_epi32(tmp0, tmp2); + stp2_11 = _mm_packs_epi32(tmp5, tmp7); } // Stage3 { - const __m128i lo_2_14 = _mm_unpacklo_epi16(in1, in11); - const __m128i lo_10_6 = _mm_unpacklo_epi16(in9, in3); + const __m128i lo_2_14 = _mm_unpacklo_epi16(in[1], zero); tmp0 = _mm_madd_epi16(lo_2_14, stg3_0); tmp2 = _mm_madd_epi16(lo_2_14, stg3_1); - tmp4 = _mm_madd_epi16(lo_10_6, stg3_2); - tmp6 = _mm_madd_epi16(lo_10_6, stg3_3); tmp0 = _mm_add_epi32(tmp0, rounding); tmp2 = _mm_add_epi32(tmp2, rounding); - tmp4 = _mm_add_epi32(tmp4, rounding); - tmp6 = _mm_add_epi32(tmp6, rounding); - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); - tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); - - stp1_4 = _mm_packs_epi32(tmp0, zero); - stp1_7 = _mm_packs_epi32(tmp2, zero); - stp1_5 = _mm_packs_epi32(tmp4, zero); - stp1_6 = _mm_packs_epi32(tmp6, zero); - stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9); - stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); - stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); - stp1_11 = _mm_add_epi16(stp2_11, stp2_10); + stp1_13 = _mm_unpackhi_epi64(stp2_11, zero); + stp1_14 = _mm_unpackhi_epi64(stp2_8, zero); - stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13); - stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); - stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); - stp1_15 = _mm_add_epi16(stp2_15, stp2_14); + stp1_4 = _mm_packs_epi32(tmp0, tmp2); } // Stage4 { - const __m128i 
lo_0_8 = _mm_unpacklo_epi16(in0, in8); - const __m128i lo_4_12 = _mm_unpacklo_epi16(in2, in10); - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); + const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero); + const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp1_14); + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp1_13); tmp0 = _mm_madd_epi16(lo_0_8, stg4_0); tmp2 = _mm_madd_epi16(lo_0_8, stg4_1); - tmp4 = _mm_madd_epi16(lo_4_12, stg4_2); - tmp6 = _mm_madd_epi16(lo_4_12, stg4_3); tmp1 = _mm_madd_epi16(lo_9_14, stg4_4); tmp3 = _mm_madd_epi16(lo_9_14, stg4_5); tmp5 = _mm_madd_epi16(lo_10_13, stg4_6); @@ -2587,8 +2623,6 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest, tmp0 = _mm_add_epi32(tmp0, rounding); tmp2 = _mm_add_epi32(tmp2, rounding); - tmp4 = _mm_add_epi32(tmp4, rounding); - tmp6 = _mm_add_epi32(tmp6, rounding); tmp1 = _mm_add_epi32(tmp1, rounding); tmp3 = _mm_add_epi32(tmp3, rounding); tmp5 = _mm_add_epi32(tmp5, rounding); @@ -2596,49 +2630,40 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest, tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); - tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); - stp2_0 = _mm_packs_epi32(tmp0, zero); - stp2_1 = _mm_packs_epi32(tmp2, zero); - stp2_2 = _mm_packs_epi32(tmp4, zero); - stp2_3 = _mm_packs_epi32(tmp6, zero); - stp2_9 = _mm_packs_epi32(tmp1, zero); - stp2_14 = _mm_packs_epi32(tmp3, zero); - stp2_10 = _mm_packs_epi32(tmp5, zero); - stp2_13 = _mm_packs_epi32(tmp7, zero); - - stp2_4 = _mm_add_epi16(stp1_4, stp1_5); - stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); - stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); - stp2_7 = _mm_add_epi16(stp1_7, stp1_6); + stp1_0 = _mm_packs_epi32(tmp0, tmp0); + stp1_1 = _mm_packs_epi32(tmp2, tmp2); + stp2_9 = _mm_packs_epi32(tmp1, tmp3); + stp2_10 = _mm_packs_epi32(tmp5, tmp7); + + stp2_6 = _mm_unpackhi_epi64(stp1_4, zero); } // Stage5 and Stage6 { - stp1_0 = _mm_add_epi16(stp2_0, stp2_3); - stp1_1 = _mm_add_epi16(stp2_1, stp2_2); - stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); - stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); - - stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); - stp1_9 = _mm_add_epi16(stp2_9, stp2_10); - stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); - stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); - - stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); - stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); - stp1_14 = _mm_add_epi16(stp2_14, stp2_13); - stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); + tmp0 = _mm_add_epi16(stp2_8, stp2_11); + tmp1 = _mm_sub_epi16(stp2_8, stp2_11); + tmp2 = _mm_add_epi16(stp2_9, stp2_10); + tmp3 = _mm_sub_epi16(stp2_9, stp2_10); + + stp1_9 = _mm_unpacklo_epi64(tmp2, zero); + stp1_10 = _mm_unpacklo_epi64(tmp3, zero); + stp1_8 = _mm_unpacklo_epi64(tmp0, zero); + stp1_11 = _mm_unpacklo_epi64(tmp1, zero); + + stp1_13 = _mm_unpackhi_epi64(tmp3, zero); + stp1_14 = _mm_unpackhi_epi64(tmp2, zero); + stp1_12 = _mm_unpackhi_epi64(tmp1, zero); + stp1_15 = _mm_unpackhi_epi64(tmp0, zero); } // Stage6 { - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); + const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp1_4); const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); @@ 
-2663,124 +2688,121 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest, tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); - stp1_5 = _mm_packs_epi32(tmp1, zero); - stp1_6 = _mm_packs_epi32(tmp3, zero); + stp1_6 = _mm_packs_epi32(tmp3, tmp1); + stp2_10 = _mm_packs_epi32(tmp0, zero); stp2_13 = _mm_packs_epi32(tmp2, zero); stp2_11 = _mm_packs_epi32(tmp4, zero); stp2_12 = _mm_packs_epi32(tmp6, zero); - stp2_0 = _mm_add_epi16(stp1_0, stp2_7); - stp2_1 = _mm_add_epi16(stp1_1, stp1_6); - stp2_2 = _mm_add_epi16(stp1_2, stp1_5); - stp2_3 = _mm_add_epi16(stp1_3, stp2_4); - stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); - stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); - stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); - stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); + tmp0 = _mm_add_epi16(stp1_0, stp1_4); + tmp1 = _mm_sub_epi16(stp1_0, stp1_4); + tmp2 = _mm_add_epi16(stp1_1, stp1_6); + tmp3 = _mm_sub_epi16(stp1_1, stp1_6); + + stp2_0 = _mm_unpackhi_epi64(tmp0, zero); + stp2_1 = _mm_unpacklo_epi64(tmp2, zero); + stp2_2 = _mm_unpackhi_epi64(tmp2, zero); + stp2_3 = _mm_unpacklo_epi64(tmp0, zero); + stp2_4 = _mm_unpacklo_epi64(tmp1, zero); + stp2_5 = _mm_unpackhi_epi64(tmp3, zero); + stp2_6 = _mm_unpacklo_epi64(tmp3, zero); + stp2_7 = _mm_unpackhi_epi64(tmp1, zero); } // Stage7. Left 8x16 only. - l0 = _mm_add_epi16(stp2_0, stp1_15); - l1 = _mm_add_epi16(stp2_1, stp1_14); - l2 = _mm_add_epi16(stp2_2, stp2_13); - l3 = _mm_add_epi16(stp2_3, stp2_12); - l4 = _mm_add_epi16(stp2_4, stp2_11); - l5 = _mm_add_epi16(stp2_5, stp2_10); - l6 = _mm_add_epi16(stp2_6, stp1_9); - l7 = _mm_add_epi16(stp2_7, stp1_8); - l8 = _mm_sub_epi16(stp2_7, stp1_8); - l9 = _mm_sub_epi16(stp2_6, stp1_9); - l10 = _mm_sub_epi16(stp2_5, stp2_10); - l11 = _mm_sub_epi16(stp2_4, stp2_11); - l12 = _mm_sub_epi16(stp2_3, stp2_12); - l13 = _mm_sub_epi16(stp2_2, stp2_13); - l14 = _mm_sub_epi16(stp2_1, stp1_14); - l15 = _mm_sub_epi16(stp2_0, stp1_15); - - // 2-D idct. We do 2 8x16 blocks. 
+ l[0] = _mm_add_epi16(stp2_0, stp1_15); + l[1] = _mm_add_epi16(stp2_1, stp1_14); + l[2] = _mm_add_epi16(stp2_2, stp2_13); + l[3] = _mm_add_epi16(stp2_3, stp2_12); + l[4] = _mm_add_epi16(stp2_4, stp2_11); + l[5] = _mm_add_epi16(stp2_5, stp2_10); + l[6] = _mm_add_epi16(stp2_6, stp1_9); + l[7] = _mm_add_epi16(stp2_7, stp1_8); + l[8] = _mm_sub_epi16(stp2_7, stp1_8); + l[9] = _mm_sub_epi16(stp2_6, stp1_9); + l[10] = _mm_sub_epi16(stp2_5, stp2_10); + l[11] = _mm_sub_epi16(stp2_4, stp2_11); + l[12] = _mm_sub_epi16(stp2_3, stp2_12); + l[13] = _mm_sub_epi16(stp2_2, stp2_13); + l[14] = _mm_sub_epi16(stp2_1, stp1_14); + l[15] = _mm_sub_epi16(stp2_0, stp1_15); + + // Second 1-D inverse transform, performed per 8x16 block for (i = 0; i < 2; i++) { - if (i == 0) - TRANSPOSE_4X8(l0, l1, l2, l3, l4, l5, l6, l7, in0, in1, in2, in3, in4, - in5, in6, in7); - - if (i == 1) - TRANSPOSE_4X8(l8, l9, l10, l11, l12, l13, l14, l15, in0, in1, in2, in3, - in4, in5, in6, in7); - - in8 = in9 = in10 = in11 = in12 = in13 = in14 = in15 = zero; + array_transpose_4X8(l + 8*i, in); - IDCT16_1D + IDCT16_10 // Stage7 - in0 = _mm_add_epi16(stp2_0, stp1_15); - in1 = _mm_add_epi16(stp2_1, stp1_14); - in2 = _mm_add_epi16(stp2_2, stp2_13); - in3 = _mm_add_epi16(stp2_3, stp2_12); - in4 = _mm_add_epi16(stp2_4, stp2_11); - in5 = _mm_add_epi16(stp2_5, stp2_10); - in6 = _mm_add_epi16(stp2_6, stp1_9); - in7 = _mm_add_epi16(stp2_7, stp1_8); - in8 = _mm_sub_epi16(stp2_7, stp1_8); - in9 = _mm_sub_epi16(stp2_6, stp1_9); - in10 = _mm_sub_epi16(stp2_5, stp2_10); - in11 = _mm_sub_epi16(stp2_4, stp2_11); - in12 = _mm_sub_epi16(stp2_3, stp2_12); - in13 = _mm_sub_epi16(stp2_2, stp2_13); - in14 = _mm_sub_epi16(stp2_1, stp1_14); - in15 = _mm_sub_epi16(stp2_0, stp1_15); + in[0] = _mm_add_epi16(stp2_0, stp1_15); + in[1] = _mm_add_epi16(stp2_1, stp1_14); + in[2] = _mm_add_epi16(stp2_2, stp2_13); + in[3] = _mm_add_epi16(stp2_3, stp2_12); + in[4] = _mm_add_epi16(stp2_4, stp2_11); + in[5] = _mm_add_epi16(stp2_5, stp2_10); + in[6] = _mm_add_epi16(stp2_6, stp1_9); + in[7] = _mm_add_epi16(stp2_7, stp1_8); + in[8] = _mm_sub_epi16(stp2_7, stp1_8); + in[9] = _mm_sub_epi16(stp2_6, stp1_9); + in[10] = _mm_sub_epi16(stp2_5, stp2_10); + in[11] = _mm_sub_epi16(stp2_4, stp2_11); + in[12] = _mm_sub_epi16(stp2_3, stp2_12); + in[13] = _mm_sub_epi16(stp2_2, stp2_13); + in[14] = _mm_sub_epi16(stp2_1, stp1_14); + in[15] = _mm_sub_epi16(stp2_0, stp1_15); // Final rounding and shift - in0 = _mm_adds_epi16(in0, final_rounding); - in1 = _mm_adds_epi16(in1, final_rounding); - in2 = _mm_adds_epi16(in2, final_rounding); - in3 = _mm_adds_epi16(in3, final_rounding); - in4 = _mm_adds_epi16(in4, final_rounding); - in5 = _mm_adds_epi16(in5, final_rounding); - in6 = _mm_adds_epi16(in6, final_rounding); - in7 = _mm_adds_epi16(in7, final_rounding); - in8 = _mm_adds_epi16(in8, final_rounding); - in9 = _mm_adds_epi16(in9, final_rounding); - in10 = _mm_adds_epi16(in10, final_rounding); - in11 = _mm_adds_epi16(in11, final_rounding); - in12 = _mm_adds_epi16(in12, final_rounding); - in13 = _mm_adds_epi16(in13, final_rounding); - in14 = _mm_adds_epi16(in14, final_rounding); - in15 = _mm_adds_epi16(in15, final_rounding); - - in0 = _mm_srai_epi16(in0, 6); - in1 = _mm_srai_epi16(in1, 6); - in2 = _mm_srai_epi16(in2, 6); - in3 = _mm_srai_epi16(in3, 6); - in4 = _mm_srai_epi16(in4, 6); - in5 = _mm_srai_epi16(in5, 6); - in6 = _mm_srai_epi16(in6, 6); - in7 = _mm_srai_epi16(in7, 6); - in8 = _mm_srai_epi16(in8, 6); - in9 = _mm_srai_epi16(in9, 6); - in10 = _mm_srai_epi16(in10, 6); - in11 = 
_mm_srai_epi16(in11, 6); - in12 = _mm_srai_epi16(in12, 6); - in13 = _mm_srai_epi16(in13, 6); - in14 = _mm_srai_epi16(in14, 6); - in15 = _mm_srai_epi16(in15, 6); - - RECON_AND_STORE(dest, in0); - RECON_AND_STORE(dest, in1); - RECON_AND_STORE(dest, in2); - RECON_AND_STORE(dest, in3); - RECON_AND_STORE(dest, in4); - RECON_AND_STORE(dest, in5); - RECON_AND_STORE(dest, in6); - RECON_AND_STORE(dest, in7); - RECON_AND_STORE(dest, in8); - RECON_AND_STORE(dest, in9); - RECON_AND_STORE(dest, in10); - RECON_AND_STORE(dest, in11); - RECON_AND_STORE(dest, in12); - RECON_AND_STORE(dest, in13); - RECON_AND_STORE(dest, in14); - RECON_AND_STORE(dest, in15); + in[0] = _mm_adds_epi16(in[0], final_rounding); + in[1] = _mm_adds_epi16(in[1], final_rounding); + in[2] = _mm_adds_epi16(in[2], final_rounding); + in[3] = _mm_adds_epi16(in[3], final_rounding); + in[4] = _mm_adds_epi16(in[4], final_rounding); + in[5] = _mm_adds_epi16(in[5], final_rounding); + in[6] = _mm_adds_epi16(in[6], final_rounding); + in[7] = _mm_adds_epi16(in[7], final_rounding); + in[8] = _mm_adds_epi16(in[8], final_rounding); + in[9] = _mm_adds_epi16(in[9], final_rounding); + in[10] = _mm_adds_epi16(in[10], final_rounding); + in[11] = _mm_adds_epi16(in[11], final_rounding); + in[12] = _mm_adds_epi16(in[12], final_rounding); + in[13] = _mm_adds_epi16(in[13], final_rounding); + in[14] = _mm_adds_epi16(in[14], final_rounding); + in[15] = _mm_adds_epi16(in[15], final_rounding); + + in[0] = _mm_srai_epi16(in[0], 6); + in[1] = _mm_srai_epi16(in[1], 6); + in[2] = _mm_srai_epi16(in[2], 6); + in[3] = _mm_srai_epi16(in[3], 6); + in[4] = _mm_srai_epi16(in[4], 6); + in[5] = _mm_srai_epi16(in[5], 6); + in[6] = _mm_srai_epi16(in[6], 6); + in[7] = _mm_srai_epi16(in[7], 6); + in[8] = _mm_srai_epi16(in[8], 6); + in[9] = _mm_srai_epi16(in[9], 6); + in[10] = _mm_srai_epi16(in[10], 6); + in[11] = _mm_srai_epi16(in[11], 6); + in[12] = _mm_srai_epi16(in[12], 6); + in[13] = _mm_srai_epi16(in[13], 6); + in[14] = _mm_srai_epi16(in[14], 6); + in[15] = _mm_srai_epi16(in[15], 6); + + RECON_AND_STORE(dest, in[0]); + RECON_AND_STORE(dest, in[1]); + RECON_AND_STORE(dest, in[2]); + RECON_AND_STORE(dest, in[3]); + RECON_AND_STORE(dest, in[4]); + RECON_AND_STORE(dest, in[5]); + RECON_AND_STORE(dest, in[6]); + RECON_AND_STORE(dest, in[7]); + RECON_AND_STORE(dest, in[8]); + RECON_AND_STORE(dest, in[9]); + RECON_AND_STORE(dest, in[10]); + RECON_AND_STORE(dest, in[11]); + RECON_AND_STORE(dest, in[12]); + RECON_AND_STORE(dest, in[13]); + RECON_AND_STORE(dest, in[14]); + RECON_AND_STORE(dest, in[15]); dest += 8 - (stride * 16); } @@ -2792,28 +2814,329 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest, input += 8; \ } \ -#define IDCT32_1D \ +#define IDCT32_34 \ +/* Stage1 */ \ +{ \ + const __m128i zero = _mm_setzero_si128();\ + const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], zero); \ + const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], zero); \ + \ + const __m128i lo_25_7= _mm_unpacklo_epi16(zero, in[7]); \ + const __m128i hi_25_7 = _mm_unpackhi_epi16(zero, in[7]); \ + \ + const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], zero); \ + const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], zero); \ + \ + const __m128i lo_29_3 = _mm_unpacklo_epi16(zero, in[3]); \ + const __m128i hi_29_3 = _mm_unpackhi_epi16(zero, in[3]); \ + \ + MULTIPLICATION_AND_ADD_2(lo_1_31, hi_1_31, stg1_0, \ + stg1_1, stp1_16, stp1_31); \ + MULTIPLICATION_AND_ADD_2(lo_25_7, hi_25_7, stg1_6, \ + stg1_7, stp1_19, stp1_28); \ + MULTIPLICATION_AND_ADD_2(lo_5_27, hi_5_27, stg1_8, \ + stg1_9, stp1_20, 
stp1_27); \ + MULTIPLICATION_AND_ADD_2(lo_29_3, hi_29_3, stg1_14, \ + stg1_15, stp1_23, stp1_24); \ +} \ +\ +/* Stage2 */ \ +{ \ + const __m128i zero = _mm_setzero_si128();\ + const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], zero); \ + const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], zero); \ + \ + const __m128i lo_26_6 = _mm_unpacklo_epi16(zero, in[6]); \ + const __m128i hi_26_6 = _mm_unpackhi_epi16(zero, in[6]); \ + \ + MULTIPLICATION_AND_ADD_2(lo_2_30, hi_2_30, stg2_0, \ + stg2_1, stp2_8, stp2_15); \ + MULTIPLICATION_AND_ADD_2(lo_26_6, hi_26_6, stg2_6, \ + stg2_7, stp2_11, stp2_12); \ + \ + stp2_16 = stp1_16; \ + stp2_19 = stp1_19; \ + \ + stp2_20 = stp1_20; \ + stp2_23 = stp1_23; \ + \ + stp2_24 = stp1_24; \ + stp2_27 = stp1_27; \ + \ + stp2_28 = stp1_28; \ + stp2_31 = stp1_31; \ +} \ +\ +/* Stage3 */ \ +{ \ + const __m128i zero = _mm_setzero_si128();\ + const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], zero); \ + const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], zero); \ + \ + const __m128i lo_17_30 = _mm_unpacklo_epi16(stp1_16, stp1_31); \ + const __m128i hi_17_30 = _mm_unpackhi_epi16(stp1_16, stp1_31); \ + const __m128i lo_18_29 = _mm_unpacklo_epi16(stp1_19, stp1_28); \ + const __m128i hi_18_29 = _mm_unpackhi_epi16(stp1_19, stp1_28); \ + \ + const __m128i lo_21_26 = _mm_unpacklo_epi16(stp1_20, stp1_27); \ + const __m128i hi_21_26 = _mm_unpackhi_epi16(stp1_20, stp1_27); \ + const __m128i lo_22_25 = _mm_unpacklo_epi16(stp1_23, stp1_24); \ + const __m128i hi_22_25 = _mm_unpackhi_epi16(stp1_23, stp2_24); \ + \ + MULTIPLICATION_AND_ADD_2(lo_4_28, hi_4_28, stg3_0, \ + stg3_1, stp1_4, stp1_7); \ + \ + stp1_8 = stp2_8; \ + stp1_11 = stp2_11; \ + stp1_12 = stp2_12; \ + stp1_15 = stp2_15; \ + \ + MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \ + stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, \ + stp1_18, stp1_29) \ + MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \ + stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, \ + stp1_22, stp1_25) \ + \ + stp1_16 = stp2_16; \ + stp1_31 = stp2_31; \ + stp1_19 = stp2_19; \ + stp1_20 = stp2_20; \ + stp1_23 = stp2_23; \ + stp1_24 = stp2_24; \ + stp1_27 = stp2_27; \ + stp1_28 = stp2_28; \ +} \ +\ +/* Stage4 */ \ +{ \ + const __m128i zero = _mm_setzero_si128();\ + const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], zero); \ + const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], zero); \ + \ + const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp2_15); \ + const __m128i hi_9_14 = _mm_unpackhi_epi16(stp2_8, stp2_15); \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp2_12); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp2_11, stp2_12); \ + \ + MULTIPLICATION_AND_ADD_2(lo_0_16, hi_0_16, stg4_0, \ + stg4_1, stp2_0, stp2_1); \ + \ + stp2_4 = stp1_4; \ + stp2_5 = stp1_4; \ + stp2_6 = stp1_7; \ + stp2_7 = stp1_7; \ + \ + MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \ + stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, \ + stp2_10, stp2_13) \ + \ + stp2_8 = stp1_8; \ + stp2_15 = stp1_15; \ + stp2_11 = stp1_11; \ + stp2_12 = stp1_12; \ + \ + stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \ + stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \ + stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \ + stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \ + stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \ + stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \ + stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \ + stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \ + \ + stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \ + stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \ 
+ stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \ + stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \ + stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \ + stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \ + stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \ + stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \ +} \ +\ +/* Stage5 */ \ +{ \ + const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ + const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ + const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \ + const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ + \ + const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \ + const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \ + const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ + const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ + \ + const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ + const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ + \ + stp1_0 = stp2_0; \ + stp1_1 = stp2_1; \ + stp1_2 = stp2_1; \ + stp1_3 = stp2_0; \ + \ + tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ + tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ + tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ + tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + \ + stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ + stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ + \ + stp1_4 = stp2_4; \ + stp1_7 = stp2_7; \ + \ + stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \ + stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ + stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ + stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \ + stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \ + stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ + stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ + stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \ + \ + stp1_16 = stp2_16; \ + stp1_17 = stp2_17; \ + \ + MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \ + stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, \ + stp1_19, stp1_28) \ + MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \ + stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, \ + stp1_21, stp1_26) \ + \ + stp1_22 = stp2_22; \ + stp1_23 = stp2_23; \ + stp1_24 = stp2_24; \ + stp1_25 = stp2_25; \ + stp1_30 = stp2_30; \ + stp1_31 = stp2_31; \ +} \ +\ +/* Stage6 */ \ +{ \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ + const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ + \ + stp2_0 = _mm_add_epi16(stp1_0, stp1_7); \ + stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ + stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ + stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \ + stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \ + stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ + stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ + stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \ + \ + stp2_8 = stp1_8; \ + stp2_9 = stp1_9; \ + stp2_14 = stp1_14; \ + stp2_15 = stp1_15; \ + \ + MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ + stg6_0, stg4_0, stg6_0, stg4_0, stp2_10, \ + stp2_13, stp2_11, stp2_12) \ + \ + stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \ + 
stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \ + stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \ + stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \ + stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \ + stp2_21 = _mm_sub_epi16(stp1_18, stp1_21); \ + stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \ + stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \ + \ + stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \ + stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \ + stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \ + stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \ + stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \ + stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \ + stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \ + stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \ +} \ +\ +/* Stage7 */ \ +{ \ + const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ + const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ + const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ + const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ + \ + const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \ + const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ + const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \ + const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \ + \ + stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \ + stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \ + stp1_2 = _mm_add_epi16(stp2_2, stp2_13); \ + stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \ + stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \ + stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \ + stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \ + stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \ + stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \ + stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \ + stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \ + stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \ + stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \ + stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \ + stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \ + stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \ + \ + stp1_16 = stp2_16; \ + stp1_17 = stp2_17; \ + stp1_18 = stp2_18; \ + stp1_19 = stp2_19; \ + \ + MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, \ + stp1_21, stp1_26) \ + MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, \ + stp1_23, stp1_24) \ + \ + stp1_28 = stp2_28; \ + stp1_29 = stp2_29; \ + stp1_30 = stp2_30; \ + stp1_31 = stp2_31; \ +} + + +#define IDCT32 \ /* Stage1 */ \ { \ - const __m128i lo_1_31 = _mm_unpacklo_epi16(in1, in31); \ - const __m128i hi_1_31 = _mm_unpackhi_epi16(in1, in31); \ - const __m128i lo_17_15 = _mm_unpacklo_epi16(in17, in15); \ - const __m128i hi_17_15 = _mm_unpackhi_epi16(in17, in15); \ - \ - const __m128i lo_9_23 = _mm_unpacklo_epi16(in9, in23); \ - const __m128i hi_9_23 = _mm_unpackhi_epi16(in9, in23); \ - const __m128i lo_25_7= _mm_unpacklo_epi16(in25, in7); \ - const __m128i hi_25_7 = _mm_unpackhi_epi16(in25, in7); \ - \ - const __m128i lo_5_27 = _mm_unpacklo_epi16(in5, in27); \ - const __m128i hi_5_27 = _mm_unpackhi_epi16(in5, in27); \ - const __m128i lo_21_11 = _mm_unpacklo_epi16(in21, in11); \ - const __m128i hi_21_11 = _mm_unpackhi_epi16(in21, in11); \ - \ - const __m128i lo_13_19 = _mm_unpacklo_epi16(in13, in19); \ - const __m128i hi_13_19 = _mm_unpackhi_epi16(in13, in19); \ - const __m128i lo_29_3 = _mm_unpacklo_epi16(in29, in3); \ - const __m128i hi_29_3 = _mm_unpackhi_epi16(in29, in3); \ + const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], 
in[31]); \ + const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], in[31]); \ + const __m128i lo_17_15 = _mm_unpacklo_epi16(in[17], in[15]); \ + const __m128i hi_17_15 = _mm_unpackhi_epi16(in[17], in[15]); \ + \ + const __m128i lo_9_23 = _mm_unpacklo_epi16(in[9], in[23]); \ + const __m128i hi_9_23 = _mm_unpackhi_epi16(in[9], in[23]); \ + const __m128i lo_25_7= _mm_unpacklo_epi16(in[25], in[7]); \ + const __m128i hi_25_7 = _mm_unpackhi_epi16(in[25], in[7]); \ + \ + const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], in[27]); \ + const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], in[27]); \ + const __m128i lo_21_11 = _mm_unpacklo_epi16(in[21], in[11]); \ + const __m128i hi_21_11 = _mm_unpackhi_epi16(in[21], in[11]); \ + \ + const __m128i lo_13_19 = _mm_unpacklo_epi16(in[13], in[19]); \ + const __m128i hi_13_19 = _mm_unpackhi_epi16(in[13], in[19]); \ + const __m128i lo_29_3 = _mm_unpacklo_epi16(in[29], in[3]); \ + const __m128i hi_29_3 = _mm_unpackhi_epi16(in[29], in[3]); \ \ MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0, \ stg1_1, stg1_2, stg1_3, stp1_16, stp1_31, \ @@ -2831,15 +3154,15 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest, \ /* Stage2 */ \ { \ - const __m128i lo_2_30 = _mm_unpacklo_epi16(in2, in30); \ - const __m128i hi_2_30 = _mm_unpackhi_epi16(in2, in30); \ - const __m128i lo_18_14 = _mm_unpacklo_epi16(in18, in14); \ - const __m128i hi_18_14 = _mm_unpackhi_epi16(in18, in14); \ + const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], in[30]); \ + const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], in[30]); \ + const __m128i lo_18_14 = _mm_unpacklo_epi16(in[18], in[14]); \ + const __m128i hi_18_14 = _mm_unpackhi_epi16(in[18], in[14]); \ \ - const __m128i lo_10_22 = _mm_unpacklo_epi16(in10, in22); \ - const __m128i hi_10_22 = _mm_unpackhi_epi16(in10, in22); \ - const __m128i lo_26_6 = _mm_unpacklo_epi16(in26, in6); \ - const __m128i hi_26_6 = _mm_unpackhi_epi16(in26, in6); \ + const __m128i lo_10_22 = _mm_unpacklo_epi16(in[10], in[22]); \ + const __m128i hi_10_22 = _mm_unpackhi_epi16(in[10], in[22]); \ + const __m128i lo_26_6 = _mm_unpacklo_epi16(in[26], in[6]); \ + const __m128i hi_26_6 = _mm_unpackhi_epi16(in[26], in[6]); \ \ MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0, \ stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9, \ @@ -2871,10 +3194,10 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest, \ /* Stage3 */ \ { \ - const __m128i lo_4_28 = _mm_unpacklo_epi16(in4, in28); \ - const __m128i hi_4_28 = _mm_unpackhi_epi16(in4, in28); \ - const __m128i lo_20_12 = _mm_unpacklo_epi16(in20, in12); \ - const __m128i hi_20_12 = _mm_unpackhi_epi16(in20, in12); \ + const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], in[28]); \ + const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], in[28]); \ + const __m128i lo_20_12 = _mm_unpacklo_epi16(in[20], in[12]); \ + const __m128i hi_20_12 = _mm_unpackhi_epi16(in[20], in[12]); \ \ const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30); \ const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30); \ @@ -2918,10 +3241,10 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest, \ /* Stage4 */ \ { \ - const __m128i lo_0_16 = _mm_unpacklo_epi16(in0, in16); \ - const __m128i hi_0_16 = _mm_unpackhi_epi16(in0, in16); \ - const __m128i lo_8_24 = _mm_unpacklo_epi16(in8, in24); \ - const __m128i hi_8_24 = _mm_unpackhi_epi16(in8, in24); \ + const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], in[16]); \ + const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], in[16]); \ + const 
__m128i lo_8_24 = _mm_unpacklo_epi16(in[8], in[24]); \ + const __m128i hi_8_24 = _mm_unpackhi_epi16(in[8], in[24]); \ \ const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ @@ -3178,10 +3501,7 @@ void vp9_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest, const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); - __m128i in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in12, - in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23, - in24, in25, in26, in27, in28, in29, in30, in31; - __m128i col[128]; + __m128i in[32], col[32]; __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, @@ -3193,296 +3513,225 @@ void vp9_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest, stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, stp2_30, stp2_31; __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int i, j, i32; - - // We work on a 8x32 block each time, and loop 8 times for 2-D 32x32 idct. - for (i = 0; i < 8; i++) { - i32 = (i << 5); - if (i == 0) { - // First 1-D idct: first 8 rows - // Load input data. - LOAD_DQCOEFF(in0, input); - LOAD_DQCOEFF(in8, input); - LOAD_DQCOEFF(in16, input); - LOAD_DQCOEFF(in24, input); - LOAD_DQCOEFF(in1, input); - LOAD_DQCOEFF(in9, input); - LOAD_DQCOEFF(in17, input); - LOAD_DQCOEFF(in25, input); - LOAD_DQCOEFF(in2, input); - LOAD_DQCOEFF(in10, input); - LOAD_DQCOEFF(in18, input); - LOAD_DQCOEFF(in26, input); - LOAD_DQCOEFF(in3, input); - LOAD_DQCOEFF(in11, input); - LOAD_DQCOEFF(in19, input); - LOAD_DQCOEFF(in27, input); - - LOAD_DQCOEFF(in4, input); - LOAD_DQCOEFF(in12, input); - LOAD_DQCOEFF(in20, input); - LOAD_DQCOEFF(in28, input); - LOAD_DQCOEFF(in5, input); - LOAD_DQCOEFF(in13, input); - LOAD_DQCOEFF(in21, input); - LOAD_DQCOEFF(in29, input); - LOAD_DQCOEFF(in6, input); - LOAD_DQCOEFF(in14, input); - LOAD_DQCOEFF(in22, input); - LOAD_DQCOEFF(in30, input); - LOAD_DQCOEFF(in7, input); - LOAD_DQCOEFF(in15, input); - LOAD_DQCOEFF(in23, input); - LOAD_DQCOEFF(in31, input); - - // Transpose 32x8 block to 8x32 block - TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, - in4, in5, in6, in7); - TRANSPOSE_8X8(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, - in10, in11, in12, in13, in14, in15); - TRANSPOSE_8X8(in16, in17, in18, in19, in20, in21, in22, in23, in16, in17, - in18, in19, in20, in21, in22, in23); - TRANSPOSE_8X8(in24, in25, in26, in27, in28, in29, in30, in31, in24, in25, - in26, in27, in28, in29, in30, in31); - } else if (i < 4) { - // First 1-D idct: next 24 zero-coeff rows - col[i32 + 0] = _mm_setzero_si128(); - col[i32 + 1] = _mm_setzero_si128(); - col[i32 + 2] = _mm_setzero_si128(); - col[i32 + 3] = _mm_setzero_si128(); - col[i32 + 4] = _mm_setzero_si128(); - col[i32 + 5] = _mm_setzero_si128(); - col[i32 + 6] = _mm_setzero_si128(); - col[i32 + 7] = _mm_setzero_si128(); - col[i32 + 8] = _mm_setzero_si128(); - col[i32 + 9] = _mm_setzero_si128(); - col[i32 + 10] = _mm_setzero_si128(); - col[i32 + 11] = _mm_setzero_si128(); - col[i32 + 12] = _mm_setzero_si128(); - col[i32 + 13] = _mm_setzero_si128(); - col[i32 + 14] = _mm_setzero_si128(); - col[i32 + 15] = _mm_setzero_si128(); - col[i32 + 16] = _mm_setzero_si128(); - col[i32 + 17] = _mm_setzero_si128(); - col[i32 + 18] = _mm_setzero_si128(); - col[i32 + 19] = _mm_setzero_si128(); - col[i32 + 20] = 
_mm_setzero_si128(); - col[i32 + 21] = _mm_setzero_si128(); - col[i32 + 22] = _mm_setzero_si128(); - col[i32 + 23] = _mm_setzero_si128(); - col[i32 + 24] = _mm_setzero_si128(); - col[i32 + 25] = _mm_setzero_si128(); - col[i32 + 26] = _mm_setzero_si128(); - col[i32 + 27] = _mm_setzero_si128(); - col[i32 + 28] = _mm_setzero_si128(); - col[i32 + 29] = _mm_setzero_si128(); - col[i32 + 30] = _mm_setzero_si128(); - col[i32 + 31] = _mm_setzero_si128(); - continue; - } else { - // Second 1-D idct - j = i - 4; - - // Transpose 32x8 block to 8x32 block - TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2], - col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5], - col[j * 8 + 6], col[j * 8 + 7], in0, in1, in2, in3, in4, - in5, in6, in7); - j += 4; - TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2], - col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5], - col[j * 8 + 6], col[j * 8 + 7], in8, in9, in10, - in11, in12, in13, in14, in15); - j += 4; - TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2], - col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5], - col[j * 8 + 6], col[j * 8 + 7], in16, in17, in18, - in19, in20, in21, in22, in23); - j += 4; - TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2], - col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5], - col[j * 8 + 6], col[j * 8 + 7], in24, in25, in26, in27, - in28, in29, in30, in31); - } - - IDCT32_1D + int i; + // Load input data. + LOAD_DQCOEFF(in[0], input); + LOAD_DQCOEFF(in[8], input); + LOAD_DQCOEFF(in[16], input); + LOAD_DQCOEFF(in[24], input); + LOAD_DQCOEFF(in[1], input); + LOAD_DQCOEFF(in[9], input); + LOAD_DQCOEFF(in[17], input); + LOAD_DQCOEFF(in[25], input); + LOAD_DQCOEFF(in[2], input); + LOAD_DQCOEFF(in[10], input); + LOAD_DQCOEFF(in[18], input); + LOAD_DQCOEFF(in[26], input); + LOAD_DQCOEFF(in[3], input); + LOAD_DQCOEFF(in[11], input); + LOAD_DQCOEFF(in[19], input); + LOAD_DQCOEFF(in[27], input); + + LOAD_DQCOEFF(in[4], input); + LOAD_DQCOEFF(in[12], input); + LOAD_DQCOEFF(in[20], input); + LOAD_DQCOEFF(in[28], input); + LOAD_DQCOEFF(in[5], input); + LOAD_DQCOEFF(in[13], input); + LOAD_DQCOEFF(in[21], input); + LOAD_DQCOEFF(in[29], input); + LOAD_DQCOEFF(in[6], input); + LOAD_DQCOEFF(in[14], input); + LOAD_DQCOEFF(in[22], input); + LOAD_DQCOEFF(in[30], input); + LOAD_DQCOEFF(in[7], input); + LOAD_DQCOEFF(in[15], input); + LOAD_DQCOEFF(in[23], input); + LOAD_DQCOEFF(in[31], input); - // final stage - if (i < 4) { - // 1_D: Store 32 intermediate results for each 8x32 block. 
- col[i32 + 0] = _mm_add_epi16(stp1_0, stp1_31); - col[i32 + 1] = _mm_add_epi16(stp1_1, stp1_30); - col[i32 + 2] = _mm_add_epi16(stp1_2, stp1_29); - col[i32 + 3] = _mm_add_epi16(stp1_3, stp1_28); - col[i32 + 4] = _mm_add_epi16(stp1_4, stp1_27); - col[i32 + 5] = _mm_add_epi16(stp1_5, stp1_26); - col[i32 + 6] = _mm_add_epi16(stp1_6, stp1_25); - col[i32 + 7] = _mm_add_epi16(stp1_7, stp1_24); - col[i32 + 8] = _mm_add_epi16(stp1_8, stp1_23); - col[i32 + 9] = _mm_add_epi16(stp1_9, stp1_22); - col[i32 + 10] = _mm_add_epi16(stp1_10, stp1_21); - col[i32 + 11] = _mm_add_epi16(stp1_11, stp1_20); - col[i32 + 12] = _mm_add_epi16(stp1_12, stp1_19); - col[i32 + 13] = _mm_add_epi16(stp1_13, stp1_18); - col[i32 + 14] = _mm_add_epi16(stp1_14, stp1_17); - col[i32 + 15] = _mm_add_epi16(stp1_15, stp1_16); - col[i32 + 16] = _mm_sub_epi16(stp1_15, stp1_16); - col[i32 + 17] = _mm_sub_epi16(stp1_14, stp1_17); - col[i32 + 18] = _mm_sub_epi16(stp1_13, stp1_18); - col[i32 + 19] = _mm_sub_epi16(stp1_12, stp1_19); - col[i32 + 20] = _mm_sub_epi16(stp1_11, stp1_20); - col[i32 + 21] = _mm_sub_epi16(stp1_10, stp1_21); - col[i32 + 22] = _mm_sub_epi16(stp1_9, stp1_22); - col[i32 + 23] = _mm_sub_epi16(stp1_8, stp1_23); - col[i32 + 24] = _mm_sub_epi16(stp1_7, stp1_24); - col[i32 + 25] = _mm_sub_epi16(stp1_6, stp1_25); - col[i32 + 26] = _mm_sub_epi16(stp1_5, stp1_26); - col[i32 + 27] = _mm_sub_epi16(stp1_4, stp1_27); - col[i32 + 28] = _mm_sub_epi16(stp1_3, stp1_28); - col[i32 + 29] = _mm_sub_epi16(stp1_2, stp1_29); - col[i32 + 30] = _mm_sub_epi16(stp1_1, stp1_30); - col[i32 + 31] = _mm_sub_epi16(stp1_0, stp1_31); - } else { + array_transpose_8x8(in, in); + array_transpose_8x8(in+8, in+8); + array_transpose_8x8(in+16, in+16); + array_transpose_8x8(in+24, in+24); + + IDCT32 + + // 1_D: Store 32 intermediate results for each 8x32 block. + col[0] = _mm_add_epi16(stp1_0, stp1_31); + col[1] = _mm_add_epi16(stp1_1, stp1_30); + col[2] = _mm_add_epi16(stp1_2, stp1_29); + col[3] = _mm_add_epi16(stp1_3, stp1_28); + col[4] = _mm_add_epi16(stp1_4, stp1_27); + col[5] = _mm_add_epi16(stp1_5, stp1_26); + col[6] = _mm_add_epi16(stp1_6, stp1_25); + col[7] = _mm_add_epi16(stp1_7, stp1_24); + col[8] = _mm_add_epi16(stp1_8, stp1_23); + col[9] = _mm_add_epi16(stp1_9, stp1_22); + col[10] = _mm_add_epi16(stp1_10, stp1_21); + col[11] = _mm_add_epi16(stp1_11, stp1_20); + col[12] = _mm_add_epi16(stp1_12, stp1_19); + col[13] = _mm_add_epi16(stp1_13, stp1_18); + col[14] = _mm_add_epi16(stp1_14, stp1_17); + col[15] = _mm_add_epi16(stp1_15, stp1_16); + col[16] = _mm_sub_epi16(stp1_15, stp1_16); + col[17] = _mm_sub_epi16(stp1_14, stp1_17); + col[18] = _mm_sub_epi16(stp1_13, stp1_18); + col[19] = _mm_sub_epi16(stp1_12, stp1_19); + col[20] = _mm_sub_epi16(stp1_11, stp1_20); + col[21] = _mm_sub_epi16(stp1_10, stp1_21); + col[22] = _mm_sub_epi16(stp1_9, stp1_22); + col[23] = _mm_sub_epi16(stp1_8, stp1_23); + col[24] = _mm_sub_epi16(stp1_7, stp1_24); + col[25] = _mm_sub_epi16(stp1_6, stp1_25); + col[26] = _mm_sub_epi16(stp1_5, stp1_26); + col[27] = _mm_sub_epi16(stp1_4, stp1_27); + col[28] = _mm_sub_epi16(stp1_3, stp1_28); + col[29] = _mm_sub_epi16(stp1_2, stp1_29); + col[30] = _mm_sub_epi16(stp1_1, stp1_30); + col[31] = _mm_sub_epi16(stp1_0, stp1_31); + for (i = 0; i < 4; i++) { const __m128i zero = _mm_setzero_si128(); + // Transpose 32x8 block to 8x32 block + array_transpose_8x8(col+i*8, in); + IDCT32_34 // 2_D: Calculate the results and store them to destination. 
- in0 = _mm_add_epi16(stp1_0, stp1_31); - in1 = _mm_add_epi16(stp1_1, stp1_30); - in2 = _mm_add_epi16(stp1_2, stp1_29); - in3 = _mm_add_epi16(stp1_3, stp1_28); - in4 = _mm_add_epi16(stp1_4, stp1_27); - in5 = _mm_add_epi16(stp1_5, stp1_26); - in6 = _mm_add_epi16(stp1_6, stp1_25); - in7 = _mm_add_epi16(stp1_7, stp1_24); - in8 = _mm_add_epi16(stp1_8, stp1_23); - in9 = _mm_add_epi16(stp1_9, stp1_22); - in10 = _mm_add_epi16(stp1_10, stp1_21); - in11 = _mm_add_epi16(stp1_11, stp1_20); - in12 = _mm_add_epi16(stp1_12, stp1_19); - in13 = _mm_add_epi16(stp1_13, stp1_18); - in14 = _mm_add_epi16(stp1_14, stp1_17); - in15 = _mm_add_epi16(stp1_15, stp1_16); - in16 = _mm_sub_epi16(stp1_15, stp1_16); - in17 = _mm_sub_epi16(stp1_14, stp1_17); - in18 = _mm_sub_epi16(stp1_13, stp1_18); - in19 = _mm_sub_epi16(stp1_12, stp1_19); - in20 = _mm_sub_epi16(stp1_11, stp1_20); - in21 = _mm_sub_epi16(stp1_10, stp1_21); - in22 = _mm_sub_epi16(stp1_9, stp1_22); - in23 = _mm_sub_epi16(stp1_8, stp1_23); - in24 = _mm_sub_epi16(stp1_7, stp1_24); - in25 = _mm_sub_epi16(stp1_6, stp1_25); - in26 = _mm_sub_epi16(stp1_5, stp1_26); - in27 = _mm_sub_epi16(stp1_4, stp1_27); - in28 = _mm_sub_epi16(stp1_3, stp1_28); - in29 = _mm_sub_epi16(stp1_2, stp1_29); - in30 = _mm_sub_epi16(stp1_1, stp1_30); - in31 = _mm_sub_epi16(stp1_0, stp1_31); + in[0] = _mm_add_epi16(stp1_0, stp1_31); + in[1] = _mm_add_epi16(stp1_1, stp1_30); + in[2] = _mm_add_epi16(stp1_2, stp1_29); + in[3] = _mm_add_epi16(stp1_3, stp1_28); + in[4] = _mm_add_epi16(stp1_4, stp1_27); + in[5] = _mm_add_epi16(stp1_5, stp1_26); + in[6] = _mm_add_epi16(stp1_6, stp1_25); + in[7] = _mm_add_epi16(stp1_7, stp1_24); + in[8] = _mm_add_epi16(stp1_8, stp1_23); + in[9] = _mm_add_epi16(stp1_9, stp1_22); + in[10] = _mm_add_epi16(stp1_10, stp1_21); + in[11] = _mm_add_epi16(stp1_11, stp1_20); + in[12] = _mm_add_epi16(stp1_12, stp1_19); + in[13] = _mm_add_epi16(stp1_13, stp1_18); + in[14] = _mm_add_epi16(stp1_14, stp1_17); + in[15] = _mm_add_epi16(stp1_15, stp1_16); + in[16] = _mm_sub_epi16(stp1_15, stp1_16); + in[17] = _mm_sub_epi16(stp1_14, stp1_17); + in[18] = _mm_sub_epi16(stp1_13, stp1_18); + in[19] = _mm_sub_epi16(stp1_12, stp1_19); + in[20] = _mm_sub_epi16(stp1_11, stp1_20); + in[21] = _mm_sub_epi16(stp1_10, stp1_21); + in[22] = _mm_sub_epi16(stp1_9, stp1_22); + in[23] = _mm_sub_epi16(stp1_8, stp1_23); + in[24] = _mm_sub_epi16(stp1_7, stp1_24); + in[25] = _mm_sub_epi16(stp1_6, stp1_25); + in[26] = _mm_sub_epi16(stp1_5, stp1_26); + in[27] = _mm_sub_epi16(stp1_4, stp1_27); + in[28] = _mm_sub_epi16(stp1_3, stp1_28); + in[29] = _mm_sub_epi16(stp1_2, stp1_29); + in[30] = _mm_sub_epi16(stp1_1, stp1_30); + in[31] = _mm_sub_epi16(stp1_0, stp1_31); // Final rounding and shift - in0 = _mm_adds_epi16(in0, final_rounding); - in1 = _mm_adds_epi16(in1, final_rounding); - in2 = _mm_adds_epi16(in2, final_rounding); - in3 = _mm_adds_epi16(in3, final_rounding); - in4 = _mm_adds_epi16(in4, final_rounding); - in5 = _mm_adds_epi16(in5, final_rounding); - in6 = _mm_adds_epi16(in6, final_rounding); - in7 = _mm_adds_epi16(in7, final_rounding); - in8 = _mm_adds_epi16(in8, final_rounding); - in9 = _mm_adds_epi16(in9, final_rounding); - in10 = _mm_adds_epi16(in10, final_rounding); - in11 = _mm_adds_epi16(in11, final_rounding); - in12 = _mm_adds_epi16(in12, final_rounding); - in13 = _mm_adds_epi16(in13, final_rounding); - in14 = _mm_adds_epi16(in14, final_rounding); - in15 = _mm_adds_epi16(in15, final_rounding); - in16 = _mm_adds_epi16(in16, final_rounding); - in17 = _mm_adds_epi16(in17, final_rounding); - in18 = 
_mm_adds_epi16(in18, final_rounding); - in19 = _mm_adds_epi16(in19, final_rounding); - in20 = _mm_adds_epi16(in20, final_rounding); - in21 = _mm_adds_epi16(in21, final_rounding); - in22 = _mm_adds_epi16(in22, final_rounding); - in23 = _mm_adds_epi16(in23, final_rounding); - in24 = _mm_adds_epi16(in24, final_rounding); - in25 = _mm_adds_epi16(in25, final_rounding); - in26 = _mm_adds_epi16(in26, final_rounding); - in27 = _mm_adds_epi16(in27, final_rounding); - in28 = _mm_adds_epi16(in28, final_rounding); - in29 = _mm_adds_epi16(in29, final_rounding); - in30 = _mm_adds_epi16(in30, final_rounding); - in31 = _mm_adds_epi16(in31, final_rounding); - - in0 = _mm_srai_epi16(in0, 6); - in1 = _mm_srai_epi16(in1, 6); - in2 = _mm_srai_epi16(in2, 6); - in3 = _mm_srai_epi16(in3, 6); - in4 = _mm_srai_epi16(in4, 6); - in5 = _mm_srai_epi16(in5, 6); - in6 = _mm_srai_epi16(in6, 6); - in7 = _mm_srai_epi16(in7, 6); - in8 = _mm_srai_epi16(in8, 6); - in9 = _mm_srai_epi16(in9, 6); - in10 = _mm_srai_epi16(in10, 6); - in11 = _mm_srai_epi16(in11, 6); - in12 = _mm_srai_epi16(in12, 6); - in13 = _mm_srai_epi16(in13, 6); - in14 = _mm_srai_epi16(in14, 6); - in15 = _mm_srai_epi16(in15, 6); - in16 = _mm_srai_epi16(in16, 6); - in17 = _mm_srai_epi16(in17, 6); - in18 = _mm_srai_epi16(in18, 6); - in19 = _mm_srai_epi16(in19, 6); - in20 = _mm_srai_epi16(in20, 6); - in21 = _mm_srai_epi16(in21, 6); - in22 = _mm_srai_epi16(in22, 6); - in23 = _mm_srai_epi16(in23, 6); - in24 = _mm_srai_epi16(in24, 6); - in25 = _mm_srai_epi16(in25, 6); - in26 = _mm_srai_epi16(in26, 6); - in27 = _mm_srai_epi16(in27, 6); - in28 = _mm_srai_epi16(in28, 6); - in29 = _mm_srai_epi16(in29, 6); - in30 = _mm_srai_epi16(in30, 6); - in31 = _mm_srai_epi16(in31, 6); - - RECON_AND_STORE(dest, in0); - RECON_AND_STORE(dest, in1); - RECON_AND_STORE(dest, in2); - RECON_AND_STORE(dest, in3); - RECON_AND_STORE(dest, in4); - RECON_AND_STORE(dest, in5); - RECON_AND_STORE(dest, in6); - RECON_AND_STORE(dest, in7); - RECON_AND_STORE(dest, in8); - RECON_AND_STORE(dest, in9); - RECON_AND_STORE(dest, in10); - RECON_AND_STORE(dest, in11); - RECON_AND_STORE(dest, in12); - RECON_AND_STORE(dest, in13); - RECON_AND_STORE(dest, in14); - RECON_AND_STORE(dest, in15); - RECON_AND_STORE(dest, in16); - RECON_AND_STORE(dest, in17); - RECON_AND_STORE(dest, in18); - RECON_AND_STORE(dest, in19); - RECON_AND_STORE(dest, in20); - RECON_AND_STORE(dest, in21); - RECON_AND_STORE(dest, in22); - RECON_AND_STORE(dest, in23); - RECON_AND_STORE(dest, in24); - RECON_AND_STORE(dest, in25); - RECON_AND_STORE(dest, in26); - RECON_AND_STORE(dest, in27); - RECON_AND_STORE(dest, in28); - RECON_AND_STORE(dest, in29); - RECON_AND_STORE(dest, in30); - RECON_AND_STORE(dest, in31); + in[0] = _mm_adds_epi16(in[0], final_rounding); + in[1] = _mm_adds_epi16(in[1], final_rounding); + in[2] = _mm_adds_epi16(in[2], final_rounding); + in[3] = _mm_adds_epi16(in[3], final_rounding); + in[4] = _mm_adds_epi16(in[4], final_rounding); + in[5] = _mm_adds_epi16(in[5], final_rounding); + in[6] = _mm_adds_epi16(in[6], final_rounding); + in[7] = _mm_adds_epi16(in[7], final_rounding); + in[8] = _mm_adds_epi16(in[8], final_rounding); + in[9] = _mm_adds_epi16(in[9], final_rounding); + in[10] = _mm_adds_epi16(in[10], final_rounding); + in[11] = _mm_adds_epi16(in[11], final_rounding); + in[12] = _mm_adds_epi16(in[12], final_rounding); + in[13] = _mm_adds_epi16(in[13], final_rounding); + in[14] = _mm_adds_epi16(in[14], final_rounding); + in[15] = _mm_adds_epi16(in[15], final_rounding); + in[16] = _mm_adds_epi16(in[16], final_rounding); + 
in[17] = _mm_adds_epi16(in[17], final_rounding); + in[18] = _mm_adds_epi16(in[18], final_rounding); + in[19] = _mm_adds_epi16(in[19], final_rounding); + in[20] = _mm_adds_epi16(in[20], final_rounding); + in[21] = _mm_adds_epi16(in[21], final_rounding); + in[22] = _mm_adds_epi16(in[22], final_rounding); + in[23] = _mm_adds_epi16(in[23], final_rounding); + in[24] = _mm_adds_epi16(in[24], final_rounding); + in[25] = _mm_adds_epi16(in[25], final_rounding); + in[26] = _mm_adds_epi16(in[26], final_rounding); + in[27] = _mm_adds_epi16(in[27], final_rounding); + in[28] = _mm_adds_epi16(in[28], final_rounding); + in[29] = _mm_adds_epi16(in[29], final_rounding); + in[30] = _mm_adds_epi16(in[30], final_rounding); + in[31] = _mm_adds_epi16(in[31], final_rounding); + + in[0] = _mm_srai_epi16(in[0], 6); + in[1] = _mm_srai_epi16(in[1], 6); + in[2] = _mm_srai_epi16(in[2], 6); + in[3] = _mm_srai_epi16(in[3], 6); + in[4] = _mm_srai_epi16(in[4], 6); + in[5] = _mm_srai_epi16(in[5], 6); + in[6] = _mm_srai_epi16(in[6], 6); + in[7] = _mm_srai_epi16(in[7], 6); + in[8] = _mm_srai_epi16(in[8], 6); + in[9] = _mm_srai_epi16(in[9], 6); + in[10] = _mm_srai_epi16(in[10], 6); + in[11] = _mm_srai_epi16(in[11], 6); + in[12] = _mm_srai_epi16(in[12], 6); + in[13] = _mm_srai_epi16(in[13], 6); + in[14] = _mm_srai_epi16(in[14], 6); + in[15] = _mm_srai_epi16(in[15], 6); + in[16] = _mm_srai_epi16(in[16], 6); + in[17] = _mm_srai_epi16(in[17], 6); + in[18] = _mm_srai_epi16(in[18], 6); + in[19] = _mm_srai_epi16(in[19], 6); + in[20] = _mm_srai_epi16(in[20], 6); + in[21] = _mm_srai_epi16(in[21], 6); + in[22] = _mm_srai_epi16(in[22], 6); + in[23] = _mm_srai_epi16(in[23], 6); + in[24] = _mm_srai_epi16(in[24], 6); + in[25] = _mm_srai_epi16(in[25], 6); + in[26] = _mm_srai_epi16(in[26], 6); + in[27] = _mm_srai_epi16(in[27], 6); + in[28] = _mm_srai_epi16(in[28], 6); + in[29] = _mm_srai_epi16(in[29], 6); + in[30] = _mm_srai_epi16(in[30], 6); + in[31] = _mm_srai_epi16(in[31], 6); + + RECON_AND_STORE(dest, in[0]); + RECON_AND_STORE(dest, in[1]); + RECON_AND_STORE(dest, in[2]); + RECON_AND_STORE(dest, in[3]); + RECON_AND_STORE(dest, in[4]); + RECON_AND_STORE(dest, in[5]); + RECON_AND_STORE(dest, in[6]); + RECON_AND_STORE(dest, in[7]); + RECON_AND_STORE(dest, in[8]); + RECON_AND_STORE(dest, in[9]); + RECON_AND_STORE(dest, in[10]); + RECON_AND_STORE(dest, in[11]); + RECON_AND_STORE(dest, in[12]); + RECON_AND_STORE(dest, in[13]); + RECON_AND_STORE(dest, in[14]); + RECON_AND_STORE(dest, in[15]); + RECON_AND_STORE(dest, in[16]); + RECON_AND_STORE(dest, in[17]); + RECON_AND_STORE(dest, in[18]); + RECON_AND_STORE(dest, in[19]); + RECON_AND_STORE(dest, in[20]); + RECON_AND_STORE(dest, in[21]); + RECON_AND_STORE(dest, in[22]); + RECON_AND_STORE(dest, in[23]); + RECON_AND_STORE(dest, in[24]); + RECON_AND_STORE(dest, in[25]); + RECON_AND_STORE(dest, in[26]); + RECON_AND_STORE(dest, in[27]); + RECON_AND_STORE(dest, in[28]); + RECON_AND_STORE(dest, in[29]); + RECON_AND_STORE(dest, in[30]); + RECON_AND_STORE(dest, in[31]); dest += 8 - (stride * 32); } } -} void vp9_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest, int stride) { @@ -3537,10 +3786,7 @@ void vp9_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest, const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); - __m128i in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in12, - in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23, - in24, in25, in26, in27, in28, in29, in30, in31; - __m128i col[128]; + __m128i in[32], col[128], zero_idx[16]; __m128i 
stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, @@ -3553,66 +3799,63 @@ void vp9_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest, stp2_30, stp2_31; __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int i, j, i32; - __m128i zero_idx[16]; int zero_flag[2]; - // We work on a 8x32 block each time, and loop 8 times for 2-D 32x32 idct. - for (i = 0; i < 8; i++) { + for (i = 0; i < 4; i++) { i32 = (i << 5); - if (i < 4) { // First 1-D idct // Load input data. - LOAD_DQCOEFF(in0, input); - LOAD_DQCOEFF(in8, input); - LOAD_DQCOEFF(in16, input); - LOAD_DQCOEFF(in24, input); - LOAD_DQCOEFF(in1, input); - LOAD_DQCOEFF(in9, input); - LOAD_DQCOEFF(in17, input); - LOAD_DQCOEFF(in25, input); - LOAD_DQCOEFF(in2, input); - LOAD_DQCOEFF(in10, input); - LOAD_DQCOEFF(in18, input); - LOAD_DQCOEFF(in26, input); - LOAD_DQCOEFF(in3, input); - LOAD_DQCOEFF(in11, input); - LOAD_DQCOEFF(in19, input); - LOAD_DQCOEFF(in27, input); - - LOAD_DQCOEFF(in4, input); - LOAD_DQCOEFF(in12, input); - LOAD_DQCOEFF(in20, input); - LOAD_DQCOEFF(in28, input); - LOAD_DQCOEFF(in5, input); - LOAD_DQCOEFF(in13, input); - LOAD_DQCOEFF(in21, input); - LOAD_DQCOEFF(in29, input); - LOAD_DQCOEFF(in6, input); - LOAD_DQCOEFF(in14, input); - LOAD_DQCOEFF(in22, input); - LOAD_DQCOEFF(in30, input); - LOAD_DQCOEFF(in7, input); - LOAD_DQCOEFF(in15, input); - LOAD_DQCOEFF(in23, input); - LOAD_DQCOEFF(in31, input); + LOAD_DQCOEFF(in[0], input); + LOAD_DQCOEFF(in[8], input); + LOAD_DQCOEFF(in[16], input); + LOAD_DQCOEFF(in[24], input); + LOAD_DQCOEFF(in[1], input); + LOAD_DQCOEFF(in[9], input); + LOAD_DQCOEFF(in[17], input); + LOAD_DQCOEFF(in[25], input); + LOAD_DQCOEFF(in[2], input); + LOAD_DQCOEFF(in[10], input); + LOAD_DQCOEFF(in[18], input); + LOAD_DQCOEFF(in[26], input); + LOAD_DQCOEFF(in[3], input); + LOAD_DQCOEFF(in[11], input); + LOAD_DQCOEFF(in[19], input); + LOAD_DQCOEFF(in[27], input); + + LOAD_DQCOEFF(in[4], input); + LOAD_DQCOEFF(in[12], input); + LOAD_DQCOEFF(in[20], input); + LOAD_DQCOEFF(in[28], input); + LOAD_DQCOEFF(in[5], input); + LOAD_DQCOEFF(in[13], input); + LOAD_DQCOEFF(in[21], input); + LOAD_DQCOEFF(in[29], input); + LOAD_DQCOEFF(in[6], input); + LOAD_DQCOEFF(in[14], input); + LOAD_DQCOEFF(in[22], input); + LOAD_DQCOEFF(in[30], input); + LOAD_DQCOEFF(in[7], input); + LOAD_DQCOEFF(in[15], input); + LOAD_DQCOEFF(in[23], input); + LOAD_DQCOEFF(in[31], input); // checking if all entries are zero - zero_idx[0] = _mm_or_si128(in0, in1); - zero_idx[1] = _mm_or_si128(in2, in3); - zero_idx[2] = _mm_or_si128(in4, in5); - zero_idx[3] = _mm_or_si128(in6, in7); - zero_idx[4] = _mm_or_si128(in8, in9); - zero_idx[5] = _mm_or_si128(in10, in11); - zero_idx[6] = _mm_or_si128(in12, in13); - zero_idx[7] = _mm_or_si128(in14, in15); - zero_idx[8] = _mm_or_si128(in16, in17); - zero_idx[9] = _mm_or_si128(in18, in19); - zero_idx[10] = _mm_or_si128(in20, in21); - zero_idx[11] = _mm_or_si128(in22, in23); - zero_idx[12] = _mm_or_si128(in24, in25); - zero_idx[13] = _mm_or_si128(in26, in27); - zero_idx[14] = _mm_or_si128(in28, in29); - zero_idx[15] = _mm_or_si128(in30, in31); + zero_idx[0] = _mm_or_si128(in[0], in[1]); + zero_idx[1] = _mm_or_si128(in[2], in[3]); + zero_idx[2] = _mm_or_si128(in[4], in[5]); + zero_idx[3] = _mm_or_si128(in[6], in[7]); + zero_idx[4] = _mm_or_si128(in[8], in[9]); + zero_idx[5] = _mm_or_si128(in[10], in[11]); + zero_idx[6] = _mm_or_si128(in[12], in[13]); + 
zero_idx[7] = _mm_or_si128(in[14], in[15]); + zero_idx[8] = _mm_or_si128(in[16], in[17]); + zero_idx[9] = _mm_or_si128(in[18], in[19]); + zero_idx[10] = _mm_or_si128(in[20], in[21]); + zero_idx[11] = _mm_or_si128(in[22], in[23]); + zero_idx[12] = _mm_or_si128(in[24], in[25]); + zero_idx[13] = _mm_or_si128(in[26], in[27]); + zero_idx[14] = _mm_or_si128(in[28], in[29]); + zero_idx[15] = _mm_or_si128(in[30], in[31]); zero_idx[0] = _mm_or_si128(zero_idx[0], zero_idx[1]); zero_idx[1] = _mm_or_si128(zero_idx[2], zero_idx[3]); @@ -3674,44 +3917,13 @@ void vp9_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest, } // Transpose 32x8 block to 8x32 block - TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, - in4, in5, in6, in7); - TRANSPOSE_8X8(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, - in10, in11, in12, in13, in14, in15); - TRANSPOSE_8X8(in16, in17, in18, in19, in20, in21, in22, in23, in16, in17, - in18, in19, in20, in21, in22, in23); - TRANSPOSE_8X8(in24, in25, in26, in27, in28, in29, in30, in31, in24, in25, - in26, in27, in28, in29, in30, in31); - } else { - // Second 1-D idct - j = i - 4; - - // Transpose 32x8 block to 8x32 block - TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2], - col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5], - col[j * 8 + 6], col[j * 8 + 7], in0, in1, in2, in3, in4, - in5, in6, in7); - j += 4; - TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2], - col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5], - col[j * 8 + 6], col[j * 8 + 7], in8, in9, in10, - in11, in12, in13, in14, in15); - j += 4; - TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2], - col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5], - col[j * 8 + 6], col[j * 8 + 7], in16, in17, in18, - in19, in20, in21, in22, in23); - j += 4; - TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2], - col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5], - col[j * 8 + 6], col[j * 8 + 7], in24, in25, in26, in27, - in28, in29, in30, in31); - } + array_transpose_8x8(in, in); + array_transpose_8x8(in+8, in+8); + array_transpose_8x8(in+16, in+16); + array_transpose_8x8(in+24, in+24); - IDCT32_1D + IDCT32 - // final stage - if (i < 4) { // 1_D: Store 32 intermediate results for each 8x32 block. col[i32 + 0] = _mm_add_epi16(stp1_0, stp1_31); col[i32 + 1] = _mm_add_epi16(stp1_1, stp1_30); @@ -3745,146 +3957,156 @@ void vp9_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest, col[i32 + 29] = _mm_sub_epi16(stp1_2, stp1_29); col[i32 + 30] = _mm_sub_epi16(stp1_1, stp1_30); col[i32 + 31] = _mm_sub_epi16(stp1_0, stp1_31); - } else { + } + for (i = 0; i < 4; i++) { const __m128i zero = _mm_setzero_si128(); + // Second 1-D idct + j = i << 3; + + // Transpose 32x8 block to 8x32 block + array_transpose_8x8(col+j, in); + array_transpose_8x8(col+j+32, in+8); + array_transpose_8x8(col+j+64, in+16); + array_transpose_8x8(col+j+96, in+24); + + IDCT32 // 2_D: Calculate the results and store them to destination. 
- in0 = _mm_add_epi16(stp1_0, stp1_31); - in1 = _mm_add_epi16(stp1_1, stp1_30); - in2 = _mm_add_epi16(stp1_2, stp1_29); - in3 = _mm_add_epi16(stp1_3, stp1_28); - in4 = _mm_add_epi16(stp1_4, stp1_27); - in5 = _mm_add_epi16(stp1_5, stp1_26); - in6 = _mm_add_epi16(stp1_6, stp1_25); - in7 = _mm_add_epi16(stp1_7, stp1_24); - in8 = _mm_add_epi16(stp1_8, stp1_23); - in9 = _mm_add_epi16(stp1_9, stp1_22); - in10 = _mm_add_epi16(stp1_10, stp1_21); - in11 = _mm_add_epi16(stp1_11, stp1_20); - in12 = _mm_add_epi16(stp1_12, stp1_19); - in13 = _mm_add_epi16(stp1_13, stp1_18); - in14 = _mm_add_epi16(stp1_14, stp1_17); - in15 = _mm_add_epi16(stp1_15, stp1_16); - in16 = _mm_sub_epi16(stp1_15, stp1_16); - in17 = _mm_sub_epi16(stp1_14, stp1_17); - in18 = _mm_sub_epi16(stp1_13, stp1_18); - in19 = _mm_sub_epi16(stp1_12, stp1_19); - in20 = _mm_sub_epi16(stp1_11, stp1_20); - in21 = _mm_sub_epi16(stp1_10, stp1_21); - in22 = _mm_sub_epi16(stp1_9, stp1_22); - in23 = _mm_sub_epi16(stp1_8, stp1_23); - in24 = _mm_sub_epi16(stp1_7, stp1_24); - in25 = _mm_sub_epi16(stp1_6, stp1_25); - in26 = _mm_sub_epi16(stp1_5, stp1_26); - in27 = _mm_sub_epi16(stp1_4, stp1_27); - in28 = _mm_sub_epi16(stp1_3, stp1_28); - in29 = _mm_sub_epi16(stp1_2, stp1_29); - in30 = _mm_sub_epi16(stp1_1, stp1_30); - in31 = _mm_sub_epi16(stp1_0, stp1_31); + in[0] = _mm_add_epi16(stp1_0, stp1_31); + in[1] = _mm_add_epi16(stp1_1, stp1_30); + in[2] = _mm_add_epi16(stp1_2, stp1_29); + in[3] = _mm_add_epi16(stp1_3, stp1_28); + in[4] = _mm_add_epi16(stp1_4, stp1_27); + in[5] = _mm_add_epi16(stp1_5, stp1_26); + in[6] = _mm_add_epi16(stp1_6, stp1_25); + in[7] = _mm_add_epi16(stp1_7, stp1_24); + in[8] = _mm_add_epi16(stp1_8, stp1_23); + in[9] = _mm_add_epi16(stp1_9, stp1_22); + in[10] = _mm_add_epi16(stp1_10, stp1_21); + in[11] = _mm_add_epi16(stp1_11, stp1_20); + in[12] = _mm_add_epi16(stp1_12, stp1_19); + in[13] = _mm_add_epi16(stp1_13, stp1_18); + in[14] = _mm_add_epi16(stp1_14, stp1_17); + in[15] = _mm_add_epi16(stp1_15, stp1_16); + in[16] = _mm_sub_epi16(stp1_15, stp1_16); + in[17] = _mm_sub_epi16(stp1_14, stp1_17); + in[18] = _mm_sub_epi16(stp1_13, stp1_18); + in[19] = _mm_sub_epi16(stp1_12, stp1_19); + in[20] = _mm_sub_epi16(stp1_11, stp1_20); + in[21] = _mm_sub_epi16(stp1_10, stp1_21); + in[22] = _mm_sub_epi16(stp1_9, stp1_22); + in[23] = _mm_sub_epi16(stp1_8, stp1_23); + in[24] = _mm_sub_epi16(stp1_7, stp1_24); + in[25] = _mm_sub_epi16(stp1_6, stp1_25); + in[26] = _mm_sub_epi16(stp1_5, stp1_26); + in[27] = _mm_sub_epi16(stp1_4, stp1_27); + in[28] = _mm_sub_epi16(stp1_3, stp1_28); + in[29] = _mm_sub_epi16(stp1_2, stp1_29); + in[30] = _mm_sub_epi16(stp1_1, stp1_30); + in[31] = _mm_sub_epi16(stp1_0, stp1_31); // Final rounding and shift - in0 = _mm_adds_epi16(in0, final_rounding); - in1 = _mm_adds_epi16(in1, final_rounding); - in2 = _mm_adds_epi16(in2, final_rounding); - in3 = _mm_adds_epi16(in3, final_rounding); - in4 = _mm_adds_epi16(in4, final_rounding); - in5 = _mm_adds_epi16(in5, final_rounding); - in6 = _mm_adds_epi16(in6, final_rounding); - in7 = _mm_adds_epi16(in7, final_rounding); - in8 = _mm_adds_epi16(in8, final_rounding); - in9 = _mm_adds_epi16(in9, final_rounding); - in10 = _mm_adds_epi16(in10, final_rounding); - in11 = _mm_adds_epi16(in11, final_rounding); - in12 = _mm_adds_epi16(in12, final_rounding); - in13 = _mm_adds_epi16(in13, final_rounding); - in14 = _mm_adds_epi16(in14, final_rounding); - in15 = _mm_adds_epi16(in15, final_rounding); - in16 = _mm_adds_epi16(in16, final_rounding); - in17 = _mm_adds_epi16(in17, final_rounding); - in18 = 
_mm_adds_epi16(in18, final_rounding); - in19 = _mm_adds_epi16(in19, final_rounding); - in20 = _mm_adds_epi16(in20, final_rounding); - in21 = _mm_adds_epi16(in21, final_rounding); - in22 = _mm_adds_epi16(in22, final_rounding); - in23 = _mm_adds_epi16(in23, final_rounding); - in24 = _mm_adds_epi16(in24, final_rounding); - in25 = _mm_adds_epi16(in25, final_rounding); - in26 = _mm_adds_epi16(in26, final_rounding); - in27 = _mm_adds_epi16(in27, final_rounding); - in28 = _mm_adds_epi16(in28, final_rounding); - in29 = _mm_adds_epi16(in29, final_rounding); - in30 = _mm_adds_epi16(in30, final_rounding); - in31 = _mm_adds_epi16(in31, final_rounding); - - in0 = _mm_srai_epi16(in0, 6); - in1 = _mm_srai_epi16(in1, 6); - in2 = _mm_srai_epi16(in2, 6); - in3 = _mm_srai_epi16(in3, 6); - in4 = _mm_srai_epi16(in4, 6); - in5 = _mm_srai_epi16(in5, 6); - in6 = _mm_srai_epi16(in6, 6); - in7 = _mm_srai_epi16(in7, 6); - in8 = _mm_srai_epi16(in8, 6); - in9 = _mm_srai_epi16(in9, 6); - in10 = _mm_srai_epi16(in10, 6); - in11 = _mm_srai_epi16(in11, 6); - in12 = _mm_srai_epi16(in12, 6); - in13 = _mm_srai_epi16(in13, 6); - in14 = _mm_srai_epi16(in14, 6); - in15 = _mm_srai_epi16(in15, 6); - in16 = _mm_srai_epi16(in16, 6); - in17 = _mm_srai_epi16(in17, 6); - in18 = _mm_srai_epi16(in18, 6); - in19 = _mm_srai_epi16(in19, 6); - in20 = _mm_srai_epi16(in20, 6); - in21 = _mm_srai_epi16(in21, 6); - in22 = _mm_srai_epi16(in22, 6); - in23 = _mm_srai_epi16(in23, 6); - in24 = _mm_srai_epi16(in24, 6); - in25 = _mm_srai_epi16(in25, 6); - in26 = _mm_srai_epi16(in26, 6); - in27 = _mm_srai_epi16(in27, 6); - in28 = _mm_srai_epi16(in28, 6); - in29 = _mm_srai_epi16(in29, 6); - in30 = _mm_srai_epi16(in30, 6); - in31 = _mm_srai_epi16(in31, 6); - - RECON_AND_STORE(dest, in0); - RECON_AND_STORE(dest, in1); - RECON_AND_STORE(dest, in2); - RECON_AND_STORE(dest, in3); - RECON_AND_STORE(dest, in4); - RECON_AND_STORE(dest, in5); - RECON_AND_STORE(dest, in6); - RECON_AND_STORE(dest, in7); - RECON_AND_STORE(dest, in8); - RECON_AND_STORE(dest, in9); - RECON_AND_STORE(dest, in10); - RECON_AND_STORE(dest, in11); - RECON_AND_STORE(dest, in12); - RECON_AND_STORE(dest, in13); - RECON_AND_STORE(dest, in14); - RECON_AND_STORE(dest, in15); - RECON_AND_STORE(dest, in16); - RECON_AND_STORE(dest, in17); - RECON_AND_STORE(dest, in18); - RECON_AND_STORE(dest, in19); - RECON_AND_STORE(dest, in20); - RECON_AND_STORE(dest, in21); - RECON_AND_STORE(dest, in22); - RECON_AND_STORE(dest, in23); - RECON_AND_STORE(dest, in24); - RECON_AND_STORE(dest, in25); - RECON_AND_STORE(dest, in26); - RECON_AND_STORE(dest, in27); - RECON_AND_STORE(dest, in28); - RECON_AND_STORE(dest, in29); - RECON_AND_STORE(dest, in30); - RECON_AND_STORE(dest, in31); + in[0] = _mm_adds_epi16(in[0], final_rounding); + in[1] = _mm_adds_epi16(in[1], final_rounding); + in[2] = _mm_adds_epi16(in[2], final_rounding); + in[3] = _mm_adds_epi16(in[3], final_rounding); + in[4] = _mm_adds_epi16(in[4], final_rounding); + in[5] = _mm_adds_epi16(in[5], final_rounding); + in[6] = _mm_adds_epi16(in[6], final_rounding); + in[7] = _mm_adds_epi16(in[7], final_rounding); + in[8] = _mm_adds_epi16(in[8], final_rounding); + in[9] = _mm_adds_epi16(in[9], final_rounding); + in[10] = _mm_adds_epi16(in[10], final_rounding); + in[11] = _mm_adds_epi16(in[11], final_rounding); + in[12] = _mm_adds_epi16(in[12], final_rounding); + in[13] = _mm_adds_epi16(in[13], final_rounding); + in[14] = _mm_adds_epi16(in[14], final_rounding); + in[15] = _mm_adds_epi16(in[15], final_rounding); + in[16] = _mm_adds_epi16(in[16], final_rounding); + 
in[17] = _mm_adds_epi16(in[17], final_rounding); + in[18] = _mm_adds_epi16(in[18], final_rounding); + in[19] = _mm_adds_epi16(in[19], final_rounding); + in[20] = _mm_adds_epi16(in[20], final_rounding); + in[21] = _mm_adds_epi16(in[21], final_rounding); + in[22] = _mm_adds_epi16(in[22], final_rounding); + in[23] = _mm_adds_epi16(in[23], final_rounding); + in[24] = _mm_adds_epi16(in[24], final_rounding); + in[25] = _mm_adds_epi16(in[25], final_rounding); + in[26] = _mm_adds_epi16(in[26], final_rounding); + in[27] = _mm_adds_epi16(in[27], final_rounding); + in[28] = _mm_adds_epi16(in[28], final_rounding); + in[29] = _mm_adds_epi16(in[29], final_rounding); + in[30] = _mm_adds_epi16(in[30], final_rounding); + in[31] = _mm_adds_epi16(in[31], final_rounding); + + in[0] = _mm_srai_epi16(in[0], 6); + in[1] = _mm_srai_epi16(in[1], 6); + in[2] = _mm_srai_epi16(in[2], 6); + in[3] = _mm_srai_epi16(in[3], 6); + in[4] = _mm_srai_epi16(in[4], 6); + in[5] = _mm_srai_epi16(in[5], 6); + in[6] = _mm_srai_epi16(in[6], 6); + in[7] = _mm_srai_epi16(in[7], 6); + in[8] = _mm_srai_epi16(in[8], 6); + in[9] = _mm_srai_epi16(in[9], 6); + in[10] = _mm_srai_epi16(in[10], 6); + in[11] = _mm_srai_epi16(in[11], 6); + in[12] = _mm_srai_epi16(in[12], 6); + in[13] = _mm_srai_epi16(in[13], 6); + in[14] = _mm_srai_epi16(in[14], 6); + in[15] = _mm_srai_epi16(in[15], 6); + in[16] = _mm_srai_epi16(in[16], 6); + in[17] = _mm_srai_epi16(in[17], 6); + in[18] = _mm_srai_epi16(in[18], 6); + in[19] = _mm_srai_epi16(in[19], 6); + in[20] = _mm_srai_epi16(in[20], 6); + in[21] = _mm_srai_epi16(in[21], 6); + in[22] = _mm_srai_epi16(in[22], 6); + in[23] = _mm_srai_epi16(in[23], 6); + in[24] = _mm_srai_epi16(in[24], 6); + in[25] = _mm_srai_epi16(in[25], 6); + in[26] = _mm_srai_epi16(in[26], 6); + in[27] = _mm_srai_epi16(in[27], 6); + in[28] = _mm_srai_epi16(in[28], 6); + in[29] = _mm_srai_epi16(in[29], 6); + in[30] = _mm_srai_epi16(in[30], 6); + in[31] = _mm_srai_epi16(in[31], 6); + + RECON_AND_STORE(dest, in[0]); + RECON_AND_STORE(dest, in[1]); + RECON_AND_STORE(dest, in[2]); + RECON_AND_STORE(dest, in[3]); + RECON_AND_STORE(dest, in[4]); + RECON_AND_STORE(dest, in[5]); + RECON_AND_STORE(dest, in[6]); + RECON_AND_STORE(dest, in[7]); + RECON_AND_STORE(dest, in[8]); + RECON_AND_STORE(dest, in[9]); + RECON_AND_STORE(dest, in[10]); + RECON_AND_STORE(dest, in[11]); + RECON_AND_STORE(dest, in[12]); + RECON_AND_STORE(dest, in[13]); + RECON_AND_STORE(dest, in[14]); + RECON_AND_STORE(dest, in[15]); + RECON_AND_STORE(dest, in[16]); + RECON_AND_STORE(dest, in[17]); + RECON_AND_STORE(dest, in[18]); + RECON_AND_STORE(dest, in[19]); + RECON_AND_STORE(dest, in[20]); + RECON_AND_STORE(dest, in[21]); + RECON_AND_STORE(dest, in[22]); + RECON_AND_STORE(dest, in[23]); + RECON_AND_STORE(dest, in[24]); + RECON_AND_STORE(dest, in[25]); + RECON_AND_STORE(dest, in[26]); + RECON_AND_STORE(dest, in[27]); + RECON_AND_STORE(dest, in[28]); + RECON_AND_STORE(dest, in[29]); + RECON_AND_STORE(dest, in[30]); + RECON_AND_STORE(dest, in[31]); dest += 8 - (stride * 32); } - } } //NOLINT void vp9_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) { diff --git a/libvpx/vp9/common/x86/vp9_loopfilter_intrin_avx2.c b/libvpx/vp9/common/x86/vp9_loopfilter_intrin_avx2.c index 3c5cb8f..439c028 100644 --- a/libvpx/vp9/common/x86/vp9_loopfilter_intrin_avx2.c +++ b/libvpx/vp9/common/x86/vp9_loopfilter_intrin_avx2.c @@ -933,7 +933,7 @@ static void mb_lpf_horizontal_edge_w_avx2_16(unsigned char *s, int p, } } -void vp9_mb_lpf_horizontal_edge_w_avx2(unsigned char *s, int p, 
+void vp9_lpf_horizontal_16_avx2(unsigned char *s, int p, const unsigned char *_blimit, const unsigned char *_limit, const unsigned char *_thresh, int count) { if (count == 1) diff --git a/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c b/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c index fa4dd9b..448ad5a 100644 --- a/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c +++ b/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include <emmintrin.h> /* SSE2 */ +#include <emmintrin.h> // SSE2 #include "vp9/common/vp9_loopfilter.h" #include "vpx_ports/emmintrin_compat.h" @@ -17,20 +17,14 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, const unsigned char *_blimit, const unsigned char *_limit, const unsigned char *_thresh) { - __m128i mask, hev, flat, flat2; const __m128i zero = _mm_set1_epi16(0); const __m128i one = _mm_set1_epi8(1); + const __m128i blimit = _mm_load_si128((const __m128i *)_blimit); + const __m128i limit = _mm_load_si128((const __m128i *)_limit); + const __m128i thresh = _mm_load_si128((const __m128i *)_thresh); + __m128i mask, hev, flat, flat2; __m128i q7p7, q6p6, q5p5, q4p4, q3p3, q2p2, q1p1, q0p0, p0q0, p1q1; __m128i abs_p1p0; - const unsigned int extended_thresh = _thresh[0] * 0x01010101u; - const unsigned int extended_limit = _limit[0] * 0x01010101u; - const unsigned int extended_blimit = _blimit[0] * 0x01010101u; - const __m128i thresh = - _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_thresh), 0); - const __m128i limit = - _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_limit), 0); - const __m128i blimit = - _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_blimit), 0); q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * p)); q4p4 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q4p4), @@ -105,7 +99,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - /* (vp9_filter + 3 * (qs0 - ps0)) & mask */ + // (vp9_filter + 3 * (qs0 - ps0)) & mask filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); @@ -116,11 +110,11 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, filter2 = _mm_unpacklo_epi8(zero, filter2); filter2 = _mm_srai_epi16(filter2, 0xB); - /* Filter1 >> 3 */ + // Filter1 >> 3 filt = _mm_packs_epi16(filter2, _mm_subs_epi16(zero, filter1)); qs0ps0 = _mm_xor_si128(_mm_adds_epi8(qs0ps0, filt), t80); - /* filt >> 1 */ + // filt >> 1 filt = _mm_adds_epi16(filter1, t1); filt = _mm_srai_epi16(filt, 1); filt = _mm_andnot_si128(_mm_srai_epi16(_mm_unpacklo_epi8(zero, hev), 0x8), @@ -375,32 +369,25 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, const unsigned char *_blimit, const unsigned char *_limit, const unsigned char *_thresh) { - DECLARE_ALIGNED(16, unsigned char, flat2_op[7][16]); - DECLARE_ALIGNED(16, unsigned char, flat2_oq[7][16]); - - DECLARE_ALIGNED(16, unsigned char, flat_op[3][16]); - DECLARE_ALIGNED(16, unsigned char, flat_oq[3][16]); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat2_op, 7 * 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat2_oq, 7 * 16); - DECLARE_ALIGNED(16, unsigned char, ap[8][16]); - DECLARE_ALIGNED(16, unsigned char, aq[8][16]); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op, 3 * 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq, 3 * 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, ap, 8 * 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, aq, 8 * 16); - __m128i 
mask, hev, flat, flat2; const __m128i zero = _mm_set1_epi16(0); const __m128i one = _mm_set1_epi8(1); + const __m128i blimit = _mm_load_si128((const __m128i *)_blimit); + const __m128i limit = _mm_load_si128((const __m128i *)_limit); + const __m128i thresh = _mm_load_si128((const __m128i *)_thresh); + __m128i mask, hev, flat, flat2; __m128i p7, p6, p5; __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; __m128i q5, q6, q7; int i = 0; - const unsigned int extended_thresh = _thresh[0] * 0x01010101u; - const unsigned int extended_limit = _limit[0] * 0x01010101u; - const unsigned int extended_blimit = _blimit[0] * 0x01010101u; - const __m128i thresh = - _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_thresh), 0); - const __m128i limit = - _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_limit), 0); - const __m128i blimit = - _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_blimit), 0); p4 = _mm_loadu_si128((__m128i *)(s - 5 * p)); p3 = _mm_loadu_si128((__m128i *)(s - 4 * p)); @@ -413,16 +400,16 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, q3 = _mm_loadu_si128((__m128i *)(s + 3 * p)); q4 = _mm_loadu_si128((__m128i *)(s + 4 * p)); - _mm_store_si128((__m128i *)ap[4], p4); - _mm_store_si128((__m128i *)ap[3], p3); - _mm_store_si128((__m128i *)ap[2], p2); - _mm_store_si128((__m128i *)ap[1], p1); - _mm_store_si128((__m128i *)ap[0], p0); - _mm_store_si128((__m128i *)aq[4], q4); - _mm_store_si128((__m128i *)aq[3], q3); - _mm_store_si128((__m128i *)aq[2], q2); - _mm_store_si128((__m128i *)aq[1], q1); - _mm_store_si128((__m128i *)aq[0], q0); + _mm_store_si128((__m128i *)&ap[4 * 16], p4); + _mm_store_si128((__m128i *)&ap[3 * 16], p3); + _mm_store_si128((__m128i *)&ap[2 * 16], p2); + _mm_store_si128((__m128i *)&ap[1 * 16], p1); + _mm_store_si128((__m128i *)&ap[0 * 16], p0); + _mm_store_si128((__m128i *)&aq[4 * 16], q4); + _mm_store_si128((__m128i *)&aq[3 * 16], q3); + _mm_store_si128((__m128i *)&aq[2 * 16], q2); + _mm_store_si128((__m128i *)&aq[1 * 16], q1); + _mm_store_si128((__m128i *)&aq[0 * 16], q0); { @@ -486,13 +473,13 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - /* (vp9_filter + 3 * (qs0 - ps0)) & mask */ + // (vp9_filter + 3 * (qs0 - ps0)) & mask filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); filter2 = _mm_adds_epi8(filt, t3); - /* Filter1 >> 3 */ + // Filter1 >> 3 work_a = _mm_cmpgt_epi8(zero, filter1); filter1 = _mm_srli_epi16(filter1, 3); work_a = _mm_and_si128(work_a, te0); @@ -500,7 +487,7 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, filter1 = _mm_or_si128(filter1, work_a); qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80); - /* Filter2 >> 3 */ + // Filter2 >> 3 work_a = _mm_cmpgt_epi8(zero, filter2); filter2 = _mm_srli_epi16(filter2, 3); work_a = _mm_and_si128(work_a, te0); @@ -508,7 +495,7 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, filter2 = _mm_or_si128(filter2, work_a); ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80); - /* filt >> 1 */ + // filt >> 1 filt = _mm_adds_epi8(filter1, t1); work_a = _mm_cmpgt_epi8(zero, filt); filt = _mm_srli_epi16(filt, 1); @@ -546,8 +533,8 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, _mm_subs_epu8(p0, p5)), _mm_or_si128(_mm_subs_epu8(q5, q0), _mm_subs_epu8(q0, q5))); - _mm_store_si128((__m128i *)ap[5], p5); - _mm_store_si128((__m128i *)aq[5], q5); + _mm_store_si128((__m128i *)&ap[5 * 16], p5); + 
_mm_store_si128((__m128i *)&aq[5 * 16], q5); flat2 = _mm_max_epu8(work, flat2); p6 = _mm_loadu_si128((__m128i *)(s - 7 * p)); q6 = _mm_loadu_si128((__m128i *)(s + 6 * p)); @@ -555,8 +542,8 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, _mm_subs_epu8(p0, p6)), _mm_or_si128(_mm_subs_epu8(q6, q0), _mm_subs_epu8(q0, q6))); - _mm_store_si128((__m128i *)ap[6], p6); - _mm_store_si128((__m128i *)aq[6], q6); + _mm_store_si128((__m128i *)&ap[6 * 16], p6); + _mm_store_si128((__m128i *)&aq[6 * 16], q6); flat2 = _mm_max_epu8(work, flat2); p7 = _mm_loadu_si128((__m128i *)(s - 8 * p)); @@ -565,8 +552,8 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, _mm_subs_epu8(p0, p7)), _mm_or_si128(_mm_subs_epu8(q7, q0), _mm_subs_epu8(q0, q7))); - _mm_store_si128((__m128i *)ap[7], p7); - _mm_store_si128((__m128i *)aq[7], q7); + _mm_store_si128((__m128i *)&ap[7 * 16], p7); + _mm_store_si128((__m128i *)&aq[7 * 16], q7); flat2 = _mm_max_epu8(work, flat2); flat2 = _mm_subs_epu8(flat2, one); flat2 = _mm_cmpeq_epi8(flat2, zero); @@ -586,22 +573,38 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, __m128i a, b, c; unsigned int off = i * 8; - p7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[7] + off)), zero); - p6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[6] + off)), zero); - p5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[5] + off)), zero); - p4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[4] + off)), zero); - p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[3] + off)), zero); - p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[2] + off)), zero); - p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[1] + off)), zero); - p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[0] + off)), zero); - q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[0] + off)), zero); - q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[1] + off)), zero); - q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[2] + off)), zero); - q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[3] + off)), zero); - q4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[4] + off)), zero); - q5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[5] + off)), zero); - q6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[6] + off)), zero); - q7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[7] + off)), zero); + p7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&ap[7 * 16] + off)), + zero); + p6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&ap[6 * 16] + off)), + zero); + p5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&ap[5 * 16] + off)), + zero); + p4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&ap[4 * 16] + off)), + zero); + p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&ap[3 * 16] + off)), + zero); + p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&ap[2 * 16] + off)), + zero); + p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&ap[1 * 16] + off)), + zero); + p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&ap[0 * 16] + off)), + zero); + q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&aq[0 * 16] + off)), + zero); + q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&aq[1 * 16] + off)), + zero); + q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&aq[2 * 16] + off)), + zero); + q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&aq[3 * 16] + off)), + zero); + q4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&aq[4 * 16] + off)), + zero); + q5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&aq[5 * 16] + off)), + zero); + q6 = 
_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&aq[6 * 16] + off)), + zero); + q7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(&aq[7 * 16] + off)), + zero); c = _mm_sub_epi16(_mm_slli_epi16(p7, 3), p7); // p7 * 7 c = _mm_add_epi16(_mm_slli_epi16(p6, 1), _mm_add_epi16(p4, c)); @@ -610,117 +613,117 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, a = _mm_add_epi16(p3, _mm_add_epi16(p2, p1)); a = _mm_add_epi16(_mm_add_epi16(p0, q0), a); - _mm_storel_epi64((__m128i *)&flat_op[2][i*8], + _mm_storel_epi64((__m128i *)&flat_op[2 * 16 + i * 8], _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3) , b)); c = _mm_add_epi16(_mm_add_epi16(p5, eight), c); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_op[6][i*8], + _mm_storel_epi64((__m128i *)&flat2_op[6 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q1, a); b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p3, p2)), p1); - _mm_storel_epi64((__m128i *)&flat_op[1][i*8], + _mm_storel_epi64((__m128i *)&flat_op[1 * 16 + i * 8], _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3) , b)); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p6)), p5); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_op[5][i*8], + _mm_storel_epi64((__m128i *)&flat2_op[5 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q2, a); b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p3, p1)), p0); - _mm_storel_epi64((__m128i *)&flat_op[0][i*8], + _mm_storel_epi64((__m128i *)&flat_op[i * 8], _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3) , b)); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p5)), p4); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_op[4][i*8], + _mm_storel_epi64((__m128i *)&flat2_op[4 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q3, a); b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p3, p0)), q0); - _mm_storel_epi64((__m128i *)&flat_oq[0][i*8], + _mm_storel_epi64((__m128i *)&flat_oq[i * 8], _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3) , b)); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p4)), p3); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_op[3][i*8], + _mm_storel_epi64((__m128i *)&flat2_op[3 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); b = _mm_add_epi16(q3, b); b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p2, q0)), q1); - _mm_storel_epi64((__m128i *)&flat_oq[1][i*8], + _mm_storel_epi64((__m128i *)&flat_oq[16 + i * 8], _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3) , b)); c = _mm_add_epi16(q4, c); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p3)), p2); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_op[2][i*8], + _mm_storel_epi64((__m128i *)&flat2_op[2 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); b = _mm_add_epi16(q3, b); b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p1, q1)), q2); - _mm_storel_epi64((__m128i *)&flat_oq[2][i*8], + _mm_storel_epi64((__m128i *)&flat_oq[2 * 16 + i * 8], _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3) , b)); a = _mm_add_epi16(q5, a); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p2)), p1); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_op[1][i*8], + _mm_storel_epi64((__m128i *)&flat2_op[16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q6, a); c = 
_mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p1)), p0); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_op[0][i*8], + _mm_storel_epi64((__m128i *)&flat2_op[i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q7, a); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p0)), q0); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_oq[0][i*8], + _mm_storel_epi64((__m128i *)&flat2_oq[i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q7, a); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p6, q0)), q1); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_oq[1][i*8], + _mm_storel_epi64((__m128i *)&flat2_oq[16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q7, a); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p5, q1)), q2); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_oq[2][i*8], + _mm_storel_epi64((__m128i *)&flat2_oq[2 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q7, a); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p4, q2)), q3); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_oq[3][i*8], + _mm_storel_epi64((__m128i *)&flat2_oq[3 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q7, a); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p3, q3)), q4); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_oq[4][i*8], + _mm_storel_epi64((__m128i *)&flat2_oq[4 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q7, a); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p2, q4)), q5); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_oq[5][i*8], + _mm_storel_epi64((__m128i *)&flat2_oq[5 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); a = _mm_add_epi16(q7, a); c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p1, q5)), q6); workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4); - _mm_storel_epi64((__m128i *)&flat2_oq[6][i*8], + _mm_storel_epi64((__m128i *)&flat2_oq[6 * 16 + i * 8], _mm_packus_epi16(workp_shft, workp_shft)); temp_flat2 = _mm_srli_si128(temp_flat2, 8); @@ -730,51 +733,51 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, // wide flat // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - work_a = _mm_load_si128((__m128i *)ap[2]); - p2 = _mm_load_si128((__m128i *)flat_op[2]); + work_a = _mm_load_si128((__m128i *)&ap[2 * 16]); + p2 = _mm_load_si128((__m128i *)&flat_op[2 * 16]); work_a = _mm_andnot_si128(flat, work_a); p2 = _mm_and_si128(flat, p2); p2 = _mm_or_si128(work_a, p2); - _mm_store_si128((__m128i *)flat_op[2], p2); + _mm_store_si128((__m128i *)&flat_op[2 * 16], p2); - p1 = _mm_load_si128((__m128i *)flat_op[1]); + p1 = _mm_load_si128((__m128i *)&flat_op[1 * 16]); work_a = _mm_andnot_si128(flat, ps1); p1 = _mm_and_si128(flat, p1); p1 = _mm_or_si128(work_a, p1); - _mm_store_si128((__m128i *)flat_op[1], p1); + _mm_store_si128((__m128i *)&flat_op[1 * 16], p1); - p0 = _mm_load_si128((__m128i *)flat_op[0]); + p0 = _mm_load_si128((__m128i *)&flat_op[0]); work_a = _mm_andnot_si128(flat, ps0); p0 = _mm_and_si128(flat, p0); p0 = _mm_or_si128(work_a, p0); - _mm_store_si128((__m128i *)flat_op[0], p0); + _mm_store_si128((__m128i *)&flat_op[0], p0); - q0 = _mm_load_si128((__m128i *)flat_oq[0]); + q0 = _mm_load_si128((__m128i *)&flat_oq[0]); work_a = 
_mm_andnot_si128(flat, qs0); q0 = _mm_and_si128(flat, q0); q0 = _mm_or_si128(work_a, q0); - _mm_store_si128((__m128i *)flat_oq[0], q0); + _mm_store_si128((__m128i *)&flat_oq[0], q0); - q1 = _mm_load_si128((__m128i *)flat_oq[1]); + q1 = _mm_load_si128((__m128i *)&flat_oq[1 * 16]); work_a = _mm_andnot_si128(flat, qs1); q1 = _mm_and_si128(flat, q1); q1 = _mm_or_si128(work_a, q1); - _mm_store_si128((__m128i *)flat_oq[1], q1); + _mm_store_si128((__m128i *)&flat_oq[1 * 16], q1); - work_a = _mm_load_si128((__m128i *)aq[2]); - q2 = _mm_load_si128((__m128i *)flat_oq[2]); + work_a = _mm_load_si128((__m128i *)&aq[2 * 16]); + q2 = _mm_load_si128((__m128i *)&flat_oq[2 * 16]); work_a = _mm_andnot_si128(flat, work_a); q2 = _mm_and_si128(flat, q2); q2 = _mm_or_si128(work_a, q2); - _mm_store_si128((__m128i *)flat_oq[2], q2); + _mm_store_si128((__m128i *)&flat_oq[2 * 16], q2); // write out op6 - op3 { unsigned char *dst = (s - 7 * p); for (i = 6; i > 2; i--) { __m128i flat2_output; - work_a = _mm_load_si128((__m128i *)ap[i]); - flat2_output = _mm_load_si128((__m128i *)flat2_op[i]); + work_a = _mm_load_si128((__m128i *)&ap[i * 16]); + flat2_output = _mm_load_si128((__m128i *)&flat2_op[i * 16]); work_a = _mm_andnot_si128(flat2, work_a); flat2_output = _mm_and_si128(flat2, flat2_output); work_a = _mm_or_si128(work_a, flat2_output); @@ -783,43 +786,43 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, } } - work_a = _mm_load_si128((__m128i *)flat_op[2]); - p2 = _mm_load_si128((__m128i *)flat2_op[2]); + work_a = _mm_load_si128((__m128i *)&flat_op[2 * 16]); + p2 = _mm_load_si128((__m128i *)&flat2_op[2 * 16]); work_a = _mm_andnot_si128(flat2, work_a); p2 = _mm_and_si128(flat2, p2); p2 = _mm_or_si128(work_a, p2); _mm_storeu_si128((__m128i *)(s - 3 * p), p2); - work_a = _mm_load_si128((__m128i *)flat_op[1]); - p1 = _mm_load_si128((__m128i *)flat2_op[1]); + work_a = _mm_load_si128((__m128i *)&flat_op[1 * 16]); + p1 = _mm_load_si128((__m128i *)&flat2_op[1 * 16]); work_a = _mm_andnot_si128(flat2, work_a); p1 = _mm_and_si128(flat2, p1); p1 = _mm_or_si128(work_a, p1); _mm_storeu_si128((__m128i *)(s - 2 * p), p1); - work_a = _mm_load_si128((__m128i *)flat_op[0]); - p0 = _mm_load_si128((__m128i *)flat2_op[0]); + work_a = _mm_load_si128((__m128i *)&flat_op[0]); + p0 = _mm_load_si128((__m128i *)&flat2_op[0]); work_a = _mm_andnot_si128(flat2, work_a); p0 = _mm_and_si128(flat2, p0); p0 = _mm_or_si128(work_a, p0); _mm_storeu_si128((__m128i *)(s - 1 * p), p0); - work_a = _mm_load_si128((__m128i *)flat_oq[0]); - q0 = _mm_load_si128((__m128i *)flat2_oq[0]); + work_a = _mm_load_si128((__m128i *)&flat_oq[0]); + q0 = _mm_load_si128((__m128i *)&flat2_oq[0]); work_a = _mm_andnot_si128(flat2, work_a); q0 = _mm_and_si128(flat2, q0); q0 = _mm_or_si128(work_a, q0); _mm_storeu_si128((__m128i *)(s - 0 * p), q0); - work_a = _mm_load_si128((__m128i *)flat_oq[1]); - q1 = _mm_load_si128((__m128i *)flat2_oq[1]); + work_a = _mm_load_si128((__m128i *)&flat_oq[1 * 16]); + q1 = _mm_load_si128((__m128i *)&flat2_oq[16]); work_a = _mm_andnot_si128(flat2, work_a); q1 = _mm_and_si128(flat2, q1); q1 = _mm_or_si128(work_a, q1); _mm_storeu_si128((__m128i *)(s + 1 * p), q1); - work_a = _mm_load_si128((__m128i *)flat_oq[2]); - q2 = _mm_load_si128((__m128i *)flat2_oq[2]); + work_a = _mm_load_si128((__m128i *)&flat_oq[2 * 16]); + q2 = _mm_load_si128((__m128i *)&flat2_oq[2 * 16]); work_a = _mm_andnot_si128(flat2, work_a); q2 = _mm_and_si128(flat2, q2); q2 = _mm_or_si128(work_a, q2); @@ -830,8 +833,8 @@ static void 
mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, unsigned char *dst = (s + 3 * p); for (i = 3; i < 7; i++) { __m128i flat2_output; - work_a = _mm_load_si128((__m128i *)aq[i]); - flat2_output = _mm_load_si128((__m128i *)flat2_oq[i]); + work_a = _mm_load_si128((__m128i *)&aq[i * 16]); + flat2_output = _mm_load_si128((__m128i *)&flat2_oq[i * 16]); work_a = _mm_andnot_si128(flat2, work_a); flat2_output = _mm_and_si128(flat2, flat2_output); work_a = _mm_or_si128(work_a, flat2_output); @@ -842,52 +845,275 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, } } -void vp9_mb_lpf_horizontal_edge_w_sse2(unsigned char *s, - int p, - const unsigned char *_blimit, - const unsigned char *_limit, - const unsigned char *_thresh, - int count) { +// TODO(yunqingwang): remove count and call these 2 functions(8 or 16) directly. +void vp9_lpf_horizontal_16_sse2(unsigned char *s, int p, + const unsigned char *_blimit, + const unsigned char *_limit, + const unsigned char *_thresh, int count) { if (count == 1) mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, _thresh); else mb_lpf_horizontal_edge_w_sse2_16(s, p, _blimit, _limit, _thresh); } -void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s, - int p, - const unsigned char *_blimit, - const unsigned char *_limit, - const unsigned char *_thresh, - int count) { - DECLARE_ALIGNED(16, unsigned char, flat_op2[16]); - DECLARE_ALIGNED(16, unsigned char, flat_op1[16]); - DECLARE_ALIGNED(16, unsigned char, flat_op0[16]); - DECLARE_ALIGNED(16, unsigned char, flat_oq2[16]); - DECLARE_ALIGNED(16, unsigned char, flat_oq1[16]); - DECLARE_ALIGNED(16, unsigned char, flat_oq0[16]); - __m128i mask, hev, flat; +void vp9_lpf_horizontal_8_sse2(unsigned char *s, int p, + const unsigned char *_blimit, + const unsigned char *_limit, + const unsigned char *_thresh, int count) { + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op2, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op1, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op0, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq2, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq1, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq0, 16); const __m128i zero = _mm_set1_epi16(0); + const __m128i blimit = _mm_load_si128((const __m128i *)_blimit); + const __m128i limit = _mm_load_si128((const __m128i *)_limit); + const __m128i thresh = _mm_load_si128((const __m128i *)_thresh); + __m128i mask, hev, flat; __m128i p3, p2, p1, p0, q0, q1, q2, q3; - const unsigned int extended_thresh = _thresh[0] * 0x01010101u; - const unsigned int extended_limit = _limit[0] * 0x01010101u; - const unsigned int extended_blimit = _blimit[0] * 0x01010101u; - const __m128i thresh = - _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_thresh), 0); - const __m128i limit = - _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_limit), 0); - const __m128i blimit = - _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_blimit), 0); + __m128i q3p3, q2p2, q1p1, q0p0, p1q1, p0q0; (void)count; - p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p)); - p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p)); - p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p)); - p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p)); - q0 = _mm_loadl_epi64((__m128i *)(s - 0 * p)); - q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p)); - q2 = _mm_loadl_epi64((__m128i *)(s + 2 * p)); - q3 = _mm_loadl_epi64((__m128i *)(s + 3 * p)); + + q3p3 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 4 * p)), + _mm_loadl_epi64((__m128i *)(s + 3 * p))); + q2p2 = 
_mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 3 * p)), + _mm_loadl_epi64((__m128i *)(s + 2 * p))); + q1p1 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 2 * p)), + _mm_loadl_epi64((__m128i *)(s + 1 * p))); + q0p0 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 1 * p)), + _mm_loadl_epi64((__m128i *)(s - 0 * p))); + p1q1 = _mm_shuffle_epi32(q1p1, 78); + p0q0 = _mm_shuffle_epi32(q0p0, 78); + + { + // filter_mask and hev_mask + const __m128i one = _mm_set1_epi8(1); + const __m128i fe = _mm_set1_epi8(0xfe); + const __m128i ff = _mm_cmpeq_epi8(fe, fe); + __m128i abs_p1q1, abs_p0q0, abs_q1q0, abs_p1p0, work; + abs_p1p0 = _mm_or_si128(_mm_subs_epu8(q1p1, q0p0), + _mm_subs_epu8(q0p0, q1p1)); + abs_q1q0 = _mm_srli_si128(abs_p1p0, 8); + + abs_p0q0 = _mm_or_si128(_mm_subs_epu8(q0p0, p0q0), + _mm_subs_epu8(p0q0, q0p0)); + abs_p1q1 = _mm_or_si128(_mm_subs_epu8(q1p1, p1q1), + _mm_subs_epu8(p1q1, q1p1)); + flat = _mm_max_epu8(abs_p1p0, abs_q1q0); + hev = _mm_subs_epu8(flat, thresh); + hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); + + abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0); + abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); + mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); + mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); + // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; + mask = _mm_max_epu8(abs_p1p0, mask); + // mask |= (abs(p1 - p0) > limit) * -1; + // mask |= (abs(q1 - q0) > limit) * -1; + + work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2p2, q1p1), + _mm_subs_epu8(q1p1, q2p2)), + _mm_or_si128(_mm_subs_epu8(q3p3, q2p2), + _mm_subs_epu8(q2p2, q3p3))); + mask = _mm_max_epu8(work, mask); + mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8)); + mask = _mm_subs_epu8(mask, limit); + mask = _mm_cmpeq_epi8(mask, zero); + + // flat_mask4 + + flat = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2p2, q0p0), + _mm_subs_epu8(q0p0, q2p2)), + _mm_or_si128(_mm_subs_epu8(q3p3, q0p0), + _mm_subs_epu8(q0p0, q3p3))); + flat = _mm_max_epu8(abs_p1p0, flat); + flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8)); + flat = _mm_subs_epu8(flat, one); + flat = _mm_cmpeq_epi8(flat, zero); + flat = _mm_and_si128(flat, mask); + } + + { + const __m128i four = _mm_set1_epi16(4); + unsigned char *src = s; + { + __m128i workp_a, workp_b, workp_shft; + p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * p)), zero); + p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * p)), zero); + p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 2 * p)), zero); + p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 1 * p)), zero); + q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 0 * p)), zero); + q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 1 * p)), zero); + q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 2 * p)), zero); + q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 3 * p)), zero); + + workp_a = _mm_add_epi16(_mm_add_epi16(p3, p3), _mm_add_epi16(p2, p1)); + workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), p0); + workp_b = _mm_add_epi16(_mm_add_epi16(q0, p2), p3); + workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); + _mm_storel_epi64((__m128i *)&flat_op2[0], + _mm_packus_epi16(workp_shft, workp_shft)); + + workp_b = _mm_add_epi16(_mm_add_epi16(q0, q1), p1); + workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); + _mm_storel_epi64((__m128i *)&flat_op1[0], + _mm_packus_epi16(workp_shft, workp_shft)); + + workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q2); + workp_b = 
_mm_add_epi16(_mm_sub_epi16(workp_b, p1), p0); + workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); + _mm_storel_epi64((__m128i *)&flat_op0[0], + _mm_packus_epi16(workp_shft, workp_shft)); + + workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q3); + workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p0), q0); + workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); + _mm_storel_epi64((__m128i *)&flat_oq0[0], + _mm_packus_epi16(workp_shft, workp_shft)); + + workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p2), q3); + workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q0), q1); + workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); + _mm_storel_epi64((__m128i *)&flat_oq1[0], + _mm_packus_epi16(workp_shft, workp_shft)); + + workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p1), q3); + workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q1), q2); + workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); + _mm_storel_epi64((__m128i *)&flat_oq2[0], + _mm_packus_epi16(workp_shft, workp_shft)); + } + } + // lp filter + { + const __m128i t4 = _mm_set1_epi8(4); + const __m128i t3 = _mm_set1_epi8(3); + const __m128i t80 = _mm_set1_epi8(0x80); + const __m128i t1 = _mm_set1_epi8(0x1); + const __m128i ps1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * p)), + t80); + const __m128i ps0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * p)), + t80); + const __m128i qs0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * p)), + t80); + const __m128i qs1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * p)), + t80); + __m128i filt; + __m128i work_a; + __m128i filter1, filter2; + + filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev); + work_a = _mm_subs_epi8(qs0, ps0); + filt = _mm_adds_epi8(filt, work_a); + filt = _mm_adds_epi8(filt, work_a); + filt = _mm_adds_epi8(filt, work_a); + // (vp9_filter + 3 * (qs0 - ps0)) & mask + filt = _mm_and_si128(filt, mask); + + filter1 = _mm_adds_epi8(filt, t4); + filter2 = _mm_adds_epi8(filt, t3); + + // Filter1 >> 3 + filter1 = _mm_unpacklo_epi8(zero, filter1); + filter1 = _mm_srai_epi16(filter1, 11); + filter1 = _mm_packs_epi16(filter1, filter1); + + // Filter2 >> 3 + filter2 = _mm_unpacklo_epi8(zero, filter2); + filter2 = _mm_srai_epi16(filter2, 11); + filter2 = _mm_packs_epi16(filter2, zero); + + // filt >> 1 + filt = _mm_adds_epi8(filter1, t1); + filt = _mm_unpacklo_epi8(zero, filt); + filt = _mm_srai_epi16(filt, 9); + filt = _mm_packs_epi16(filt, zero); + + filt = _mm_andnot_si128(hev, filt); + + work_a = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80); + q0 = _mm_loadl_epi64((__m128i *)flat_oq0); + work_a = _mm_andnot_si128(flat, work_a); + q0 = _mm_and_si128(flat, q0); + q0 = _mm_or_si128(work_a, q0); + + work_a = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80); + q1 = _mm_loadl_epi64((__m128i *)flat_oq1); + work_a = _mm_andnot_si128(flat, work_a); + q1 = _mm_and_si128(flat, q1); + q1 = _mm_or_si128(work_a, q1); + + work_a = _mm_loadu_si128((__m128i *)(s + 2 * p)); + q2 = _mm_loadl_epi64((__m128i *)flat_oq2); + work_a = _mm_andnot_si128(flat, work_a); + q2 = _mm_and_si128(flat, q2); + q2 = _mm_or_si128(work_a, q2); + + work_a = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80); + p0 = _mm_loadl_epi64((__m128i *)flat_op0); + work_a = _mm_andnot_si128(flat, work_a); + p0 = _mm_and_si128(flat, p0); + p0 = _mm_or_si128(work_a, p0); + + work_a = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80); + p1 = _mm_loadl_epi64((__m128i *)flat_op1); + work_a = _mm_andnot_si128(flat, work_a); + p1 = _mm_and_si128(flat, p1); + p1 = _mm_or_si128(work_a, p1); 
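
[Editor's note: the andnot/and/or triples above — and throughout these loop filters — are the standard branchless byte select. A minimal sketch of the idiom, with a hypothetical helper name:

    #include <emmintrin.h>

    /* Sketch: per-byte select as used by the flat/flat2 blends.
     * mask bytes are 0x00 or 0xFF (the output of a compare), so
     * (mask & a) | (~mask & b) picks a where the mask is set and
     * b elsewhere, with no branches. */
    static __m128i select_epi8(__m128i mask, __m128i a, __m128i b) {
      return _mm_or_si128(_mm_and_si128(mask, a),
                          _mm_andnot_si128(mask, b));
    }
]
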
+ + work_a = _mm_loadu_si128((__m128i *)(s - 3 * p)); + p2 = _mm_loadl_epi64((__m128i *)flat_op2); + work_a = _mm_andnot_si128(flat, work_a); + p2 = _mm_and_si128(flat, p2); + p2 = _mm_or_si128(work_a, p2); + + _mm_storel_epi64((__m128i *)(s - 3 * p), p2); + _mm_storel_epi64((__m128i *)(s - 2 * p), p1); + _mm_storel_epi64((__m128i *)(s - 1 * p), p0); + _mm_storel_epi64((__m128i *)(s + 0 * p), q0); + _mm_storel_epi64((__m128i *)(s + 1 * p), q1); + _mm_storel_epi64((__m128i *)(s + 2 * p), q2); + } +} + +void vp9_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, + const uint8_t *_blimit0, + const uint8_t *_limit0, + const uint8_t *_thresh0, + const uint8_t *_blimit1, + const uint8_t *_limit1, + const uint8_t *_thresh1) { + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op2, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op1, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op0, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq2, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq1, 16); + DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq0, 16); + const __m128i zero = _mm_set1_epi16(0); + const __m128i blimit = + _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_blimit0), + _mm_load_si128((const __m128i *)_blimit1)); + const __m128i limit = + _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_limit0), + _mm_load_si128((const __m128i *)_limit1)); + const __m128i thresh = + _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_thresh0), + _mm_load_si128((const __m128i *)_thresh1)); + + __m128i mask, hev, flat; + __m128i p3, p2, p1, p0, q0, q1, q2, q3; + + p3 = _mm_loadu_si128((__m128i *)(s - 4 * p)); + p2 = _mm_loadu_si128((__m128i *)(s - 3 * p)); + p1 = _mm_loadu_si128((__m128i *)(s - 2 * p)); + p0 = _mm_loadu_si128((__m128i *)(s - 1 * p)); + q0 = _mm_loadu_si128((__m128i *)(s - 0 * p)); + q1 = _mm_loadu_si128((__m128i *)(s + 1 * p)); + q2 = _mm_loadu_si128((__m128i *)(s + 2 * p)); + q3 = _mm_loadu_si128((__m128i *)(s + 3 * p)); { const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1)); @@ -901,6 +1127,8 @@ void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s, __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1)); __m128i work; + + // filter_mask and hev_mask flat = _mm_max_epu8(abs_p1p0, abs_q1q0); hev = _mm_subs_epu8(flat, thresh); hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); @@ -926,6 +1154,7 @@ void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s, mask = _mm_subs_epu8(mask, limit); mask = _mm_cmpeq_epi8(mask, zero); + // flat_mask4 work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)), _mm_or_si128(_mm_subs_epu8(q2, q0), @@ -943,7 +1172,9 @@ void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s, { const __m128i four = _mm_set1_epi16(4); unsigned char *src = s; - { + int i = 0; + + do { __m128i workp_a, workp_b, workp_shft; p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * p)), zero); p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * p)), zero); @@ -958,38 +1189,40 @@ void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s, workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), p0); workp_b = _mm_add_epi16(_mm_add_epi16(q0, p2), p3); workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); - _mm_storel_epi64((__m128i *)&flat_op2[0], + _mm_storel_epi64((__m128i *)&flat_op2[i * 8], _mm_packus_epi16(workp_shft, workp_shft)); workp_b = _mm_add_epi16(_mm_add_epi16(q0, q1), p1); workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); - 
_mm_storel_epi64((__m128i *)&flat_op1[0], + _mm_storel_epi64((__m128i *)&flat_op1[i * 8], _mm_packus_epi16(workp_shft, workp_shft)); workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q2); workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p1), p0); workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); - _mm_storel_epi64((__m128i *)&flat_op0[0], + _mm_storel_epi64((__m128i *)&flat_op0[i * 8], _mm_packus_epi16(workp_shft, workp_shft)); workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q3); workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p0), q0); workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); - _mm_storel_epi64((__m128i *)&flat_oq0[0], + _mm_storel_epi64((__m128i *)&flat_oq0[i * 8], _mm_packus_epi16(workp_shft, workp_shft)); workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p2), q3); workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q0), q1); workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); - _mm_storel_epi64((__m128i *)&flat_oq1[0], + _mm_storel_epi64((__m128i *)&flat_oq1[i * 8], _mm_packus_epi16(workp_shft, workp_shft)); workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p1), q3); workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q1), q2); workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3); - _mm_storel_epi64((__m128i *)&flat_oq2[0], + _mm_storel_epi64((__m128i *)&flat_oq2[i * 8], _mm_packus_epi16(workp_shft, workp_shft)); - } + + src += 8; + } while (++i < 2); } // lp filter { @@ -1001,13 +1234,13 @@ void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s, const __m128i t1 = _mm_set1_epi8(0x1); const __m128i t7f = _mm_set1_epi8(0x7f); - const __m128i ps1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * p)), + const __m128i ps1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80); - const __m128i ps0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * p)), + const __m128i ps0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80); - const __m128i qs0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * p)), + const __m128i qs0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80); - const __m128i qs1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * p)), + const __m128i qs1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80); __m128i filt; __m128i work_a; @@ -1018,27 +1251,27 @@ void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - /* (vp9_filter + 3 * (qs0 - ps0)) & mask */ + // (vp9_filter + 3 * (qs0 - ps0)) & mask filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); filter2 = _mm_adds_epi8(filt, t3); - /* Filter1 >> 3 */ + // Filter1 >> 3 work_a = _mm_cmpgt_epi8(zero, filter1); filter1 = _mm_srli_epi16(filter1, 3); work_a = _mm_and_si128(work_a, te0); filter1 = _mm_and_si128(filter1, t1f); filter1 = _mm_or_si128(filter1, work_a); - /* Filter2 >> 3 */ + // Filter2 >> 3 work_a = _mm_cmpgt_epi8(zero, filter2); filter2 = _mm_srli_epi16(filter2, 3); work_a = _mm_and_si128(work_a, te0); filter2 = _mm_and_si128(filter2, t1f); filter2 = _mm_or_si128(filter2, work_a); - /* filt >> 1 */ + // filt >> 1 filt = _mm_adds_epi8(filter1, t1); work_a = _mm_cmpgt_epi8(zero, filt); filt = _mm_srli_epi16(filt, 1); @@ -1049,47 +1282,185 @@ void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s, filt = _mm_andnot_si128(hev, filt); work_a = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80); - q0 = _mm_loadl_epi64((__m128i *)flat_oq0); + q0 = _mm_load_si128((__m128i *)flat_oq0); work_a = 
_mm_andnot_si128(flat, work_a); q0 = _mm_and_si128(flat, q0); q0 = _mm_or_si128(work_a, q0); work_a = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80); - q1 = _mm_loadl_epi64((__m128i *)flat_oq1); + q1 = _mm_load_si128((__m128i *)flat_oq1); work_a = _mm_andnot_si128(flat, work_a); q1 = _mm_and_si128(flat, q1); q1 = _mm_or_si128(work_a, q1); work_a = _mm_loadu_si128((__m128i *)(s + 2 * p)); - q2 = _mm_loadl_epi64((__m128i *)flat_oq2); + q2 = _mm_load_si128((__m128i *)flat_oq2); work_a = _mm_andnot_si128(flat, work_a); q2 = _mm_and_si128(flat, q2); q2 = _mm_or_si128(work_a, q2); work_a = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80); - p0 = _mm_loadl_epi64((__m128i *)flat_op0); + p0 = _mm_load_si128((__m128i *)flat_op0); work_a = _mm_andnot_si128(flat, work_a); p0 = _mm_and_si128(flat, p0); p0 = _mm_or_si128(work_a, p0); work_a = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80); - p1 = _mm_loadl_epi64((__m128i *)flat_op1); + p1 = _mm_load_si128((__m128i *)flat_op1); work_a = _mm_andnot_si128(flat, work_a); p1 = _mm_and_si128(flat, p1); p1 = _mm_or_si128(work_a, p1); work_a = _mm_loadu_si128((__m128i *)(s - 3 * p)); - p2 = _mm_loadl_epi64((__m128i *)flat_op2); + p2 = _mm_load_si128((__m128i *)flat_op2); work_a = _mm_andnot_si128(flat, work_a); p2 = _mm_and_si128(flat, p2); p2 = _mm_or_si128(work_a, p2); - _mm_storel_epi64((__m128i *)(s - 3 * p), p2); - _mm_storel_epi64((__m128i *)(s - 2 * p), p1); - _mm_storel_epi64((__m128i *)(s - 1 * p), p0); - _mm_storel_epi64((__m128i *)(s + 0 * p), q0); - _mm_storel_epi64((__m128i *)(s + 1 * p), q1); - _mm_storel_epi64((__m128i *)(s + 2 * p), q2); + _mm_storeu_si128((__m128i *)(s - 3 * p), p2); + _mm_storeu_si128((__m128i *)(s - 2 * p), p1); + _mm_storeu_si128((__m128i *)(s - 1 * p), p0); + _mm_storeu_si128((__m128i *)(s + 0 * p), q0); + _mm_storeu_si128((__m128i *)(s + 1 * p), q1); + _mm_storeu_si128((__m128i *)(s + 2 * p), q2); + } +} + +void vp9_lpf_horizontal_4_dual_sse2(unsigned char *s, int p, + const unsigned char *_blimit0, + const unsigned char *_limit0, + const unsigned char *_thresh0, + const unsigned char *_blimit1, + const unsigned char *_limit1, + const unsigned char *_thresh1) { + const __m128i blimit = + _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_blimit0), + _mm_load_si128((const __m128i *)_blimit1)); + const __m128i limit = + _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_limit0), + _mm_load_si128((const __m128i *)_limit1)); + const __m128i thresh = + _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_thresh0), + _mm_load_si128((const __m128i *)_thresh1)); + const __m128i zero = _mm_set1_epi16(0); + __m128i p3, p2, p1, p0, q0, q1, q2, q3; + __m128i mask, hev, flat; + + p3 = _mm_loadu_si128((__m128i *)(s - 4 * p)); + p2 = _mm_loadu_si128((__m128i *)(s - 3 * p)); + p1 = _mm_loadu_si128((__m128i *)(s - 2 * p)); + p0 = _mm_loadu_si128((__m128i *)(s - 1 * p)); + q0 = _mm_loadu_si128((__m128i *)(s - 0 * p)); + q1 = _mm_loadu_si128((__m128i *)(s + 1 * p)); + q2 = _mm_loadu_si128((__m128i *)(s + 2 * p)); + q3 = _mm_loadu_si128((__m128i *)(s + 3 * p)); + + // filter_mask and hev_mask + { + const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0), + _mm_subs_epu8(p0, p1)); + const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0), + _mm_subs_epu8(q0, q1)); + const __m128i fe = _mm_set1_epi8(0xfe); + const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0); + __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0), + _mm_subs_epu8(q0, p0)); + __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1), + _mm_subs_epu8(q1, p1)); + __m128i work; 
+ + flat = _mm_max_epu8(abs_p1p0, abs_q1q0); + hev = _mm_subs_epu8(flat, thresh); + hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); + + abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0); + abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); + mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); + mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); + // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; + mask = _mm_max_epu8(flat, mask); + // mask |= (abs(p1 - p0) > limit) * -1; + // mask |= (abs(q1 - q0) > limit) * -1; + work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1), + _mm_subs_epu8(p1, p2)), + _mm_or_si128(_mm_subs_epu8(p3, p2), + _mm_subs_epu8(p2, p3))); + mask = _mm_max_epu8(work, mask); + work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1), + _mm_subs_epu8(q1, q2)), + _mm_or_si128(_mm_subs_epu8(q3, q2), + _mm_subs_epu8(q2, q3))); + mask = _mm_max_epu8(work, mask); + mask = _mm_subs_epu8(mask, limit); + mask = _mm_cmpeq_epi8(mask, zero); + } + + // filter4 + { + const __m128i t4 = _mm_set1_epi8(4); + const __m128i t3 = _mm_set1_epi8(3); + const __m128i t80 = _mm_set1_epi8(0x80); + const __m128i te0 = _mm_set1_epi8(0xe0); + const __m128i t1f = _mm_set1_epi8(0x1f); + const __m128i t1 = _mm_set1_epi8(0x1); + const __m128i t7f = _mm_set1_epi8(0x7f); + + const __m128i ps1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), + t80); + const __m128i ps0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), + t80); + const __m128i qs0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), + t80); + const __m128i qs1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), + t80); + __m128i filt; + __m128i work_a; + __m128i filter1, filter2; + + filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev); + work_a = _mm_subs_epi8(qs0, ps0); + filt = _mm_adds_epi8(filt, work_a); + filt = _mm_adds_epi8(filt, work_a); + filt = _mm_adds_epi8(filt, work_a); + // (vp9_filter + 3 * (qs0 - ps0)) & mask + filt = _mm_and_si128(filt, mask); + + filter1 = _mm_adds_epi8(filt, t4); + filter2 = _mm_adds_epi8(filt, t3); + + // Filter1 >> 3 + work_a = _mm_cmpgt_epi8(zero, filter1); + filter1 = _mm_srli_epi16(filter1, 3); + work_a = _mm_and_si128(work_a, te0); + filter1 = _mm_and_si128(filter1, t1f); + filter1 = _mm_or_si128(filter1, work_a); + + // Filter2 >> 3 + work_a = _mm_cmpgt_epi8(zero, filter2); + filter2 = _mm_srli_epi16(filter2, 3); + work_a = _mm_and_si128(work_a, te0); + filter2 = _mm_and_si128(filter2, t1f); + filter2 = _mm_or_si128(filter2, work_a); + + // filt >> 1 + filt = _mm_adds_epi8(filter1, t1); + work_a = _mm_cmpgt_epi8(zero, filt); + filt = _mm_srli_epi16(filt, 1); + work_a = _mm_and_si128(work_a, t80); + filt = _mm_and_si128(filt, t7f); + filt = _mm_or_si128(filt, work_a); + + filt = _mm_andnot_si128(hev, filt); + + q0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80); + q1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80); + p0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80); + p1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80); + + _mm_storeu_si128((__m128i *)(s - 2 * p), p1); + _mm_storeu_si128((__m128i *)(s - 1 * p), p0); + _mm_storeu_si128((__m128i *)(s + 0 * p), q0); + _mm_storeu_si128((__m128i *)(s + 1 * p), q1); } } @@ -1098,7 +1469,7 @@ static INLINE void transpose8x16(unsigned char *in0, unsigned char *in1, __m128i x0, x1, x2, x3, x4, x5, x6, x7; __m128i x8, x9, x10, x11, x12, x13, x14, x15; - /* Read in 16 lines */ + // Read in 16 lines x0 = _mm_loadl_epi64((__m128i *)in0); x8 = _mm_loadl_epi64((__m128i *)in1); x1 = _mm_loadl_epi64((__m128i 
*)(in0 + in_p)); @@ -1136,7 +1507,7 @@ static INLINE void transpose8x16(unsigned char *in0, unsigned char *in1, x14 = _mm_unpacklo_epi32(x12, x13); x15 = _mm_unpackhi_epi32(x12, x13); - /* Store first 4-line result */ + // Store first 4-line result _mm_storeu_si128((__m128i *)out, _mm_unpacklo_epi64(x6, x14)); _mm_storeu_si128((__m128i *)(out + out_p), _mm_unpackhi_epi64(x6, x14)); _mm_storeu_si128((__m128i *)(out + 2 * out_p), _mm_unpacklo_epi64(x7, x15)); @@ -1152,7 +1523,7 @@ static INLINE void transpose8x16(unsigned char *in0, unsigned char *in1, x14 = _mm_unpacklo_epi32(x12, x13); x15 = _mm_unpackhi_epi32(x12, x13); - /* Store second 4-line result */ + // Store second 4-line result _mm_storeu_si128((__m128i *)(out + 4 * out_p), _mm_unpacklo_epi64(x6, x14)); _mm_storeu_si128((__m128i *)(out + 5 * out_p), _mm_unpackhi_epi64(x6, x14)); _mm_storeu_si128((__m128i *)(out + 6 * out_p), _mm_unpacklo_epi64(x7, x15)); @@ -1222,61 +1593,124 @@ static INLINE void transpose(unsigned char *src[], int in_p, } while (++idx8x8 < num_8x8_to_transpose); } -void vp9_mbloop_filter_vertical_edge_sse2(unsigned char *s, - int p, - const unsigned char *blimit, - const unsigned char *limit, - const unsigned char *thresh, - int count) { - DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256); +void vp9_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 16 * 8); unsigned char *src[2]; unsigned char *dst[2]; - (void)count; - /* Transpose 16x16 */ - transpose8x16(s - 8, s - 8 + p * 8, p, t_dst, 16); - transpose8x16(s, s + p * 8, p, t_dst + 16 * 8, 16); - - /* Loop filtering */ - vp9_mbloop_filter_horizontal_edge_sse2(t_dst + 8 * 16, 16, blimit, limit, - thresh, 1); - src[0] = t_dst + 3 * 16; - src[1] = t_dst + 3 * 16 + 8; + // Transpose 8x16 + transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16); - dst[0] = s - 5; - dst[1] = s - 5 + p * 8; + // Loop filtering + vp9_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0, + blimit1, limit1, thresh1); + src[0] = t_dst; + src[1] = t_dst + 8; + dst[0] = s - 4; + dst[1] = s - 4 + p * 8; - /* Transpose 16x8 */ + // Transpose back transpose(src, 16, dst, p, 2); } -void vp9_mb_lpf_vertical_edge_w_sse2(unsigned char *s, - int p, - const unsigned char *blimit, - const unsigned char *limit, - const unsigned char *thresh) { - DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256); - unsigned char *src[4]; - unsigned char *dst[4]; +void vp9_lpf_vertical_8_sse2(unsigned char *s, int p, + const unsigned char *blimit, + const unsigned char *limit, + const unsigned char *thresh, int count) { + DECLARE_ALIGNED_ARRAY(8, unsigned char, t_dst, 8 * 8); + unsigned char *src[1]; + unsigned char *dst[1]; + (void)count; + // Transpose 8x8 + src[0] = s - 4; dst[0] = t_dst; - dst[1] = t_dst + 8 * 16; - src[0] = s - 8; - src[1] = s - 8 + 8; + transpose(src, p, dst, 8, 1); + + // Loop filtering + vp9_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1); + + src[0] = t_dst; + dst[0] = s - 4; - /* Transpose 16x16 */ - transpose(src, p, dst, 16, 2); + // Transpose back + transpose(src, 8, dst, p, 1); +} + +void vp9_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 16 * 8); + unsigned 
char *src[2]; + unsigned char *dst[2]; - /* Loop filtering */ - vp9_mb_lpf_horizontal_edge_w_sse2(t_dst + 8 * 16, 16, blimit, limit, - thresh, 1); + // Transpose 8x16 + transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16); + // Loop filtering + vp9_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0, + blimit1, limit1, thresh1); src[0] = t_dst; - src[1] = t_dst + 8 * 16; + src[1] = t_dst + 8; - dst[0] = s - 8; - dst[1] = s - 8 + 8; + dst[0] = s - 4; + dst[1] = s - 4 + p * 8; + // Transpose back transpose(src, 16, dst, p, 2); } + +void vp9_lpf_vertical_16_sse2(unsigned char *s, int p, + const unsigned char *blimit, + const unsigned char *limit, + const unsigned char *thresh) { + DECLARE_ALIGNED_ARRAY(8, unsigned char, t_dst, 8 * 16); + unsigned char *src[2]; + unsigned char *dst[2]; + + src[0] = s - 8; + src[1] = s; + dst[0] = t_dst; + dst[1] = t_dst + 8 * 8; + + // Transpose 16x8 + transpose(src, p, dst, 8, 2); + + // Loop filtering + mb_lpf_horizontal_edge_w_sse2_8(t_dst + 8 * 8, 8, blimit, limit, thresh); + + src[0] = t_dst; + src[1] = t_dst + 8 * 8; + dst[0] = s - 8; + dst[1] = s; + + // Transpose back + transpose(src, 8, dst, p, 2); +} + +void vp9_lpf_vertical_16_dual_sse2(unsigned char *s, int p, + const uint8_t *blimit, const uint8_t *limit, + const uint8_t *thresh) { + DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256); + + // Transpose 16x16 + transpose8x16(s - 8, s - 8 + 8 * p, p, t_dst, 16); + transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16); + + // Loop filtering + mb_lpf_horizontal_edge_w_sse2_16(t_dst + 8 * 16, 16, blimit, limit, + thresh); + + // Transpose back + transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p); + transpose8x16(t_dst + 8, t_dst + 8 + 8 * 16, 16, s - 8 + 8 * p, p); +} diff --git a/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm b/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm index 4ebb51b..91055b9 100644 --- a/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm +++ b/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm @@ -12,7 +12,7 @@ %include "vpx_ports/x86_abi_support.asm" -;void vp9_loop_filter_horizontal_edge_mmx +;void vp9_lpf_horizontal_4_mmx ;( ; unsigned char *src_ptr, ; int src_pixel_step, @@ -21,8 +21,8 @@ ; const char *thresh, ; int count ;) -global sym(vp9_loop_filter_horizontal_edge_mmx) PRIVATE -sym(vp9_loop_filter_horizontal_edge_mmx): +global sym(vp9_lpf_horizontal_4_mmx) PRIVATE +sym(vp9_lpf_horizontal_4_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -224,7 +224,7 @@ sym(vp9_loop_filter_horizontal_edge_mmx): ret -;void vp9_loop_filter_vertical_edge_mmx +;void vp9_lpf_vertical_4_mmx ;( ; unsigned char *src_ptr, ; int src_pixel_step, @@ -233,8 +233,8 @@ sym(vp9_loop_filter_horizontal_edge_mmx): ; const char *thresh, ; int count ;) -global sym(vp9_loop_filter_vertical_edge_mmx) PRIVATE -sym(vp9_loop_filter_vertical_edge_mmx): +global sym(vp9_lpf_vertical_4_mmx) PRIVATE +sym(vp9_lpf_vertical_4_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -527,7 +527,7 @@ sym(vp9_loop_filter_vertical_edge_mmx): pxor mm7, [GLOBAL(t80)] ; unoffset ; mm7 = q1 - ; tranpose and write back + ; transpose and write back ; mm1 = 72 62 52 42 32 22 12 02 ; mm6 = 73 63 53 43 33 23 13 03 ; mm3 = 74 64 54 44 34 24 14 04 diff --git a/libvpx/vp9/common/x86/vp9_postproc_x86.h b/libvpx/vp9/common/x86/vp9_postproc_x86.h index 8870215..cab9d34 100644 --- a/libvpx/vp9/common/x86/vp9_postproc_x86.h +++ b/libvpx/vp9/common/x86/vp9_postproc_x86.h @@ -12,6 +12,10 @@ #ifndef VP9_COMMON_X86_VP9_POSTPROC_X86_H_ #define VP9_COMMON_X86_VP9_POSTPROC_X86_H_ +#ifdef 
__cplusplus +extern "C" { +#endif + /* Note: * * This platform is commonly built for runtime CPU detection. If you modify @@ -61,4 +65,8 @@ extern prototype_postproc_addnoise(vp9_plane_add_noise_wmt); #endif #endif +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_COMMON_X86_VP9_POSTPROC_X86_H_ diff --git a/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c b/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c new file mode 100644 index 0000000..7e9cc84 --- /dev/null +++ b/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c @@ -0,0 +1,543 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <immintrin.h> +#include "vpx_ports/mem.h" + +// filters for 16_h8 and 16_v8 +DECLARE_ALIGNED(32, static const uint8_t, filt1_global_avx2[32]) = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 +}; + +DECLARE_ALIGNED(32, static const uint8_t, filt2_global_avx2[32]) = { + 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, + 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10 +}; + +DECLARE_ALIGNED(32, static const uint8_t, filt3_global_avx2[32]) = { + 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, + 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12 +}; + +DECLARE_ALIGNED(32, static const uint8_t, filt4_global_avx2[32]) = { + 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, + 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14 +}; + +#if defined(__clang__) +# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ <= 3) +# define MM256_BROADCASTSI128_SI256(x) \ + _mm_broadcastsi128_si256((__m128i const *)&(x)) +# else // clang > 3.3 +# define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) +# endif // clang <= 3.3 +#elif defined(__GNUC__) +# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6) +# define MM256_BROADCASTSI128_SI256(x) \ + _mm_broadcastsi128_si256((__m128i const *)&(x)) +# elif __GNUC__ == 4 && __GNUC_MINOR__ == 7 +# define MM256_BROADCASTSI128_SI256(x) _mm_broadcastsi128_si256(x) +# else // gcc > 4.7 +# define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) +# endif // gcc <= 4.6 +#else // !(gcc || clang) +# define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) +#endif // __clang__ + +void vp9_filter_block1d16_h8_avx2(unsigned char *src_ptr, + unsigned int src_pixels_per_line, + unsigned char *output_ptr, + unsigned int output_pitch, + unsigned int output_height, + int16_t *filter) { + __m128i filtersReg; + __m256i addFilterReg64, filt1Reg, filt2Reg, filt3Reg, filt4Reg; + __m256i firstFilters, secondFilters, thirdFilters, forthFilters; + __m256i srcRegFilt32b1_1, srcRegFilt32b2_1, srcRegFilt32b2, srcRegFilt32b3; + __m256i srcReg32b1, srcReg32b2, filtersReg32; + unsigned int i; + unsigned int src_stride, dst_stride; + + // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64 + addFilterReg64 = _mm256_set1_epi32((int)0x0400040u); + filtersReg = _mm_loadu_si128((__m128i *)filter); + // converting the 16 bit (short) to 8 bit (byte) and have the same data + // in both lanes of 128 bit register. 
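+ // note: all eight vp9 sub-pixel taps fit in the signed 8 bit range, so
+ // this saturating pack is lossless, and byte-sized taps are what the
+ // signed operand of _mm256_maddubs_epi16 below expects. combined with
+ // the 64s above and the final shift by 7, each output byte amounts to
+ // the scalar clip_pixel(ROUND_POWER_OF_TWO(sum_of_8_taps, 7)).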
+ filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + // have the same data in both lanes of a 256 bit register + filtersReg32 = MM256_BROADCASTSI128_SI256(filtersReg); + + // duplicate only the first 16 bits (first and second byte) + // across 256 bit register + firstFilters = _mm256_shuffle_epi8(filtersReg32, + _mm256_set1_epi16(0x100u)); + // duplicate only the second 16 bits (third and forth byte) + // across 256 bit register + secondFilters = _mm256_shuffle_epi8(filtersReg32, + _mm256_set1_epi16(0x302u)); + // duplicate only the third 16 bits (fifth and sixth byte) + // across 256 bit register + thirdFilters = _mm256_shuffle_epi8(filtersReg32, + _mm256_set1_epi16(0x504u)); + // duplicate only the forth 16 bits (seventh and eighth byte) + // across 256 bit register + forthFilters = _mm256_shuffle_epi8(filtersReg32, + _mm256_set1_epi16(0x706u)); + + filt1Reg = _mm256_load_si256((__m256i const *)filt1_global_avx2); + filt2Reg = _mm256_load_si256((__m256i const *)filt2_global_avx2); + filt3Reg = _mm256_load_si256((__m256i const *)filt3_global_avx2); + filt4Reg = _mm256_load_si256((__m256i const *)filt4_global_avx2); + + // multiple the size of the source and destination stride by two + src_stride = src_pixels_per_line << 1; + dst_stride = output_pitch << 1; + for (i = output_height; i > 1; i-=2) { + // load the 2 strides of source + srcReg32b1 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr-3))); + srcReg32b1 = _mm256_inserti128_si256(srcReg32b1, + _mm_loadu_si128((__m128i *) + (src_ptr+src_pixels_per_line-3)), 1); + + // filter the source buffer + srcRegFilt32b1_1= _mm256_shuffle_epi8(srcReg32b1, filt1Reg); + srcRegFilt32b2= _mm256_shuffle_epi8(srcReg32b1, filt2Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt32b1_1 = _mm256_maddubs_epi16(srcRegFilt32b1_1, firstFilters); + srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, secondFilters); + + // add and saturate the results together + srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, srcRegFilt32b2); + + // filter the source buffer + srcRegFilt32b3= _mm256_shuffle_epi8(srcReg32b1, filt4Reg); + srcRegFilt32b2= _mm256_shuffle_epi8(srcReg32b1, filt3Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt32b3 = _mm256_maddubs_epi16(srcRegFilt32b3, forthFilters); + srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, thirdFilters); + + // add and saturate the results together + srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, + _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2)); + + // reading 2 strides of the next 16 bytes + // (part of it was being read by earlier read) + srcReg32b2 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr+5))); + srcReg32b2 = _mm256_inserti128_si256(srcReg32b2, + _mm_loadu_si128((__m128i *) + (src_ptr+src_pixels_per_line+5)), 1); + + // add and saturate the results together + srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, + _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2)); + + // filter the source buffer + srcRegFilt32b2_1 = _mm256_shuffle_epi8(srcReg32b2, filt1Reg); + srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b2, filt2Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt32b2_1 = _mm256_maddubs_epi16(srcRegFilt32b2_1, firstFilters); + srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, secondFilters); + + // add and saturate the results together + srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, srcRegFilt32b2); + + // filter the source 
buffer + srcRegFilt32b3= _mm256_shuffle_epi8(srcReg32b2, filt4Reg); + srcRegFilt32b2= _mm256_shuffle_epi8(srcReg32b2, filt3Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt32b3 = _mm256_maddubs_epi16(srcRegFilt32b3, forthFilters); + srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, thirdFilters); + + // add and saturate the results together + srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, + _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2)); + srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, + _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2)); + + + srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, addFilterReg64); + + srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, addFilterReg64); + + // shift by 7 bit each 16 bit + srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 7); + srcRegFilt32b2_1 = _mm256_srai_epi16(srcRegFilt32b2_1, 7); + + // shrink to 8 bit each 16 bits, the first lane contain the first + // convolve result and the second lane contain the second convolve + // result + srcRegFilt32b1_1 = _mm256_packus_epi16(srcRegFilt32b1_1, + srcRegFilt32b2_1); + + src_ptr+=src_stride; + + // save 16 bytes + _mm_store_si128((__m128i*)output_ptr, + _mm256_castsi256_si128(srcRegFilt32b1_1)); + + // save the next 16 bits + _mm_store_si128((__m128i*)(output_ptr+output_pitch), + _mm256_extractf128_si256(srcRegFilt32b1_1, 1)); + output_ptr+=dst_stride; + } + + // if the number of strides is odd. + // process only 16 bytes + if (i > 0) { + __m128i srcReg1, srcReg2, srcRegFilt1_1, srcRegFilt2_1; + __m128i srcRegFilt2, srcRegFilt3; + + srcReg1 = _mm_loadu_si128((__m128i *)(src_ptr-3)); + + // filter the source buffer + srcRegFilt1_1 = _mm_shuffle_epi8(srcReg1, + _mm256_castsi256_si128(filt1Reg)); + srcRegFilt2 = _mm_shuffle_epi8(srcReg1, + _mm256_castsi256_si128(filt2Reg)); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt1_1 = _mm_maddubs_epi16(srcRegFilt1_1, + _mm256_castsi256_si128(firstFilters)); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, + _mm256_castsi256_si128(secondFilters)); + + // add and saturate the results together + srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, srcRegFilt2); + + // filter the source buffer + srcRegFilt3= _mm_shuffle_epi8(srcReg1, + _mm256_castsi256_si128(filt4Reg)); + srcRegFilt2= _mm_shuffle_epi8(srcReg1, + _mm256_castsi256_si128(filt3Reg)); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, + _mm256_castsi256_si128(forthFilters)); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, + _mm256_castsi256_si128(thirdFilters)); + + // add and saturate the results together + srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, + _mm_min_epi16(srcRegFilt3, srcRegFilt2)); + + // reading the next 16 bytes + // (part of it was being read by earlier read) + srcReg2 = _mm_loadu_si128((__m128i *)(src_ptr+5)); + + // add and saturate the results together + srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, + _mm_max_epi16(srcRegFilt3, srcRegFilt2)); + + // filter the source buffer + srcRegFilt2_1 = _mm_shuffle_epi8(srcReg2, + _mm256_castsi256_si128(filt1Reg)); + srcRegFilt2 = _mm_shuffle_epi8(srcReg2, + _mm256_castsi256_si128(filt2Reg)); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt2_1 = _mm_maddubs_epi16(srcRegFilt2_1, + _mm256_castsi256_si128(firstFilters)); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, + _mm256_castsi256_si128(secondFilters)); + + // add and saturate the results 
together + srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, srcRegFilt2); + + // filter the source buffer + srcRegFilt3 = _mm_shuffle_epi8(srcReg2, + _mm256_castsi256_si128(filt4Reg)); + srcRegFilt2 = _mm_shuffle_epi8(srcReg2, + _mm256_castsi256_si128(filt3Reg)); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, + _mm256_castsi256_si128(forthFilters)); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, + _mm256_castsi256_si128(thirdFilters)); + + // add and saturate the results together + srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, + _mm_min_epi16(srcRegFilt3, srcRegFilt2)); + srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, + _mm_max_epi16(srcRegFilt3, srcRegFilt2)); + + + srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, + _mm256_castsi256_si128(addFilterReg64)); + + srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, + _mm256_castsi256_si128(addFilterReg64)); + + // shift by 7 bit each 16 bit + srcRegFilt1_1 = _mm_srai_epi16(srcRegFilt1_1, 7); + srcRegFilt2_1 = _mm_srai_epi16(srcRegFilt2_1, 7); + + // shrink to 8 bit each 16 bits, the first lane contain the first + // convolve result and the second lane contain the second convolve + // result + srcRegFilt1_1 = _mm_packus_epi16(srcRegFilt1_1, srcRegFilt2_1); + + // save 16 bytes + _mm_store_si128((__m128i*)output_ptr, srcRegFilt1_1); + } +} + +void vp9_filter_block1d16_v8_avx2(unsigned char *src_ptr, + unsigned int src_pitch, + unsigned char *output_ptr, + unsigned int out_pitch, + unsigned int output_height, + int16_t *filter) { + __m128i filtersReg; + __m256i addFilterReg64; + __m256i srcReg32b1, srcReg32b2, srcReg32b3, srcReg32b4, srcReg32b5; + __m256i srcReg32b6, srcReg32b7, srcReg32b8, srcReg32b9, srcReg32b10; + __m256i srcReg32b11, srcReg32b12, srcReg32b13, filtersReg32; + __m256i firstFilters, secondFilters, thirdFilters, forthFilters; + unsigned int i; + unsigned int src_stride, dst_stride; + + // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64 + addFilterReg64 = _mm256_set1_epi32((int)0x0400040u); + filtersReg = _mm_loadu_si128((__m128i *)filter); + // converting the 16 bit (short) to 8 bit (byte) and have the + // same data in both lanes of 128 bit register. 
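+ // note: per output pixel this vertical pass computes, in scalar terms,
+ // roughly clip_pixel(ROUND_POWER_OF_TWO(f[0]*s[0*p] + f[1]*s[1*p] + ...
+ // + f[7]*s[7*p], 7)) with p = src_pitch; the loop below evaluates 16
+ // such columns for two rows per iteration, one row per 128 bit lane.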
+ filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + // have the same data in both lanes of a 256 bit register + filtersReg32 = MM256_BROADCASTSI128_SI256(filtersReg); + + // duplicate only the first 16 bits (first and second byte) + // across 256 bit register + firstFilters = _mm256_shuffle_epi8(filtersReg32, + _mm256_set1_epi16(0x100u)); + // duplicate only the second 16 bits (third and forth byte) + // across 256 bit register + secondFilters = _mm256_shuffle_epi8(filtersReg32, + _mm256_set1_epi16(0x302u)); + // duplicate only the third 16 bits (fifth and sixth byte) + // across 256 bit register + thirdFilters = _mm256_shuffle_epi8(filtersReg32, + _mm256_set1_epi16(0x504u)); + // duplicate only the forth 16 bits (seventh and eighth byte) + // across 256 bit register + forthFilters = _mm256_shuffle_epi8(filtersReg32, + _mm256_set1_epi16(0x706u)); + + // multiple the size of the source and destination stride by two + src_stride = src_pitch << 1; + dst_stride = out_pitch << 1; + + // load 16 bytes 7 times in stride of src_pitch + srcReg32b1 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr))); + srcReg32b2 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr+src_pitch))); + srcReg32b3 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*2))); + srcReg32b4 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*3))); + srcReg32b5 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*4))); + srcReg32b6 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*5))); + srcReg32b7 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*6))); + + // have each consecutive loads on the same 256 register + srcReg32b1 = _mm256_inserti128_si256(srcReg32b1, + _mm256_castsi256_si128(srcReg32b2), 1); + srcReg32b2 = _mm256_inserti128_si256(srcReg32b2, + _mm256_castsi256_si128(srcReg32b3), 1); + srcReg32b3 = _mm256_inserti128_si256(srcReg32b3, + _mm256_castsi256_si128(srcReg32b4), 1); + srcReg32b4 = _mm256_inserti128_si256(srcReg32b4, + _mm256_castsi256_si128(srcReg32b5), 1); + srcReg32b5 = _mm256_inserti128_si256(srcReg32b5, + _mm256_castsi256_si128(srcReg32b6), 1); + srcReg32b6 = _mm256_inserti128_si256(srcReg32b6, + _mm256_castsi256_si128(srcReg32b7), 1); + + // merge every two consecutive registers except the last one + srcReg32b10 = _mm256_unpacklo_epi8(srcReg32b1, srcReg32b2); + srcReg32b1 = _mm256_unpackhi_epi8(srcReg32b1, srcReg32b2); + + // save + srcReg32b11 = _mm256_unpacklo_epi8(srcReg32b3, srcReg32b4); + + // save + srcReg32b3 = _mm256_unpackhi_epi8(srcReg32b3, srcReg32b4); + + // save + srcReg32b2 = _mm256_unpacklo_epi8(srcReg32b5, srcReg32b6); + + // save + srcReg32b5 = _mm256_unpackhi_epi8(srcReg32b5, srcReg32b6); + + + for (i = output_height; i > 1; i-=2) { + // load the last 2 loads of 16 bytes and have every two + // consecutive loads in the same 256 bit register + srcReg32b8 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*7))); + srcReg32b7 = _mm256_inserti128_si256(srcReg32b7, + _mm256_castsi256_si128(srcReg32b8), 1); + srcReg32b9 = _mm256_castsi128_si256( + _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*8))); + srcReg32b8 = _mm256_inserti128_si256(srcReg32b8, + _mm256_castsi256_si128(srcReg32b9), 1); + + // merge every two consecutive registers + // save + srcReg32b4 = _mm256_unpacklo_epi8(srcReg32b7, srcReg32b8); + srcReg32b7 = _mm256_unpackhi_epi8(srcReg32b7, srcReg32b8); + + // multiply 2 adjacent elements with the filter and add the 
result + srcReg32b10 = _mm256_maddubs_epi16(srcReg32b10, firstFilters); + srcReg32b6 = _mm256_maddubs_epi16(srcReg32b4, forthFilters); + srcReg32b1 = _mm256_maddubs_epi16(srcReg32b1, firstFilters); + srcReg32b8 = _mm256_maddubs_epi16(srcReg32b7, forthFilters); + + // add and saturate the results together + srcReg32b10 = _mm256_adds_epi16(srcReg32b10, srcReg32b6); + srcReg32b1 = _mm256_adds_epi16(srcReg32b1, srcReg32b8); + + + // multiply 2 adjacent elements with the filter and add the result + srcReg32b8 = _mm256_maddubs_epi16(srcReg32b11, secondFilters); + srcReg32b6 = _mm256_maddubs_epi16(srcReg32b3, secondFilters); + + // multiply 2 adjacent elements with the filter and add the result + srcReg32b12 = _mm256_maddubs_epi16(srcReg32b2, thirdFilters); + srcReg32b13 = _mm256_maddubs_epi16(srcReg32b5, thirdFilters); + + + // add and saturate the results together + srcReg32b10 = _mm256_adds_epi16(srcReg32b10, + _mm256_min_epi16(srcReg32b8, srcReg32b12)); + srcReg32b1 = _mm256_adds_epi16(srcReg32b1, + _mm256_min_epi16(srcReg32b6, srcReg32b13)); + + // add and saturate the results together + srcReg32b10 = _mm256_adds_epi16(srcReg32b10, + _mm256_max_epi16(srcReg32b8, srcReg32b12)); + srcReg32b1 = _mm256_adds_epi16(srcReg32b1, + _mm256_max_epi16(srcReg32b6, srcReg32b13)); + + + srcReg32b10 = _mm256_adds_epi16(srcReg32b10, addFilterReg64); + srcReg32b1 = _mm256_adds_epi16(srcReg32b1, addFilterReg64); + + // shift by 7 bit each 16 bit + srcReg32b10 = _mm256_srai_epi16(srcReg32b10, 7); + srcReg32b1 = _mm256_srai_epi16(srcReg32b1, 7); + + // shrink to 8 bit each 16 bits, the first lane contain the first + // convolve result and the second lane contain the second convolve + // result + srcReg32b1 = _mm256_packus_epi16(srcReg32b10, srcReg32b1); + + src_ptr+=src_stride; + + // save 16 bytes + _mm_store_si128((__m128i*)output_ptr, + _mm256_castsi256_si128(srcReg32b1)); + + // save the next 16 bits + _mm_store_si128((__m128i*)(output_ptr+out_pitch), + _mm256_extractf128_si256(srcReg32b1, 1)); + + output_ptr+=dst_stride; + + // save part of the registers for next strides + srcReg32b10 = srcReg32b11; + srcReg32b1 = srcReg32b3; + srcReg32b11 = srcReg32b2; + srcReg32b3 = srcReg32b5; + srcReg32b2 = srcReg32b4; + srcReg32b5 = srcReg32b7; + srcReg32b7 = srcReg32b9; + } + if (i > 0) { + __m128i srcRegFilt1, srcRegFilt3, srcRegFilt4, srcRegFilt5; + __m128i srcRegFilt6, srcRegFilt7, srcRegFilt8; + // load the last 16 bytes + srcRegFilt8 = _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*7)); + + // merge the last 2 results together + srcRegFilt4 = _mm_unpacklo_epi8( + _mm256_castsi256_si128(srcReg32b7), srcRegFilt8); + srcRegFilt7 = _mm_unpackhi_epi8( + _mm256_castsi256_si128(srcReg32b7), srcRegFilt8); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt1 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b10), + _mm256_castsi256_si128(firstFilters)); + srcRegFilt4 = _mm_maddubs_epi16(srcRegFilt4, + _mm256_castsi256_si128(forthFilters)); + srcRegFilt3 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b1), + _mm256_castsi256_si128(firstFilters)); + srcRegFilt7 = _mm_maddubs_epi16(srcRegFilt7, + _mm256_castsi256_si128(forthFilters)); + + // add and saturate the results together + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4); + srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, srcRegFilt7); + + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt4 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b11), + _mm256_castsi256_si128(secondFilters)); + srcRegFilt5 = 
_mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b3), + _mm256_castsi256_si128(secondFilters)); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt6 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b2), + _mm256_castsi256_si128(thirdFilters)); + srcRegFilt7 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b5), + _mm256_castsi256_si128(thirdFilters)); + + // add and saturate the results together + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, + _mm_min_epi16(srcRegFilt4, srcRegFilt6)); + srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, + _mm_min_epi16(srcRegFilt5, srcRegFilt7)); + + // add and saturate the results together + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, + _mm_max_epi16(srcRegFilt4, srcRegFilt6)); + srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, + _mm_max_epi16(srcRegFilt5, srcRegFilt7)); + + + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, + _mm256_castsi256_si128(addFilterReg64)); + srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, + _mm256_castsi256_si128(addFilterReg64)); + + // shift by 7 bit each 16 bit + srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7); + srcRegFilt3 = _mm_srai_epi16(srcRegFilt3, 7); + + // shrink to 8 bit each 16 bits, the first lane contain the first + // convolve result and the second lane contain the second convolve + // result + srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt3); + + // save 16 bytes + _mm_store_si128((__m128i*)output_ptr, srcRegFilt1); + } +} diff --git a/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c b/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c new file mode 100644 index 0000000..cf28d8d --- /dev/null +++ b/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c @@ -0,0 +1,490 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include <tmmintrin.h> +#include "vpx_ports/mem.h" +#include "vpx_ports/emmintrin_compat.h" + +// filters only for the 4_h8 convolution +DECLARE_ALIGNED(16, static const uint8_t, filt1_4_h8[16]) = { + 0, 1, 1, 2, 2, 3, 3, 4, 2, 3, 3, 4, 4, 5, 5, 6 +}; + +DECLARE_ALIGNED(16, static const uint8_t, filt2_4_h8[16]) = { + 4, 5, 5, 6, 6, 7, 7, 8, 6, 7, 7, 8, 8, 9, 9, 10 +}; + +// filters for 8_h8 and 16_h8 +DECLARE_ALIGNED(16, static const uint8_t, filt1_global[16]) = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 +}; + +DECLARE_ALIGNED(16, static const uint8_t, filt2_global[16]) = { + 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10 +}; + +DECLARE_ALIGNED(16, static const uint8_t, filt3_global[16]) = { + 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12 +}; + +DECLARE_ALIGNED(16, static const uint8_t, filt4_global[16]) = { + 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14 +}; + +void vp9_filter_block1d4_h8_intrin_ssse3(unsigned char *src_ptr, + unsigned int src_pixels_per_line, + unsigned char *output_ptr, + unsigned int output_pitch, + unsigned int output_height, + int16_t *filter) { + __m128i firstFilters, secondFilters, thirdFilters, forthFilters; + __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt4; + __m128i addFilterReg64, filtersReg, srcReg, minReg; + unsigned int i; + + // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64 + addFilterReg64 =_mm_set1_epi32((int)0x0400040u); + filtersReg = _mm_loadu_si128((__m128i *)filter); + // converting the 16 bit (short) to 8 bit (byte) and have the same data + // in both lanes of 128 bit register. + filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + + // duplicate only the first 16 bits in the filter into the first lane + firstFilters = _mm_shufflelo_epi16(filtersReg, 0); + // duplicate only the third 16 bit in the filter into the first lane + secondFilters = _mm_shufflelo_epi16(filtersReg, 0xAAu); + // duplicate only the seconds 16 bits in the filter into the second lane + firstFilters = _mm_shufflehi_epi16(firstFilters, 0x55u); + // duplicate only the forth 16 bits in the filter into the second lane + secondFilters = _mm_shufflehi_epi16(secondFilters, 0xFFu); + + // loading the local filters + thirdFilters =_mm_load_si128((__m128i const *)filt1_4_h8); + forthFilters = _mm_load_si128((__m128i const *)filt2_4_h8); + + for (i = 0; i < output_height; i++) { + srcReg = _mm_loadu_si128((__m128i *)(src_ptr-3)); + + // filter the source buffer + srcRegFilt1= _mm_shuffle_epi8(srcReg, thirdFilters); + srcRegFilt2= _mm_shuffle_epi8(srcReg, forthFilters); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters); + + // extract the higher half of the lane + srcRegFilt3 = _mm_srli_si128(srcRegFilt1, 8); + srcRegFilt4 = _mm_srli_si128(srcRegFilt2, 8); + + minReg = _mm_min_epi16(srcRegFilt3, srcRegFilt2); + + // add and saturate all the results together + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4); + srcRegFilt3 = _mm_max_epi16(srcRegFilt3, srcRegFilt2); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, minReg); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt3); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, addFilterReg64); + + // shift by 7 bit each 16 bits + srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7); + + // shrink to 8 bit each 16 bits + srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1); + src_ptr+=src_pixels_per_line; + + // save only 4 bytes + 
*((int*)&output_ptr[0])= _mm_cvtsi128_si32(srcRegFilt1); + + output_ptr+=output_pitch; + } +} + +void vp9_filter_block1d8_h8_intrin_ssse3(unsigned char *src_ptr, + unsigned int src_pixels_per_line, + unsigned char *output_ptr, + unsigned int output_pitch, + unsigned int output_height, + int16_t *filter) { + __m128i firstFilters, secondFilters, thirdFilters, forthFilters, srcReg; + __m128i filt1Reg, filt2Reg, filt3Reg, filt4Reg; + __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt4; + __m128i addFilterReg64, filtersReg, minReg; + unsigned int i; + + // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64 + addFilterReg64 = _mm_set1_epi32((int)0x0400040u); + filtersReg = _mm_loadu_si128((__m128i *)filter); + // converting the 16 bit (short) to 8 bit (byte) and have the same data + // in both lanes of 128 bit register. + filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + + // duplicate only the first 16 bits (first and second byte) + // across 128 bit register + firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x100u)); + // duplicate only the second 16 bits (third and forth byte) + // across 128 bit register + secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u)); + // duplicate only the third 16 bits (fifth and sixth byte) + // across 128 bit register + thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u)); + // duplicate only the forth 16 bits (seventh and eighth byte) + // across 128 bit register + forthFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x706u)); + + filt1Reg = _mm_load_si128((__m128i const *)filt1_global); + filt2Reg = _mm_load_si128((__m128i const *)filt2_global); + filt3Reg = _mm_load_si128((__m128i const *)filt3_global); + filt4Reg = _mm_load_si128((__m128i const *)filt4_global); + + for (i = 0; i < output_height; i++) { + srcReg = _mm_loadu_si128((__m128i *)(src_ptr-3)); + + // filter the source buffer + srcRegFilt1= _mm_shuffle_epi8(srcReg, filt1Reg); + srcRegFilt2= _mm_shuffle_epi8(srcReg, filt2Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters); + + // filter the source buffer + srcRegFilt3= _mm_shuffle_epi8(srcReg, filt3Reg); + srcRegFilt4= _mm_shuffle_epi8(srcReg, filt4Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, thirdFilters); + srcRegFilt4 = _mm_maddubs_epi16(srcRegFilt4, forthFilters); + + // add and saturate all the results together + minReg = _mm_min_epi16(srcRegFilt4, srcRegFilt3); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt2); + + srcRegFilt4= _mm_max_epi16(srcRegFilt4, srcRegFilt3); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, minReg); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, addFilterReg64); + + // shift by 7 bit each 16 bits + srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7); + + // shrink to 8 bit each 16 bits + srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1); + + src_ptr+=src_pixels_per_line; + + // save only 8 bytes + _mm_storel_epi64((__m128i*)&output_ptr[0], srcRegFilt1); + + output_ptr+=output_pitch; + } +} + +void vp9_filter_block1d16_h8_intrin_ssse3(unsigned char *src_ptr, + unsigned int src_pixels_per_line, + unsigned char *output_ptr, + unsigned int output_pitch, + unsigned int output_height, + int16_t *filter) { + __m128i addFilterReg64, filtersReg, srcReg1, srcReg2; + __m128i 
filt1Reg, filt2Reg, filt3Reg, filt4Reg; + __m128i firstFilters, secondFilters, thirdFilters, forthFilters; + __m128i srcRegFilt1_1, srcRegFilt2_1, srcRegFilt2, srcRegFilt3; + unsigned int i; + + // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64 + addFilterReg64 = _mm_set1_epi32((int)0x0400040u); + filtersReg = _mm_loadu_si128((__m128i *)filter); + // converting the 16 bit (short) to 8 bit (byte) and have the same data + // in both lanes of 128 bit register. + filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + + // duplicate only the first 16 bits (first and second byte) + // across 128 bit register + firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x100u)); + // duplicate only the second 16 bits (third and forth byte) + // across 128 bit register + secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u)); + // duplicate only the third 16 bits (fifth and sixth byte) + // across 128 bit register + thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u)); + // duplicate only the forth 16 bits (seventh and eighth byte) + // across 128 bit register + forthFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x706u)); + + filt1Reg = _mm_load_si128((__m128i const *)filt1_global); + filt2Reg = _mm_load_si128((__m128i const *)filt2_global); + filt3Reg = _mm_load_si128((__m128i const *)filt3_global); + filt4Reg = _mm_load_si128((__m128i const *)filt4_global); + + for (i = 0; i < output_height; i++) { + srcReg1 = _mm_loadu_si128((__m128i *)(src_ptr-3)); + + // filter the source buffer + srcRegFilt1_1= _mm_shuffle_epi8(srcReg1, filt1Reg); + srcRegFilt2= _mm_shuffle_epi8(srcReg1, filt2Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt1_1 = _mm_maddubs_epi16(srcRegFilt1_1, firstFilters); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters); + + // add and saturate the results together + srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, srcRegFilt2); + + // filter the source buffer + srcRegFilt3= _mm_shuffle_epi8(srcReg1, filt4Reg); + srcRegFilt2= _mm_shuffle_epi8(srcReg1, filt3Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, forthFilters); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, thirdFilters); + + // add and saturate the results together + srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, + _mm_min_epi16(srcRegFilt3, srcRegFilt2)); + + // reading the next 16 bytes. 
+ // (part of it was being read by earlier read) + srcReg2 = _mm_loadu_si128((__m128i *)(src_ptr+5)); + + // add and saturate the results together + srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, + _mm_max_epi16(srcRegFilt3, srcRegFilt2)); + + // filter the source buffer + srcRegFilt2_1= _mm_shuffle_epi8(srcReg2, filt1Reg); + srcRegFilt2= _mm_shuffle_epi8(srcReg2, filt2Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt2_1 = _mm_maddubs_epi16(srcRegFilt2_1, firstFilters); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters); + + // add and saturate the results together + srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, srcRegFilt2); + + // filter the source buffer + srcRegFilt3= _mm_shuffle_epi8(srcReg2, filt4Reg); + srcRegFilt2= _mm_shuffle_epi8(srcReg2, filt3Reg); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, forthFilters); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, thirdFilters); + + // add and saturate the results together + srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, + _mm_min_epi16(srcRegFilt3, srcRegFilt2)); + srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, + _mm_max_epi16(srcRegFilt3, srcRegFilt2)); + + srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, addFilterReg64); + srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, addFilterReg64); + + // shift by 7 bit each 16 bit + srcRegFilt1_1 = _mm_srai_epi16(srcRegFilt1_1, 7); + srcRegFilt2_1 = _mm_srai_epi16(srcRegFilt2_1, 7); + + // shrink to 8 bit each 16 bits, the first lane contain the first + // convolve result and the second lane contain the second convolve + // result + srcRegFilt1_1 = _mm_packus_epi16(srcRegFilt1_1, srcRegFilt2_1); + + src_ptr+=src_pixels_per_line; + + // save 16 bytes + _mm_store_si128((__m128i*)output_ptr, srcRegFilt1_1); + + output_ptr+=output_pitch; + } +} + +void vp9_filter_block1d8_v8_intrin_ssse3(unsigned char *src_ptr, + unsigned int src_pitch, + unsigned char *output_ptr, + unsigned int out_pitch, + unsigned int output_height, + int16_t *filter) { + __m128i addFilterReg64, filtersReg, minReg, srcRegFilt6; + __m128i firstFilters, secondFilters, thirdFilters, forthFilters; + __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt4, srcRegFilt5; + unsigned int i; + + // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64 + addFilterReg64 = _mm_set1_epi32((int)0x0400040u); + filtersReg = _mm_loadu_si128((__m128i *)filter); + // converting the 16 bit (short) to 8 bit (byte) and have the same data + // in both lanes of 128 bit register. 
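+ // note: in the loop below the four 16 bit partial sums are accumulated
+ // as (taps 0-1) + (taps 6-7) + min(mid sums) + max(mid sums) rather
+ // than in tap order; with saturating adds, folding in the smaller
+ // middle sum first reduces the error from intermediate saturation when
+ // the large positive centre taps dominate.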
+ filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + + // duplicate only the first 16 bits in the filter + firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x100u)); + // duplicate only the second 16 bits in the filter + secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u)); + // duplicate only the third 16 bits in the filter + thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u)); + // duplicate only the forth 16 bits in the filter + forthFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x706u)); + + for (i = 0; i < output_height; i++) { + // load the first 8 bytes + srcRegFilt1 = _mm_loadl_epi64((__m128i *)&src_ptr[0]); + // load the next 8 bytes in stride of src_pitch + srcRegFilt2 = _mm_loadl_epi64((__m128i *)&(src_ptr+src_pitch)[0]); + srcRegFilt3 = _mm_loadl_epi64((__m128i *)&(src_ptr+src_pitch*2)[0]); + srcRegFilt4 = _mm_loadl_epi64((__m128i *)&(src_ptr+src_pitch*3)[0]); + + // merge the result together + srcRegFilt1 = _mm_unpacklo_epi8(srcRegFilt1, srcRegFilt2); + srcRegFilt3 = _mm_unpacklo_epi8(srcRegFilt3, srcRegFilt4); + + // load the next 8 bytes in stride of src_pitch + srcRegFilt2 = _mm_loadl_epi64((__m128i *)&(src_ptr+src_pitch*4)[0]); + srcRegFilt4 = _mm_loadl_epi64((__m128i *)&(src_ptr+src_pitch*5)[0]); + srcRegFilt5 = _mm_loadl_epi64((__m128i *)&(src_ptr+src_pitch*6)[0]); + srcRegFilt6 = _mm_loadl_epi64((__m128i *)&(src_ptr+src_pitch*7)[0]); + + // merge the result together + srcRegFilt2 = _mm_unpacklo_epi8(srcRegFilt2, srcRegFilt4); + srcRegFilt5 = _mm_unpacklo_epi8(srcRegFilt5, srcRegFilt6); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters); + srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, secondFilters); + srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, thirdFilters); + srcRegFilt5 = _mm_maddubs_epi16(srcRegFilt5, forthFilters); + + // add and saturate the results together + minReg = _mm_min_epi16(srcRegFilt2, srcRegFilt3); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt5); + srcRegFilt2 = _mm_max_epi16(srcRegFilt2, srcRegFilt3); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, minReg); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt2); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, addFilterReg64); + + // shift by 7 bit each 16 bit + srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7); + + // shrink to 8 bit each 16 bits + srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1); + + src_ptr+=src_pitch; + + // save only 8 bytes convolve result + _mm_storel_epi64((__m128i*)&output_ptr[0], srcRegFilt1); + + output_ptr+=out_pitch; + } +} + +void vp9_filter_block1d16_v8_intrin_ssse3(unsigned char *src_ptr, + unsigned int src_pitch, + unsigned char *output_ptr, + unsigned int out_pitch, + unsigned int output_height, + int16_t *filter) { + __m128i addFilterReg64, filtersReg, srcRegFilt1, srcRegFilt2, srcRegFilt3; + __m128i firstFilters, secondFilters, thirdFilters, forthFilters; + __m128i srcRegFilt4, srcRegFilt5, srcRegFilt6, srcRegFilt7, srcRegFilt8; + unsigned int i; + + // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64 + addFilterReg64 = _mm_set1_epi32((int)0x0400040u); + filtersReg = _mm_loadu_si128((__m128i *)filter); + // converting the 16 bit (short) to 8 bit (byte) and have the same data + // in both lanes of 128 bit register. 
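+ // note: each _mm_set1_epi16() shuffle control below replicates one
+ // byte pair of the packed taps across the register: 0x100 selects
+ // bytes 0-1 (f0, f1), 0x302 bytes 2-3 (f2, f3), and so on, yielding
+ // one tap pair per 16 bit lane for pmaddubsw.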
+ filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + + // duplicate only the first 16 bits in the filter + firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x100u)); + // duplicate only the second 16 bits in the filter + secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u)); + // duplicate only the third 16 bits in the filter + thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u)); + // duplicate only the forth 16 bits in the filter + forthFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x706u)); + + for (i = 0; i < output_height; i++) { + // load the first 16 bytes + srcRegFilt1 = _mm_loadu_si128((__m128i *)(src_ptr)); + // load the next 16 bytes in stride of src_pitch + srcRegFilt2 = _mm_loadu_si128((__m128i *)(src_ptr+src_pitch)); + srcRegFilt3 = _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*6)); + srcRegFilt4 = _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*7)); + + // merge the result together + srcRegFilt5 = _mm_unpacklo_epi8(srcRegFilt1, srcRegFilt2); + srcRegFilt6 = _mm_unpacklo_epi8(srcRegFilt3, srcRegFilt4); + srcRegFilt1 = _mm_unpackhi_epi8(srcRegFilt1, srcRegFilt2); + srcRegFilt3 = _mm_unpackhi_epi8(srcRegFilt3, srcRegFilt4); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt5 = _mm_maddubs_epi16(srcRegFilt5, firstFilters); + srcRegFilt6 = _mm_maddubs_epi16(srcRegFilt6, forthFilters); + srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters); + srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, forthFilters); + + // add and saturate the results together + srcRegFilt5 = _mm_adds_epi16(srcRegFilt5, srcRegFilt6); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt3); + + // load the next 16 bytes in stride of two/three src_pitch + srcRegFilt2 = _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*2)); + srcRegFilt3 = _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*3)); + + // merge the result together + srcRegFilt4 = _mm_unpacklo_epi8(srcRegFilt2, srcRegFilt3); + srcRegFilt6 = _mm_unpackhi_epi8(srcRegFilt2, srcRegFilt3); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt4 = _mm_maddubs_epi16(srcRegFilt4, secondFilters); + srcRegFilt6 = _mm_maddubs_epi16(srcRegFilt6, secondFilters); + + // load the next 16 bytes in stride of four/five src_pitch + srcRegFilt2 = _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*4)); + srcRegFilt3 = _mm_loadu_si128((__m128i *)(src_ptr+src_pitch*5)); + + // merge the result together + srcRegFilt7 = _mm_unpacklo_epi8(srcRegFilt2, srcRegFilt3); + srcRegFilt8 = _mm_unpackhi_epi8(srcRegFilt2, srcRegFilt3); + + // multiply 2 adjacent elements with the filter and add the result + srcRegFilt7 = _mm_maddubs_epi16(srcRegFilt7, thirdFilters); + srcRegFilt8 = _mm_maddubs_epi16(srcRegFilt8, thirdFilters); + + // add and saturate the results together + srcRegFilt5 = _mm_adds_epi16(srcRegFilt5, + _mm_min_epi16(srcRegFilt4, srcRegFilt7)); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, + _mm_min_epi16(srcRegFilt6, srcRegFilt8)); + + // add and saturate the results together + srcRegFilt5 = _mm_adds_epi16(srcRegFilt5, + _mm_max_epi16(srcRegFilt4, srcRegFilt7)); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, + _mm_max_epi16(srcRegFilt6, srcRegFilt8)); + srcRegFilt5 = _mm_adds_epi16(srcRegFilt5, addFilterReg64); + srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, addFilterReg64); + + // shift by 7 bit each 16 bit + srcRegFilt5 = _mm_srai_epi16(srcRegFilt5, 7); + srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7); + + // shrink to 8 bit each 16 bits, the first lane contain the first + // 
convolve result and the second lane contain the second convolve + // result + srcRegFilt1 = _mm_packus_epi16(srcRegFilt5, srcRegFilt1); + + src_ptr+=src_pitch; + + // save 16 bytes convolve result + _mm_store_si128((__m128i*)output_ptr, srcRegFilt1); + + output_ptr+=out_pitch; + } +} diff --git a/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm b/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm index 7a5cca0..634fa77 100644 --- a/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm +++ b/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm @@ -11,17 +11,6 @@ %include "vpx_ports/x86_abi_support.asm" -;/************************************************************************************ -; Notes: filter_block1d_h6 applies a 6 tap filter horizontally to the input pixels. The -; input pixel array has output_height rows. This routine assumes that output_height is an -; even number. This function handles 8 pixels in horizontal direction, calculating ONE -; rows each iteration to take advantage of the 128 bits operations. -; -; This is an implementation of some of the SSE optimizations first seen in ffvp8 -; -;*************************************************************************************/ - - %macro VERTx4 1 mov rdx, arg(5) ;filter ptr mov rsi, arg(0) ;src_ptr @@ -81,11 +70,14 @@ pmaddubsw xmm4, k4k5 pmaddubsw xmm6, k6k7 + movdqa xmm1, xmm2 paddsw xmm0, xmm6 - paddsw xmm0, xmm2 + pmaxsw xmm2, xmm4 + pminsw xmm4, xmm1 paddsw xmm0, xmm4 - paddsw xmm0, krd + paddsw xmm0, xmm2 + paddsw xmm0, krd psraw xmm0, 7 packuswb xmm0, xmm0 @@ -166,10 +158,13 @@ pmaddubsw xmm6, k6k7 paddsw xmm0, xmm6 - paddsw xmm0, xmm2 + movdqa xmm1, xmm2 + pmaxsw xmm2, xmm4 + pminsw xmm4, xmm1 paddsw xmm0, xmm4 - paddsw xmm0, krd + paddsw xmm0, xmm2 + paddsw xmm0, krd psraw xmm0, 7 packuswb xmm0, xmm0 @@ -251,10 +246,13 @@ pmaddubsw xmm6, k6k7 paddsw xmm0, xmm6 - paddsw xmm0, xmm2 + movdqa xmm1, xmm2 + pmaxsw xmm2, xmm4 + pminsw xmm4, xmm1 paddsw xmm0, xmm4 - paddsw xmm0, krd + paddsw xmm0, xmm2 + paddsw xmm0, krd psraw xmm0, 7 packuswb xmm0, xmm0 %if %1 @@ -538,14 +536,22 @@ sym(vp9_filter_block1d16_v8_avg_ssse3): movdqa %2, %1 pshufb %1, [GLOBAL(shuf_t0t1)] pshufb %2, [GLOBAL(shuf_t2t3)] - pmaddubsw %1, xmm6 - pmaddubsw %2, xmm7 + pmaddubsw %1, k0k1k4k5 + pmaddubsw %2, k2k3k6k7 - paddsw %1, %2 - movdqa %2, %1 + movdqa xmm4, %1 + movdqa xmm5, %2 + psrldq %1, 8 psrldq %2, 8 - paddsw %1, %2 - paddsw %1, xmm5 + movdqa xmm6, xmm5 + + paddsw xmm4, %2 + pmaxsw xmm5, %1 + pminsw %1, xmm6 + paddsw %1, xmm4 + paddsw %1, xmm5 + + paddsw %1, krd psraw %1, 7 packuswb %1, %1 %endm @@ -565,6 +571,10 @@ sym(vp9_filter_block1d16_v8_avg_ssse3): pshufhw xmm7, xmm7, 11111111b ;k2_k3_k6_k7 pshufd xmm5, xmm5, 0 ;rounding + movdqa k0k1k4k5, xmm6 + movdqa k2k3k6k7, xmm7 + movdqa krd, xmm5 + movsxd rax, dword ptr arg(1) ;src_pixels_per_line movsxd rdx, dword ptr arg(3) ;output_pitch movsxd rcx, dword ptr arg(4) ;output_height @@ -631,9 +641,13 @@ sym(vp9_filter_block1d16_v8_avg_ssse3): pmaddubsw %3, k4k5 pmaddubsw %4, k6k7 - paddsw %1, %2 paddsw %1, %4 + movdqa %4, %2 + pmaxsw %2, %3 + pminsw %3, %4 paddsw %1, %3 + paddsw %1, %2 + paddsw %1, krd psraw %1, 7 packuswb %1, %1 @@ -779,12 +793,19 @@ sym(vp9_filter_block1d16_v8_avg_ssse3): pmaddubsw xmm6, k4k5 pmaddubsw xmm7, k6k7 - paddsw xmm0, xmm1 paddsw xmm0, xmm3 + movdqa xmm3, xmm1 + pmaxsw xmm1, xmm2 + pminsw xmm2, xmm3 paddsw xmm0, xmm2 - paddsw xmm4, xmm5 + paddsw xmm0, xmm1 + paddsw xmm4, xmm7 + movdqa xmm7, xmm5 + pmaxsw xmm5, xmm6 + pminsw xmm6, xmm7 paddsw xmm4, xmm6 + paddsw xmm4, xmm5 paddsw 
xmm0, krd paddsw xmm4, krd @@ -826,8 +847,16 @@ sym(vp9_filter_block1d4_h8_ssse3): push rdi ; end prolog + ALIGN_STACK 16, rax + sub rsp, 16 * 3 + %define k0k1k4k5 [rsp + 16 * 0] + %define k2k3k6k7 [rsp + 16 * 1] + %define krd [rsp + 16 * 2] + HORIZx4 0 + add rsp, 16 * 3 + pop rsp ; begin epilog pop rdi pop rsi @@ -932,8 +961,16 @@ sym(vp9_filter_block1d4_h8_avg_ssse3): push rdi ; end prolog + ALIGN_STACK 16, rax + sub rsp, 16 * 3 + %define k0k1k4k5 [rsp + 16 * 0] + %define k2k3k6k7 [rsp + 16 * 1] + %define krd [rsp + 16 * 2] + HORIZx4 1 + add rsp, 16 * 3 + pop rsp ; begin epilog pop rdi pop rsi diff --git a/libvpx/vp9/common/x86/vp9_subpixel_bilinear_sse2.asm b/libvpx/vp9/common/x86/vp9_subpixel_bilinear_sse2.asm new file mode 100644 index 0000000..d94ccf2 --- /dev/null +++ b/libvpx/vp9/common/x86/vp9_subpixel_bilinear_sse2.asm @@ -0,0 +1,448 @@ +; +; Copyright (c) 2014 The WebM project authors. All Rights Reserved. +; +; Use of this source code is governed by a BSD-style license +; that can be found in the LICENSE file in the root of the source +; tree. An additional intellectual property rights grant can be found +; in the file PATENTS. All contributing project authors may +; be found in the AUTHORS file in the root of the source tree. +; + +%include "vpx_ports/x86_abi_support.asm" + +%macro GET_PARAM_4 0 + mov rdx, arg(5) ;filter ptr + mov rsi, arg(0) ;src_ptr + mov rdi, arg(2) ;output_ptr + mov rcx, 0x0400040 + + movdqa xmm3, [rdx] ;load filters + pshuflw xmm4, xmm3, 11111111b ;k3 + psrldq xmm3, 8 + pshuflw xmm3, xmm3, 0b ;k4 + punpcklqdq xmm4, xmm3 ;k3k4 + + movq xmm3, rcx ;rounding + pshufd xmm3, xmm3, 0 + + pxor xmm2, xmm2 + + movsxd rax, DWORD PTR arg(1) ;pixels_per_line + movsxd rdx, DWORD PTR arg(3) ;out_pitch + movsxd rcx, DWORD PTR arg(4) ;output_height +%endm + +%macro APPLY_FILTER_4 1 + + punpckldq xmm0, xmm1 ;two row in one register + punpcklbw xmm0, xmm2 ;unpack to word + pmullw xmm0, xmm4 ;multiply the filter factors + + movdqa xmm1, xmm0 + psrldq xmm1, 8 + paddsw xmm0, xmm1 + + paddsw xmm0, xmm3 ;rounding + psraw xmm0, 7 ;shift + packuswb xmm0, xmm0 ;pack to byte + +%if %1 + movd xmm1, [rdi] + pavgb xmm0, xmm1 +%endif + + movd [rdi], xmm0 + lea rsi, [rsi + rax] + lea rdi, [rdi + rdx] + dec rcx +%endm + +%macro GET_PARAM 0 + mov rdx, arg(5) ;filter ptr + mov rsi, arg(0) ;src_ptr + mov rdi, arg(2) ;output_ptr + mov rcx, 0x0400040 + + movdqa xmm7, [rdx] ;load filters + + pshuflw xmm6, xmm7, 11111111b ;k3 + pshufhw xmm7, xmm7, 0b ;k4 + punpcklwd xmm6, xmm6 + punpckhwd xmm7, xmm7 + + movq xmm4, rcx ;rounding + pshufd xmm4, xmm4, 0 + + pxor xmm5, xmm5 + + movsxd rax, DWORD PTR arg(1) ;pixels_per_line + movsxd rdx, DWORD PTR arg(3) ;out_pitch + movsxd rcx, DWORD PTR arg(4) ;output_height +%endm + +%macro APPLY_FILTER_8 1 + punpcklbw xmm0, xmm5 + punpcklbw xmm1, xmm5 + + pmullw xmm0, xmm6 + pmullw xmm1, xmm7 + paddsw xmm0, xmm1 + paddsw xmm0, xmm4 ;rounding + psraw xmm0, 7 ;shift + packuswb xmm0, xmm0 ;pack back to byte +%if %1 + movq xmm1, [rdi] + pavgb xmm0, xmm1 +%endif + movq [rdi], xmm0 ;store the result + + lea rsi, [rsi + rax] + lea rdi, [rdi + rdx] + dec rcx +%endm + +%macro APPLY_FILTER_16 1 + punpcklbw xmm0, xmm5 + punpcklbw xmm1, xmm5 + punpckhbw xmm2, xmm5 + punpckhbw xmm3, xmm5 + + pmullw xmm0, xmm6 + pmullw xmm1, xmm7 + pmullw xmm2, xmm6 + pmullw xmm3, xmm7 + + paddsw xmm0, xmm1 + paddsw xmm2, xmm3 + + paddsw xmm0, xmm4 ;rounding + paddsw xmm2, xmm4 + psraw xmm0, 7 ;shift + psraw xmm2, 7 + packuswb xmm0, xmm2 ;pack back to byte +%if %1 + movdqu xmm1, [rdi] + pavgb 
xmm0, xmm1 +%endif + movdqu [rdi], xmm0 ;store the result + + lea rsi, [rsi + rax] + lea rdi, [rdi + rdx] + dec rcx +%endm + +global sym(vp9_filter_block1d4_v2_sse2) PRIVATE +sym(vp9_filter_block1d4_v2_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + push rsi + push rdi + ; end prolog + + GET_PARAM_4 +.loop: + movd xmm0, [rsi] ;load src + movd xmm1, [rsi + rax] + + APPLY_FILTER_4 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d8_v2_sse2) PRIVATE +sym(vp9_filter_block1d8_v2_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movq xmm0, [rsi] ;0 + movq xmm1, [rsi + rax] ;1 + + APPLY_FILTER_8 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d16_v2_sse2) PRIVATE +sym(vp9_filter_block1d16_v2_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;0 + movdqu xmm1, [rsi + rax] ;1 + movdqa xmm2, xmm0 + movdqa xmm3, xmm1 + + APPLY_FILTER_16 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d4_v2_avg_sse2) PRIVATE +sym(vp9_filter_block1d4_v2_avg_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + push rsi + push rdi + ; end prolog + + GET_PARAM_4 +.loop: + movd xmm0, [rsi] ;load src + movd xmm1, [rsi + rax] + + APPLY_FILTER_4 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d8_v2_avg_sse2) PRIVATE +sym(vp9_filter_block1d8_v2_avg_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movq xmm0, [rsi] ;0 + movq xmm1, [rsi + rax] ;1 + + APPLY_FILTER_8 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d16_v2_avg_sse2) PRIVATE +sym(vp9_filter_block1d16_v2_avg_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;0 + movdqu xmm1, [rsi + rax] ;1 + movdqa xmm2, xmm0 + movdqa xmm3, xmm1 + + APPLY_FILTER_16 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d4_h2_sse2) PRIVATE +sym(vp9_filter_block1d4_h2_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + push rsi + push rdi + ; end prolog + + GET_PARAM_4 +.loop: + movdqu xmm0, [rsi] ;load src + movdqa xmm1, xmm0 + psrldq xmm1, 1 + + APPLY_FILTER_4 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d8_h2_sse2) PRIVATE +sym(vp9_filter_block1d8_h2_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;load src + movdqa xmm1, xmm0 + psrldq xmm1, 1 + + APPLY_FILTER_8 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d16_h2_sse2) PRIVATE +sym(vp9_filter_block1d16_h2_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;load src + movdqu xmm1, [rsi + 1] + movdqa xmm2, xmm0 + movdqa xmm3, xmm1 + + APPLY_FILTER_16 0 
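+ ; note: APPLY_FILTER_16 ends with "dec rcx", so the jnz below is
+ ; testing the row counter decremented inside the macro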
+ jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d4_h2_avg_sse2) PRIVATE +sym(vp9_filter_block1d4_h2_avg_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + push rsi + push rdi + ; end prolog + + GET_PARAM_4 +.loop: + movdqu xmm0, [rsi] ;load src + movdqa xmm1, xmm0 + psrldq xmm1, 1 + + APPLY_FILTER_4 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d8_h2_avg_sse2) PRIVATE +sym(vp9_filter_block1d8_h2_avg_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;load src + movdqa xmm1, xmm0 + psrldq xmm1, 1 + + APPLY_FILTER_8 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d16_h2_avg_sse2) PRIVATE +sym(vp9_filter_block1d16_h2_avg_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;load src + movdqu xmm1, [rsi + 1] + movdqa xmm2, xmm0 + movdqa xmm3, xmm1 + + APPLY_FILTER_16 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret diff --git a/libvpx/vp9/common/x86/vp9_subpixel_bilinear_ssse3.asm b/libvpx/vp9/common/x86/vp9_subpixel_bilinear_ssse3.asm new file mode 100644 index 0000000..b5e18fe --- /dev/null +++ b/libvpx/vp9/common/x86/vp9_subpixel_bilinear_ssse3.asm @@ -0,0 +1,422 @@ +; +; Copyright (c) 2014 The WebM project authors. All Rights Reserved. +; +; Use of this source code is governed by a BSD-style license +; that can be found in the LICENSE file in the root of the source +; tree. An additional intellectual property rights grant can be found +; in the file PATENTS. All contributing project authors may +; be found in the AUTHORS file in the root of the source tree. 
+; + +%include "vpx_ports/x86_abi_support.asm" + +%macro GET_PARAM_4 0 + mov rdx, arg(5) ;filter ptr + mov rsi, arg(0) ;src_ptr + mov rdi, arg(2) ;output_ptr + mov rcx, 0x0400040 + + movdqa xmm3, [rdx] ;load filters + psrldq xmm3, 6 + packsswb xmm3, xmm3 + pshuflw xmm3, xmm3, 0b ;k3_k4 + + movq xmm2, rcx ;rounding + pshufd xmm2, xmm2, 0 + + movsxd rax, DWORD PTR arg(1) ;pixels_per_line + movsxd rdx, DWORD PTR arg(3) ;out_pitch + movsxd rcx, DWORD PTR arg(4) ;output_height +%endm + +%macro APPLY_FILTER_4 1 + punpcklbw xmm0, xmm1 + pmaddubsw xmm0, xmm3 + + paddsw xmm0, xmm2 ;rounding + psraw xmm0, 7 ;shift + packuswb xmm0, xmm0 ;pack to byte + +%if %1 + movd xmm1, [rdi] + pavgb xmm0, xmm1 +%endif + movd [rdi], xmm0 + lea rsi, [rsi + rax] + lea rdi, [rdi + rdx] + dec rcx +%endm + +%macro GET_PARAM 0 + mov rdx, arg(5) ;filter ptr + mov rsi, arg(0) ;src_ptr + mov rdi, arg(2) ;output_ptr + mov rcx, 0x0400040 + + movdqa xmm7, [rdx] ;load filters + psrldq xmm7, 6 + packsswb xmm7, xmm7 + pshuflw xmm7, xmm7, 0b ;k3_k4 + punpcklwd xmm7, xmm7 + + movq xmm6, rcx ;rounding + pshufd xmm6, xmm6, 0 + + movsxd rax, DWORD PTR arg(1) ;pixels_per_line + movsxd rdx, DWORD PTR arg(3) ;out_pitch + movsxd rcx, DWORD PTR arg(4) ;output_height +%endm + +%macro APPLY_FILTER_8 1 + punpcklbw xmm0, xmm1 + pmaddubsw xmm0, xmm7 + + paddsw xmm0, xmm6 ;rounding + psraw xmm0, 7 ;shift + packuswb xmm0, xmm0 ;pack back to byte + +%if %1 + movq xmm1, [rdi] + pavgb xmm0, xmm1 +%endif + movq [rdi], xmm0 ;store the result + + lea rsi, [rsi + rax] + lea rdi, [rdi + rdx] + dec rcx +%endm + +%macro APPLY_FILTER_16 1 + punpcklbw xmm0, xmm1 + punpckhbw xmm2, xmm1 + pmaddubsw xmm0, xmm7 + pmaddubsw xmm2, xmm7 + + paddsw xmm0, xmm6 ;rounding + paddsw xmm2, xmm6 + psraw xmm0, 7 ;shift + psraw xmm2, 7 + packuswb xmm0, xmm2 ;pack back to byte + +%if %1 + movdqu xmm1, [rdi] + pavgb xmm0, xmm1 +%endif + movdqu [rdi], xmm0 ;store the result + + lea rsi, [rsi + rax] + lea rdi, [rdi + rdx] + dec rcx +%endm + +global sym(vp9_filter_block1d4_v2_ssse3) PRIVATE +sym(vp9_filter_block1d4_v2_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + push rsi + push rdi + ; end prolog + + GET_PARAM_4 +.loop: + movd xmm0, [rsi] ;load src + movd xmm1, [rsi + rax] + + APPLY_FILTER_4 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d8_v2_ssse3) PRIVATE +sym(vp9_filter_block1d8_v2_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movq xmm0, [rsi] ;0 + movq xmm1, [rsi + rax] ;1 + + APPLY_FILTER_8 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d16_v2_ssse3) PRIVATE +sym(vp9_filter_block1d16_v2_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;0 + movdqu xmm1, [rsi + rax] ;1 + movdqa xmm2, xmm0 + + APPLY_FILTER_16 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d4_v2_avg_ssse3) PRIVATE +sym(vp9_filter_block1d4_v2_avg_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + push rsi + push rdi + ; end prolog + + GET_PARAM_4 +.loop: + movd xmm0, [rsi] ;load src + movd xmm1, [rsi + rax] + + APPLY_FILTER_4 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d8_v2_avg_ssse3) PRIVATE 
+sym(vp9_filter_block1d8_v2_avg_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movq xmm0, [rsi] ;0 + movq xmm1, [rsi + rax] ;1 + + APPLY_FILTER_8 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d16_v2_avg_ssse3) PRIVATE +sym(vp9_filter_block1d16_v2_avg_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;0 + movdqu xmm1, [rsi + rax] ;1 + movdqa xmm2, xmm0 + + APPLY_FILTER_16 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d4_h2_ssse3) PRIVATE +sym(vp9_filter_block1d4_h2_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + push rsi + push rdi + ; end prolog + + GET_PARAM_4 +.loop: + movdqu xmm0, [rsi] ;load src + movdqa xmm1, xmm0 + psrldq xmm1, 1 + + APPLY_FILTER_4 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d8_h2_ssse3) PRIVATE +sym(vp9_filter_block1d8_h2_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;load src + movdqa xmm1, xmm0 + psrldq xmm1, 1 + + APPLY_FILTER_8 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d16_h2_ssse3) PRIVATE +sym(vp9_filter_block1d16_h2_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;load src + movdqu xmm1, [rsi + 1] + movdqa xmm2, xmm0 + + APPLY_FILTER_16 0 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d4_h2_avg_ssse3) PRIVATE +sym(vp9_filter_block1d4_h2_avg_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + push rsi + push rdi + ; end prolog + + GET_PARAM_4 +.loop: + movdqu xmm0, [rsi] ;load src + movdqa xmm1, xmm0 + psrldq xmm1, 1 + + APPLY_FILTER_4 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d8_h2_avg_ssse3) PRIVATE +sym(vp9_filter_block1d8_h2_avg_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;load src + movdqa xmm1, xmm0 + psrldq xmm1, 1 + + APPLY_FILTER_8 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret + +global sym(vp9_filter_block1d16_h2_avg_ssse3) PRIVATE +sym(vp9_filter_block1d16_h2_avg_ssse3): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 6 + SAVE_XMM 7 + push rsi + push rdi + ; end prolog + + GET_PARAM +.loop: + movdqu xmm0, [rsi] ;load src + movdqu xmm1, [rsi + 1] + movdqa xmm2, xmm0 + + APPLY_FILTER_16 1 + jnz .loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_XMM + UNSHADOW_ARGS + pop rbp + ret diff --git a/libvpx/vp9/decoder/vp9_decodframe.c b/libvpx/vp9/decoder/vp9_decodeframe.c index 3c4781b..1e203b1 100644 --- a/libvpx/vp9/decoder/vp9_decodframe.c +++ b/libvpx/vp9/decoder/vp9_decodeframe.c @@ -9,16 +9,19 @@ */ #include <assert.h> +#include <stdlib.h> // qsort() #include "./vp9_rtcd.h" +#include "./vpx_scale_rtcd.h" + #include "vpx_mem/vpx_mem.h" +#include "vpx_ports/mem_ops.h" #include "vpx_scale/vpx_scale.h" 
#include "vp9/common/vp9_alloccommon.h" #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_entropymode.h" -#include "vp9/common/vp9_extend.h" #include "vp9/common/vp9_idct.h" #include "vp9/common/vp9_pred_common.h" #include "vp9/common/vp9_quant_common.h" @@ -27,40 +30,26 @@ #include "vp9/common/vp9_seg_common.h" #include "vp9/common/vp9_tile_common.h" -#include "vp9/decoder/vp9_dboolhuff.h" -#include "vp9/decoder/vp9_decodframe.h" +#include "vp9/decoder/vp9_decodeframe.h" #include "vp9/decoder/vp9_detokenize.h" #include "vp9/decoder/vp9_decodemv.h" +#include "vp9/decoder/vp9_decoder.h" #include "vp9/decoder/vp9_dsubexp.h" -#include "vp9/decoder/vp9_onyxd_int.h" +#include "vp9/decoder/vp9_dthread.h" #include "vp9/decoder/vp9_read_bit_buffer.h" +#include "vp9/decoder/vp9_reader.h" #include "vp9/decoder/vp9_thread.h" -#include "vp9/decoder/vp9_treereader.h" - -typedef struct TileWorkerData { - VP9_COMMON *cm; - vp9_reader bit_reader; - DECLARE_ALIGNED(16, MACROBLOCKD, xd); - DECLARE_ALIGNED(16, unsigned char, token_cache[1024]); - DECLARE_ALIGNED(16, int16_t, qcoeff[MAX_MB_PLANE][64 * 64]); - DECLARE_ALIGNED(16, int16_t, dqcoeff[MAX_MB_PLANE][64 * 64]); - DECLARE_ALIGNED(16, uint16_t, eobs[MAX_MB_PLANE][256]); -} TileWorkerData; - -static int read_be32(const uint8_t *p) { - return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; -} -static int is_compound_prediction_allowed(const VP9_COMMON *cm) { +static int is_compound_reference_allowed(const VP9_COMMON *cm) { int i; - for (i = 1; i < ALLOWED_REFS_PER_FRAME; ++i) - if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) + for (i = 1; i < REFS_PER_FRAME; ++i) + if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) return 1; return 0; } -static void setup_compound_prediction(VP9_COMMON *cm) { +static void setup_compound_reference_mode(VP9_COMMON *cm) { if (cm->ref_frame_sign_bias[LAST_FRAME] == cm->ref_frame_sign_bias[GOLDEN_FRAME]) { cm->comp_fixed_ref = ALTREF_FRAME; @@ -78,9 +67,8 @@ static void setup_compound_prediction(VP9_COMMON *cm) { } } -// len == 0 is not allowed static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) { - return start + len > start && start + len <= end; + return len != 0 && len <= (size_t)(end - start); } static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) { @@ -95,7 +83,7 @@ static TX_MODE read_tx_mode(vp9_reader *r) { return tx_mode; } -static void read_tx_probs(struct tx_probs *tx_probs, vp9_reader *r) { +static void read_tx_mode_probs(struct tx_probs *tx_probs, vp9_reader *r) { int i, j; for (i = 0; i < TX_SIZE_CONTEXTS; ++i) @@ -125,42 +113,41 @@ static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) { vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]); } -static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) { - COMPPREDMODE_TYPE mode = vp9_read_bit(r); - if (mode) - mode += vp9_read_bit(r); - return mode; +static REFERENCE_MODE read_frame_reference_mode(const VP9_COMMON *cm, + vp9_reader *r) { + if (is_compound_reference_allowed(cm)) { + return vp9_read_bit(r) ? (vp9_read_bit(r) ? 
REFERENCE_MODE_SELECT + : COMPOUND_REFERENCE) + : SINGLE_REFERENCE; + } else { + return SINGLE_REFERENCE; + } } -static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) { +static void read_frame_reference_mode_probs(VP9_COMMON *cm, vp9_reader *r) { + FRAME_CONTEXT *const fc = &cm->fc; int i; - const int compound_allowed = is_compound_prediction_allowed(cm); - cm->comp_pred_mode = compound_allowed ? read_comp_pred_mode(r) - : SINGLE_PREDICTION_ONLY; - if (compound_allowed) - setup_compound_prediction(cm); - - if (cm->comp_pred_mode == HYBRID_PREDICTION) - for (i = 0; i < COMP_INTER_CONTEXTS; i++) - vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]); + if (cm->reference_mode == REFERENCE_MODE_SELECT) + for (i = 0; i < COMP_INTER_CONTEXTS; ++i) + vp9_diff_update_prob(r, &fc->comp_inter_prob[i]); - if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) - for (i = 0; i < REF_CONTEXTS; i++) { - vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]); - vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]); + if (cm->reference_mode != COMPOUND_REFERENCE) + for (i = 0; i < REF_CONTEXTS; ++i) { + vp9_diff_update_prob(r, &fc->single_ref_prob[i][0]); + vp9_diff_update_prob(r, &fc->single_ref_prob[i][1]); } - if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) - for (i = 0; i < REF_CONTEXTS; i++) - vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]); + if (cm->reference_mode != SINGLE_REFERENCE) + for (i = 0; i < REF_CONTEXTS; ++i) + vp9_diff_update_prob(r, &fc->comp_ref_prob[i]); } static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) { int i; for (i = 0; i < n; ++i) - if (vp9_read(r, NMV_UPDATE_PROB)) - p[i] = (vp9_read_literal(r, 7) << 1) | 1; + if (vp9_read(r, MV_UPDATE_PROB)) + p[i] = (vp9_read_literal(r, 7) << 1) | 1; } static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) { @@ -179,7 +166,7 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) { for (i = 0; i < 2; ++i) { nmv_component *const comp_ctx = &ctx->comps[i]; for (j = 0; j < CLASS0_SIZE; ++j) - update_mv_probs(comp_ctx->class0_fp[j], 3, r); + update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r); update_mv_probs(comp_ctx->fp, 3, r); } @@ -242,16 +229,13 @@ static void alloc_tile_storage(VP9D_COMP *pbi, int tile_rows, int tile_cols) { } static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block, - TX_SIZE tx_size, int x, int y) { + TX_SIZE tx_size, uint8_t *dst, int stride, + int eob) { struct macroblockd_plane *const pd = &xd->plane[plane]; - const int eob = pd->eobs[block]; if (eob > 0) { TX_TYPE tx_type; const int plane_type = pd->plane_type; - const int stride = pd->dst.stride; int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - uint8_t *const dst = &pd->dst.buf[4 * y * stride + 4 * x]; - switch (tx_size) { case TX_4X4: tx_type = get_tx_type_4x4(plane_type, xd, block); @@ -261,11 +245,11 @@ static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block, vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type); break; case TX_8X8: - tx_type = get_tx_type_8x8(plane_type, xd); + tx_type = get_tx_type(plane_type, xd); vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); break; case TX_16X16: - tx_type = get_tx_type_16x16(plane_type, xd); + tx_type = get_tx_type(plane_type, xd); vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob); break; case TX_32X32: @@ -273,7 +257,7 @@ static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block, vp9_idct32x32_add(dqcoeff, dst, stride, eob); break; default: - assert(!"Invalid transform size"); + assert(0 && "Invalid 
transform size"); } if (eob == 1) { @@ -293,7 +277,6 @@ struct intra_args { VP9_COMMON *cm; MACROBLOCKD *xd; vp9_reader *r; - uint8_t *token_cache; }; static void predict_and_reconstruct_intra_block(int plane, int block, @@ -304,26 +287,24 @@ static void predict_and_reconstruct_intra_block(int plane, int block, MACROBLOCKD *const xd = args->xd; struct macroblockd_plane *const pd = &xd->plane[plane]; MODE_INFO *const mi = xd->mi_8x8[0]; - const MB_PREDICTION_MODE mode = (plane == 0) - ? ((mi->mbmi.sb_type < BLOCK_8X8) ? mi->bmi[block].as_mode - : mi->mbmi.mode) - : mi->mbmi.uv_mode; + const MB_PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block) + : mi->mbmi.uv_mode; int x, y; uint8_t *dst; txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x]; - if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) - extend_for_intra(xd, plane_bsize, plane, block, tx_size); - vp9_predict_intra_block(xd, block >> (tx_size << 1), b_width_log2(plane_bsize), tx_size, mode, - dst, pd->dst.stride, dst, pd->dst.stride); - - if (!mi->mbmi.skip_coeff) { - vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y, tx_size, - args->r, args->token_cache); - inverse_transform_block(xd, plane, block, tx_size, x, y); + dst, pd->dst.stride, dst, pd->dst.stride, + x, y, plane); + + if (!mi->mbmi.skip) { + const int eob = vp9_decode_block_tokens(cm, xd, plane, block, + plane_bsize, x, y, tx_size, + args->r); + inverse_transform_block(xd, plane, block, tx_size, dst, pd->dst.stride, + eob); } } @@ -332,7 +313,6 @@ struct inter_args { MACROBLOCKD *xd; vp9_reader *r; int *eobtotal; - uint8_t *token_cache; }; static void reconstruct_inter_block(int plane, int block, @@ -341,34 +321,40 @@ static void reconstruct_inter_block(int plane, int block, struct inter_args *args = arg; VP9_COMMON *const cm = args->cm; MACROBLOCKD *const xd = args->xd; - int x, y; + struct macroblockd_plane *const pd = &xd->plane[plane]; + int x, y, eob; txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); - - *args->eobtotal += vp9_decode_block_tokens(cm, xd, plane, block, - plane_bsize, x, y, tx_size, - args->r, args->token_cache); - inverse_transform_block(xd, plane, block, tx_size, x, y); + eob = vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y, + tx_size, args->r); + inverse_transform_block(xd, plane, block, tx_size, + &pd->dst.buf[4 * y * pd->dst.stride + 4 * x], + pd->dst.stride, eob); + *args->eobtotal += eob; } static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd, const TileInfo *const tile, BLOCK_SIZE bsize, int mi_row, int mi_col) { - const int bh = num_8x8_blocks_high_lookup[bsize]; const int bw = num_8x8_blocks_wide_lookup[bsize]; + const int bh = num_8x8_blocks_high_lookup[bsize]; + const int x_mis = MIN(bw, cm->mi_cols - mi_col); + const int y_mis = MIN(bh, cm->mi_rows - mi_row); const int offset = mi_row * cm->mode_info_stride + mi_col; const int tile_offset = tile->mi_row_start * cm->mode_info_stride + tile->mi_col_start; + int x, y; xd->mi_8x8 = cm->mi_grid_visible + offset; xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset; - // we are using the mode info context stream here + xd->last_mi = cm->coding_use_prev_mi && cm->prev_mi ? + xd->prev_mi_8x8[0] : NULL; + xd->mi_8x8[0] = xd->mi_stream + offset - tile_offset; xd->mi_8x8[0]->mbmi.sb_type = bsize; - - // Special case: if prev_mi is NULL, the previous mode info context - // cannot be used. - xd->last_mi = cm->prev_mi ? 
xd->prev_mi_8x8[0] : NULL; + for (y = 0; y < y_mis; ++y) + for (x = !y; x < x_mis; ++x) + xd->mi_8x8[y * cm->mode_info_stride + x] = xd->mi_8x8[0]; set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col); @@ -376,29 +362,26 @@ static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd, // as they are always compared to values that are in 1/8th pel units set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols); - setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col); + vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col); } static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd, int idx, int mi_row, int mi_col) { MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; - const int ref = mbmi->ref_frame[idx] - LAST_FRAME; - const YV12_BUFFER_CONFIG *cfg = get_frame_ref_buffer(cm, ref); - const struct scale_factors_common *sfc = &cm->active_ref_scale_comm[ref]; - if (!vp9_is_valid_scale(sfc)) + RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME]; + xd->block_refs[idx] = ref_buffer; + if (!vp9_is_valid_scale(&ref_buffer->sf)) vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, "Invalid scale factors"); - - xd->scale_factor[idx].sfc = sfc; - setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]); - xd->corrupted |= cfg->corrupted; + vp9_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col, + &ref_buffer->sf); + xd->corrupted |= ref_buffer->buf->corrupted; } static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd, const TileInfo *const tile, int mi_row, int mi_col, - vp9_reader *r, BLOCK_SIZE bsize, - uint8_t *token_cache) { + vp9_reader *r, BLOCK_SIZE bsize) { const int less8x8 = bsize < BLOCK_8X8; MB_MODE_INFO *mbmi; @@ -411,7 +394,7 @@ static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd, // Has to be called after set_offsets mbmi = &xd->mi_8x8[0]->mbmi; - if (mbmi->skip_coeff) { + if (mbmi->skip) { reset_skip_context(xd, bsize); } else { if (cm->seg.enabled) @@ -420,32 +403,27 @@ static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd, } if (!is_inter_block(mbmi)) { - struct intra_args arg = { - cm, xd, r, token_cache - }; - foreach_transformed_block(xd, bsize, predict_and_reconstruct_intra_block, - &arg); + struct intra_args arg = { cm, xd, r }; + vp9_foreach_transformed_block(xd, bsize, + predict_and_reconstruct_intra_block, &arg); } else { // Setup set_ref(cm, xd, 0, mi_row, mi_col); if (has_second_ref(mbmi)) set_ref(cm, xd, 1, mi_row, mi_col); - xd->subpix.filter_x = xd->subpix.filter_y = - vp9_get_filter_kernel(mbmi->interp_filter); + xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); // Prediction - vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); + vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); // Reconstruction - if (!mbmi->skip_coeff) { + if (!mbmi->skip) { int eobtotal = 0; - struct inter_args arg = { - cm, xd, r, &eobtotal, token_cache - }; - foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg); + struct inter_args arg = { cm, xd, r, &eobtotal }; + vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg); if (!less8x8 && eobtotal == 0) - mbmi->skip_coeff = 1; // skip loopfilter + mbmi->skip = 1; // skip loopfilter } } @@ -464,7 +442,7 @@ static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs, PARTITION_TYPE p; if (has_rows && has_cols) - p = treed_read(r, vp9_partition_tree, probs); + p = vp9_read_tree(r, vp9_partition_tree, 
probs); else if (!has_rows && has_cols) p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ; else if (has_rows && !has_cols) @@ -481,8 +459,7 @@ static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs, static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd, const TileInfo *const tile, int mi_row, int mi_col, - vp9_reader* r, BLOCK_SIZE bsize, - uint8_t *token_cache) { + vp9_reader* r, BLOCK_SIZE bsize) { const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2; PARTITION_TYPE partition; BLOCK_SIZE subsize; @@ -493,36 +470,30 @@ static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd, partition = read_partition(cm, xd, hbs, mi_row, mi_col, bsize, r); subsize = get_subsize(bsize, partition); if (subsize < BLOCK_8X8) { - decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache); + decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize); } else { switch (partition) { case PARTITION_NONE: - decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache); + decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize); break; case PARTITION_HORZ: - decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache); + decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize); if (mi_row + hbs < cm->mi_rows) - decode_modes_b(cm, xd, tile, mi_row + hbs, mi_col, r, subsize, - token_cache); + decode_modes_b(cm, xd, tile, mi_row + hbs, mi_col, r, subsize); break; case PARTITION_VERT: - decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache); + decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize); if (mi_col + hbs < cm->mi_cols) - decode_modes_b(cm, xd, tile, mi_row, mi_col + hbs, r, subsize, - token_cache); + decode_modes_b(cm, xd, tile, mi_row, mi_col + hbs, r, subsize); break; case PARTITION_SPLIT: - decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, subsize, - token_cache); - decode_modes_sb(cm, xd, tile, mi_row, mi_col + hbs, r, subsize, - token_cache); - decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col, r, subsize, - token_cache); - decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize, - token_cache); + decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, subsize); + decode_modes_sb(cm, xd, tile, mi_row, mi_col + hbs, r, subsize); + decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col, r, subsize); + decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize); break; default: - assert(!"Invalid partition type"); + assert(0 && "Invalid partition type"); } } @@ -555,13 +526,12 @@ static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs, int i, j, k, l, m; if (vp9_read_bit(r)) - for (i = 0; i < BLOCK_TYPES; i++) - for (j = 0; j < REF_TYPES; j++) - for (k = 0; k < COEF_BANDS; k++) - for (l = 0; l < PREV_COEF_CONTEXTS; l++) - if (k > 0 || l < 3) - for (m = 0; m < UNCONSTRAINED_NODES; m++) - vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]); + for (i = 0; i < PLANE_TYPES; ++i) + for (j = 0; j < REF_TYPES; ++j) + for (k = 0; k < COEF_BANDS; ++k) + for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) + for (m = 0; m < UNCONSTRAINED_NODES; ++m) + vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]); } static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, @@ -675,14 +645,13 @@ static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd, xd->itxm_add = xd->lossless ? 
vp9_iwht4x4_add : vp9_idct4x4_add; } -static INTERPOLATION_TYPE read_interp_filter_type( - struct vp9_read_bit_buffer *rb) { - const INTERPOLATION_TYPE literal_to_type[] = { EIGHTTAP_SMOOTH, - EIGHTTAP, - EIGHTTAP_SHARP, - BILINEAR }; +static INTERP_FILTER read_interp_filter(struct vp9_read_bit_buffer *rb) { + const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH, + EIGHTTAP, + EIGHTTAP_SHARP, + BILINEAR }; return vp9_rb_read_bit(rb) ? SWITCHABLE - : literal_to_type[vp9_rb_read_literal(rb, 2)]; + : literal_to_filter[vp9_rb_read_literal(rb, 2)]; } static void read_frame_size(struct vp9_read_bit_buffer *rb, @@ -705,12 +674,6 @@ static void apply_frame_size(VP9D_COMP *pbi, int width, int height) { if (cm->width != width || cm->height != height) { // Change in frame size. - if (cm->width == 0 || cm->height == 0) { - // Assign new frame buffer on first call. - cm->new_fb_idx = NUM_YV12_BUFFERS - 1; - cm->fb_idx_ref_cnt[cm->new_fb_idx] = 1; - } - // TODO(agrange) Don't test width/height, check overall size. if (width > cm->width || height > cm->height) { // Rescale frame buffers only if they're not big enough already. @@ -725,9 +688,14 @@ static void apply_frame_size(VP9D_COMP *pbi, int width, int height) { vp9_update_frame_size(cm); } - vp9_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, - VP9BORDERINPIXELS); + if (vp9_realloc_frame_buffer( + get_frame_new_buffer(cm), cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, VP9_DEC_BORDER_IN_PIXELS, + &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb, + cm->cb_priv)) { + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); + } } static void setup_frame_size(VP9D_COMP *pbi, @@ -744,11 +712,11 @@ static void setup_frame_size_with_refs(VP9D_COMP *pbi, int width, height; int found = 0, i; - for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { + for (i = 0; i < REFS_PER_FRAME; ++i) { if (vp9_rb_read_bit(rb)) { - YV12_BUFFER_CONFIG *const cfg = get_frame_ref_buffer(cm, i); - width = cfg->y_crop_width; - height = cfg->y_crop_height; + YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf; + width = buf->y_crop_width; + height = buf->y_crop_height; found = 1; break; } @@ -757,7 +725,7 @@ static void setup_frame_size_with_refs(VP9D_COMP *pbi, if (!found) read_frame_size(rb, &width, &height); - if (!width || !height) + if (width <= 0 || height <= 0) vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Referenced frame with invalid size"); @@ -802,8 +770,7 @@ static void decode_tile(VP9D_COMP *pbi, const TileInfo *const tile, vp9_zero(xd->left_seg_context); for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; mi_col += MI_BLOCK_SIZE) { - decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64, - pbi->token_cache); + decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64); } if (pbi->do_loopfilter_inline) { @@ -864,10 +831,14 @@ static size_t get_tile(const uint8_t *const data_end, if (!is_last) { if (!read_is_valid(*data, 4, data_end)) vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME, - "Truncated packet or corrupt tile length"); + "Truncated packet or corrupt tile length"); - size = read_be32(*data); + size = mem_get_be32(*data); *data += 4; + + if (size > (size_t)(data_end - *data)) + vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME, + "Truncated packet or corrupt tile size"); } else { size = data_end - *data; } @@ -877,6 +848,7 @@ static size_t get_tile(const uint8_t *const data_end, typedef struct 
TileBuffer { const uint8_t *data; size_t size; + int col; // only used with multi-threaded decoding } TileBuffer; static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) { @@ -944,9 +916,7 @@ static void setup_tile_macroblockd(TileWorkerData *const tile_data) { int i; for (i = 0; i < MAX_MB_PLANE; ++i) { - pd[i].qcoeff = tile_data->qcoeff[i]; pd[i].dqcoeff = tile_data->dqcoeff[i]; - pd[i].eobs = tile_data->eobs[i]; vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t)); } } @@ -963,22 +933,38 @@ static int tile_worker_hook(void *arg1, void *arg2) { for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; mi_col += MI_BLOCK_SIZE) { decode_modes_sb(tile_data->cm, &tile_data->xd, tile, - mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64, - tile_data->token_cache); + mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64); } } return !tile_data->xd.corrupted; } +// sorts in descending order +static int compare_tile_buffers(const void *a, const void *b) { + const TileBuffer *const buf1 = (const TileBuffer*)a; + const TileBuffer *const buf2 = (const TileBuffer*)b; + if (buf1->size < buf2->size) { + return 1; + } else if (buf1->size == buf2->size) { + return 0; + } else { + return -1; + } +} + static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) { VP9_COMMON *const cm = &pbi->common; + const uint8_t *bit_reader_end = NULL; const uint8_t *const data_end = pbi->source + pbi->source_sz; const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); const int tile_cols = 1 << cm->log2_tile_cols; const int tile_rows = 1 << cm->log2_tile_rows; const int num_workers = MIN(pbi->oxcf.max_threads & ~1, tile_cols); - int tile_col = 0; + TileBuffer tile_buffers[1 << 6]; + int n; + int final_worker = -1; + assert(tile_cols <= (1 << 6)); assert(tile_rows == 1); (void)tile_rows; @@ -992,7 +978,6 @@ static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) { ++pbi->num_tile_workers; vp9_worker_init(worker); - worker->hook = (VP9WorkerHook)tile_worker_hook; CHECK_MEM_ERROR(cm, worker->data1, vpx_memalign(32, sizeof(TileWorkerData))); CHECK_MEM_ERROR(cm, worker->data2, vpx_malloc(sizeof(TileInfo))); @@ -1003,6 +988,11 @@ static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) { } } + // Reset tile decoding hook + for (n = 0; n < pbi->num_tile_workers; ++n) { + pbi->tile_workers[n].hook = (VP9WorkerHook)tile_worker_hook; + } + // Note: this memset assumes above_context[0], [1] and [2] // are allocated as part of the same buffer. vpx_memset(pbi->above_context[0], 0, @@ -1011,48 +1001,82 @@ static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) { vpx_memset(pbi->above_seg_context, 0, sizeof(*pbi->above_seg_context) * aligned_mi_cols); - while (tile_col < tile_cols) { + // Load tile data into tile_buffers + for (n = 0; n < tile_cols; ++n) { + const size_t size = + get_tile(data_end, n == tile_cols - 1, &cm->error, &data); + TileBuffer *const buf = &tile_buffers[n]; + buf->data = data; + buf->size = size; + buf->col = n; + data += size; + } + + // Sort the buffers based on size in descending order. + qsort(tile_buffers, tile_cols, sizeof(tile_buffers[0]), compare_tile_buffers); + + // Rearrange the tile buffers such that per-tile group the largest, and + // presumably the most difficult, tile will be decoded in the main thread. + // This should help minimize the number of instances where the main thread is + // waiting for a worker to complete. 
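+ // For example, with num_workers == 2 and sorted sizes {9, 7, 5, 3, 1}, the + // groups {9, 7}, {5, 3}, {1} become {7, 9}, {3, 5}, {1}: the largest tile of + // each group lands in the slot that vp9_worker_execute() runs synchronously + // on the main thread.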
+ { + int group_start = 0; + while (group_start < tile_cols) { + const TileBuffer largest = tile_buffers[group_start]; + const int group_end = MIN(group_start + num_workers, tile_cols) - 1; + memmove(tile_buffers + group_start, tile_buffers + group_start + 1, + (group_end - group_start) * sizeof(tile_buffers[0])); + tile_buffers[group_end] = largest; + group_start = group_end + 1; + } + } + + n = 0; + while (n < tile_cols) { int i; - for (i = 0; i < num_workers && tile_col < tile_cols; ++i) { + for (i = 0; i < num_workers && n < tile_cols; ++i) { VP9Worker *const worker = &pbi->tile_workers[i]; TileWorkerData *const tile_data = (TileWorkerData*)worker->data1; TileInfo *const tile = (TileInfo*)worker->data2; - const size_t size = - get_tile(data_end, tile_col == tile_cols - 1, &cm->error, &data); + TileBuffer *const buf = &tile_buffers[n]; tile_data->cm = cm; tile_data->xd = pbi->mb; tile_data->xd.corrupted = 0; - vp9_tile_init(tile, tile_data->cm, 0, tile_col); + vp9_tile_init(tile, tile_data->cm, 0, buf->col); - setup_token_decoder(data, data_end, size, &cm->error, + setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &tile_data->bit_reader); - setup_tile_context(pbi, &tile_data->xd, 0, tile_col); + setup_tile_context(pbi, &tile_data->xd, 0, buf->col); setup_tile_macroblockd(tile_data); worker->had_error = 0; - if (i == num_workers - 1 || tile_col == tile_cols - 1) { + if (i == num_workers - 1 || n == tile_cols - 1) { vp9_worker_execute(worker); } else { vp9_worker_launch(worker); } - data += size; - ++tile_col; + if (buf->col == tile_cols - 1) { + final_worker = i; + } + + ++n; } for (; i > 0; --i) { VP9Worker *const worker = &pbi->tile_workers[i - 1]; pbi->mb.corrupted |= !vp9_worker_sync(worker); } + if (final_worker > -1) { + TileWorkerData *const tile_data = + (TileWorkerData*)pbi->tile_workers[final_worker].data1; + bit_reader_end = vp9_reader_find_end(&tile_data->bit_reader); + final_worker = -1; + } } - { - const int final_worker = (tile_cols + num_workers - 1) % num_workers; - TileWorkerData *const tile_data = - (TileWorkerData*)pbi->tile_workers[final_worker].data1; - return vp9_reader_find_end(&tile_data->bit_reader); - } + return bit_reader_end; } static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { @@ -1064,7 +1088,7 @@ static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { } } -static void error_handler(void *data, size_t bit_offset) { +static void error_handler(void *data) { VP9_COMMON *const cm = (VP9_COMMON *)data; vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet"); } @@ -1089,12 +1113,20 @@ static size_t read_uncompressed_header(VP9D_COMP *pbi, cm->version = vp9_rb_read_bit(rb); RESERVED; - if (vp9_rb_read_bit(rb)) { - // show an existing frame directly - int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)]; - ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->new_fb_idx, frame_to_show); + cm->show_existing_frame = vp9_rb_read_bit(rb); + if (cm->show_existing_frame) { + // Show an existing frame directly. 
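+ // (The header carries only a 3-bit index into the 8-entry reference map; + // no coefficient data follows, so the loop filter level is forced to 0 and + // the referenced buffer is simply re-displayed.)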
+ const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)]; + + if (cm->frame_bufs[frame_to_show].ref_count < 1) + vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, + "Buffer %d does not contain a decoded frame", + frame_to_show); + + ref_cnt_fb(cm->frame_bufs, &cm->new_fb_idx, frame_to_show); pbi->refresh_frame_flags = 0; cm->lf.filter_level = 0; + cm->show_frame = 1; return 0; } @@ -1125,10 +1157,12 @@ static size_t read_uncompressed_header(VP9D_COMP *pbi, } } - pbi->refresh_frame_flags = (1 << NUM_REF_FRAMES) - 1; + pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1; - for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) - cm->active_ref_idx[i] = cm->new_fb_idx; + for (i = 0; i < REFS_PER_FRAME; ++i) { + cm->frame_refs[i].idx = cm->new_fb_idx; + cm->frame_refs[i].buf = get_frame_new_buffer(cm); + } setup_frame_size(pbi, rb); } else { @@ -1140,38 +1174,49 @@ static size_t read_uncompressed_header(VP9D_COMP *pbi, if (cm->intra_only) { check_sync_code(cm, rb); - pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES); + pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES); setup_frame_size(pbi, rb); } else { - pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES); + pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES); - for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { - const int ref = vp9_rb_read_literal(rb, NUM_REF_FRAMES_LOG2); - cm->active_ref_idx[i] = cm->ref_frame_map[ref]; + for (i = 0; i < REFS_PER_FRAME; ++i) { + const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2); + const int idx = cm->ref_frame_map[ref]; + cm->frame_refs[i].idx = idx; + cm->frame_refs[i].buf = &cm->frame_bufs[idx].buf; cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb); } setup_frame_size_with_refs(pbi, rb); cm->allow_high_precision_mv = vp9_rb_read_bit(rb); - cm->mcomp_filter_type = read_interp_filter_type(rb); - - for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) - vp9_setup_scale_factors(cm, i); + cm->interp_filter = read_interp_filter(rb); + + for (i = 0; i < REFS_PER_FRAME; ++i) { + RefBuffer *const ref_buf = &cm->frame_refs[i]; + vp9_setup_scale_factors_for_frame(&ref_buf->sf, + ref_buf->buf->y_crop_width, + ref_buf->buf->y_crop_height, + cm->width, cm->height); + if (vp9_is_scaled(&ref_buf->sf)) + vp9_extend_frame_borders(ref_buf->buf); + } } } if (!cm->error_resilient_mode) { + cm->coding_use_prev_mi = 1; cm->refresh_frame_context = vp9_rb_read_bit(rb); cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb); } else { + cm->coding_use_prev_mi = 0; cm->refresh_frame_context = 0; cm->frame_parallel_decoding_mode = 1; } // This flag will be overridden by the call to vp9_setup_past_independence // below, forcing the use of context 0 for those frame types. - cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LOG2); + cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2); if (frame_is_intra_only(cm) || cm->error_resilient_mode) vp9_setup_past_independence(cm); @@ -1204,11 +1249,11 @@ static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data, cm->tx_mode = xd->lossless ? 
ONLY_4X4 : read_tx_mode(&r); if (cm->tx_mode == TX_MODE_SELECT) - read_tx_probs(&fc->tx_probs, &r); + read_tx_mode_probs(&fc->tx_probs, &r); read_coef_probs(fc, cm->tx_mode, &r); - for (k = 0; k < MBSKIP_CONTEXTS; ++k) - vp9_diff_update_prob(&r, &fc->mbskip_probs[k]); + for (k = 0; k < SKIP_CONTEXTS; ++k) + vp9_diff_update_prob(&r, &fc->skip_probs[k]); if (!frame_is_intra_only(cm)) { nmv_context *const nmvc = &fc->nmvc; @@ -1216,13 +1261,16 @@ static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data, read_inter_mode_probs(fc, &r); - if (cm->mcomp_filter_type == SWITCHABLE) + if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r); for (i = 0; i < INTRA_INTER_CONTEXTS; i++) vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]); - read_comp_pred(cm, &r); + cm->reference_mode = read_frame_reference_mode(cm, &r); + if (cm->reference_mode != SINGLE_REFERENCE) + setup_compound_reference_mode(cm); + read_frame_reference_mode_probs(cm, &r); for (j = 0; j < BLOCK_SIZE_GROUPS; j++) for (i = 0; i < INTRA_MODES - 1; ++i) @@ -1282,8 +1330,7 @@ static void debug_check_frame_counts(const VP9_COMMON *const cm) { assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref, sizeof(cm->counts.comp_ref))); assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx))); - assert(!memcmp(cm->counts.mbskip, zero_counts.mbskip, - sizeof(cm->counts.mbskip))); + assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip))); assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv))); } #endif // NDEBUG @@ -1302,6 +1349,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) { const int tile_rows = 1 << cm->log2_tile_rows; const int tile_cols = 1 << cm->log2_tile_cols; YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm); + xd->cur_buf = new_fb; if (!first_partition_size) { // showing a frame directly @@ -1331,10 +1379,13 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) { alloc_tile_storage(pbi, tile_rows, tile_cols); xd->mode_info_stride = cm->mode_info_stride; - set_prev_mi(cm); + if (cm->coding_use_prev_mi) + set_prev_mi(cm); + else + cm->prev_mi = NULL; setup_plane_dequants(cm, xd, cm->base_qindex); - setup_block_dptrs(xd, cm->subsampling_x, cm->subsampling_y); + vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y); cm->fc = cm->frame_contexts[cm->frame_context_idx]; vp9_zero(cm->counts); @@ -1353,9 +1404,6 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) { *p_data_end = decode_tiles(pbi, data + first_partition_size); } - cm->last_width = cm->width; - cm->last_height = cm->height; - new_fb->corrupted |= xd->corrupted; if (!pbi->decoded_key_frame) { diff --git a/libvpx/vp9/decoder/vp9_decodframe.h b/libvpx/vp9/decoder/vp9_decodeframe.h index c665f6f..4537bc8 100644 --- a/libvpx/vp9/decoder/vp9_decodframe.h +++ b/libvpx/vp9/decoder/vp9_decodeframe.h @@ -9,8 +9,12 @@ */ -#ifndef VP9_DECODER_VP9_DECODFRAME_H_ -#define VP9_DECODER_VP9_DECODFRAME_H_ +#ifndef VP9_DECODER_VP9_DECODEFRAME_H_ +#define VP9_DECODER_VP9_DECODEFRAME_H_ + +#ifdef __cplusplus +extern "C" { +#endif struct VP9Common; struct VP9Decompressor; @@ -18,4 +22,8 @@ struct VP9Decompressor; void vp9_init_dequantizer(struct VP9Common *cm); int vp9_decode_frame(struct VP9Decompressor *cpi, const uint8_t **p_data_end); -#endif // VP9_DECODER_VP9_DECODFRAME_H_ +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_DECODER_VP9_DECODEFRAME_H_ diff --git a/libvpx/vp9/decoder/vp9_decodemv.c b/libvpx/vp9/decoder/vp9_decodemv.c 
index b948429..799a82a 100644 --- a/libvpx/vp9/decoder/vp9_decodemv.c +++ b/libvpx/vp9/decoder/vp9_decodemv.c @@ -14,19 +14,17 @@ #include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_entropymode.h" #include "vp9/common/vp9_entropymv.h" -#include "vp9/common/vp9_findnearmv.h" #include "vp9/common/vp9_mvref_common.h" #include "vp9/common/vp9_pred_common.h" #include "vp9/common/vp9_reconinter.h" #include "vp9/common/vp9_seg_common.h" #include "vp9/decoder/vp9_decodemv.h" -#include "vp9/decoder/vp9_decodframe.h" -#include "vp9/decoder/vp9_onyxd_int.h" -#include "vp9/decoder/vp9_treereader.h" +#include "vp9/decoder/vp9_decodeframe.h" +#include "vp9/decoder/vp9_reader.h" static MB_PREDICTION_MODE read_intra_mode(vp9_reader *r, const vp9_prob *p) { - return (MB_PREDICTION_MODE)treed_read(r, vp9_intra_mode_tree, p); + return (MB_PREDICTION_MODE)vp9_read_tree(r, vp9_intra_mode_tree, p); } static MB_PREDICTION_MODE read_intra_mode_y(VP9_COMMON *cm, vp9_reader *r, @@ -49,8 +47,8 @@ static MB_PREDICTION_MODE read_intra_mode_uv(VP9_COMMON *cm, vp9_reader *r, static MB_PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, vp9_reader *r, int ctx) { - const int mode = treed_read(r, vp9_inter_mode_tree, - cm->fc.inter_mode_probs[ctx]); + const int mode = vp9_read_tree(r, vp9_inter_mode_tree, + cm->fc.inter_mode_probs[ctx]); if (!cm->frame_parallel_decoding_mode) ++cm->counts.inter_mode[ctx][mode]; @@ -58,12 +56,12 @@ static MB_PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, vp9_reader *r, } static int read_segment_id(vp9_reader *r, const struct segmentation *seg) { - return treed_read(r, vp9_segment_tree, seg->tree_probs); + return vp9_read_tree(r, vp9_segment_tree, seg->tree_probs); } static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd, TX_SIZE max_tx_size, vp9_reader *r) { - const int ctx = vp9_get_pred_context_tx_size(xd); + const int ctx = vp9_get_tx_size_context(xd); const vp9_prob *tx_probs = get_tx_probs(max_tx_size, ctx, &cm->fc.tx_probs); TX_SIZE tx_size = vp9_read(r, tx_probs[0]); if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) { @@ -123,23 +121,23 @@ static int read_intra_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd, static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd, int mi_row, int mi_col, vp9_reader *r) { struct segmentation *const seg = &cm->seg; - const BLOCK_SIZE bsize = xd->mi_8x8[0]->mbmi.sb_type; - int pred_segment_id, segment_id; + MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; + const BLOCK_SIZE bsize = mbmi->sb_type; + int predicted_segment_id, segment_id; if (!seg->enabled) return 0; // Default for disabled segmentation - pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map, - bsize, mi_row, mi_col); + predicted_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map, + bsize, mi_row, mi_col); if (!seg->update_map) - return pred_segment_id; + return predicted_segment_id; if (seg->temporal_update) { const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); - const int pred_flag = vp9_read(r, pred_prob); - vp9_set_pred_flag_seg_id(xd, pred_flag); - segment_id = pred_flag ? pred_segment_id - : read_segment_id(r, seg); + mbmi->seg_id_predicted = vp9_read(r, pred_prob); + segment_id = mbmi->seg_id_predicted ? 
predicted_segment_id + : read_segment_id(r, seg); } else { segment_id = read_segment_id(r, seg); } @@ -147,38 +145,36 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd, return segment_id; } -static int read_skip_coeff(VP9_COMMON *cm, const MACROBLOCKD *xd, - int segment_id, vp9_reader *r) { +static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd, + int segment_id, vp9_reader *r) { if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) { return 1; } else { - const int ctx = vp9_get_pred_context_mbskip(xd); - const int skip = vp9_read(r, cm->fc.mbskip_probs[ctx]); + const int ctx = vp9_get_skip_context(xd); + const int skip = vp9_read(r, cm->fc.skip_probs[ctx]); if (!cm->frame_parallel_decoding_mode) - ++cm->counts.mbskip[ctx][skip]; + ++cm->counts.skip[ctx][skip]; return skip; } } static void read_intra_frame_mode_info(VP9_COMMON *const cm, MACROBLOCKD *const xd, - MODE_INFO *const m, int mi_row, int mi_col, vp9_reader *r) { - MB_MODE_INFO *const mbmi = &m->mbmi; - const BLOCK_SIZE bsize = mbmi->sb_type; + MODE_INFO *const mi = xd->mi_8x8[0]; + MB_MODE_INFO *const mbmi = &mi->mbmi; const MODE_INFO *above_mi = xd->mi_8x8[-cm->mode_info_stride]; const MODE_INFO *left_mi = xd->left_available ? xd->mi_8x8[-1] : NULL; + const BLOCK_SIZE bsize = mbmi->sb_type; mbmi->segment_id = read_intra_segment_id(cm, xd, mi_row, mi_col, r); - mbmi->skip_coeff = read_skip_coeff(cm, xd, mbmi->segment_id, r); + mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r); mbmi->tx_size = read_tx_size(cm, xd, cm->tx_mode, bsize, 1, r); mbmi->ref_frame[0] = INTRA_FRAME; mbmi->ref_frame[1] = NONE; if (bsize >= BLOCK_8X8) { - const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0); - const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0); - mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]); + mbmi->mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0)); } else { // Only 4x4, 4x8, 8x4 blocks const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2 @@ -187,20 +183,18 @@ static void read_intra_frame_mode_info(VP9_COMMON *const cm, for (idy = 0; idy < 2; idy += num_4x4_h) { for (idx = 0; idx < 2; idx += num_4x4_w) { - const int ib = idy * 2 + idx; - const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, ib); - const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, ib); - const MB_PREDICTION_MODE b_mode = read_intra_mode(r, - vp9_kf_y_mode_prob[A][L]); - m->bmi[ib].as_mode = b_mode; + const int block = idy * 2 + idx; + const MB_PREDICTION_MODE mode = + read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, block)); + mi->bmi[block].as_mode = mode; if (num_4x4_h == 2) - m->bmi[ib + 2].as_mode = b_mode; + mi->bmi[block + 2].as_mode = mode; if (num_4x4_w == 2) - m->bmi[ib + 1].as_mode = b_mode; + mi->bmi[block + 1].as_mode = mode; } } - mbmi->mode = m->bmi[3].as_mode; + mbmi->mode = mi->bmi[3].as_mode; } mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]); @@ -210,12 +204,12 @@ static int read_mv_component(vp9_reader *r, const nmv_component *mvcomp, int usehp) { int mag, d, fr, hp; const int sign = vp9_read(r, mvcomp->sign); - const int mv_class = treed_read(r, vp9_mv_class_tree, mvcomp->classes); + const int mv_class = vp9_read_tree(r, vp9_mv_class_tree, mvcomp->classes); const int class0 = mv_class == MV_CLASS_0; // Integer part if (class0) { - d = treed_read(r, vp9_mv_class0_tree, mvcomp->class0); + d = vp9_read_tree(r, vp9_mv_class0_tree, mvcomp->class0); } else { int i; const int n = mv_class + CLASS0_BITS - 1; // 
number of bits @@ -226,8 +220,8 @@ } // Fractional part - fr = treed_read(r, vp9_mv_fp_tree, - class0 ? mvcomp->class0_fp[d] : mvcomp->fp); + fr = vp9_read_tree(r, vp9_mv_fp_tree, class0 ? mvcomp->class0_fp[d] + : mvcomp->fp); // High precision part (if hp is not used, the hp bit defaults to 1) @@ -242,7 +236,7 @@ static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref, const nmv_context *ctx, nmv_context_counts *counts, int allow_hp) { - const MV_JOINT_TYPE j = treed_read(r, vp9_mv_joint_tree, ctx->joints); + const MV_JOINT_TYPE j = vp9_read_tree(r, vp9_mv_joint_tree, ctx->joints); const int use_hp = allow_hp && vp9_use_mv_hp(ref); MV diff = {0, 0}; @@ -258,14 +252,18 @@ mv->row = ref->row + diff.row; mv->col = ref->col + diff.col; } -static COMPPREDMODE_TYPE read_reference_mode(VP9_COMMON *cm, - const MACROBLOCKD *xd, - vp9_reader *r) { - const int ctx = vp9_get_pred_context_comp_inter_inter(cm, xd); - const int mode = vp9_read(r, cm->fc.comp_inter_prob[ctx]); - if (!cm->frame_parallel_decoding_mode) - ++cm->counts.comp_inter[ctx][mode]; - return mode; // SINGLE_PREDICTION_ONLY or COMP_PREDICTION_ONLY +static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm, + const MACROBLOCKD *xd, + vp9_reader *r) { + if (cm->reference_mode == REFERENCE_MODE_SELECT) { + const int ctx = vp9_get_reference_mode_context(cm, xd); + const int mode = vp9_read(r, cm->fc.comp_inter_prob[ctx]); + if (!cm->frame_parallel_decoding_mode) + ++cm->counts.comp_inter[ctx][mode]; + return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE + } else { + return cm->reference_mode; + } } // Read the reference frame @@ -279,12 +277,9 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd, ref_frame[0] = vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME); ref_frame[1] = NONE; } else { - const COMPPREDMODE_TYPE mode = (cm->comp_pred_mode == HYBRID_PREDICTION) - ?
read_reference_mode(cm, xd, r) - : cm->comp_pred_mode; - + const REFERENCE_MODE mode = read_block_reference_mode(cm, xd, r); // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding - if (mode == COMP_PREDICTION_ONLY) { + if (mode == COMPOUND_REFERENCE) { const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref]; const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd); const int bit = vp9_read(r, fc->comp_ref_prob[ctx]); @@ -292,7 +287,7 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd, ++counts->comp_ref[ctx][bit]; ref_frame[idx] = cm->comp_fixed_ref; ref_frame[!idx] = cm->comp_var_ref[bit]; - } else if (mode == SINGLE_PREDICTION_ONLY) { + } else if (mode == SINGLE_REFERENCE) { const int ctx0 = vp9_get_pred_context_single_ref_p1(xd); const int bit0 = vp9_read(r, fc->single_ref_prob[ctx0][0]); if (!cm->frame_parallel_decoding_mode) @@ -309,17 +304,17 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd, ref_frame[1] = NONE; } else { - assert(!"Invalid prediction mode."); + assert(0 && "Invalid prediction mode."); } } } -static INLINE INTERPOLATION_TYPE read_switchable_filter_type( +static INLINE INTERP_FILTER read_switchable_interp_filter( VP9_COMMON *const cm, MACROBLOCKD *const xd, vp9_reader *r) { const int ctx = vp9_get_pred_context_switchable_interp(xd); - const int type = treed_read(r, vp9_switchable_interp_tree, - cm->fc.switchable_interp_prob[ctx]); + const int type = vp9_read_tree(r, vp9_switchable_interp_tree, + cm->fc.switchable_interp_prob[ctx]); if (!cm->frame_parallel_decoding_mode) ++cm->counts.switchable_interp[ctx][type]; return type; @@ -358,10 +353,15 @@ static void read_intra_block_mode_info(VP9_COMMON *const cm, MODE_INFO *mi, mbmi->uv_mode = read_intra_mode_uv(cm, r, mbmi->mode); } +static INLINE int is_mv_valid(const MV *mv) { + return mv->row > MV_LOW && mv->row < MV_UPP && + mv->col > MV_LOW && mv->col < MV_UPP; +} + static INLINE int assign_mv(VP9_COMMON *cm, MB_PREDICTION_MODE mode, - int_mv mv[2], int_mv best_mv[2], - int_mv nearest_mv[2], int_mv near_mv[2], - int is_compound, int allow_hp, vp9_reader *r) { + int_mv mv[2], int_mv ref_mv[2], + int_mv nearest_mv[2], int_mv near_mv[2], + int is_compound, int allow_hp, vp9_reader *r) { int i; int ret = 1; @@ -369,30 +369,29 @@ static INLINE int assign_mv(VP9_COMMON *cm, MB_PREDICTION_MODE mode, case NEWMV: { nmv_context_counts *const mv_counts = cm->frame_parallel_decoding_mode ? 
NULL : &cm->counts.mv; - read_mv(r, &mv[0].as_mv, &best_mv[0].as_mv, - &cm->fc.nmvc, mv_counts, allow_hp); - if (is_compound) - read_mv(r, &mv[1].as_mv, &best_mv[1].as_mv, - &cm->fc.nmvc, mv_counts, allow_hp); for (i = 0; i < 1 + is_compound; ++i) { - ret = ret && mv[i].as_mv.row < MV_UPP && mv[i].as_mv.row > MV_LOW; - ret = ret && mv[i].as_mv.col < MV_UPP && mv[i].as_mv.col > MV_LOW; + read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc.nmvc, mv_counts, + allow_hp); + ret = ret && is_mv_valid(&mv[i].as_mv); } break; } case NEARESTMV: { mv[0].as_int = nearest_mv[0].as_int; - if (is_compound) mv[1].as_int = nearest_mv[1].as_int; + if (is_compound) + mv[1].as_int = nearest_mv[1].as_int; break; } case NEARMV: { mv[0].as_int = near_mv[0].as_int; - if (is_compound) mv[1].as_int = near_mv[1].as_int; + if (is_compound) + mv[1].as_int = near_mv[1].as_int; break; } case ZEROMV: { mv[0].as_int = 0; - if (is_compound) mv[1].as_int = 0; + if (is_compound) + mv[1].as_int = 0; break; } default: { @@ -408,8 +407,8 @@ static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd, return vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME; } else { - const int ctx = vp9_get_pred_context_intra_inter(xd); - const int is_inter = vp9_read(r, vp9_get_pred_prob_intra_inter(cm, xd)); + const int ctx = vp9_get_intra_inter_context(xd); + const int is_inter = vp9_read(r, cm->fc.intra_inter_prob[ctx]); if (!cm->frame_parallel_decoding_mode) ++cm->counts.intra_inter[ctx][is_inter]; return is_inter; @@ -425,20 +424,19 @@ static void read_inter_block_mode_info(VP9_COMMON *const cm, const BLOCK_SIZE bsize = mbmi->sb_type; const int allow_hp = cm->allow_high_precision_mv; - int_mv nearest[2], nearmv[2], best[2]; - uint8_t inter_mode_ctx; - MV_REFERENCE_FRAME ref0; - int is_compound; + int_mv nearestmv[2], nearmv[2]; + int inter_mode_ctx, ref, is_compound; - mbmi->uv_mode = DC_PRED; read_ref_frames(cm, xd, r, mbmi->segment_id, mbmi->ref_frame); - ref0 = mbmi->ref_frame[0]; is_compound = has_second_ref(mbmi); - vp9_find_mv_refs(cm, xd, tile, mi, xd->last_mi, ref0, mbmi->ref_mvs[ref0], - mi_row, mi_col); + for (ref = 0; ref < 1 + is_compound; ++ref) { + const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref]; + vp9_find_mv_refs(cm, xd, tile, mi, xd->last_mi, frame, mbmi->ref_mvs[frame], + mi_row, mi_col); + } - inter_mode_ctx = mbmi->mode_context[ref0]; + inter_mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]]; if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { mbmi->mode = ZEROMV; @@ -452,58 +450,42 @@ static void read_inter_block_mode_info(VP9_COMMON *const cm, mbmi->mode = read_inter_mode(cm, r, inter_mode_ctx); } - // nearest, nearby if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) { - vp9_find_best_ref_mvs(xd, allow_hp, - mbmi->ref_mvs[ref0], &nearest[0], &nearmv[0]); - best[0].as_int = nearest[0].as_int; - } - - if (is_compound) { - const MV_REFERENCE_FRAME ref1 = mbmi->ref_frame[1]; - vp9_find_mv_refs(cm, xd, tile, mi, xd->last_mi, - ref1, mbmi->ref_mvs[ref1], mi_row, mi_col); - - if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) { - vp9_find_best_ref_mvs(xd, allow_hp, - mbmi->ref_mvs[ref1], &nearest[1], &nearmv[1]); - best[1].as_int = nearest[1].as_int; + for (ref = 0; ref < 1 + is_compound; ++ref) { + vp9_find_best_ref_mvs(xd, allow_hp, mbmi->ref_mvs[mbmi->ref_frame[ref]], + &nearestmv[ref], &nearmv[ref]); } } - mbmi->interp_filter = (cm->mcomp_filter_type == SWITCHABLE) - ? 
read_switchable_filter_type(cm, xd, r) - : cm->mcomp_filter_type; + mbmi->interp_filter = (cm->interp_filter == SWITCHABLE) + ? read_switchable_interp_filter(cm, xd, r) + : cm->interp_filter; if (bsize < BLOCK_8X8) { const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; // 1 or 2 int idx, idy; int b_mode; + int_mv nearest_sub8x8[2], near_sub8x8[2]; for (idy = 0; idy < 2; idy += num_4x4_h) { for (idx = 0; idx < 2; idx += num_4x4_w) { int_mv block[2]; const int j = idy * 2 + idx; b_mode = read_inter_mode(cm, r, inter_mode_ctx); - if (b_mode == NEARESTMV || b_mode == NEARMV) { - vp9_append_sub8x8_mvs_for_idx(cm, xd, tile, &nearest[0], - &nearmv[0], j, 0, - mi_row, mi_col); + if (b_mode == NEARESTMV || b_mode == NEARMV) + for (ref = 0; ref < 1 + is_compound; ++ref) + vp9_append_sub8x8_mvs_for_idx(cm, xd, tile, j, ref, mi_row, mi_col, + &nearest_sub8x8[ref], + &near_sub8x8[ref]); - if (is_compound) - vp9_append_sub8x8_mvs_for_idx(cm, xd, tile, &nearest[1], - &nearmv[1], j, 1, - mi_row, mi_col); - } - - if (!assign_mv(cm, b_mode, block, best, nearest, nearmv, + if (!assign_mv(cm, b_mode, block, nearestmv, + nearest_sub8x8, near_sub8x8, is_compound, allow_hp, r)) { xd->corrupted |= 1; break; }; - mi->bmi[j].as_mv[0].as_int = block[0].as_int; if (is_compound) mi->bmi[j].as_mv[1].as_int = block[1].as_int; @@ -520,27 +502,26 @@ static void read_inter_block_mode_info(VP9_COMMON *const cm, mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int; mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int; } else { - xd->corrupted |= !assign_mv(cm, mbmi->mode, mbmi->mv, - best, nearest, nearmv, - is_compound, allow_hp, r); + xd->corrupted |= !assign_mv(cm, mbmi->mode, mbmi->mv, nearestmv, + nearestmv, nearmv, is_compound, allow_hp, r); } } static void read_inter_frame_mode_info(VP9_COMMON *const cm, MACROBLOCKD *const xd, const TileInfo *const tile, - MODE_INFO *const mi, int mi_row, int mi_col, vp9_reader *r) { + MODE_INFO *const mi = xd->mi_8x8[0]; MB_MODE_INFO *const mbmi = &mi->mbmi; int inter_block; mbmi->mv[0].as_int = 0; mbmi->mv[1].as_int = 0; mbmi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r); - mbmi->skip_coeff = read_skip_coeff(cm, xd, mbmi->segment_id, r); + mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r); inter_block = read_is_inter_block(cm, xd, mbmi->segment_id, r); mbmi->tx_size = read_tx_size(cm, xd, cm->tx_mode, mbmi->sb_type, - !mbmi->skip_coeff || !inter_block, r); + !mbmi->skip || !inter_block, r); if (inter_block) read_inter_block_mode_info(cm, xd, tile, mi, mi_row, mi_col, r); @@ -551,22 +532,8 @@ static void read_inter_frame_mode_info(VP9_COMMON *const cm, void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd, const TileInfo *const tile, int mi_row, int mi_col, vp9_reader *r) { - MODE_INFO *const mi = xd->mi_8x8[0]; - const BLOCK_SIZE bsize = mi->mbmi.sb_type; - const int bw = num_8x8_blocks_wide_lookup[bsize]; - const int bh = num_8x8_blocks_high_lookup[bsize]; - const int y_mis = MIN(bh, cm->mi_rows - mi_row); - const int x_mis = MIN(bw, cm->mi_cols - mi_col); - int x, y, z; - if (frame_is_intra_only(cm)) - read_intra_frame_mode_info(cm, xd, mi, mi_row, mi_col, r); + read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r); else - read_inter_frame_mode_info(cm, xd, tile, mi, mi_row, mi_col, r); - - for (y = 0, z = 0; y < y_mis; y++, z += cm->mode_info_stride) { - for (x = !y; x < x_mis; x++) { - xd->mi_8x8[z + x] = mi; - } - } + read_inter_frame_mode_info(cm, xd, tile, mi_row, mi_col, r); } diff --git 
a/libvpx/vp9/decoder/vp9_decodemv.h b/libvpx/vp9/decoder/vp9_decodemv.h index 8e9ae4a..7394b62 100644 --- a/libvpx/vp9/decoder/vp9_decodemv.h +++ b/libvpx/vp9/decoder/vp9_decodemv.h @@ -11,8 +11,11 @@ #ifndef VP9_DECODER_VP9_DECODEMV_H_ #define VP9_DECODER_VP9_DECODEMV_H_ -#include "vp9/decoder/vp9_onyxd_int.h" -#include "vp9/decoder/vp9_dboolhuff.h" +#include "vp9/decoder/vp9_reader.h" + +#ifdef __cplusplus +extern "C" { +#endif struct TileInfo; @@ -20,4 +23,8 @@ void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd, const struct TileInfo *const tile, int mi_row, int mi_col, vp9_reader *r); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_DECODER_VP9_DECODEMV_H_ diff --git a/libvpx/vp9/decoder/vp9_onyxd_if.c b/libvpx/vp9/decoder/vp9_decoder.c index cb45d37..77985c9 100644 --- a/libvpx/vp9/decoder/vp9_onyxd_if.c +++ b/libvpx/vp9/decoder/vp9_decoder.c @@ -12,22 +12,25 @@ #include <limits.h> #include <stdio.h> +#include "./vpx_scale_rtcd.h" + +#include "vpx_mem/vpx_mem.h" +#include "vpx_ports/vpx_timer.h" +#include "vpx_scale/vpx_scale.h" + +#include "vp9/common/vp9_alloccommon.h" +#include "vp9/common/vp9_loopfilter.h" #include "vp9/common/vp9_onyxc_int.h" #if CONFIG_VP9_POSTPROC #include "vp9/common/vp9_postproc.h" #endif -#include "vp9/decoder/vp9_onyxd.h" -#include "vp9/decoder/vp9_onyxd_int.h" -#include "vpx_mem/vpx_mem.h" -#include "vp9/common/vp9_alloccommon.h" -#include "vp9/common/vp9_loopfilter.h" #include "vp9/common/vp9_quant_common.h" -#include "vpx_scale/vpx_scale.h" #include "vp9/common/vp9_systemdependent.h" -#include "vpx_ports/vpx_timer.h" -#include "vp9/decoder/vp9_decodframe.h" + +#include "vp9/decoder/vp9_decodeframe.h" +#include "vp9/decoder/vp9_decoder.h" #include "vp9/decoder/vp9_detokenize.h" -#include "./vpx_scale_rtcd.h" +#include "vp9/decoder/vp9_dthread.h" #define WRITE_RECON_BUFFER 0 #if WRITE_RECON_BUFFER == 1 @@ -112,14 +115,11 @@ static void init_macroblockd(VP9D_COMP *const pbi) { struct macroblockd_plane *const pd = xd->plane; int i; - for (i = 0; i < MAX_MB_PLANE; ++i) { - pd[i].qcoeff = pbi->qcoeff[i]; + for (i = 0; i < MAX_MB_PLANE; ++i) pd[i].dqcoeff = pbi->dqcoeff[i]; - pd[i].eobs = pbi->eobs[i]; - } } -VP9D_PTR vp9_create_decompressor(VP9D_CONFIG *oxcf) { +VP9D_COMP *vp9_create_decompressor(VP9D_CONFIG *oxcf) { VP9D_COMP *const pbi = vpx_memalign(32, sizeof(VP9D_COMP)); VP9_COMMON *const cm = pbi ? &pbi->common : NULL; @@ -128,6 +128,9 @@ VP9D_PTR vp9_create_decompressor(VP9D_CONFIG *oxcf) { vp9_zero(*pbi); + // Initialize the references to not point to any frame buffers. 
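[Note on the vp9_decodemv.c hunks above: the two per-reference read_mv() calls and their inline range checks collapse into one loop guarded by a single validity helper. A minimal sketch of the pattern, assuming is_mv_valid() is the simple range check its call site implies (its definition sits outside these hunks):

    /* Sketch only: MV_LOW/MV_UPP bound the legal motion-vector range. */
    static int is_mv_valid(const MV *mv) {
      return mv->row > MV_LOW && mv->row < MV_UPP &&
             mv->col > MV_LOW && mv->col < MV_UPP;
    }

    /* One loop now serves both single and compound prediction: */
    for (i = 0; i < 1 + is_compound; ++i) {
      read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc.nmvc, mv_counts,
              allow_hp);
      ret = ret && is_mv_valid(&mv[i].as_mv);
    }

Just below, vp9_create_decompressor() additionally seeds cm->ref_frame_map with -1 so unassigned slots read as invalid buffer indices rather than silently aliasing buffer 0.]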
+ memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); + if (setjmp(cm->error.jmp)) { cm->error.setjmp = 0; vp9_remove_decompressor(pbi); @@ -137,7 +140,7 @@ VP9D_PTR vp9_create_decompressor(VP9D_CONFIG *oxcf) { cm->error.setjmp = 1; vp9_initialize_dec(); - vp9_create_common(cm); + vp9_rtcd(); pbi->oxcf = *oxcf; pbi->ready_for_new_data = 1; @@ -160,9 +163,8 @@ VP9D_PTR vp9_create_decompressor(VP9D_CONFIG *oxcf) { return pbi; } -void vp9_remove_decompressor(VP9D_PTR ptr) { +void vp9_remove_decompressor(VP9D_COMP *pbi) { int i; - VP9D_COMP *const pbi = (VP9D_COMP *)ptr; if (!pbi) return; @@ -177,21 +179,31 @@ void vp9_remove_decompressor(VP9D_PTR ptr) { vpx_free(worker->data2); } vpx_free(pbi->tile_workers); + + if (pbi->num_tile_workers) { + VP9_COMMON *const cm = &pbi->common; + const int sb_rows = + mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2; + VP9LfSync *const lf_sync = &pbi->lf_row_sync; + + vp9_loop_filter_dealloc(lf_sync, sb_rows); + } + vpx_free(pbi->mi_streams); vpx_free(pbi->above_context[0]); vpx_free(pbi->above_seg_context); vpx_free(pbi); } -static int equal_dimensions(YV12_BUFFER_CONFIG *a, YV12_BUFFER_CONFIG *b) { +static int equal_dimensions(const YV12_BUFFER_CONFIG *a, + const YV12_BUFFER_CONFIG *b) { return a->y_height == b->y_height && a->y_width == b->y_width && a->uv_height == b->uv_height && a->uv_width == b->uv_width; } -vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR ptr, +vpx_codec_err_t vp9_copy_reference_dec(VP9D_COMP *pbi, VP9_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) { - VP9D_COMP *pbi = (VP9D_COMP *) ptr; VP9_COMMON *cm = &pbi->common; /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the @@ -200,7 +212,8 @@ vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR ptr, * later commit that adds VP9-specific controls for this functionality. */ if (ref_frame_flag == VP9_LAST_FLAG) { - YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[0]]; + const YV12_BUFFER_CONFIG *const cfg = + &cm->frame_bufs[cm->ref_frame_map[0]].buf; if (!equal_dimensions(cfg, sd)) vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Incorrect buffer dimensions"); @@ -215,11 +228,11 @@ vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR ptr, } -vpx_codec_err_t vp9_set_reference_dec(VP9D_PTR ptr, VP9_REFFRAME ref_frame_flag, +vpx_codec_err_t vp9_set_reference_dec(VP9D_COMP *pbi, + VP9_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) { - VP9D_COMP *pbi = (VP9D_COMP *) ptr; VP9_COMMON *cm = &pbi->common; - int *ref_fb_ptr = NULL; + RefBuffer *ref_buf = NULL; /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the * encoder is using the frame buffers for. This is just a stub to keep the @@ -227,44 +240,46 @@ vpx_codec_err_t vp9_set_reference_dec(VP9D_PTR ptr, VP9_REFFRAME ref_frame_flag, * later commit that adds VP9-specific controls for this functionality. 
*/ if (ref_frame_flag == VP9_LAST_FLAG) { - ref_fb_ptr = &pbi->common.active_ref_idx[0]; + ref_buf = &cm->frame_refs[0]; } else if (ref_frame_flag == VP9_GOLD_FLAG) { - ref_fb_ptr = &pbi->common.active_ref_idx[1]; + ref_buf = &cm->frame_refs[1]; } else if (ref_frame_flag == VP9_ALT_FLAG) { - ref_fb_ptr = &pbi->common.active_ref_idx[2]; + ref_buf = &cm->frame_refs[2]; } else { vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, "Invalid reference frame"); return pbi->common.error.error_code; } - if (!equal_dimensions(&cm->yv12_fb[*ref_fb_ptr], sd)) { + if (!equal_dimensions(ref_buf->buf, sd)) { vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, "Incorrect buffer dimensions"); } else { + int *ref_fb_ptr = &ref_buf->idx; + // Find an empty frame buffer. const int free_fb = get_free_fb(cm); - // Decrease fb_idx_ref_cnt since it will be increased again in + // Decrease ref_count since it will be increased again in // ref_cnt_fb() below. - cm->fb_idx_ref_cnt[free_fb]--; + cm->frame_bufs[free_fb].ref_count--; // Manage the reference counters and copy image. - ref_cnt_fb(cm->fb_idx_ref_cnt, ref_fb_ptr, free_fb); - vp8_yv12_copy_frame(sd, &cm->yv12_fb[*ref_fb_ptr]); + ref_cnt_fb(cm->frame_bufs, ref_fb_ptr, free_fb); + ref_buf->buf = &cm->frame_bufs[*ref_fb_ptr].buf; + vp8_yv12_copy_frame(sd, ref_buf->buf); } return pbi->common.error.error_code; } -int vp9_get_reference_dec(VP9D_PTR ptr, int index, YV12_BUFFER_CONFIG **fb) { - VP9D_COMP *pbi = (VP9D_COMP *) ptr; +int vp9_get_reference_dec(VP9D_COMP *pbi, int index, YV12_BUFFER_CONFIG **fb) { VP9_COMMON *cm = &pbi->common; - if (index < 0 || index >= NUM_REF_FRAMES) + if (index < 0 || index >= REF_FRAMES) return -1; - *fb = &cm->yv12_fb[cm->ref_frame_map[index]]; + *fb = &cm->frame_bufs[cm->ref_frame_map[index]].buf; return 0; } @@ -274,34 +289,39 @@ static void swap_frame_buffers(VP9D_COMP *pbi) { VP9_COMMON *const cm = &pbi->common; for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) { - if (mask & 1) - ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->ref_frame_map[ref_index], + if (mask & 1) { + const int old_idx = cm->ref_frame_map[ref_index]; + ref_cnt_fb(cm->frame_bufs, &cm->ref_frame_map[ref_index], cm->new_fb_idx); + if (old_idx >= 0 && cm->frame_bufs[old_idx].ref_count == 0) + cm->release_fb_cb(cm->cb_priv, + &cm->frame_bufs[old_idx].raw_frame_buffer); + } ++ref_index; } cm->frame_to_show = get_frame_new_buffer(cm); - cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + cm->frame_bufs[cm->new_fb_idx].ref_count--; // Invalidate these references until the next frame starts. for (ref_index = 0; ref_index < 3; ref_index++) - cm->active_ref_idx[ref_index] = INT_MAX; + cm->frame_refs[ref_index].idx = INT_MAX; } -int vp9_receive_compressed_data(VP9D_PTR ptr, +int vp9_receive_compressed_data(VP9D_COMP *pbi, size_t size, const uint8_t **psource, int64_t time_stamp) { - VP9D_COMP *pbi = (VP9D_COMP *) ptr; - VP9_COMMON *cm = &pbi->common; + VP9_COMMON *cm = NULL; const uint8_t *source = *psource; int retcode = 0; /*if(pbi->ready_for_new_data == 0) return -1;*/ - if (ptr == 0) + if (!pbi) return -1; + cm = &pbi->common; cm->error.error_code = VPX_CODEC_OK; pbi->source = source; @@ -317,10 +337,14 @@ int vp9_receive_compressed_data(VP9D_PTR ptr, * at this point, but if it becomes so, [0] may not always be the correct * thing to do here. 
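[Note on the buffer-management migration in vp9_set_reference_dec() and swap_frame_buffers(): the bare cm->fb_idx_ref_cnt array is gone; each frame buffer now carries its own ref_count, and a release callback hands zero-referenced buffers back to the application. A hedged distillation of the ref_cnt_fb() idiom these hunks lean on (the helper lives in common code, outside this diff; RefCntBuffer is assumed to pair a YV12 buffer with its ref_count, as the surrounding hunks suggest):

    /* Sketch: retarget *idx from its old buffer to new_idx, keeping
     * reference counts balanced on both sides. */
    static void ref_cnt_fb(RefCntBuffer *bufs, int *idx, int new_idx) {
      const int old_idx = *idx;
      if (old_idx >= 0 && bufs[old_idx].ref_count > 0)
        bufs[old_idx].ref_count--;
      *idx = new_idx;
      bufs[new_idx].ref_count++;
    }
]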
*/ - if (cm->active_ref_idx[0] != INT_MAX) - get_frame_ref_buffer(cm, 0)->corrupted = 1; + if (cm->frame_refs[0].idx != INT_MAX) + cm->frame_refs[0].buf->corrupted = 1; } + // Check if the previous frame was a frame without any references to it. + if (cm->new_fb_idx >= 0 && cm->frame_bufs[cm->new_fb_idx].ref_count == 0) + cm->release_fb_cb(cm->cb_priv, + &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer); cm->new_fb_idx = get_free_fb(cm); if (setjmp(cm->error.jmp)) { @@ -334,11 +358,11 @@ int vp9_receive_compressed_data(VP9D_PTR ptr, * at this point, but if it becomes so, [0] may not always be the correct * thing to do here. */ - if (cm->active_ref_idx[0] != INT_MAX) - get_frame_ref_buffer(cm, 0)->corrupted = 1; + if (cm->frame_refs[0].idx != INT_MAX) + cm->frame_refs[0].buf->corrupted = 1; - if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) - cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + if (cm->frame_bufs[cm->new_fb_idx].ref_count > 0) + cm->frame_bufs[cm->new_fb_idx].ref_count--; return -1; } @@ -350,8 +374,8 @@ int vp9_receive_compressed_data(VP9D_PTR ptr, if (retcode < 0) { cm->error.error_code = VPX_CODEC_ERROR; cm->error.setjmp = 0; - if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) - cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + if (cm->frame_bufs[cm->new_fb_idx].ref_count > 0) + cm->frame_bufs[cm->new_fb_idx].ref_count--; return retcode; } @@ -367,7 +391,13 @@ int vp9_receive_compressed_data(VP9D_PTR ptr, #endif if (!pbi->do_loopfilter_inline) { - vp9_loop_filter_frame(cm, &pbi->mb, pbi->common.lf.filter_level, 0, 0); + // If multiple threads are used to decode tiles, then we use those threads + // to do parallel loopfiltering. + if (pbi->num_tile_workers) { + vp9_loop_filter_frame_mt(pbi, cm, &pbi->mb, cm->lf.filter_level, 0, 0); + } else { + vp9_loop_filter_frame(cm, &pbi->mb, cm->lf.filter_level, 0, 0); + } } #if WRITE_RECON_BUFFER == 2 @@ -379,10 +409,6 @@ int vp9_receive_compressed_data(VP9D_PTR ptr, cm->current_video_frame + 3000); #endif - vp9_extend_frame_inner_borders(cm->frame_to_show, - cm->subsampling_x, - cm->subsampling_y); - #if WRITE_RECON_BUFFER == 1 if (cm->show_frame) recon_write_yuv_frame("recon.yuv", cm->frame_to_show, @@ -391,25 +417,31 @@ int vp9_receive_compressed_data(VP9D_PTR ptr, vp9_clear_system_state(); - cm->last_show_frame = cm->show_frame; - if (cm->show_frame) { - // current mip will be the prev_mip for the next frame - MODE_INFO *temp = cm->prev_mip; - MODE_INFO **temp2 = cm->prev_mi_grid_base; - cm->prev_mip = cm->mip; - cm->mip = temp; - cm->prev_mi_grid_base = cm->mi_grid_base; - cm->mi_grid_base = temp2; - - // update the upper left visible macroblock ptrs - cm->mi = cm->mip + cm->mode_info_stride + 1; - cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1; - cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1; - cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1; - - pbi->mb.mi_8x8 = cm->mi_grid_visible; - pbi->mb.mi_8x8[0] = cm->mi; + cm->last_width = cm->width; + cm->last_height = cm->height; + if (!cm->show_existing_frame) + cm->last_show_frame = cm->show_frame; + if (cm->show_frame) { + if (!cm->show_existing_frame) { + // current mip will be the prev_mip for the next frame + MODE_INFO *temp = cm->prev_mip; + MODE_INFO **temp2 = cm->prev_mi_grid_base; + cm->prev_mip = cm->mip; + cm->mip = temp; + cm->prev_mi_grid_base = cm->mi_grid_base; + cm->mi_grid_base = temp2; + + // update the upper left visible macroblock ptrs + cm->mi = cm->mip + cm->mode_info_stride + 1; + cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1; + 
cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1; + cm->prev_mi_grid_visible = cm->prev_mi_grid_base + + cm->mode_info_stride + 1; + + pbi->mb.mi_8x8 = cm->mi_grid_visible; + pbi->mb.mi_8x8[0] = cm->mi; + } cm->current_video_frame++; } @@ -421,11 +453,10 @@ int vp9_receive_compressed_data(VP9D_PTR ptr, return retcode; } -int vp9_get_raw_frame(VP9D_PTR ptr, YV12_BUFFER_CONFIG *sd, +int vp9_get_raw_frame(VP9D_COMP *pbi, YV12_BUFFER_CONFIG *sd, int64_t *time_stamp, int64_t *time_end_stamp, vp9_ppflags_t *flags) { int ret = -1; - VP9D_COMP *pbi = (VP9D_COMP *) ptr; if (pbi->ready_for_new_data == 1) return ret; diff --git a/libvpx/vp9/decoder/vp9_decoder.h b/libvpx/vp9/decoder/vp9_decoder.h new file mode 100644 index 0000000..e6edaf9 --- /dev/null +++ b/libvpx/vp9/decoder/vp9_decoder.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VP9_DECODER_VP9_DECODER_H_ +#define VP9_DECODER_VP9_DECODER_H_ + +#include "./vpx_config.h" + +#include "vpx/vpx_codec.h" +#include "vpx_scale/yv12config.h" + +#include "vp9/common/vp9_onyxc_int.h" +#include "vp9/common/vp9_ppflags.h" + +#include "vp9/decoder/vp9_decoder.h" +#include "vp9/decoder/vp9_dthread.h" +#include "vp9/decoder/vp9_thread.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + int width; + int height; + int version; + int postprocess; + int max_threads; + int inv_tile_order; + int input_partition; +} VP9D_CONFIG; + +typedef enum { + VP9_LAST_FLAG = 1, + VP9_GOLD_FLAG = 2, + VP9_ALT_FLAG = 4 +} VP9_REFFRAME; + +typedef struct VP9Decompressor { + DECLARE_ALIGNED(16, MACROBLOCKD, mb); + + DECLARE_ALIGNED(16, VP9_COMMON, common); + + DECLARE_ALIGNED(16, int16_t, dqcoeff[MAX_MB_PLANE][64 * 64]); + + VP9D_CONFIG oxcf; + + const uint8_t *source; + size_t source_sz; + + int64_t last_time_stamp; + int ready_for_new_data; + + int refresh_frame_flags; + + int decoded_key_frame; + + int initial_width; + int initial_height; + + int do_loopfilter_inline; // apply loopfilter to available rows immediately + VP9Worker lf_worker; + + VP9Worker *tile_workers; + int num_tile_workers; + + VP9LfSync lf_row_sync; + + /* Each tile column has its own MODE_INFO stream. This array indexes them by + tile column index. 
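[Note on the new vp9_decoder.h taking shape here: it merges the deleted vp9_onyxd.h / vp9_onyxd_int.h pair and replaces the opaque VP9D_PTR with a typed struct VP9Decompressor pointer. A hedged end-to-end usage sketch of the API declared just below (buffer names and error handling are illustrative only):

    VP9D_CONFIG oxcf = {0};
    oxcf.max_threads = 4;                      /* illustrative value */
    VP9D_COMP *pbi = vp9_create_decompressor(&oxcf);
    const uint8_t *data = frame_buf;           /* frame_buf/frame_size: */
    if (pbi &&                                 /* assumed caller inputs */
        !vp9_receive_compressed_data(pbi, frame_size, &data, 0)) {
      YV12_BUFFER_CONFIG sd;
      int64_t ts, te;
      vp9_ppflags_t flags = {0};
      if (!vp9_get_raw_frame(pbi, &sd, &ts, &te, &flags)) {
        /* sd now describes the decoded frame */
      }
    }
    vp9_remove_decompressor(pbi);
]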
*/ + MODE_INFO **mi_streams; + + ENTROPY_CONTEXT *above_context[MAX_MB_PLANE]; + PARTITION_CONTEXT *above_seg_context; +} VP9D_COMP; + +void vp9_initialize_dec(); + +int vp9_receive_compressed_data(struct VP9Decompressor *pbi, + size_t size, const uint8_t **dest, + int64_t time_stamp); + +int vp9_get_raw_frame(struct VP9Decompressor *pbi, + YV12_BUFFER_CONFIG *sd, + int64_t *time_stamp, int64_t *time_end_stamp, + vp9_ppflags_t *flags); + +vpx_codec_err_t vp9_copy_reference_dec(struct VP9Decompressor *pbi, + VP9_REFFRAME ref_frame_flag, + YV12_BUFFER_CONFIG *sd); + +vpx_codec_err_t vp9_set_reference_dec(struct VP9Decompressor *pbi, + VP9_REFFRAME ref_frame_flag, + YV12_BUFFER_CONFIG *sd); + +int vp9_get_reference_dec(struct VP9Decompressor *pbi, + int index, YV12_BUFFER_CONFIG **fb); + + +struct VP9Decompressor *vp9_create_decompressor(VP9D_CONFIG *oxcf); + +void vp9_remove_decompressor(struct VP9Decompressor *pbi); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_DECODER_VP9_DECODER_H_ diff --git a/libvpx/vp9/decoder/vp9_detokenize.c b/libvpx/vp9/decoder/vp9_detokenize.c index 05a2b42..52e78cd 100644 --- a/libvpx/vp9/decoder/vp9_detokenize.c +++ b/libvpx/vp9/decoder/vp9_detokenize.c @@ -13,24 +13,20 @@ #include "vp9/common/vp9_blockd.h" #include "vp9/common/vp9_common.h" -#include "vp9/common/vp9_seg_common.h" -#include "vp9/decoder/vp9_dboolhuff.h" #include "vp9/decoder/vp9_detokenize.h" -#include "vp9/decoder/vp9_onyxd_int.h" -#include "vp9/decoder/vp9_treereader.h" #define EOB_CONTEXT_NODE 0 #define ZERO_CONTEXT_NODE 1 #define ONE_CONTEXT_NODE 2 -#define LOW_VAL_CONTEXT_NODE 3 -#define TWO_CONTEXT_NODE 4 -#define THREE_CONTEXT_NODE 5 -#define HIGH_LOW_CONTEXT_NODE 6 -#define CAT_ONE_CONTEXT_NODE 7 -#define CAT_THREEFOUR_CONTEXT_NODE 8 -#define CAT_THREE_CONTEXT_NODE 9 -#define CAT_FIVE_CONTEXT_NODE 10 +#define LOW_VAL_CONTEXT_NODE 0 +#define TWO_CONTEXT_NODE 1 +#define THREE_CONTEXT_NODE 2 +#define HIGH_LOW_CONTEXT_NODE 3 +#define CAT_ONE_CONTEXT_NODE 4 +#define CAT_THREEFOUR_CONTEXT_NODE 5 +#define CAT_THREE_CONTEXT_NODE 6 +#define CAT_FIVE_CONTEXT_NODE 7 #define CAT1_MIN_VAL 5 #define CAT2_MIN_VAL 7 @@ -61,26 +57,20 @@ static const vp9_prob cat6_prob[15] = { 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0 }; -static const int token_to_counttoken[MAX_ENTROPY_TOKENS] = { - ZERO_TOKEN, ONE_TOKEN, TWO_TOKEN, TWO_TOKEN, - TWO_TOKEN, TWO_TOKEN, TWO_TOKEN, TWO_TOKEN, - TWO_TOKEN, TWO_TOKEN, TWO_TOKEN, DCT_EOB_MODEL_TOKEN -}; - #define INCREMENT_COUNT(token) \ do { \ if (!cm->frame_parallel_decoding_mode) \ - ++coef_counts[band][pt][token_to_counttoken[token]]; \ + ++coef_counts[band][ctx][token]; \ } while (0) - #define WRITE_COEF_CONTINUE(val, token) \ { \ - dqcoeff_ptr[scan[c]] = (vp9_read_bit(r) ? -val : val) * \ - dq[c > 0] / (1 + (tx_size == TX_32X32)); \ - INCREMENT_COUNT(token); \ + v = (val * dqv) >> dq_shift; \ + dqcoeff[scan[c]] = vp9_read_bit(r) ? 
-v : v; \ token_cache[scan[c]] = vp9_pt_energy_class[token]; \ ++c; \ + ctx = get_coef_context(nb, token_cache, c); \ + dqv = dq[1]; \ continue; \ } @@ -89,69 +79,62 @@ static const int token_to_counttoken[MAX_ENTROPY_TOKENS] = { val += (vp9_read(r, prob) << bits_count); \ } while (0) -static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, - vp9_reader *r, int block_idx, - PLANE_TYPE type, int seg_eob, int16_t *dqcoeff_ptr, - TX_SIZE tx_size, const int16_t *dq, int pt, - uint8_t *token_cache) { +static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, PLANE_TYPE type, + int16_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq, + int ctx, const int16_t *scan, const int16_t *nb, + vp9_reader *r) { + const int max_eob = 16 << (tx_size << 1); const FRAME_CONTEXT *const fc = &cm->fc; FRAME_COUNTS *const counts = &cm->counts; const int ref = is_inter_block(&xd->mi_8x8[0]->mbmi); int band, c = 0; - const vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES] = + const vp9_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] = fc->coef_probs[tx_size][type][ref]; - vp9_prob coef_probs_full[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]; - uint8_t load_map[COEF_BANDS][PREV_COEF_CONTEXTS] = { { 0 } }; const vp9_prob *prob; - unsigned int (*coef_counts)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES + 1] = + unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1] = counts->coef[tx_size][type][ref]; - unsigned int (*eob_branch_count)[PREV_COEF_CONTEXTS] = + unsigned int (*eob_branch_count)[COEFF_CONTEXTS] = counts->eob_branch[tx_size][type][ref]; - const int16_t *scan, *nb; + uint8_t token_cache[32 * 32]; const uint8_t *cat6; const uint8_t *band_translate = get_band_translate(tx_size); - get_scan(xd, tx_size, type, block_idx, &scan, &nb); + const int dq_shift = (tx_size == TX_32X32); + int v; + int16_t dqv = dq[0]; - while (c < seg_eob) { + while (c < max_eob) { int val; - if (c) - pt = get_coef_context(nb, token_cache, c); band = *band_translate++; - prob = coef_probs[band][pt]; + prob = coef_probs[band][ctx]; if (!cm->frame_parallel_decoding_mode) - ++eob_branch_count[band][pt]; - if (!vp9_read(r, prob[EOB_CONTEXT_NODE])) - break; - goto DECODE_ZERO; - - SKIP_START: - if (c >= seg_eob) + ++eob_branch_count[band][ctx]; + if (!vp9_read(r, prob[EOB_CONTEXT_NODE])) { + INCREMENT_COUNT(EOB_MODEL_TOKEN); break; - if (c) - pt = get_coef_context(nb, token_cache, c); - band = *band_translate++; - prob = coef_probs[band][pt]; + } - DECODE_ZERO: - if (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) { + while (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) { INCREMENT_COUNT(ZERO_TOKEN); - token_cache[scan[c]] = vp9_pt_energy_class[ZERO_TOKEN]; + dqv = dq[1]; + token_cache[scan[c]] = 0; ++c; - goto SKIP_START; + if (c >= max_eob) + return c; // zero tokens at the end (no eob token) + ctx = get_coef_context(nb, token_cache, c); + band = *band_translate++; + prob = coef_probs[band][ctx]; } // ONE_CONTEXT_NODE_0_ if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) { + INCREMENT_COUNT(ONE_TOKEN); WRITE_COEF_CONTINUE(1, ONE_TOKEN); } - // Load full probabilities if not already loaded - if (!load_map[band][pt]) { - vp9_model_to_full_probs(coef_probs[band][pt], - coef_probs_full[band][pt]); - load_map[band][pt] = 1; - } - prob = coef_probs_full[band][pt]; - // LOW_VAL_CONTEXT_NODE_0_ + + INCREMENT_COUNT(TWO_TOKEN); + + prob = vp9_pareto8_full[prob[PIVOT_NODE] - 1]; + if (!vp9_read(r, prob[LOW_VAL_CONTEXT_NODE])) { if (!vp9_read(r, prob[TWO_CONTEXT_NODE])) { WRITE_COEF_CONTINUE(2, TWO_TOKEN); @@ -161,35 +144,35 @@ 
static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, } WRITE_COEF_CONTINUE(4, FOUR_TOKEN); } - // HIGH_LOW_CONTEXT_NODE_0_ + if (!vp9_read(r, prob[HIGH_LOW_CONTEXT_NODE])) { if (!vp9_read(r, prob[CAT_ONE_CONTEXT_NODE])) { val = CAT1_MIN_VAL; ADJUST_COEF(CAT1_PROB0, 0); - WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY1); + WRITE_COEF_CONTINUE(val, CATEGORY1_TOKEN); } val = CAT2_MIN_VAL; ADJUST_COEF(CAT2_PROB1, 1); ADJUST_COEF(CAT2_PROB0, 0); - WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY2); + WRITE_COEF_CONTINUE(val, CATEGORY2_TOKEN); } - // CAT_THREEFOUR_CONTEXT_NODE_0_ + if (!vp9_read(r, prob[CAT_THREEFOUR_CONTEXT_NODE])) { if (!vp9_read(r, prob[CAT_THREE_CONTEXT_NODE])) { val = CAT3_MIN_VAL; ADJUST_COEF(CAT3_PROB2, 2); ADJUST_COEF(CAT3_PROB1, 1); ADJUST_COEF(CAT3_PROB0, 0); - WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY3); + WRITE_COEF_CONTINUE(val, CATEGORY3_TOKEN); } val = CAT4_MIN_VAL; ADJUST_COEF(CAT4_PROB3, 3); ADJUST_COEF(CAT4_PROB2, 2); ADJUST_COEF(CAT4_PROB1, 1); ADJUST_COEF(CAT4_PROB0, 0); - WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY4); + WRITE_COEF_CONTINUE(val, CATEGORY4_TOKEN); } - // CAT_FIVE_CONTEXT_NODE_0_: + if (!vp9_read(r, prob[CAT_FIVE_CONTEXT_NODE])) { val = CAT5_MIN_VAL; ADJUST_COEF(CAT5_PROB4, 4); @@ -197,7 +180,7 @@ static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, ADJUST_COEF(CAT5_PROB2, 2); ADJUST_COEF(CAT5_PROB1, 1); ADJUST_COEF(CAT5_PROB0, 0); - WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY5); + WRITE_COEF_CONTINUE(val, CATEGORY5_TOKEN); } val = 0; cat6 = cat6_prob; @@ -205,12 +188,7 @@ static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, val = (val << 1) | vp9_read(r, *cat6++); val += CAT6_MIN_VAL; - WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY6); - } - - if (c < seg_eob) { - if (!cm->frame_parallel_decoding_mode) - ++coef_counts[band][pt][DCT_EOB_MODEL_TOKEN]; + WRITE_COEF_CONTINUE(val, CATEGORY6_TOKEN); } return c; @@ -218,18 +196,15 @@ static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, int vp9_decode_block_tokens(VP9_COMMON *cm, MACROBLOCKD *xd, int plane, int block, BLOCK_SIZE plane_bsize, - int x, int y, TX_SIZE tx_size, vp9_reader *r, - uint8_t *token_cache) { + int x, int y, TX_SIZE tx_size, vp9_reader *r) { struct macroblockd_plane *const pd = &xd->plane[plane]; - const int seg_eob = get_tx_eob(&cm->seg, xd->mi_8x8[0]->mbmi.segment_id, - tx_size); - const int pt = get_entropy_context(tx_size, pd->above_context + x, - pd->left_context + y); - const int eob = decode_coefs(cm, xd, r, block, pd->plane_type, seg_eob, + const int ctx = get_entropy_context(tx_size, pd->above_context + x, + pd->left_context + y); + const scan_order *so = get_scan(xd, tx_size, pd->plane_type, block); + const int eob = decode_coefs(cm, xd, pd->plane_type, BLOCK_OFFSET(pd->dqcoeff, block), tx_size, - pd->dequant, pt, token_cache); - set_contexts(xd, pd, plane_bsize, tx_size, eob > 0, x, y); - pd->eobs[block] = eob; + pd->dequant, ctx, so->scan, so->neighbors, r); + vp9_set_contexts(xd, pd, plane_bsize, tx_size, eob > 0, x, y); return eob; } diff --git a/libvpx/vp9/decoder/vp9_detokenize.h b/libvpx/vp9/decoder/vp9_detokenize.h index e858a19..5278e97 100644 --- a/libvpx/vp9/decoder/vp9_detokenize.h +++ b/libvpx/vp9/decoder/vp9_detokenize.h @@ -12,12 +12,19 @@ #ifndef VP9_DECODER_VP9_DETOKENIZE_H_ #define VP9_DECODER_VP9_DETOKENIZE_H_ -#include "vp9/decoder/vp9_onyxd_int.h" -#include "vp9/decoder/vp9_dboolhuff.h" +#include "vp9/decoder/vp9_decoder.h" +#include "vp9/decoder/vp9_reader.h" + +#ifdef __cplusplus +extern "C" { +#endif int 
vp9_decode_block_tokens(VP9_COMMON *cm, MACROBLOCKD *xd, int plane, int block, BLOCK_SIZE plane_bsize, - int x, int y, TX_SIZE tx_size, vp9_reader *r, - uint8_t *token_cache); + int x, int y, TX_SIZE tx_size, vp9_reader *r); + +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_DECODER_VP9_DETOKENIZE_H_ diff --git a/libvpx/vp9/decoder/vp9_dsubexp.c b/libvpx/vp9/decoder/vp9_dsubexp.c index fcca017..e67b372 100644 --- a/libvpx/vp9/decoder/vp9_dsubexp.c +++ b/libvpx/vp9/decoder/vp9_dsubexp.c @@ -19,14 +19,10 @@ static int inv_recenter_nonneg(int v, int m) { return v % 2 ? m - (v + 1) / 2 : m + v / 2; } -static int decode_uniform(vp9_reader *r, int n) { - int v; - const int l = get_unsigned_bits(n); - const int m = (1 << l) - n; - if (!l) - return 0; - - v = vp9_read_literal(r, l - 1); +static int decode_uniform(vp9_reader *r) { + const int l = 8; + const int m = (1 << l) - 191; + const int v = vp9_read_literal(r, l - 1); return v < m ? v : (v << 1) - m + vp9_read_bit(r); } @@ -78,30 +74,19 @@ static int inv_remap_prob(int v, int m) { } } -static int decode_term_subexp(vp9_reader *r, int k, int num_syms) { - int i = 0, mk = 0, word; - while (1) { - const int b = i ? k + i - 1 : k; - const int a = 1 << b; - if (num_syms <= mk + 3 * a) { - word = decode_uniform(r, num_syms - mk) + mk; - break; - } else { - if (vp9_read_bit(r)) { - i++; - mk += a; - } else { - word = vp9_read_literal(r, b) + mk; - break; - } - } - } - return word; +static int decode_term_subexp(vp9_reader *r) { + if (!vp9_read_bit(r)) + return vp9_read_literal(r, 4); + if (!vp9_read_bit(r)) + return vp9_read_literal(r, 4) + 16; + if (!vp9_read_bit(r)) + return vp9_read_literal(r, 5) + 32; + return decode_uniform(r) + 64; } void vp9_diff_update_prob(vp9_reader *r, vp9_prob* p) { if (vp9_read(r, DIFF_UPDATE_PROB)) { - const int delp = decode_term_subexp(r, SUBEXP_PARAM, 255); + const int delp = decode_term_subexp(r); *p = (vp9_prob)inv_remap_prob(delp, *p); } } diff --git a/libvpx/vp9/decoder/vp9_dsubexp.h b/libvpx/vp9/decoder/vp9_dsubexp.h index aeb9399..436f434 100644 --- a/libvpx/vp9/decoder/vp9_dsubexp.h +++ b/libvpx/vp9/decoder/vp9_dsubexp.h @@ -12,8 +12,16 @@ #ifndef VP9_DECODER_VP9_DSUBEXP_H_ #define VP9_DECODER_VP9_DSUBEXP_H_ -#include "vp9/decoder/vp9_dboolhuff.h" +#include "vp9/decoder/vp9_reader.h" + +#ifdef __cplusplus +extern "C" { +#endif void vp9_diff_update_prob(vp9_reader *r, vp9_prob* p); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_DECODER_VP9_DSUBEXP_H_ diff --git a/libvpx/vp9/decoder/vp9_dthread.c b/libvpx/vp9/decoder/vp9_dthread.c new file mode 100644 index 0000000..4df8509 --- /dev/null +++ b/libvpx/vp9/decoder/vp9_dthread.c @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
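[Note on the two decoder simplifications just above: decode_coefs() drops its SKIP_START/DECODE_ZERO goto pair for a plain while loop and reads model probabilities straight from the vp9_pareto8_full table instead of expanding them on demand, and decode_term_subexp() unrolls the generic (k, num_syms) subexponential loop into its only live configuration. A worked sketch of what the unrolled decoder yields, with the bucket ranges spelled out:

    /* Inverse subexponential code for deltas 0..254 (sketch of the new
     * decode_term_subexp()): three literal buckets, then a uniform tail. */
    static int decode_term_subexp_sketch(vp9_reader *r) {
      if (!vp9_read_bit(r)) return vp9_read_literal(r, 4);       /*  0..15 */
      if (!vp9_read_bit(r)) return vp9_read_literal(r, 4) + 16;  /* 16..31 */
      if (!vp9_read_bit(r)) return vp9_read_literal(r, 5) + 32;  /* 32..63 */
      return decode_uniform(r) + 64;                             /* 64..254 */
    }
    /* decode_uniform() covers the remaining 191 symbols: with l = 8 and
     * m = 256 - 191 = 65, values below 65 cost 7 bits, the rest 8 bits. */
]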
+ */ + +#include "./vpx_config.h" + +#include "vpx_mem/vpx_mem.h" + +#include "vp9/common/vp9_reconinter.h" + +#include "vp9/decoder/vp9_dthread.h" +#include "vp9/decoder/vp9_decoder.h" + +#if CONFIG_MULTITHREAD +static INLINE void mutex_lock(pthread_mutex_t *const mutex) { + const int kMaxTryLocks = 4000; + int locked = 0; + int i; + + for (i = 0; i < kMaxTryLocks; ++i) { + if (!pthread_mutex_trylock(mutex)) { + locked = 1; + break; + } + } + + if (!locked) + pthread_mutex_lock(mutex); +} +#endif // CONFIG_MULTITHREAD + +static INLINE void sync_read(VP9LfSync *const lf_sync, int r, int c) { +#if CONFIG_MULTITHREAD + const int nsync = lf_sync->sync_range; + + if (r && !(c & (nsync - 1))) { + mutex_lock(&lf_sync->mutex_[r - 1]); + + while (c > lf_sync->cur_sb_col[r - 1] - nsync) { + pthread_cond_wait(&lf_sync->cond_[r - 1], + &lf_sync->mutex_[r - 1]); + } + pthread_mutex_unlock(&lf_sync->mutex_[r - 1]); + } +#else + (void)lf_sync; + (void)r; + (void)c; +#endif // CONFIG_MULTITHREAD +} + +static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c, + const int sb_cols) { +#if CONFIG_MULTITHREAD + const int nsync = lf_sync->sync_range; + int cur; + // Only signal when there are enough filtered SB for next row to run. + int sig = 1; + + if (c < sb_cols - 1) { + cur = c; + if (c % nsync) + sig = 0; + } else { + cur = sb_cols + nsync; + } + + if (sig) { + mutex_lock(&lf_sync->mutex_[r]); + + lf_sync->cur_sb_col[r] = cur; + + pthread_cond_signal(&lf_sync->cond_[r]); + pthread_mutex_unlock(&lf_sync->mutex_[r]); + } +#else + (void)lf_sync; + (void)r; + (void)c; + (void)sb_cols; +#endif // CONFIG_MULTITHREAD +} + +// Implement row loopfiltering for each thread. +static void loop_filter_rows_mt(const YV12_BUFFER_CONFIG *const frame_buffer, + VP9_COMMON *const cm, MACROBLOCKD *const xd, + int start, int stop, int y_only, + VP9LfSync *const lf_sync, int num_lf_workers) { + const int num_planes = y_only ? 1 : MAX_MB_PLANE; + int r, c; // SB row and col + LOOP_FILTER_MASK lfm; + const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2; + + for (r = start; r < stop; r += num_lf_workers) { + const int mi_row = r << MI_BLOCK_SIZE_LOG2; + MODE_INFO **mi_8x8 = cm->mi_grid_visible + mi_row * cm->mode_info_stride; + + for (c = 0; c < sb_cols; ++c) { + const int mi_col = c << MI_BLOCK_SIZE_LOG2; + int plane; + + sync_read(lf_sync, r, c); + + vp9_setup_dst_planes(xd, frame_buffer, mi_row, mi_col); + vp9_setup_mask(cm, mi_row, mi_col, mi_8x8 + mi_col, cm->mode_info_stride, + &lfm); + + for (plane = 0; plane < num_planes; ++plane) { + vp9_filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm); + } + + sync_write(lf_sync, r, c, sb_cols); + } + } +} + +// Row-based multi-threaded loopfilter hook +static int loop_filter_row_worker(void *arg1, void *arg2) { + TileWorkerData *const tile_data = (TileWorkerData*)arg1; + LFWorkerData *const lf_data = &tile_data->lfdata; + + loop_filter_rows_mt(lf_data->frame_buffer, lf_data->cm, &lf_data->xd, + lf_data->start, lf_data->stop, lf_data->y_only, + lf_data->lf_sync, lf_data->num_lf_workers); + return 1; +} + +// VP9 decoder: Implement multi-threaded loopfilter that uses the tile +// threads. +void vp9_loop_filter_frame_mt(VP9D_COMP *pbi, + VP9_COMMON *cm, + MACROBLOCKD *xd, + int frame_filter_level, + int y_only, int partial_frame) { + // Number of superblock rows and cols + const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2; + int i; + + // Allocate memory used in thread synchronization. 
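[Note on the sync_read()/sync_write() pair above: the whole row-parallel loopfilter hangs on one invariant, rechecked every sync_range (nsync) columns. A hedged distillation:

    /* Row r may filter superblock column c only after row r-1 has
     * published progress past c + nsync: */
    while (c > lf_sync->cur_sb_col[r - 1] - nsync)
      pthread_cond_wait(&lf_sync->cond_[r - 1], &lf_sync->mutex_[r - 1]);

    /* Writers publish every nsync columns; the final column publishes
     * sb_cols + nsync so trailing rows can never block on a finished row.
     * Example with nsync = 4: row 1 at column 8 waits until
     * cur_sb_col[0] >= 12. */
]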
+ // This always needs to be done even if frame_filter_level is 0. + if (!cm->current_video_frame || cm->last_height != cm->height) { + VP9LfSync *const lf_sync = &pbi->lf_row_sync; + + if (cm->last_height != cm->height) { + const int aligned_last_height = + ALIGN_POWER_OF_TWO(cm->last_height, MI_SIZE_LOG2); + const int last_sb_rows = + mi_cols_aligned_to_sb(aligned_last_height >> MI_SIZE_LOG2) >> + MI_BLOCK_SIZE_LOG2; + + vp9_loop_filter_dealloc(lf_sync, last_sb_rows); + } + + vp9_loop_filter_alloc(cm, lf_sync, sb_rows, cm->width); + } + + if (!frame_filter_level) return; + + vp9_loop_filter_frame_init(cm, frame_filter_level); + + // Initialize cur_sb_col to -1 for all SB rows. + vpx_memset(pbi->lf_row_sync.cur_sb_col, -1, + sizeof(*pbi->lf_row_sync.cur_sb_col) * sb_rows); + + // Set up loopfilter thread data. + for (i = 0; i < pbi->num_tile_workers; ++i) { + VP9Worker *const worker = &pbi->tile_workers[i]; + TileWorkerData *const tile_data = (TileWorkerData*)worker->data1; + LFWorkerData *const lf_data = &tile_data->lfdata; + + worker->hook = (VP9WorkerHook)loop_filter_row_worker; + + // Loopfilter data + lf_data->frame_buffer = get_frame_new_buffer(cm); + lf_data->cm = cm; + lf_data->xd = pbi->mb; + lf_data->start = i; + lf_data->stop = sb_rows; + lf_data->y_only = y_only; // always do all planes in decoder + + lf_data->lf_sync = &pbi->lf_row_sync; + lf_data->num_lf_workers = pbi->num_tile_workers; + + // Start loopfiltering + if (i == pbi->num_tile_workers - 1) { + vp9_worker_execute(worker); + } else { + vp9_worker_launch(worker); + } + } + + // Wait till all rows are finished + for (i = 0; i < pbi->num_tile_workers; ++i) { + vp9_worker_sync(&pbi->tile_workers[i]); + } +} + +// Set up nsync by width. +static int get_sync_range(int width) { + // nsync numbers are picked by testing. For example, for 4k + // video, using 4 gives best performance. + if (width < 640) + return 1; + else if (width <= 1280) + return 2; + else if (width <= 4096) + return 4; + else + return 8; +} + +// Allocate memory for lf row synchronization +void vp9_loop_filter_alloc(VP9_COMMON *cm, VP9LfSync *lf_sync, int rows, + int width) { +#if CONFIG_MULTITHREAD + int i; + + CHECK_MEM_ERROR(cm, lf_sync->mutex_, + vpx_malloc(sizeof(*lf_sync->mutex_) * rows)); + for (i = 0; i < rows; ++i) { + pthread_mutex_init(&lf_sync->mutex_[i], NULL); + } + + CHECK_MEM_ERROR(cm, lf_sync->cond_, + vpx_malloc(sizeof(*lf_sync->cond_) * rows)); + for (i = 0; i < rows; ++i) { + pthread_cond_init(&lf_sync->cond_[i], NULL); + } +#endif // CONFIG_MULTITHREAD + + CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col, + vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows)); + + // Set up nsync. + lf_sync->sync_range = get_sync_range(width); +} + +// Deallocate lf synchronization related mutex and data +void vp9_loop_filter_dealloc(VP9LfSync *lf_sync, int rows) { +#if CONFIG_MULTITHREAD + if (lf_sync != NULL) { + int i; + + if (lf_sync->mutex_ != NULL) { + for (i = 0; i < rows; ++i) { + pthread_mutex_destroy(&lf_sync->mutex_[i]); + } + vpx_free(lf_sync->mutex_); + } + if (lf_sync->cond_ != NULL) { + for (i = 0; i < rows; ++i) { + pthread_cond_destroy(&lf_sync->cond_[i]); + } + vpx_free(lf_sync->cond_); + } + + vpx_free(lf_sync->cur_sb_col); + // clear the structure as the source of this call may be a resize in which + // case this call will be followed by an _alloc() which may fail. 
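[Note on the allocation policy above and the dealloc comment ending here: synchronization state is sized per superblock row, so a resize tears the old arrays down (computing the old row count from last_height) before allocating afresh, and vp9_loop_filter_dealloc() zeroes the struct so a failed re-alloc cannot free stale pointers twice. The sync_range chosen by get_sync_range() scales with frame width:

    /* Worked values from get_sync_range() above (sketch, not exhaustive):
     *   352x288 SIF   -> 1        1280x720 HD   -> 2
     *   1920x1080 FHD -> 4        3840x2160 UHD -> 4
     *   wider than 4096           -> 8                                   */
]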
+ vpx_memset(lf_sync, 0, sizeof(*lf_sync)); + } +#else + (void)rows; + if (lf_sync != NULL) { + vpx_free(lf_sync->cur_sb_col); + vpx_memset(lf_sync, 0, sizeof(*lf_sync)); + } +#endif // CONFIG_MULTITHREAD +} diff --git a/libvpx/vp9/decoder/vp9_dthread.h b/libvpx/vp9/decoder/vp9_dthread.h new file mode 100644 index 0000000..6d4450f --- /dev/null +++ b/libvpx/vp9/decoder/vp9_dthread.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VP9_DECODER_VP9_DTHREAD_H_ +#define VP9_DECODER_VP9_DTHREAD_H_ + +#include "./vpx_config.h" +#include "vp9/common/vp9_loopfilter.h" +#include "vp9/decoder/vp9_reader.h" +#include "vp9/decoder/vp9_thread.h" + +struct macroblockd; +struct VP9Common; +struct VP9Decompressor; + +typedef struct TileWorkerData { + struct VP9Common *cm; + vp9_reader bit_reader; + DECLARE_ALIGNED(16, struct macroblockd, xd); + DECLARE_ALIGNED(16, int16_t, dqcoeff[MAX_MB_PLANE][64 * 64]); + + // Row-based parallel loopfilter data + LFWorkerData lfdata; +} TileWorkerData; + +// Loopfilter row synchronization +typedef struct VP9LfSyncData { +#if CONFIG_MULTITHREAD + pthread_mutex_t *mutex_; + pthread_cond_t *cond_; +#endif + // Allocate memory to store the loop-filtered superblock index in each row. + int *cur_sb_col; + // The optimal sync_range for different resolution and platform should be + // determined by testing. Currently, it is chosen to be a power-of-2 number. + int sync_range; +} VP9LfSync; + +// Allocate memory for loopfilter row synchronization. +void vp9_loop_filter_alloc(struct VP9Common *cm, struct VP9LfSyncData *lf_sync, + int rows, int width); + +// Deallocate loopfilter synchronization related mutex and data. +void vp9_loop_filter_dealloc(struct VP9LfSyncData *lf_sync, int rows); + +// Multi-threaded loopfilter that uses the tile threads. +void vp9_loop_filter_frame_mt(struct VP9Decompressor *pbi, + struct VP9Common *cm, + struct macroblockd *xd, + int frame_filter_level, + int y_only, int partial_frame); + +#endif // VP9_DECODER_VP9_DTHREAD_H_ diff --git a/libvpx/vp9/decoder/vp9_onyxd.h b/libvpx/vp9/decoder/vp9_onyxd.h deleted file mode 100644 index a4b9c24..0000000 --- a/libvpx/vp9/decoder/vp9_onyxd.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP9_DECODER_VP9_ONYXD_H_ -#define VP9_DECODER_VP9_ONYXD_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include "vpx_scale/yv12config.h" -#include "vp9/common/vp9_ppflags.h" -#include "vpx/vpx_codec.h" - -typedef void *VP9D_PTR; - -typedef struct { - int width; - int height; - int version; - int postprocess; - int max_threads; - int inv_tile_order; - int input_partition; -} VP9D_CONFIG; - -typedef enum { - VP9_LAST_FLAG = 1, - VP9_GOLD_FLAG = 2, - VP9_ALT_FLAG = 4 -} VP9_REFFRAME; - -void vp9_initialize_dec(); - -int vp9_receive_compressed_data(VP9D_PTR comp, - size_t size, const uint8_t **dest, - int64_t time_stamp); - -int vp9_get_raw_frame(VP9D_PTR comp, YV12_BUFFER_CONFIG *sd, - int64_t *time_stamp, int64_t *time_end_stamp, - vp9_ppflags_t *flags); - -vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR comp, - VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd); - -vpx_codec_err_t vp9_set_reference_dec(VP9D_PTR comp, - VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd); - -int vp9_get_reference_dec(VP9D_PTR ptr, int index, YV12_BUFFER_CONFIG **fb); - - -VP9D_PTR vp9_create_decompressor(VP9D_CONFIG *oxcf); - -void vp9_remove_decompressor(VP9D_PTR comp); - -#ifdef __cplusplus -} -#endif - -#endif // VP9_DECODER_VP9_ONYXD_H_ diff --git a/libvpx/vp9/decoder/vp9_onyxd_int.h b/libvpx/vp9/decoder/vp9_onyxd_int.h deleted file mode 100644 index d3d29e9..0000000 --- a/libvpx/vp9/decoder/vp9_onyxd_int.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP9_DECODER_VP9_ONYXD_INT_H_ -#define VP9_DECODER_VP9_ONYXD_INT_H_ - -#include "./vpx_config.h" - -#include "vp9/common/vp9_onyxc_int.h" -#include "vp9/decoder/vp9_onyxd.h" -#include "vp9/decoder/vp9_thread.h" - -typedef struct VP9Decompressor { - DECLARE_ALIGNED(16, MACROBLOCKD, mb); - - DECLARE_ALIGNED(16, VP9_COMMON, common); - - DECLARE_ALIGNED(16, int16_t, qcoeff[MAX_MB_PLANE][64 * 64]); - DECLARE_ALIGNED(16, int16_t, dqcoeff[MAX_MB_PLANE][64 * 64]); - DECLARE_ALIGNED(16, uint16_t, eobs[MAX_MB_PLANE][256]); - - VP9D_CONFIG oxcf; - - const uint8_t *source; - size_t source_sz; - - int64_t last_time_stamp; - int ready_for_new_data; - - int refresh_frame_flags; - - int decoded_key_frame; - - int initial_width; - int initial_height; - - int do_loopfilter_inline; // apply loopfilter to available rows immediately - VP9Worker lf_worker; - - VP9Worker *tile_workers; - int num_tile_workers; - - /* Each tile column has its own MODE_INFO stream. This array indexes them by - tile column index. */ - MODE_INFO **mi_streams; - - ENTROPY_CONTEXT *above_context[MAX_MB_PLANE]; - PARTITION_CONTEXT *above_seg_context; - - DECLARE_ALIGNED(16, uint8_t, token_cache[1024]); -} VP9D_COMP; - -#endif // VP9_DECODER_VP9_ONYXD_INT_H_ diff --git a/libvpx/vp9/decoder/vp9_read_bit_buffer.c b/libvpx/vp9/decoder/vp9_read_bit_buffer.c new file mode 100644 index 0000000..778a635 --- /dev/null +++ b/libvpx/vp9/decoder/vp9_read_bit_buffer.c @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013 The WebM project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "vp9/decoder/vp9_read_bit_buffer.h" + +size_t vp9_rb_bytes_read(struct vp9_read_bit_buffer *rb) { + return rb->bit_offset / CHAR_BIT + (rb->bit_offset % CHAR_BIT > 0); +} + +int vp9_rb_read_bit(struct vp9_read_bit_buffer *rb) { + const size_t off = rb->bit_offset; + const size_t p = off / CHAR_BIT; + const int q = CHAR_BIT - 1 - (int)off % CHAR_BIT; + if (rb->bit_buffer + p >= rb->bit_buffer_end) { + rb->error_handler(rb->error_handler_data); + return 0; + } else { + const int bit = (rb->bit_buffer[p] & (1 << q)) >> q; + rb->bit_offset = off + 1; + return bit; + } +} + +int vp9_rb_read_literal(struct vp9_read_bit_buffer *rb, int bits) { + int value = 0, bit; + for (bit = bits - 1; bit >= 0; bit--) + value |= vp9_rb_read_bit(rb) << bit; + return value; +} + +int vp9_rb_read_signed_literal(struct vp9_read_bit_buffer *rb, + int bits) { + const int value = vp9_rb_read_literal(rb, bits); + return vp9_rb_read_bit(rb) ? -value : value; +} diff --git a/libvpx/vp9/decoder/vp9_read_bit_buffer.h b/libvpx/vp9/decoder/vp9_read_bit_buffer.h index 41a6868..fc88bd7 100644 --- a/libvpx/vp9/decoder/vp9_read_bit_buffer.h +++ b/libvpx/vp9/decoder/vp9_read_bit_buffer.h @@ -15,7 +15,11 @@ #include "vpx/vpx_integer.h" -typedef void (*vp9_rb_error_handler)(void *data, size_t bit_offset); +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (*vp9_rb_error_handler)(void *data); struct vp9_read_bit_buffer { const uint8_t *bit_buffer; @@ -26,35 +30,16 @@ struct vp9_read_bit_buffer { vp9_rb_error_handler error_handler; }; -static size_t vp9_rb_bytes_read(struct vp9_read_bit_buffer *rb) { - return rb->bit_offset / CHAR_BIT + (rb->bit_offset % CHAR_BIT > 0); -} - -static int vp9_rb_read_bit(struct vp9_read_bit_buffer *rb) { - const size_t off = rb->bit_offset; - const size_t p = off / CHAR_BIT; - const int q = CHAR_BIT - 1 - (int)off % CHAR_BIT; - if (rb->bit_buffer + p >= rb->bit_buffer_end) { - rb->error_handler(rb->error_handler_data, rb->bit_offset); - return 0; - } else { - const int bit = (rb->bit_buffer[p] & (1 << q)) >> q; - rb->bit_offset = off + 1; - return bit; - } -} - -static int vp9_rb_read_literal(struct vp9_read_bit_buffer *rb, int bits) { - int value = 0, bit; - for (bit = bits - 1; bit >= 0; bit--) - value |= vp9_rb_read_bit(rb) << bit; - return value; -} - -static int vp9_rb_read_signed_literal(struct vp9_read_bit_buffer *rb, - int bits) { - const int value = vp9_rb_read_literal(rb, bits); - return vp9_rb_read_bit(rb) ? 
-value : value; -} +size_t vp9_rb_bytes_read(struct vp9_read_bit_buffer *rb); + +int vp9_rb_read_bit(struct vp9_read_bit_buffer *rb); + +int vp9_rb_read_literal(struct vp9_read_bit_buffer *rb, int bits); + +int vp9_rb_read_signed_literal(struct vp9_read_bit_buffer *rb, int bits); + +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_DECODER_VP9_READ_BIT_BUFFER_H_ diff --git a/libvpx/vp9/decoder/vp9_dboolhuff.c b/libvpx/vp9/decoder/vp9_reader.c index 06acec4..fb44c88 100644 --- a/libvpx/vp9/decoder/vp9_dboolhuff.c +++ b/libvpx/vp9/decoder/vp9_reader.c @@ -11,39 +11,35 @@ #include "vpx_ports/mem.h" #include "vpx_mem/vpx_mem.h" -#include "vp9/decoder/vp9_dboolhuff.h" +#include "vp9/decoder/vp9_reader.h" // This is meant to be a large, positive constant that can still be efficiently // loaded as an immediate (on platforms like ARM, for example). // Even relatively modest values like 100 would work fine. #define LOTS_OF_BITS 0x40000000 - int vp9_reader_init(vp9_reader *r, const uint8_t *buffer, size_t size) { - int marker_bit; - - r->buffer_end = buffer + size; - r->buffer = buffer; - r->value = 0; - r->count = -8; - r->range = 255; - - if (size && !buffer) + if (size && !buffer) { return 1; - - vp9_reader_fill(r); - marker_bit = vp9_read_bit(r); - return marker_bit != 0; + } else { + r->buffer_end = buffer + size; + r->buffer = buffer; + r->value = 0; + r->count = -8; + r->range = 255; + vp9_reader_fill(r); + return vp9_read_bit(r) != 0; // marker bit + } } void vp9_reader_fill(vp9_reader *r) { const uint8_t *const buffer_end = r->buffer_end; const uint8_t *buffer = r->buffer; - VP9_BD_VALUE value = r->value; + BD_VALUE value = r->value; int count = r->count; - int shift = BD_VALUE_SIZE - 8 - (count + 8); + int shift = BD_VALUE_SIZE - CHAR_BIT - (count + CHAR_BIT); int loop_end = 0; - const int bits_left = (int)((buffer_end - buffer)*CHAR_BIT); + const int bits_left = (int)((buffer_end - buffer) * CHAR_BIT); const int x = shift + CHAR_BIT - bits_left; if (x >= 0) { @@ -54,7 +50,7 @@ void vp9_reader_fill(vp9_reader *r) { if (x < 0 || bits_left) { while (shift >= loop_end) { count += CHAR_BIT; - value |= (VP9_BD_VALUE)*buffer++ << shift; + value |= (BD_VALUE)*buffer++ << shift; shift -= CHAR_BIT; } } diff --git a/libvpx/vp9/decoder/vp9_dboolhuff.h b/libvpx/vp9/decoder/vp9_reader.h index fd8e74c..8fe6acb 100644 --- a/libvpx/vp9/decoder/vp9_dboolhuff.h +++ b/libvpx/vp9/decoder/vp9_reader.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. 
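[Note on the renamed boolean decoder (vp9_dboolhuff -> vp9_reader) above: vp9_reader_init() now folds the marker-bit check into its return value, and the fill path uses BD_VALUE/CHAR_BIT consistently. The reader state is three fields: value buffers upcoming stream bytes at the top of a size_t, count tracks buffered bits relative to an 8-bit working window (it starts at -8, forcing an immediate fill), and range is the 8-bit arithmetic-coding range, starting at 255. A hedged init idiom implied by the new return contract:

    vp9_reader r;
    if (vp9_reader_init(&r, data, size))  /* nonzero: NULL buffer, or the */
      return -1;                          /* leading marker bit was set   */
]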
*/ -#ifndef VP9_DECODER_VP9_DBOOLHUFF_H_ -#define VP9_DECODER_VP9_DBOOLHUFF_H_ +#ifndef VP9_DECODER_VP9_READER_H_ +#define VP9_DECODER_VP9_READER_H_ #include <stddef.h> #include <limits.h> @@ -18,46 +18,52 @@ #include "vpx_ports/mem.h" #include "vpx/vpx_integer.h" -typedef size_t VP9_BD_VALUE; +#include "vp9/common/vp9_prob.h" -#define BD_VALUE_SIZE ((int)sizeof(VP9_BD_VALUE)*CHAR_BIT) +#ifdef __cplusplus +extern "C" { +#endif + +typedef size_t BD_VALUE; + +#define BD_VALUE_SIZE ((int)sizeof(BD_VALUE) * CHAR_BIT) typedef struct { const uint8_t *buffer_end; const uint8_t *buffer; - VP9_BD_VALUE value; + BD_VALUE value; int count; unsigned int range; } vp9_reader; -DECLARE_ALIGNED(16, extern const uint8_t, vp9_norm[256]); - int vp9_reader_init(vp9_reader *r, const uint8_t *buffer, size_t size); void vp9_reader_fill(vp9_reader *r); +int vp9_reader_has_error(vp9_reader *r); + const uint8_t *vp9_reader_find_end(vp9_reader *r); -static int vp9_read(vp9_reader *br, int probability) { +static int vp9_read(vp9_reader *r, int prob) { unsigned int bit = 0; - VP9_BD_VALUE value; - VP9_BD_VALUE bigsplit; + BD_VALUE value; + BD_VALUE bigsplit; int count; unsigned int range; - unsigned int split = ((br->range * probability) + (256 - probability)) >> 8; + unsigned int split = (r->range * prob + (256 - prob)) >> CHAR_BIT; - if (br->count < 0) - vp9_reader_fill(br); + if (r->count < 0) + vp9_reader_fill(r); - value = br->value; - count = br->count; + value = r->value; + count = r->count; - bigsplit = (VP9_BD_VALUE)split << (BD_VALUE_SIZE - 8); + bigsplit = (BD_VALUE)split << (BD_VALUE_SIZE - CHAR_BIT); range = split; if (value >= bigsplit) { - range = br->range - split; + range = r->range - split; value = value - bigsplit; bit = 1; } @@ -68,9 +74,9 @@ static int vp9_read(vp9_reader *br, int probability) { value <<= shift; count -= shift; } - br->value = value; - br->count = count; - br->range = range; + r->value = value; + r->count = count; + r->range = range; return bit; } @@ -79,15 +85,27 @@ static int vp9_read_bit(vp9_reader *r) { return vp9_read(r, 128); // vp9_prob_half } -static int vp9_read_literal(vp9_reader *br, int bits) { - int z = 0, bit; +static int vp9_read_literal(vp9_reader *r, int bits) { + int literal = 0, bit; for (bit = bits - 1; bit >= 0; bit--) - z |= vp9_read_bit(br) << bit; + literal |= vp9_read_bit(r) << bit; - return z; + return literal; } -int vp9_reader_has_error(vp9_reader *r); +static int vp9_read_tree(vp9_reader *r, const vp9_tree_index *tree, + const vp9_prob *probs) { + vp9_tree_index i = 0; + + while ((i = tree[i + vp9_read(r, probs[i >> 1])]) > 0) + continue; + + return -i; +} + +#ifdef __cplusplus +} // extern "C" +#endif -#endif // VP9_DECODER_VP9_DBOOLHUFF_H_ +#endif // VP9_DECODER_VP9_READER_H_ diff --git a/libvpx/vp9/decoder/vp9_thread.c b/libvpx/vp9/decoder/vp9_thread.c index d953e72..5d31d3d 100644 --- a/libvpx/vp9/decoder/vp9_thread.c +++ b/libvpx/vp9/decoder/vp9_thread.c @@ -24,116 +24,6 @@ extern "C" { #if CONFIG_MULTITHREAD -#if defined(_WIN32) - -//------------------------------------------------------------------------------ -// simplistic pthread emulation layer - -#include <process.h> // NOLINT - -// _beginthreadex requires __stdcall -#define THREADFN unsigned int __stdcall -#define THREAD_RETURN(val) (unsigned int)((DWORD_PTR)val) - -static int pthread_create(pthread_t* const thread, const void* attr, - unsigned int (__stdcall *start)(void*), void* arg) { - (void)attr; - *thread = (pthread_t)_beginthreadex(NULL, /* void *security */ - 0, /* unsigned 
stack_size */ - start, - arg, - 0, /* unsigned initflag */ - NULL); /* unsigned *thrdaddr */ - if (*thread == NULL) return 1; - SetThreadPriority(*thread, THREAD_PRIORITY_ABOVE_NORMAL); - return 0; -} - -static int pthread_join(pthread_t thread, void** value_ptr) { - (void)value_ptr; - return (WaitForSingleObject(thread, INFINITE) != WAIT_OBJECT_0 || - CloseHandle(thread) == 0); -} - -// Mutex -static int pthread_mutex_init(pthread_mutex_t* const mutex, void* mutexattr) { - (void)mutexattr; - InitializeCriticalSection(mutex); - return 0; -} - -static int pthread_mutex_lock(pthread_mutex_t* const mutex) { - EnterCriticalSection(mutex); - return 0; -} - -static int pthread_mutex_unlock(pthread_mutex_t* const mutex) { - LeaveCriticalSection(mutex); - return 0; -} - -static int pthread_mutex_destroy(pthread_mutex_t* const mutex) { - DeleteCriticalSection(mutex); - return 0; -} - -// Condition -static int pthread_cond_destroy(pthread_cond_t* const condition) { - int ok = 1; - ok &= (CloseHandle(condition->waiting_sem_) != 0); - ok &= (CloseHandle(condition->received_sem_) != 0); - ok &= (CloseHandle(condition->signal_event_) != 0); - return !ok; -} - -static int pthread_cond_init(pthread_cond_t* const condition, void* cond_attr) { - (void)cond_attr; - condition->waiting_sem_ = CreateSemaphore(NULL, 0, 1, NULL); - condition->received_sem_ = CreateSemaphore(NULL, 0, 1, NULL); - condition->signal_event_ = CreateEvent(NULL, FALSE, FALSE, NULL); - if (condition->waiting_sem_ == NULL || - condition->received_sem_ == NULL || - condition->signal_event_ == NULL) { - pthread_cond_destroy(condition); - return 1; - } - return 0; -} - -static int pthread_cond_signal(pthread_cond_t* const condition) { - int ok = 1; - if (WaitForSingleObject(condition->waiting_sem_, 0) == WAIT_OBJECT_0) { - // a thread is waiting in pthread_cond_wait: allow it to be notified - ok = SetEvent(condition->signal_event_); - // wait until the event is consumed so the signaler cannot consume - // the event via its own pthread_cond_wait. 
- ok &= (WaitForSingleObject(condition->received_sem_, INFINITE) != - WAIT_OBJECT_0); - } - return !ok; -} - -static int pthread_cond_wait(pthread_cond_t* const condition, - pthread_mutex_t* const mutex) { - int ok; - // note that there is a consumer available so the signal isn't dropped in - // pthread_cond_signal - if (!ReleaseSemaphore(condition->waiting_sem_, 1, NULL)) - return 1; - // now unlock the mutex so pthread_cond_signal may be issued - pthread_mutex_unlock(mutex); - ok = (WaitForSingleObject(condition->signal_event_, INFINITE) == - WAIT_OBJECT_0); - ok &= ReleaseSemaphore(condition->received_sem_, 1, NULL); - pthread_mutex_lock(mutex); - return !ok; -} - -#else // _WIN32 -# define THREADFN void* -# define THREAD_RETURN(val) val -#endif - //------------------------------------------------------------------------------ static THREADFN thread_loop(void *ptr) { // thread loop diff --git a/libvpx/vp9/decoder/vp9_thread.h b/libvpx/vp9/decoder/vp9_thread.h index a624f3c..2f8728d 100644 --- a/libvpx/vp9/decoder/vp9_thread.h +++ b/libvpx/vp9/decoder/vp9_thread.h @@ -19,14 +19,15 @@ #include "./vpx_config.h" -#if defined(__cplusplus) || defined(c_plusplus) +#ifdef __cplusplus extern "C" { #endif #if CONFIG_MULTITHREAD #if defined(_WIN32) - +#include <errno.h> // NOLINT +#include <process.h> // NOLINT #include <windows.h> // NOLINT typedef HANDLE pthread_t; typedef CRITICAL_SECTION pthread_mutex_t; @@ -36,12 +37,120 @@ typedef struct { HANDLE signal_event_; } pthread_cond_t; -#else - +//------------------------------------------------------------------------------ +// simplistic pthread emulation layer + +// _beginthreadex requires __stdcall +#define THREADFN unsigned int __stdcall +#define THREAD_RETURN(val) (unsigned int)((DWORD_PTR)val) + +static INLINE int pthread_create(pthread_t* const thread, const void* attr, + unsigned int (__stdcall *start)(void*), + void* arg) { + (void)attr; + *thread = (pthread_t)_beginthreadex(NULL, /* void *security */ + 0, /* unsigned stack_size */ + start, + arg, + 0, /* unsigned initflag */ + NULL); /* unsigned *thrdaddr */ + if (*thread == NULL) return 1; + SetThreadPriority(*thread, THREAD_PRIORITY_ABOVE_NORMAL); + return 0; +} + +static INLINE int pthread_join(pthread_t thread, void** value_ptr) { + (void)value_ptr; + return (WaitForSingleObject(thread, INFINITE) != WAIT_OBJECT_0 || + CloseHandle(thread) == 0); +} + +// Mutex +static INLINE int pthread_mutex_init(pthread_mutex_t *const mutex, + void* mutexattr) { + (void)mutexattr; + InitializeCriticalSection(mutex); + return 0; +} + +static INLINE int pthread_mutex_trylock(pthread_mutex_t *const mutex) { + return TryEnterCriticalSection(mutex) ? 
0 : EBUSY; +} + +static INLINE int pthread_mutex_lock(pthread_mutex_t *const mutex) { + EnterCriticalSection(mutex); + return 0; +} + +static INLINE int pthread_mutex_unlock(pthread_mutex_t *const mutex) { + LeaveCriticalSection(mutex); + return 0; +} + +static INLINE int pthread_mutex_destroy(pthread_mutex_t *const mutex) { + DeleteCriticalSection(mutex); + return 0; +} + +// Condition +static INLINE int pthread_cond_destroy(pthread_cond_t *const condition) { + int ok = 1; + ok &= (CloseHandle(condition->waiting_sem_) != 0); + ok &= (CloseHandle(condition->received_sem_) != 0); + ok &= (CloseHandle(condition->signal_event_) != 0); + return !ok; +} + +static INLINE int pthread_cond_init(pthread_cond_t *const condition, + void* cond_attr) { + (void)cond_attr; + condition->waiting_sem_ = CreateSemaphore(NULL, 0, 1, NULL); + condition->received_sem_ = CreateSemaphore(NULL, 0, 1, NULL); + condition->signal_event_ = CreateEvent(NULL, FALSE, FALSE, NULL); + if (condition->waiting_sem_ == NULL || + condition->received_sem_ == NULL || + condition->signal_event_ == NULL) { + pthread_cond_destroy(condition); + return 1; + } + return 0; +} + +static INLINE int pthread_cond_signal(pthread_cond_t *const condition) { + int ok = 1; + if (WaitForSingleObject(condition->waiting_sem_, 0) == WAIT_OBJECT_0) { + // a thread is waiting in pthread_cond_wait: allow it to be notified + ok = SetEvent(condition->signal_event_); + // wait until the event is consumed so the signaler cannot consume + // the event via its own pthread_cond_wait. + ok &= (WaitForSingleObject(condition->received_sem_, INFINITE) != + WAIT_OBJECT_0); + } + return !ok; +} + +static INLINE int pthread_cond_wait(pthread_cond_t *const condition, + pthread_mutex_t *const mutex) { + int ok; + // note that there is a consumer available so the signal isn't dropped in + // pthread_cond_signal + if (!ReleaseSemaphore(condition->waiting_sem_, 1, NULL)) + return 1; + // now unlock the mutex so pthread_cond_signal may be issued + pthread_mutex_unlock(mutex); + ok = (WaitForSingleObject(condition->signal_event_, INFINITE) == + WAIT_OBJECT_0); + ok &= ReleaseSemaphore(condition->received_sem_, 1, NULL); + pthread_mutex_lock(mutex); + return !ok; +} +#else // _WIN32 #include <pthread.h> // NOLINT +# define THREADFN void* +# define THREAD_RETURN(val) val +#endif -#endif /* _WIN32 */ -#endif /* CONFIG_MULTITHREAD */ +#endif // CONFIG_MULTITHREAD // State of the worker thread object typedef enum { @@ -91,7 +200,7 @@ void vp9_worker_end(VP9Worker* const worker); //------------------------------------------------------------------------------ -#if defined(__cplusplus) || defined(c_plusplus) +#ifdef __cplusplus } // extern "C" #endif diff --git a/libvpx/vp9/decoder/vp9_treereader.h b/libvpx/vp9/decoder/vp9_treereader.h deleted file mode 100644 index 41680d2..0000000 --- a/libvpx/vp9/decoder/vp9_treereader.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
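[Note ahead of the vp9_treereader.h removal beginning just above: its treed_read() survives, cleaned up, as vp9_read_tree() in vp9_reader.h (shown earlier). A hedged refresher on the table layout that loop walks: entries come in pairs indexed by the decoded bit; nonnegative entries point at the next pair, and leaves are stored negated so the loop can return -i as the token. A hypothetical three-symbol alphabet:

    /* Hypothetical tokens A, B, C (assumed enum values, not libvpx's): */
    static const vp9_tree_index tree[4] = {
      -A, 2,    /* node 0: bit 0 -> leaf A, bit 1 -> node at index 2 */
      -B, -C    /* node 2: bit 0 -> leaf B, bit 1 -> leaf C          */
    };
    /* probs[0] gates the first branch, probs[1] (= probs[2 >> 1]) the
     * second, matching the probs[i >> 1] indexing in vp9_read_tree(). */
]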
- */ - - -#ifndef VP9_DECODER_VP9_TREEREADER_H_ -#define VP9_DECODER_VP9_TREEREADER_H_ - -#include "vp9/common/vp9_treecoder.h" -#include "vp9/decoder/vp9_dboolhuff.h" - -// Intent of tree data structure is to make decoding trivial. -static int treed_read(vp9_reader *const r, /* !!! must return a 0 or 1 !!! */ - vp9_tree t, - const vp9_prob *const p) { - register vp9_tree_index i = 0; - - while ((i = t[ i + vp9_read(r, p[i >> 1])]) > 0) - continue; - - return -i; -} - -#endif // VP9_DECODER_VP9_TREEREADER_H_ diff --git a/libvpx/vp9/encoder/vp9_bitstream.c b/libvpx/vp9/encoder/vp9_bitstream.c index efbadba..1b4a6cc 100644 --- a/libvpx/vp9/encoder/vp9_bitstream.c +++ b/libvpx/vp9/encoder/vp9_bitstream.c @@ -14,179 +14,72 @@ #include "vpx/vpx_encoder.h" #include "vpx_mem/vpx_mem.h" +#include "vpx_ports/mem_ops.h" +#include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_entropymode.h" #include "vp9/common/vp9_entropymv.h" -#include "vp9/common/vp9_findnearmv.h" -#include "vp9/common/vp9_tile_common.h" -#include "vp9/common/vp9_seg_common.h" -#include "vp9/common/vp9_pred_common.h" -#include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_mvref_common.h" -#include "vp9/common/vp9_treecoder.h" -#include "vp9/common/vp9_systemdependent.h" #include "vp9/common/vp9_pragmas.h" +#include "vp9/common/vp9_pred_common.h" +#include "vp9/common/vp9_seg_common.h" +#include "vp9/common/vp9_systemdependent.h" +#include "vp9/common/vp9_tile_common.h" -#include "vp9/encoder/vp9_mcomp.h" -#include "vp9/encoder/vp9_encodemv.h" +#include "vp9/encoder/vp9_cost.h" #include "vp9/encoder/vp9_bitstream.h" +#include "vp9/encoder/vp9_encodemv.h" +#include "vp9/encoder/vp9_mcomp.h" #include "vp9/encoder/vp9_segmentation.h" #include "vp9/encoder/vp9_subexp.h" +#include "vp9/encoder/vp9_tokenize.h" #include "vp9/encoder/vp9_write_bit_buffer.h" +static struct vp9_token intra_mode_encodings[INTRA_MODES]; +static struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS]; +static struct vp9_token partition_encodings[PARTITION_TYPES]; +static struct vp9_token inter_mode_encodings[INTER_MODES]; -#if defined(SECTIONBITS_OUTPUT) -unsigned __int64 Sectionbits[500]; -#endif - -#ifdef ENTROPY_STATS -int intra_mode_stats[INTRA_MODES] - [INTRA_MODES] - [INTRA_MODES]; -vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES]; - -extern unsigned int active_section; -#endif - - -#ifdef MODE_STATS -int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES]; -int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1]; -int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2]; -int64_t switchable_interp_stats[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS]; - -void init_tx_count_stats() { - vp9_zero(tx_count_32x32p_stats); - vp9_zero(tx_count_16x16p_stats); - vp9_zero(tx_count_8x8p_stats); -} - -void init_switchable_interp_stats() { - vp9_zero(switchable_interp_stats); +void vp9_entropy_mode_init() { + vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree); + vp9_tokens_from_tree(switchable_interp_encodings, vp9_switchable_interp_tree); + vp9_tokens_from_tree(partition_encodings, vp9_partition_tree); + vp9_tokens_from_tree(inter_mode_encodings, vp9_inter_mode_tree); } -static void update_tx_count_stats(VP9_COMMON *cm) { - int i, j; - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - for (j = 0; j < TX_SIZES; j++) { - tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j]; - } - } - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - for (j = 0; j < TX_SIZES - 1; j++) { - tx_count_16x16p_stats[i][j] += 
cm->fc.tx_count_16x16p[i][j]; - } - } - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - for (j = 0; j < TX_SIZES - 2; j++) { - tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j]; - } - } +static void write_intra_mode(vp9_writer *w, MB_PREDICTION_MODE mode, + const vp9_prob *probs) { + vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]); } -static void update_switchable_interp_stats(VP9_COMMON *cm) { - int i, j; - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - for (j = 0; j < SWITCHABLE_FILTERS; ++j) - switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j]; -} - -void write_tx_count_stats() { - int i, j; - FILE *fp = fopen("tx_count.bin", "wb"); - fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp); - fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp); - fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp); - fclose(fp); - - printf( - "vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZES] = {\n"); - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - printf(" { "); - for (j = 0; j < TX_SIZES; j++) { - printf("%"PRId64", ", tx_count_32x32p_stats[i][j]); - } - printf("},\n"); - } - printf("};\n"); - printf( - "vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZES-1] = {\n"); - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - printf(" { "); - for (j = 0; j < TX_SIZES - 1; j++) { - printf("%"PRId64", ", tx_count_16x16p_stats[i][j]); - } - printf("},\n"); - } - printf("};\n"); - printf( - "vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZES-2] = {\n"); - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - printf(" { "); - for (j = 0; j < TX_SIZES - 2; j++) { - printf("%"PRId64", ", tx_count_8x8p_stats[i][j]); - } - printf("},\n"); - } - printf("};\n"); -} - -void write_switchable_interp_stats() { - int i, j; - FILE *fp = fopen("switchable_interp.bin", "wb"); - fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp); - fclose(fp); - - printf( - "vp9_default_switchable_filter_count[SWITCHABLE_FILTER_CONTEXTS]" - "[SWITCHABLE_FILTERS] = {\n"); - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { - printf(" { "); - for (j = 0; j < SWITCHABLE_FILTERS; j++) { - printf("%"PRId64", ", switchable_interp_stats[i][j]); - } - printf("},\n"); - } - printf("};\n"); -} -#endif - -static INLINE void write_be32(uint8_t *p, int value) { - p[0] = value >> 24; - p[1] = value >> 16; - p[2] = value >> 8; - p[3] = value; +static void write_inter_mode(vp9_writer *w, MB_PREDICTION_MODE mode, + const vp9_prob *probs) { + assert(is_inter_mode(mode)); + vp9_write_token(w, vp9_inter_mode_tree, probs, + &inter_mode_encodings[INTER_OFFSET(mode)]); } -void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb, - int data, int max) { +static void encode_unsigned_max(struct vp9_write_bit_buffer *wb, + int data, int max) { vp9_wb_write_literal(wb, data, get_unsigned_bits(max)); } -static void update_mode(vp9_writer *w, int n, vp9_tree tree, - vp9_prob Pcur[/* n-1 */], - unsigned int bct[/* n-1 */][2], - const unsigned int num_events[/* n */]) { - int i = 0; +static void prob_diff_update(const vp9_tree_index *tree, + vp9_prob probs[/*n - 1*/], + const unsigned int counts[/*n - 1*/], + int n, vp9_writer *w) { + int i; + unsigned int branch_ct[32][2]; - vp9_tree_probs_from_distribution(tree, bct, num_events); - for (i = 0; i < n - 1; ++i) - vp9_cond_prob_diff_update(w, &Pcur[i], bct[i]); -} + // Assuming max number of probabilities <= 32 + assert(n <= 32); -static void update_mbintra_mode_probs(VP9_COMP* const cpi, - vp9_writer* const bc) { - 
VP9_COMMON *const cm = &cpi->common; - int j; - unsigned int bct[INTRA_MODES - 1][2]; - - for (j = 0; j < BLOCK_SIZE_GROUPS; j++) - update_mode(bc, INTRA_MODES, vp9_intra_mode_tree, - cm->fc.y_mode_prob[j], bct, - (unsigned int *)cpi->y_mode_count[j]); + vp9_tree_probs_from_distribution(tree, branch_ct, counts); + for (i = 0; i < n - 1; ++i) + vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]); } -static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m, +static void write_selected_tx_size(const VP9_COMP *cpi, TX_SIZE tx_size, BLOCK_SIZE bsize, vp9_writer *w) { const TX_SIZE max_tx_size = max_txsize_lookup[bsize]; @@ -201,66 +94,35 @@ static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m, } } -static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m, - vp9_writer *w) { +static int write_skip(const VP9_COMP *cpi, int segment_id, const MODE_INFO *mi, + vp9_writer *w) { const MACROBLOCKD *const xd = &cpi->mb.e_mbd; if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) { return 1; } else { - const int skip_coeff = m->mbmi.skip_coeff; - vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd)); - return skip_coeff; + const int skip = mi->mbmi.skip; + vp9_write(w, skip, vp9_get_skip_prob(&cpi->common, xd)); + return skip; } } -void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) { - VP9_COMMON *cm = &cpi->common; +static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) { int k; - for (k = 0; k < MBSKIP_CONTEXTS; ++k) - vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], cm->counts.mbskip[k]); -} - -static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) { - write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m); + for (k = 0; k < SKIP_CONTEXTS; ++k) + vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]); } -static void update_switchable_interp_probs(VP9_COMP *cpi, vp9_writer *w) { - VP9_COMMON *const cm = &cpi->common; - unsigned int branch_ct[SWITCHABLE_FILTERS - 1][2]; - int i, j; - for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) { - vp9_tree_probs_from_distribution(vp9_switchable_interp_tree, branch_ct, - cm->counts.switchable_interp[j]); - - for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) - vp9_cond_prob_diff_update(w, &cm->fc.switchable_interp_prob[j][i], - branch_ct[i]); - } - -#ifdef MODE_STATS - if (!cpi->dummy_packing) - update_switchable_interp_stats(cm); -#endif -} - -static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer *w) { - int i, j; - - for (i = 0; i < INTER_MODE_CONTEXTS; ++i) { - unsigned int branch_ct[INTER_MODES - 1][2]; - vp9_tree_probs_from_distribution(vp9_inter_mode_tree, branch_ct, - cm->counts.inter_mode[i]); - - for (j = 0; j < INTER_MODES - 1; ++j) - vp9_cond_prob_diff_update(w, &cm->fc.inter_mode_probs[i][j], - branch_ct[j]); - } +static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w) { + int j; + for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) + prob_diff_update(vp9_switchable_interp_tree, + cm->fc.switchable_interp_prob[j], + cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w); } -static void pack_mb_tokens(vp9_writer* const w, - TOKENEXTRA **tp, - const TOKENEXTRA *const stop) { +static void pack_mb_tokens(vp9_writer *w, + TOKENEXTRA **tp, const TOKENEXTRA *stop) { TOKENEXTRA *p = *tp; while (p < stop && p->token != EOSB_TOKEN) { @@ -268,18 +130,8 @@ static void pack_mb_tokens(vp9_writer* const w, const struct vp9_token *const a = &vp9_coef_encodings[t]; const vp9_extra_bit *const b = 
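+    // vp9_extra_bits[t] describes the magnitude tail of token t: base_val
+    // is the smallest coefficient magnitude in the token's category and
+    // len the number of probability-coded extra bits written after the
+    // tree-coded part.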
&vp9_extra_bits[t]; int i = 0; - const vp9_prob *pp; int v = a->value; int n = a->len; - vp9_prob probs[ENTROPY_NODES]; - - if (t >= TWO_TOKEN) { - vp9_model_to_full_probs(p->context_tree, probs); - pp = probs; - } else { - pp = p->context_tree; - } - assert(pp != 0); /* skip one or two nodes */ if (p->skip_eob_node) { @@ -287,11 +139,24 @@ static void pack_mb_tokens(vp9_writer* const w, i = 2 * p->skip_eob_node; } - do { - const int bb = (v >> --n) & 1; - vp9_write(w, bb, pp[i >> 1]); - i = vp9_coef_tree[i + bb]; - } while (n); + // TODO(jbb): expanding this can lead to big gains. It allows + // much better branch prediction and would enable us to avoid numerous + // lookups and compares. + + // If we have a token that's in the constrained set, the coefficient tree + // is split into two treed writes. The first treed write takes care of the + // unconstrained nodes. The second treed write takes care of the + // constrained nodes. + if (t >= TWO_TOKEN && t < EOB_TOKEN) { + int len = UNCONSTRAINED_NODES - p->skip_eob_node; + int bits = v >> (n - len); + vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i); + vp9_write_tree(w, vp9_coef_con_tree, + vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1], + v, n - len, 0); + } else { + vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i); + } if (b->base_val) { const int e = p->extra, l = b->len; @@ -317,231 +182,190 @@ static void pack_mb_tokens(vp9_writer* const w, *tp = p + (p->token == EOSB_TOKEN); } -static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode, - const vp9_prob *p) { - assert(is_inter_mode(mode)); - write_token(w, vp9_inter_mode_tree, p, - &vp9_inter_mode_encodings[INTER_OFFSET(mode)]); -} - - static void write_segment_id(vp9_writer *w, const struct segmentation *seg, int segment_id) { if (seg->enabled && seg->update_map) - treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3); + vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0); } // This function encodes the reference frame -static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) { - VP9_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &cpi->mb; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *mi = &xd->mi_8x8[0]->mbmi; - const int segment_id = mi->segment_id; - int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id, - SEG_LVL_REF_FRAME); +static void write_ref_frames(const VP9_COMP *cpi, vp9_writer *w) { + const VP9_COMMON *const cm = &cpi->common; + const MACROBLOCKD *const xd = &cpi->mb.e_mbd; + const MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; + const int is_compound = has_second_ref(mbmi); + const int segment_id = mbmi->segment_id; + // If segment level coding of this signal is disabled... 
// or the segment allows multiple reference frame options - if (!seg_ref_active) { + if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { + assert(!is_compound); + assert(mbmi->ref_frame[0] == + vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME)); + } else { // does the feature use compound prediction or not // (if not specified at the frame/segment level) - if (cm->comp_pred_mode == HYBRID_PREDICTION) { - vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME, - vp9_get_pred_prob_comp_inter_inter(cm, xd)); + if (cm->reference_mode == REFERENCE_MODE_SELECT) { + vp9_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd)); } else { - assert((mi->ref_frame[1] <= INTRA_FRAME) == - (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY)); + assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE)); } - if (mi->ref_frame[1] > INTRA_FRAME) { - vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME, + if (is_compound) { + vp9_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME, vp9_get_pred_prob_comp_ref_p(cm, xd)); } else { - vp9_write(bc, mi->ref_frame[0] != LAST_FRAME, - vp9_get_pred_prob_single_ref_p1(cm, xd)); - if (mi->ref_frame[0] != LAST_FRAME) - vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME, - vp9_get_pred_prob_single_ref_p2(cm, xd)); + const int bit0 = mbmi->ref_frame[0] != LAST_FRAME; + vp9_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd)); + if (bit0) { + const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME; + vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd)); + } } - } else { - assert(mi->ref_frame[1] <= INTRA_FRAME); - assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) == - mi->ref_frame[0]); } - - // If using the prediction model we have nothing further to do because - // the reference frame is fully coded by the segment. 
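+    // To summarize the tree above: under REFERENCE_MODE_SELECT one bit
+    // chooses compound vs. single prediction per block; compound then
+    // spends one bit on whether ref_frame[0] is GOLDEN_FRAME, while single
+    // prediction spends up to two bits (not LAST first, then GOLDEN vs.
+    // ALTREF).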
} -static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) { +static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi, + vp9_writer *w) { VP9_COMMON *const cm = &cpi->common; const nmv_context *nmvc = &cm->fc.nmvc; - MACROBLOCK *const x = &cpi->mb; - MACROBLOCKD *const xd = &x->e_mbd; - struct segmentation *seg = &cm->seg; - MB_MODE_INFO *const mi = &m->mbmi; - const MV_REFERENCE_FRAME rf = mi->ref_frame[0]; - const MB_PREDICTION_MODE mode = mi->mode; - const int segment_id = mi->segment_id; - int skip_coeff; - const BLOCK_SIZE bsize = mi->sb_type; + const MACROBLOCK *const x = &cpi->mb; + const MACROBLOCKD *const xd = &x->e_mbd; + const struct segmentation *const seg = &cm->seg; + const MB_MODE_INFO *const mbmi = &mi->mbmi; + const MB_PREDICTION_MODE mode = mbmi->mode; + const int segment_id = mbmi->segment_id; + const BLOCK_SIZE bsize = mbmi->sb_type; const int allow_hp = cm->allow_high_precision_mv; - -#ifdef ENTROPY_STATS - active_section = 9; -#endif + const int is_inter = is_inter_block(mbmi); + const int is_compound = has_second_ref(mbmi); + int skip, ref; if (seg->update_map) { if (seg->temporal_update) { - const int pred_flag = mi->seg_id_predicted; + const int pred_flag = mbmi->seg_id_predicted; vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); - vp9_write(bc, pred_flag, pred_prob); + vp9_write(w, pred_flag, pred_prob); if (!pred_flag) - write_segment_id(bc, seg, segment_id); + write_segment_id(w, seg, segment_id); } else { - write_segment_id(bc, seg, segment_id); + write_segment_id(w, seg, segment_id); } } - skip_coeff = write_skip_coeff(cpi, segment_id, m, bc); + skip = write_skip(cpi, segment_id, mi, w); if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) - vp9_write(bc, rf != INTRA_FRAME, - vp9_get_pred_prob_intra_inter(cm, xd)); + vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd)); if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && - !(rf != INTRA_FRAME && - (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { - write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc); + !(is_inter && + (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { + write_selected_tx_size(cpi, mbmi->tx_size, bsize, w); } - if (rf == INTRA_FRAME) { -#ifdef ENTROPY_STATS - active_section = 6; -#endif - + if (!is_inter) { if (bsize >= BLOCK_8X8) { - write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]); + write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]); } else { int idx, idy; - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; - for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { - for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { - const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode; - write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]); + const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; + const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; + for (idy = 0; idy < 2; idy += num_4x4_h) { + for (idx = 0; idx < 2; idx += num_4x4_w) { + const MB_PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode; + write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]); } } } - write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]); + write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]); } else { - vp9_prob *mv_ref_p; - encode_ref_frame(cpi, bc); - mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mode_context[rf]]; - -#ifdef ENTROPY_STATS - active_section = 3; -#endif + 
const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]]; + const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx]; + write_ref_frames(cpi, w); // If segment skip is not enabled code the mode. if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { if (bsize >= BLOCK_8X8) { - write_sb_mv_ref(bc, mode, mv_ref_p); - ++cm->counts.inter_mode[mi->mode_context[rf]] - [INTER_OFFSET(mode)]; + write_inter_mode(w, mode, inter_probs); + ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)]; } } - if (cm->mcomp_filter_type == SWITCHABLE) { + if (cm->interp_filter == SWITCHABLE) { const int ctx = vp9_get_pred_context_switchable_interp(xd); - write_token(bc, vp9_switchable_interp_tree, - cm->fc.switchable_interp_prob[ctx], - &vp9_switchable_interp_encodings[mi->interp_filter]); + vp9_write_token(w, vp9_switchable_interp_tree, + cm->fc.switchable_interp_prob[ctx], + &switchable_interp_encodings[mbmi->interp_filter]); } else { - assert(mi->interp_filter == cm->mcomp_filter_type); + assert(mbmi->interp_filter == cm->interp_filter); } if (bsize < BLOCK_8X8) { - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; + const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; + const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; int idx, idy; - for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { - for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { + for (idy = 0; idy < 2; idy += num_4x4_h) { + for (idx = 0; idx < 2; idx += num_4x4_w) { const int j = idy * 2 + idx; - const MB_PREDICTION_MODE blockmode = m->bmi[j].as_mode; - write_sb_mv_ref(bc, blockmode, mv_ref_p); - ++cm->counts.inter_mode[mi->mode_context[rf]] - [INTER_OFFSET(blockmode)]; - - if (blockmode == NEWMV) { -#ifdef ENTROPY_STATS - active_section = 11; -#endif - vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[0].as_mv, - &mi->best_mv[0].as_mv, nmvc, allow_hp); - - if (has_second_ref(mi)) - vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[1].as_mv, - &mi->best_mv[1].as_mv, nmvc, allow_hp); + const MB_PREDICTION_MODE b_mode = mi->bmi[j].as_mode; + write_inter_mode(w, b_mode, inter_probs); + ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(b_mode)]; + if (b_mode == NEWMV) { + for (ref = 0; ref < 1 + is_compound; ++ref) + vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv, + &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, + nmvc, allow_hp); } } } - } else if (mode == NEWMV) { -#ifdef ENTROPY_STATS - active_section = 5; -#endif - vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv, - &mi->best_mv[0].as_mv, nmvc, allow_hp); - - if (has_second_ref(mi)) - vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv, - &mi->best_mv[1].as_mv, nmvc, allow_hp); + } else { + if (mode == NEWMV) { + for (ref = 0; ref < 1 + is_compound; ++ref) + vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, + &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc, + allow_hp); + } } } } static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, - vp9_writer *bc) { + vp9_writer *w) { const VP9_COMMON *const cm = &cpi->common; const MACROBLOCKD *const xd = &cpi->mb.e_mbd; const struct segmentation *const seg = &cm->seg; - MODE_INFO *m = mi_8x8[0]; - const int ym = m->mbmi.mode; - const int segment_id = m->mbmi.segment_id; - MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride]; - MODE_INFO *left_mi = xd->left_available ? mi_8x8[-1] : NULL; + const MODE_INFO *const mi = mi_8x8[0]; + const MODE_INFO *const above_mi = mi_8x8[-xd->mode_info_stride]; + const MODE_INFO *const left_mi = xd->left_available ? 
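+  // The above/left neighbor modes index the key-frame y-mode probability
+  // table (vp9_kf_y_mode_prob[above][left]) via get_y_mode_probs() below.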
mi_8x8[-1] : NULL; + const MB_MODE_INFO *const mbmi = &mi->mbmi; + const BLOCK_SIZE bsize = mbmi->sb_type; if (seg->update_map) - write_segment_id(bc, seg, m->mbmi.segment_id); + write_segment_id(w, seg, mbmi->segment_id); - write_skip_coeff(cpi, segment_id, m, bc); + write_skip(cpi, mbmi->segment_id, mi, w); - if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) - write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc); + if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) + write_selected_tx_size(cpi, mbmi->tx_size, bsize, w); - if (m->mbmi.sb_type >= BLOCK_8X8) { - const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0); - const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0); - write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]); + if (bsize >= BLOCK_8X8) { + write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0)); } else { + const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; + const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; int idx, idy; - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type]; - for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { - for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { - int i = idy * 2 + idx; - const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i); - const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, i); - const int bm = m->bmi[i].as_mode; -#ifdef ENTROPY_STATS - ++intra_mode_stats[A][L][bm]; -#endif - write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]); + + for (idy = 0; idy < 2; idy += num_4x4_h) { + for (idx = 0; idx < 2; idx += num_4x4_w) { + const int block = idy * 2 + idx; + write_intra_mode(w, mi->bmi[block].as_mode, + get_y_mode_probs(mi, above_mi, left_mi, block)); } } } - write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]); + write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]); } static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, @@ -560,14 +384,8 @@ static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, cm->mi_rows, cm->mi_cols); if (frame_is_intra_only(cm)) { write_mb_modes_kf(cpi, xd->mi_8x8, w); -#ifdef ENTROPY_STATS - active_section = 8; -#endif } else { pack_inter_mode_mvs(cpi, m, w); -#ifdef ENTROPY_STATS - active_section = 1; -#endif } assert(*tok < tok_end); @@ -585,7 +403,7 @@ static void write_partition(VP9_COMP *cpi, int hbs, int mi_row, int mi_col, const int has_cols = (mi_col + hbs) < cm->mi_cols; if (has_rows && has_cols) { - write_token(w, vp9_partition_tree, probs, &vp9_partition_encodings[p]); + vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]); } else if (!has_rows && has_cols) { assert(p == PARTITION_SPLIT || p == PARTITION_HORZ); vp9_write(w, p == PARTITION_SPLIT, probs[1]); @@ -667,17 +485,15 @@ static void write_modes(VP9_COMP *cpi, const TileInfo *const tile, static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) { vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size]; vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size]; - unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] = + unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = cpi->common.counts.eob_branch[tx_size]; vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size]; int i, j, k, l, m; - for (i = 0; i < BLOCK_TYPES; ++i) { + for (i = 0; i < PLANE_TYPES; ++i) { for (j = 0; j < REF_TYPES; ++j) { for (k = 0; k < 
COEF_BANDS; ++k) { - for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { - if (l >= 3 && k == 0) - continue; + for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { vp9_tree_probs_from_distribution(vp9_coef_tree, coef_branch_ct[i][j][k][l], coef_counts[i][j][k][l]); @@ -687,28 +503,12 @@ static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) { coef_probs[i][j][k][l][m] = get_binary_prob( coef_branch_ct[i][j][k][l][m][0], coef_branch_ct[i][j][k][l][m][1]); -#ifdef ENTROPY_STATS - if (!cpi->dummy_packing) { - int t; - for (t = 0; t < MAX_ENTROPY_TOKENS; ++t) - context_counters[tx_size][i][j][k][l][t] += - coef_counts[i][j][k][l][t]; - context_counters[tx_size][i][j][k][l][MAX_ENTROPY_TOKENS] += - eob_branch_ct[i][j][k][l]; - } -#endif } } } } } -static void build_coeff_contexts(VP9_COMP *cpi) { - TX_SIZE t; - for (t = TX_4X4; t <= TX_32X32; t++) - build_tree_distribution(cpi, t); -} - static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, TX_SIZE tx_size) { vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size]; @@ -723,22 +523,19 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, /* dry run to see if there is any update at all needed */ int savings = 0; int update[2] = {0, 0}; - for (i = 0; i < BLOCK_TYPES; ++i) { + for (i = 0; i < PLANE_TYPES; ++i) { for (j = 0; j < REF_TYPES; ++j) { for (k = 0; k < COEF_BANDS; ++k) { - for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { + for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { for (t = 0; t < entropy_nodes_update; ++t) { vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t]; int s; int u = 0; - - if (l >= 3 && k == 0) - continue; if (t == PIVOT_NODE) s = vp9_prob_diff_update_savings_search_model( frame_branch_ct[i][j][k][l][0], - old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); + old_frame_coef_probs[i][j][k][l], &newp, upd); else s = vp9_prob_diff_update_savings_search( frame_branch_ct[i][j][k][l][t], oldp, &newp, upd); @@ -762,10 +559,10 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, return; } vp9_write_bit(bc, 1); - for (i = 0; i < BLOCK_TYPES; ++i) { + for (i = 0; i < PLANE_TYPES; ++i) { for (j = 0; j < REF_TYPES; ++j) { for (k = 0; k < COEF_BANDS; ++k) { - for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { + for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { // calc probs and branch cts for this frame only for (t = 0; t < entropy_nodes_update; ++t) { vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; @@ -773,12 +570,10 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, const vp9_prob upd = DIFF_UPDATE_PROB; int s; int u = 0; - if (l >= 3 && k == 0) - continue; if (t == PIVOT_NODE) s = vp9_prob_diff_update_savings_search_model( frame_branch_ct[i][j][k][l][0], - old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); + old_frame_coef_probs[i][j][k][l], &newp, upd); else s = vp9_prob_diff_update_savings_search( frame_branch_ct[i][j][k][l][t], @@ -786,10 +581,6 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, if (s > 0 && newp != *oldp) u = 1; vp9_write(bc, u, upd); -#ifdef ENTROPY_STATS - if (!cpi->dummy_packing) - ++tree_update_hist[tx_size][i][j][k][l][t][u]; -#endif if (u) { /* send/use new probability */ vp9_write_prob_diff_update(bc, newp, *oldp); @@ -806,25 +597,23 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, case 1: case 2: { const int prev_coef_contexts_to_update = - (cpi->sf.use_fast_coef_updates == 2 ?
- PREV_COEF_CONTEXTS >> 1 : PREV_COEF_CONTEXTS); + cpi->sf.use_fast_coef_updates == 2 ? COEFF_CONTEXTS >> 1 + : COEFF_CONTEXTS; const int coef_band_to_update = - (cpi->sf.use_fast_coef_updates == 2 ? - COEF_BANDS >> 1 : COEF_BANDS); + cpi->sf.use_fast_coef_updates == 2 ? COEF_BANDS >> 1 + : COEF_BANDS; int updates = 0; int noupdates_before_first = 0; - for (i = 0; i < BLOCK_TYPES; ++i) { + for (i = 0; i < PLANE_TYPES; ++i) { for (j = 0; j < REF_TYPES; ++j) { for (k = 0; k < COEF_BANDS; ++k) { - for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { + for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { // calc probs and branch cts for this frame only for (t = 0; t < entropy_nodes_update; ++t) { vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t; int s; int u = 0; - if (l >= 3 && k == 0) - continue; if (l >= prev_coef_contexts_to_update || k >= coef_band_to_update) { u = 0; @@ -832,7 +621,7 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, if (t == PIVOT_NODE) s = vp9_prob_diff_update_savings_search_model( frame_branch_ct[i][j][k][l][0], - old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); + old_frame_coef_probs[i][j][k][l], &newp, upd); else s = vp9_prob_diff_update_savings_search( frame_branch_ct[i][j][k][l][t], @@ -843,10 +632,6 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, updates += u; if (u == 0 && updates == 0) { noupdates_before_first++; -#ifdef ENTROPY_STATS - if (!cpi->dummy_packing) - ++tree_update_hist[tx_size][i][j][k][l][t][u]; -#endif continue; } if (u == 1 && updates == 1) { @@ -857,10 +642,6 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, vp9_write(bc, 0, upd); } vp9_write(bc, u, upd); -#ifdef ENTROPY_STATS - if (!cpi->dummy_packing) - ++tree_update_hist[tx_size][i][j][k][l][t][u]; -#endif if (u) { /* send/use new probability */ vp9_write_prob_diff_update(bc, newp, *oldp); @@ -882,25 +663,17 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, } } -static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) { +static void update_coef_probs(VP9_COMP *cpi, vp9_writer* w) { const TX_MODE tx_mode = cpi->common.tx_mode; - + const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode]; + TX_SIZE tx_size; vp9_clear_system_state(); - // Build the cofficient contexts based on counts collected in encode loop - build_coeff_contexts(cpi); - - update_coef_probs_common(bc, cpi, TX_4X4); - - // do not do this if not even allowed - if (tx_mode > ONLY_4X4) - update_coef_probs_common(bc, cpi, TX_8X8); - - if (tx_mode > ALLOW_8X8) - update_coef_probs_common(bc, cpi, TX_16X16); + for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size) + build_tree_distribution(cpi, tx_size); - if (tx_mode > ALLOW_16X16) - update_coef_probs_common(bc, cpi, TX_32X32); + for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) + update_coef_probs_common(w, cpi, tx_size); } static void encode_loopfilter(struct loopfilter *lf, @@ -916,38 +689,27 @@ static void encode_loopfilter(struct loopfilter *lf, vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled); if (lf->mode_ref_delta_enabled) { - // Do the deltas need to be updated vp9_wb_write_bit(wb, lf->mode_ref_delta_update); if (lf->mode_ref_delta_update) { - // Send update for (i = 0; i < MAX_REF_LF_DELTAS; i++) { const int delta = lf->ref_deltas[i]; - - // Frame level data - if (delta != lf->last_ref_deltas[i]) { + const int changed = delta != lf->last_ref_deltas[i]; + vp9_wb_write_bit(wb, changed); + if 
(changed) { lf->last_ref_deltas[i] = delta; - vp9_wb_write_bit(wb, 1); - - assert(delta != 0); vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6); vp9_wb_write_bit(wb, delta < 0); - } else { - vp9_wb_write_bit(wb, 0); } } - // Send update for (i = 0; i < MAX_MODE_LF_DELTAS; i++) { const int delta = lf->mode_deltas[i]; - if (delta != lf->last_mode_deltas[i]) { + const int changed = delta != lf->last_mode_deltas[i]; + vp9_wb_write_bit(wb, changed); + if (changed) { lf->last_mode_deltas[i] = delta; - vp9_wb_write_bit(wb, 1); - - assert(delta != 0); vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6); vp9_wb_write_bit(wb, delta < 0); - } else { - vp9_wb_write_bit(wb, 0); } } } @@ -1024,10 +786,10 @@ static void encode_segmentation(VP9_COMP *cpi, const int data_max = vp9_seg_feature_data_max(j); if (vp9_is_segfeature_signed(j)) { - vp9_encode_unsigned_max(wb, abs(data), data_max); + encode_unsigned_max(wb, abs(data), data_max); vp9_wb_write_bit(wb, data < 0); } else { - vp9_encode_unsigned_max(wb, data, data_max); + encode_unsigned_max(wb, data, data_max); } } } @@ -1036,9 +798,7 @@ static void encode_segmentation(VP9_COMP *cpi, } -static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) { - VP9_COMMON *const cm = &cpi->common; - +static void encode_txfm_probs(VP9_COMMON *cm, vp9_writer *w) { // Mode vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2); if (cm->tx_mode >= ALLOW_32X32) @@ -1071,26 +831,20 @@ static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) { vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j], ct_32x32p[j]); } -#ifdef MODE_STATS - if (!cpi->dummy_packing) - update_tx_count_stats(cm); -#endif } } -static void write_interp_filter_type(INTERPOLATION_TYPE type, - struct vp9_write_bit_buffer *wb) { - const int type_to_literal[] = { 1, 0, 2, 3 }; +static void write_interp_filter(INTERP_FILTER filter, + struct vp9_write_bit_buffer *wb) { + const int filter_to_literal[] = { 1, 0, 2, 3 }; - vp9_wb_write_bit(wb, type == SWITCHABLE); - if (type != SWITCHABLE) - vp9_wb_write_literal(wb, type_to_literal[type], 2); + vp9_wb_write_bit(wb, filter == SWITCHABLE); + if (filter != SWITCHABLE) + vp9_wb_write_literal(wb, filter_to_literal[filter], 2); } -static void fix_mcomp_filter_type(VP9_COMP *cpi) { - VP9_COMMON *const cm = &cpi->common; - - if (cm->mcomp_filter_type == SWITCHABLE) { +static void fix_interp_filter(VP9_COMMON *cm) { + if (cm->interp_filter == SWITCHABLE) { // Check to see if only one of the filters is actually used int count[SWITCHABLE_FILTERS]; int i, j, c = 0; @@ -1104,7 +858,7 @@ static void fix_mcomp_filter_type(VP9_COMP *cpi) { // Only one filter is used. 
So set the filter at frame level for (i = 0; i < SWITCHABLE_FILTERS; ++i) { if (count[i]) { - cm->mcomp_filter_type = i; + cm->interp_filter = i; break; } } @@ -1207,7 +961,7 @@ static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { vp9_stop_encode(&residual_bc); if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) { // size of this tile - write_be32(data_ptr + total_size, residual_bc.pos); + mem_put_be32(data_ptr + total_size, residual_bc.pos); total_size += 4; } @@ -1218,9 +972,8 @@ static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { return total_size; } -static void write_display_size(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) { - VP9_COMMON *const cm = &cpi->common; - +static void write_display_size(const VP9_COMMON *cm, + struct vp9_write_bit_buffer *wb) { const int scaling_active = cm->width != cm->display_width || cm->height != cm->display_height; vp9_wb_write_bit(wb, scaling_active); @@ -1230,24 +983,22 @@ static void write_display_size(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) { } } -static void write_frame_size(VP9_COMP *cpi, +static void write_frame_size(const VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) { - VP9_COMMON *const cm = &cpi->common; vp9_wb_write_literal(wb, cm->width - 1, 16); vp9_wb_write_literal(wb, cm->height - 1, 16); - write_display_size(cpi, wb); + write_display_size(cm, wb); } static void write_frame_size_with_refs(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) { VP9_COMMON *const cm = &cpi->common; - int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx, - cpi->alt_fb_idx}; - int i, found = 0; + int found = 0; - for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { - YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]]; + MV_REFERENCE_FRAME ref_frame; + for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { + YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame); found = cm->width == cfg->y_crop_width && cm->height == cfg->y_crop_height; @@ -1267,7 +1018,7 @@ static void write_frame_size_with_refs(VP9_COMP *cpi, vp9_wb_write_literal(wb, cm->height - 1, 16); } - write_display_size(cpi, wb); + write_display_size(cm, wb); } static void write_sync_code(struct vp9_write_bit_buffer *wb) { @@ -1309,10 +1060,8 @@ static void write_uncompressed_header(VP9_COMP *cpi, vp9_wb_write_bit(wb, 0); // has extra plane } - write_frame_size(cpi, wb); + write_frame_size(cm, wb); } else { - const int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx, - cpi->alt_fb_idx}; if (!cm->show_frame) vp9_wb_write_bit(wb, cm->intra_only); @@ -1322,22 +1071,23 @@ static void write_uncompressed_header(VP9_COMP *cpi, if (cm->intra_only) { write_sync_code(wb); - vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES); - write_frame_size(cpi, wb); + vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES); + write_frame_size(cm, wb); } else { - int i; - vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES); - for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { - vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2); - vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]); + MV_REFERENCE_FRAME ref_frame; + vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES); + for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { + vp9_wb_write_literal(wb, get_ref_frame_idx(cpi, ref_frame), + REF_FRAMES_LOG2); + vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]); } write_frame_size_with_refs(cpi, wb); vp9_wb_write_bit(wb, cm->allow_high_precision_mv); - 
fix_mcomp_filter_type(cpi); - write_interp_filter_type(cm->mcomp_filter_type, wb); + fix_interp_filter(cm); + write_interp_filter(cm->interp_filter, wb); } } @@ -1346,7 +1096,7 @@ static void write_uncompressed_header(VP9_COMP *cpi, vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode); } - vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2); + vp9_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2); encode_loopfilter(&cm->lf, wb); encode_quantization(cm, wb); @@ -1366,36 +1116,30 @@ static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { if (xd->lossless) cm->tx_mode = ONLY_4X4; else - encode_txfm_probs(cpi, &header_bc); + encode_txfm_probs(cm, &header_bc); update_coef_probs(cpi, &header_bc); - -#ifdef ENTROPY_STATS - active_section = 2; -#endif - - vp9_update_skip_probs(cpi, &header_bc); + update_skip_probs(cm, &header_bc); if (!frame_is_intra_only(cm)) { int i; -#ifdef ENTROPY_STATS - active_section = 1; -#endif - update_inter_mode_probs(cm, &header_bc); + for (i = 0; i < INTER_MODE_CONTEXTS; ++i) + prob_diff_update(vp9_inter_mode_tree, cm->fc.inter_mode_probs[i], + cm->counts.inter_mode[i], INTER_MODES, &header_bc); + vp9_zero(cm->counts.inter_mode); - if (cm->mcomp_filter_type == SWITCHABLE) - update_switchable_interp_probs(cpi, &header_bc); + if (cm->interp_filter == SWITCHABLE) + update_switchable_interp_probs(cm, &header_bc); for (i = 0; i < INTRA_INTER_CONTEXTS; i++) vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i], - cpi->intra_inter_count[i]); + cm->counts.intra_inter[i]); if (cm->allow_comp_inter_inter) { - const int comp_pred_mode = cpi->common.comp_pred_mode; - const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY; - const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION; + const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE; + const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT; vp9_write_bit(&header_bc, use_compound_pred); if (use_compound_pred) { @@ -1403,34 +1147,33 @@ static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { if (use_hybrid_pred) for (i = 0; i < COMP_INTER_CONTEXTS; i++) vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i], - cpi->comp_inter_count[i]); + cm->counts.comp_inter[i]); } } - if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) { + if (cm->reference_mode != COMPOUND_REFERENCE) { for (i = 0; i < REF_CONTEXTS; i++) { vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0], - cpi->single_ref_count[i][0]); + cm->counts.single_ref[i][0]); vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1], - cpi->single_ref_count[i][1]); + cm->counts.single_ref[i][1]); } } - if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) + if (cm->reference_mode != SINGLE_REFERENCE) for (i = 0; i < REF_CONTEXTS; i++) vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i], - cpi->comp_ref_count[i]); + cm->counts.comp_ref[i]); - update_mbintra_mode_probs(cpi, &header_bc); + for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) + prob_diff_update(vp9_intra_mode_tree, cm->fc.y_mode_prob[i], + cm->counts.y_mode[i], INTRA_MODES, &header_bc); - for (i = 0; i < PARTITION_CONTEXTS; ++i) { - unsigned int bct[PARTITION_TYPES - 1][2]; - update_mode(&header_bc, PARTITION_TYPES, vp9_partition_tree, - fc->partition_prob[i], bct, - (unsigned int *)cpi->partition_count[i]); - } + for (i = 0; i < PARTITION_CONTEXTS; ++i) + prob_diff_update(vp9_partition_tree, fc->partition_prob[i], + cm->counts.partition[i], PARTITION_TYPES, &header_bc); - 
vp9_write_nmv_probs(cpi, cm->allow_high_precision_mv, &header_bc); + vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc); } vp9_stop_encode(&header_bc); @@ -1439,7 +1182,7 @@ static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { return header_bc.pos; } -void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) { +void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) { uint8_t *data = dest; size_t first_part_size; struct vp9_write_bit_buffer wb = {data, 0}; @@ -1453,71 +1196,15 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) { vp9_compute_update_table(); -#ifdef ENTROPY_STATS - if (cm->frame_type == INTER_FRAME) - active_section = 0; - else - active_section = 7; -#endif - - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); first_part_size = write_compressed_header(cpi, data); data += first_part_size; - vp9_wb_write_literal(&saved_wb, first_part_size, 16); + // TODO(jbb): Figure out what to do if first_part_size > 16 bits. + vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16); data += encode_tiles(cpi, data); *size = data - dest; } -#ifdef ENTROPY_STATS -static void print_tree_update_for_type(FILE *f, - vp9_coeff_stats *tree_update_hist, - int block_types, const char *header) { - int i, j, k, l, m; - - fprintf(f, "const vp9_coeff_prob %s = {\n", header); - for (i = 0; i < block_types; i++) { - fprintf(f, " { \n"); - for (j = 0; j < REF_TYPES; j++) { - fprintf(f, " { \n"); - for (k = 0; k < COEF_BANDS; k++) { - fprintf(f, " {\n"); - for (l = 0; l < PREV_COEF_CONTEXTS; l++) { - fprintf(f, " {"); - for (m = 0; m < ENTROPY_NODES; m++) { - fprintf(f, "%3d, ", - get_binary_prob(tree_update_hist[i][j][k][l][m][0], - tree_update_hist[i][j][k][l][m][1])); - } - fprintf(f, "},\n"); - } - fprintf(f, "},\n"); - } - fprintf(f, " },\n"); - } - fprintf(f, " },\n"); - } - fprintf(f, "};\n"); -} - -void print_tree_update_probs() { - FILE *f = fopen("coefupdprob.h", "w"); - fprintf(f, "\n/* Update probabilities for token entropy tree. 
*/\n\n"); - - print_tree_update_for_type(f, tree_update_hist[TX_4X4], BLOCK_TYPES, - "vp9_coef_update_probs_4x4[BLOCK_TYPES]"); - print_tree_update_for_type(f, tree_update_hist[TX_8X8], BLOCK_TYPES, - "vp9_coef_update_probs_8x8[BLOCK_TYPES]"); - print_tree_update_for_type(f, tree_update_hist[TX_16X16], BLOCK_TYPES, - "vp9_coef_update_probs_16x16[BLOCK_TYPES]"); - print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES, - "vp9_coef_update_probs_32x32[BLOCK_TYPES]"); - - fclose(f); - f = fopen("treeupdate.bin", "wb"); - fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f); - fclose(f); -} -#endif diff --git a/libvpx/vp9/encoder/vp9_bitstream.h b/libvpx/vp9/encoder/vp9_bitstream.h index b3dbee1..ddfd0ed 100644 --- a/libvpx/vp9/encoder/vp9_bitstream.h +++ b/libvpx/vp9/encoder/vp9_bitstream.h @@ -12,6 +12,18 @@ #ifndef VP9_ENCODER_VP9_BITSTREAM_H_ #define VP9_ENCODER_VP9_BITSTREAM_H_ -void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *bc); +#ifdef __cplusplus +extern "C" { +#endif + +struct VP9_COMP; + +void vp9_entropy_mode_init(); + +void vp9_pack_bitstream(struct VP9_COMP *cpi, uint8_t *dest, size_t *size); + +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_ENCODER_VP9_BITSTREAM_H_ diff --git a/libvpx/vp9/encoder/vp9_block.h b/libvpx/vp9/encoder/vp9_block.h index 4445970..5d69856 100644 --- a/libvpx/vp9/encoder/vp9_block.h +++ b/libvpx/vp9/encoder/vp9_block.h @@ -11,12 +11,15 @@ #ifndef VP9_ENCODER_VP9_BLOCK_H_ #define VP9_ENCODER_VP9_BLOCK_H_ -#include "vp9/common/vp9_onyx.h" #include "vp9/common/vp9_entropymv.h" #include "vp9/common/vp9_entropy.h" #include "vpx_ports/mem.h" #include "vp9/common/vp9_onyxc_int.h" +#ifdef __cplusplus +extern "C" { +#endif + // motion search site typedef struct { MV mv; @@ -41,12 +44,10 @@ typedef struct { int is_coded; int num_4x4_blk; int skip; - int_mv best_ref_mv; - int_mv second_best_ref_mv; + int_mv best_ref_mv[2]; int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES]; int rate; int distortion; - int64_t intra_error; int best_mode_index; int rddiv; int rdmult; @@ -59,17 +60,14 @@ typedef struct { // motion vector cache for adaptive motion search control in partition // search loop int_mv pred_mv[MAX_REF_FRAMES]; - - // Bit flag for each mode whether it has high error in comparison to others. - unsigned int modes_with_high_error; - - // Bit flag for each ref frame whether it has high error compared to others. - unsigned int frames_with_high_error; + INTERP_FILTER pred_interp_filter; } PICK_MODE_CONTEXT; struct macroblock_plane { DECLARE_ALIGNED(16, int16_t, src_diff[64 * 64]); + int16_t *qcoeff; int16_t *coeff; + uint16_t *eobs; struct buf_2d src; // Quantizer setings @@ -84,8 +82,8 @@ struct macroblock_plane { /* The [2] dimension is for whether we skip the EOB node (i.e. if previous * coefficient in this block was zero) or not. 
*/ -typedef unsigned int vp9_coeff_cost[BLOCK_TYPES][REF_TYPES][COEF_BANDS][2] - [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS]; +typedef unsigned int vp9_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2] + [COEFF_CONTEXTS][ENTROPY_TOKENS]; typedef struct macroblock MACROBLOCK; struct macroblock { @@ -115,6 +113,9 @@ struct macroblock { int mv_best_ref_index[MAX_REF_FRAMES]; unsigned int max_mv_context[MAX_REF_FRAMES]; unsigned int source_variance; + unsigned int pred_sse[MAX_REF_FRAMES]; + int pred_mv_sad[MAX_REF_FRAMES]; + int mode_sad[MAX_REF_FRAMES][INTER_MODES + 1]; int nmvjointcost[MV_JOINTS]; int nmvcosts[2][MV_VALS]; @@ -130,9 +131,9 @@ struct macroblock { int *nmvsadcost_hp[2]; int **mvsadcost; - int mbmode_cost[MB_MODE_COUNT]; + int mbmode_cost[INTRA_MODES]; unsigned inter_mode_cost[INTER_MODE_CONTEXTS][INTER_MODES]; - int intra_uv_mode_cost[2][MB_MODE_COUNT]; + int intra_uv_mode_cost[FRAME_TYPES][INTRA_MODES]; int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES]; int switchable_interp_costs[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS]; @@ -166,9 +167,7 @@ struct macroblock { int skip_encode; // Used to store sub partition's choices. - int fast_ms; int_mv pred_mv[MAX_REF_FRAMES]; - int subblock_ref; // TODO(jingning): Need to refactor the structure arrays that buffers the // coding mode decisions of each partition type. @@ -233,23 +232,8 @@ static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x, BLOCK_SIZE bsize) { } } -struct rdcost_block_args { - MACROBLOCK *x; - ENTROPY_CONTEXT t_above[16]; - ENTROPY_CONTEXT t_left[16]; - TX_SIZE tx_size; - int bw; - int bh; - int rate; - int64_t dist; - int64_t sse; - int this_rate; - int64_t this_dist; - int64_t this_sse; - int64_t this_rd; - int64_t best_rd; - int skip; - const int16_t *scan, *nb; -}; +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_ENCODER_VP9_BLOCK_H_ diff --git a/libvpx/vp9/encoder/vp9_boolhuff.c b/libvpx/vp9/encoder/vp9_cost.c index 32c136e..1c3c3d2 100644 --- a/libvpx/vp9/encoder/vp9_boolhuff.c +++ b/libvpx/vp9/encoder/vp9_cost.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,18 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include <assert.h> -#include "vp9/encoder/vp9_boolhuff.h" -#include "vp9/common/vp9_entropy.h" - -#if defined(SECTIONBITS_OUTPUT) -unsigned __int64 Sectionbits[500]; - -#endif - -#ifdef ENTROPY_STATS -unsigned int active_section = 0; -#endif +#include "vp9/encoder/vp9_cost.h" const unsigned int vp9_prob_cost[256] = { 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, @@ -45,24 +34,29 @@ const unsigned int vp9_prob_cost[256] = { 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 1}; -void vp9_start_encode(vp9_writer *br, uint8_t *source) { - br->lowvalue = 0; - br->range = 255; - br->value = 0; - br->count = -24; - br->buffer = source; - br->pos = 0; - vp9_write_bit(br, 0); -} +static void cost(int *costs, vp9_tree tree, const vp9_prob *probs, + int i, int c) { + const vp9_prob prob = probs[i / 2]; + int b; -void vp9_stop_encode(vp9_writer *br) { - int i; + for (b = 0; b <= 1; ++b) { + const int cc = c + vp9_cost_bit(prob, b); + const vp9_tree_index ii = tree[i + b]; - for (i = 0; i < 32; i++) - vp9_write_bit(br, 0); + if (ii <= 0) + costs[-ii] = cc; + else + cost(costs, tree, probs, ii, cc); + } +} - // Ensure there's no ambigous collision with any index marker bytes - if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0) - br->buffer[br->pos++] = 0; +void vp9_cost_tokens(int *costs, const vp9_prob *probs, vp9_tree tree) { + cost(costs, tree, probs, 0, 0); } +void vp9_cost_tokens_skip(int *costs, const vp9_prob *probs, vp9_tree tree) { + assert(tree[0] <= 0 && tree[1] > 0); + + costs[-tree[0]] = vp9_cost_bit(probs[0], 0); + cost(costs, tree, probs, 2, 0); +} diff --git a/libvpx/vp9/encoder/vp9_cost.h b/libvpx/vp9/encoder/vp9_cost.h new file mode 100644 index 0000000..6d2b940 --- /dev/null +++ b/libvpx/vp9/encoder/vp9_cost.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VP9_ENCODER_VP9_COST_H_ +#define VP9_ENCODER_VP9_COST_H_ + +#include "vp9/common/vp9_prob.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern const unsigned int vp9_prob_cost[256]; + +#define vp9_cost_zero(prob) (vp9_prob_cost[prob]) + +#define vp9_cost_one(prob) vp9_cost_zero(vp9_complement(prob)) + +#define vp9_cost_bit(prob, bit) vp9_cost_zero((bit) ? 
vp9_complement(prob) \ + : (prob)) + +static INLINE unsigned int cost_branch256(const unsigned int ct[2], + vp9_prob p) { + return ct[0] * vp9_cost_zero(p) + ct[1] * vp9_cost_one(p); +} + +static INLINE int treed_cost(vp9_tree tree, const vp9_prob *probs, + int bits, int len) { + int cost = 0; + vp9_tree_index i = 0; + + do { + const int bit = (bits >> --len) & 1; + cost += vp9_cost_bit(probs[i >> 1], bit); + i = tree[i + bit]; + } while (len); + + return cost; +} + +void vp9_cost_tokens(int *costs, const vp9_prob *probs, vp9_tree tree); +void vp9_cost_tokens_skip(int *costs, const vp9_prob *probs, vp9_tree tree); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_ENCODER_VP9_COST_H_ diff --git a/libvpx/vp9/encoder/vp9_dct.c b/libvpx/vp9/encoder/vp9_dct.c index 065992a..d523239 100644 --- a/libvpx/vp9/encoder/vp9_dct.c +++ b/libvpx/vp9/encoder/vp9_dct.c @@ -18,7 +18,11 @@ #include "vp9/common/vp9_idct.h" #include "vp9/common/vp9_systemdependent.h" -#include "vp9/encoder/vp9_dct.h" +static INLINE int fdct_round_shift(int input) { + int rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS); + assert(INT16_MIN <= rv && rv <= INT16_MAX); + return rv; +} static void fdct4(const int16_t *input, int16_t *output) { int16_t step[4]; @@ -31,19 +35,19 @@ static void fdct4(const int16_t *input, int16_t *output) { temp1 = (step[0] + step[1]) * cospi_16_64; temp2 = (step[0] - step[1]) * cospi_16_64; - output[0] = dct_const_round_shift(temp1); - output[2] = dct_const_round_shift(temp2); + output[0] = fdct_round_shift(temp1); + output[2] = fdct_round_shift(temp2); temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64; temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64; - output[1] = dct_const_round_shift(temp1); - output[3] = dct_const_round_shift(temp2); + output[1] = fdct_round_shift(temp1); + output[3] = fdct_round_shift(temp2); } void vp9_fdct4x4_c(const int16_t *input, int16_t *output, int stride) { // The 2D transform is done with two passes which are actually pretty // similar. In the first one, we transform the columns and transpose // the results. In the second one, we transform the rows. To achieve that, - // as the first pass results are transposed, we tranpose the columns (that + // as the first pass results are transposed, we transpose the columns (that // is the transposed rows) and transpose the results (so that it goes back // in normal/row positions). int pass; @@ -80,12 +84,12 @@ void vp9_fdct4x4_c(const int16_t *input, int16_t *output, int stride) { step[3] = input[0] - input[3]; temp1 = (step[0] + step[1]) * cospi_16_64; temp2 = (step[0] - step[1]) * cospi_16_64; - out[0] = dct_const_round_shift(temp1); - out[2] = dct_const_round_shift(temp2); + out[0] = fdct_round_shift(temp1); + out[2] = fdct_round_shift(temp2); temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64; temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64; - out[1] = dct_const_round_shift(temp1); - out[3] = dct_const_round_shift(temp2); + out[1] = fdct_round_shift(temp1); + out[3] = fdct_round_shift(temp2); // Do next column (which is a transposed row in second/horizontal pass) in++; out += 4; @@ -138,10 +142,10 @@ static void fadst4(const int16_t *input, int16_t *output) { s3 = x2 - x0 + x3; // 1-D transform scaling factor is sqrt(2). 
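+  // fdct_round_shift() divides by 2^DCT_CONST_BITS (2^14) with rounding;
+  // e.g. with cospi_16_64 == 11585, fdct_round_shift(2 * cospi_16_64) ==
+  // (23170 + (1 << 13)) >> 14 == 1, the nearest integer to 23170 / 16384.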
- output[0] = dct_const_round_shift(s0); - output[1] = dct_const_round_shift(s1); - output[2] = dct_const_round_shift(s2); - output[3] = dct_const_round_shift(s3); + output[0] = fdct_round_shift(s0); + output[1] = fdct_round_shift(s1); + output[2] = fdct_round_shift(s2); + output[3] = fdct_round_shift(s3); } static const transform_2d FHT_4[] = { @@ -151,32 +155,36 @@ static const transform_2d FHT_4[] = { { fadst4, fadst4 } // ADST_ADST = 3 }; -void vp9_short_fht4x4_c(const int16_t *input, int16_t *output, - int stride, int tx_type) { - int16_t out[4 * 4]; - int16_t *outptr = &out[0]; - int i, j; - int16_t temp_in[4], temp_out[4]; - const transform_2d ht = FHT_4[tx_type]; +void vp9_fht4x4_c(const int16_t *input, int16_t *output, + int stride, int tx_type) { + if (tx_type == DCT_DCT) { + vp9_fdct4x4_c(input, output, stride); + } else { + int16_t out[4 * 4]; + int16_t *outptr = &out[0]; + int i, j; + int16_t temp_in[4], temp_out[4]; + const transform_2d ht = FHT_4[tx_type]; - // Columns - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = input[j * stride + i] * 16; - if (i == 0 && temp_in[0]) - temp_in[0] += 1; - ht.cols(temp_in, temp_out); - for (j = 0; j < 4; ++j) - outptr[j * 4 + i] = temp_out[j]; - } + // Columns + for (i = 0; i < 4; ++i) { + for (j = 0; j < 4; ++j) + temp_in[j] = input[j * stride + i] * 16; + if (i == 0 && temp_in[0]) + temp_in[0] += 1; + ht.cols(temp_in, temp_out); + for (j = 0; j < 4; ++j) + outptr[j * 4 + i] = temp_out[j]; + } - // Rows - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j + i * 4]; - ht.rows(temp_in, temp_out); - for (j = 0; j < 4; ++j) - output[j + i * 4] = (temp_out[j] + 1) >> 2; + // Rows + for (i = 0; i < 4; ++i) { + for (j = 0; j < 4; ++j) + temp_in[j] = out[j + i * 4]; + ht.rows(temp_in, temp_out); + for (j = 0; j < 4; ++j) + output[j + i * 4] = (temp_out[j] + 1) >> 2; + } } } @@ -204,16 +212,16 @@ static void fdct8(const int16_t *input, int16_t *output) { t1 = (x0 - x1) * cospi_16_64; t2 = x2 * cospi_24_64 + x3 * cospi_8_64; t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; - output[0] = dct_const_round_shift(t0); - output[2] = dct_const_round_shift(t2); - output[4] = dct_const_round_shift(t1); - output[6] = dct_const_round_shift(t3); + output[0] = fdct_round_shift(t0); + output[2] = fdct_round_shift(t2); + output[4] = fdct_round_shift(t1); + output[6] = fdct_round_shift(t3); // Stage 2 t0 = (s6 - s5) * cospi_16_64; t1 = (s6 + s5) * cospi_16_64; - t2 = dct_const_round_shift(t0); - t3 = dct_const_round_shift(t1); + t2 = fdct_round_shift(t0); + t3 = fdct_round_shift(t1); // Stage 3 x0 = s4 + t2; @@ -226,10 +234,10 @@ static void fdct8(const int16_t *input, int16_t *output) { t1 = x1 * cospi_12_64 + x2 * cospi_20_64; t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; - output[1] = dct_const_round_shift(t0); - output[3] = dct_const_round_shift(t2); - output[5] = dct_const_round_shift(t1); - output[7] = dct_const_round_shift(t3); + output[1] = fdct_round_shift(t0); + output[3] = fdct_round_shift(t2); + output[5] = fdct_round_shift(t1); + output[7] = fdct_round_shift(t3); } void vp9_fdct8x8_c(const int16_t *input, int16_t *final_output, int stride) { @@ -264,16 +272,16 @@ void vp9_fdct8x8_c(const int16_t *input, int16_t *final_output, int stride) { t1 = (x0 - x1) * cospi_16_64; t2 = x2 * cospi_24_64 + x3 * cospi_8_64; t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; - output[0 * 8] = dct_const_round_shift(t0); - output[2 * 8] = dct_const_round_shift(t2); - output[4 * 8] = 
dct_const_round_shift(t1); - output[6 * 8] = dct_const_round_shift(t3); + output[0 * 8] = fdct_round_shift(t0); + output[2 * 8] = fdct_round_shift(t2); + output[4 * 8] = fdct_round_shift(t1); + output[6 * 8] = fdct_round_shift(t3); // Stage 2 t0 = (s6 - s5) * cospi_16_64; t1 = (s6 + s5) * cospi_16_64; - t2 = dct_const_round_shift(t0); - t3 = dct_const_round_shift(t1); + t2 = fdct_round_shift(t0); + t3 = fdct_round_shift(t1); // Stage 3 x0 = s4 + t2; @@ -286,10 +294,10 @@ void vp9_fdct8x8_c(const int16_t *input, int16_t *final_output, int stride) { t1 = x1 * cospi_12_64 + x2 * cospi_20_64; t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; - output[1 * 8] = dct_const_round_shift(t0); - output[3 * 8] = dct_const_round_shift(t2); - output[5 * 8] = dct_const_round_shift(t1); - output[7 * 8] = dct_const_round_shift(t3); + output[1 * 8] = fdct_round_shift(t0); + output[3 * 8] = fdct_round_shift(t2); + output[5 * 8] = fdct_round_shift(t1); + output[7 * 8] = fdct_round_shift(t3); input++; output++; } @@ -307,7 +315,7 @@ void vp9_fdct16x16_c(const int16_t *input, int16_t *output, int stride) { // The 2D transform is done with two passes which are actually pretty // similar. In the first one, we transform the columns and transpose // the results. In the second one, we transform the rows. To achieve that, - // as the first pass results are transposed, we tranpose the columns (that + // as the first pass results are transposed, we transpose the columns (that // is the transposed rows) and transpose the results (so that it goes back // in normal/row positions). int pass; @@ -388,16 +396,16 @@ void vp9_fdct16x16_c(const int16_t *input, int16_t *output, int stride) { t1 = (x0 - x1) * cospi_16_64; t2 = x3 * cospi_8_64 + x2 * cospi_24_64; t3 = x3 * cospi_24_64 - x2 * cospi_8_64; - out[0] = dct_const_round_shift(t0); - out[4] = dct_const_round_shift(t2); - out[8] = dct_const_round_shift(t1); - out[12] = dct_const_round_shift(t3); + out[0] = fdct_round_shift(t0); + out[4] = fdct_round_shift(t2); + out[8] = fdct_round_shift(t1); + out[12] = fdct_round_shift(t3); // Stage 2 t0 = (s6 - s5) * cospi_16_64; t1 = (s6 + s5) * cospi_16_64; - t2 = dct_const_round_shift(t0); - t3 = dct_const_round_shift(t1); + t2 = fdct_round_shift(t0); + t3 = fdct_round_shift(t1); // Stage 3 x0 = s4 + t2; @@ -410,22 +418,22 @@ void vp9_fdct16x16_c(const int16_t *input, int16_t *output, int stride) { t1 = x1 * cospi_12_64 + x2 * cospi_20_64; t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; - out[2] = dct_const_round_shift(t0); - out[6] = dct_const_round_shift(t2); - out[10] = dct_const_round_shift(t1); - out[14] = dct_const_round_shift(t3); + out[2] = fdct_round_shift(t0); + out[6] = fdct_round_shift(t2); + out[10] = fdct_round_shift(t1); + out[14] = fdct_round_shift(t3); } // Work on the next eight values; step1 -> odd_results { // step 2 temp1 = (step1[5] - step1[2]) * cospi_16_64; temp2 = (step1[4] - step1[3]) * cospi_16_64; - step2[2] = dct_const_round_shift(temp1); - step2[3] = dct_const_round_shift(temp2); + step2[2] = fdct_round_shift(temp1); + step2[3] = fdct_round_shift(temp2); temp1 = (step1[4] + step1[3]) * cospi_16_64; temp2 = (step1[5] + step1[2]) * cospi_16_64; - step2[4] = dct_const_round_shift(temp1); - step2[5] = dct_const_round_shift(temp2); + step2[4] = fdct_round_shift(temp1); + step2[5] = fdct_round_shift(temp2); // step 3 step3[0] = step1[0] + step2[3]; step3[1] = step1[1] + step2[2]; @@ -438,12 +446,12 @@ void vp9_fdct16x16_c(const int16_t 
*input, int16_t *output, int stride) { // step 4 temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; temp2 = step3[2] * -cospi_24_64 - step3[5] * cospi_8_64; - step2[1] = dct_const_round_shift(temp1); - step2[2] = dct_const_round_shift(temp2); + step2[1] = fdct_round_shift(temp1); + step2[2] = fdct_round_shift(temp2); temp1 = step3[2] * -cospi_8_64 + step3[5] * cospi_24_64; temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64; - step2[5] = dct_const_round_shift(temp1); - step2[6] = dct_const_round_shift(temp2); + step2[5] = fdct_round_shift(temp1); + step2[6] = fdct_round_shift(temp2); // step 5 step1[0] = step3[0] + step2[1]; step1[1] = step3[0] - step2[1]; @@ -456,20 +464,20 @@ void vp9_fdct16x16_c(const int16_t *input, int16_t *output, int stride) { // step 6 temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64; temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64; - out[1] = dct_const_round_shift(temp1); - out[9] = dct_const_round_shift(temp2); + out[1] = fdct_round_shift(temp1); + out[9] = fdct_round_shift(temp2); temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64; temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64; - out[5] = dct_const_round_shift(temp1); - out[13] = dct_const_round_shift(temp2); + out[5] = fdct_round_shift(temp1); + out[13] = fdct_round_shift(temp2); temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64; temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64; - out[3] = dct_const_round_shift(temp1); - out[11] = dct_const_round_shift(temp2); + out[3] = fdct_round_shift(temp1); + out[11] = fdct_round_shift(temp2); temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64; temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64; - out[7] = dct_const_round_shift(temp1); - out[15] = dct_const_round_shift(temp2); + out[7] = fdct_round_shift(temp1); + out[15] = fdct_round_shift(temp2); } // Do next column (which is a transposed row in second/horizontal pass) in++; @@ -503,14 +511,14 @@ static void fadst8(const int16_t *input, int16_t *output) { s6 = cospi_26_64 * x6 + cospi_6_64 * x7; s7 = cospi_6_64 * x6 - cospi_26_64 * x7; - x0 = dct_const_round_shift(s0 + s4); - x1 = dct_const_round_shift(s1 + s5); - x2 = dct_const_round_shift(s2 + s6); - x3 = dct_const_round_shift(s3 + s7); - x4 = dct_const_round_shift(s0 - s4); - x5 = dct_const_round_shift(s1 - s5); - x6 = dct_const_round_shift(s2 - s6); - x7 = dct_const_round_shift(s3 - s7); + x0 = fdct_round_shift(s0 + s4); + x1 = fdct_round_shift(s1 + s5); + x2 = fdct_round_shift(s2 + s6); + x3 = fdct_round_shift(s3 + s7); + x4 = fdct_round_shift(s0 - s4); + x5 = fdct_round_shift(s1 - s5); + x6 = fdct_round_shift(s2 - s6); + x7 = fdct_round_shift(s3 - s7); // stage 2 s0 = x0; @@ -526,10 +534,10 @@ static void fadst8(const int16_t *input, int16_t *output) { x1 = s1 + s3; x2 = s0 - s2; x3 = s1 - s3; - x4 = dct_const_round_shift(s4 + s6); - x5 = dct_const_round_shift(s5 + s7); - x6 = dct_const_round_shift(s4 - s6); - x7 = dct_const_round_shift(s5 - s7); + x4 = fdct_round_shift(s4 + s6); + x5 = fdct_round_shift(s5 + s7); + x6 = fdct_round_shift(s4 - s6); + x7 = fdct_round_shift(s5 - s7); // stage 3 s2 = cospi_16_64 * (x2 + x3); @@ -537,10 +545,10 @@ static void fadst8(const int16_t *input, int16_t *output) { s6 = cospi_16_64 * (x6 + x7); s7 = cospi_16_64 * (x6 - x7); - x2 = dct_const_round_shift(s2); - x3 = dct_const_round_shift(s3); - x6 = dct_const_round_shift(s6); - x7 = dct_const_round_shift(s7); + x2 = fdct_round_shift(s2); + x3 = fdct_round_shift(s3); + x6 = fdct_round_shift(s6); + x7 = 
fdct_round_shift(s7); output[0] = x0; output[1] = - x4; @@ -559,30 +567,34 @@ static const transform_2d FHT_8[] = { { fadst8, fadst8 } // ADST_ADST = 3 }; -void vp9_short_fht8x8_c(const int16_t *input, int16_t *output, - int stride, int tx_type) { - int16_t out[64]; - int16_t *outptr = &out[0]; - int i, j; - int16_t temp_in[8], temp_out[8]; - const transform_2d ht = FHT_8[tx_type]; - - // Columns - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = input[j * stride + i] * 4; - ht.cols(temp_in, temp_out); - for (j = 0; j < 8; ++j) - outptr[j * 8 + i] = temp_out[j]; - } +void vp9_fht8x8_c(const int16_t *input, int16_t *output, + int stride, int tx_type) { + if (tx_type == DCT_DCT) { + vp9_fdct8x8_c(input, output, stride); + } else { + int16_t out[64]; + int16_t *outptr = &out[0]; + int i, j; + int16_t temp_in[8], temp_out[8]; + const transform_2d ht = FHT_8[tx_type]; + + // Columns + for (i = 0; i < 8; ++i) { + for (j = 0; j < 8; ++j) + temp_in[j] = input[j * stride + i] * 4; + ht.cols(temp_in, temp_out); + for (j = 0; j < 8; ++j) + outptr[j * 8 + i] = temp_out[j]; + } - // Rows - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j + i * 8]; - ht.rows(temp_in, temp_out); - for (j = 0; j < 8; ++j) - output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1; + // Rows + for (i = 0; i < 8; ++i) { + for (j = 0; j < 8; ++j) + temp_in[j] = out[j + i * 8]; + ht.rows(temp_in, temp_out); + for (j = 0; j < 8; ++j) + output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1; + } } } @@ -693,16 +705,16 @@ static void fdct16(const int16_t in[16], int16_t out[16]) { t1 = (x0 - x1) * cospi_16_64; t2 = x3 * cospi_8_64 + x2 * cospi_24_64; t3 = x3 * cospi_24_64 - x2 * cospi_8_64; - out[0] = dct_const_round_shift(t0); - out[4] = dct_const_round_shift(t2); - out[8] = dct_const_round_shift(t1); - out[12] = dct_const_round_shift(t3); + out[0] = fdct_round_shift(t0); + out[4] = fdct_round_shift(t2); + out[8] = fdct_round_shift(t1); + out[12] = fdct_round_shift(t3); // Stage 2 t0 = (s6 - s5) * cospi_16_64; t1 = (s6 + s5) * cospi_16_64; - t2 = dct_const_round_shift(t0); - t3 = dct_const_round_shift(t1); + t2 = fdct_round_shift(t0); + t3 = fdct_round_shift(t1); // Stage 3 x0 = s4 + t2; @@ -715,21 +727,21 @@ static void fdct16(const int16_t in[16], int16_t out[16]) { t1 = x1 * cospi_12_64 + x2 * cospi_20_64; t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; - out[2] = dct_const_round_shift(t0); - out[6] = dct_const_round_shift(t2); - out[10] = dct_const_round_shift(t1); - out[14] = dct_const_round_shift(t3); + out[2] = fdct_round_shift(t0); + out[6] = fdct_round_shift(t2); + out[10] = fdct_round_shift(t1); + out[14] = fdct_round_shift(t3); } // step 2 temp1 = (step1[5] - step1[2]) * cospi_16_64; temp2 = (step1[4] - step1[3]) * cospi_16_64; - step2[2] = dct_const_round_shift(temp1); - step2[3] = dct_const_round_shift(temp2); + step2[2] = fdct_round_shift(temp1); + step2[3] = fdct_round_shift(temp2); temp1 = (step1[4] + step1[3]) * cospi_16_64; temp2 = (step1[5] + step1[2]) * cospi_16_64; - step2[4] = dct_const_round_shift(temp1); - step2[5] = dct_const_round_shift(temp2); + step2[4] = fdct_round_shift(temp1); + step2[5] = fdct_round_shift(temp2); // step 3 step3[0] = step1[0] + step2[3]; @@ -744,12 +756,12 @@ static void fdct16(const int16_t in[16], int16_t out[16]) { // step 4 temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; temp2 = step3[2] * -cospi_24_64 - step3[5] * cospi_8_64; - step2[1] = dct_const_round_shift(temp1); - step2[2] = 
dct_const_round_shift(temp2); + step2[1] = fdct_round_shift(temp1); + step2[2] = fdct_round_shift(temp2); temp1 = step3[2] * -cospi_8_64 + step3[5] * cospi_24_64; temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64; - step2[5] = dct_const_round_shift(temp1); - step2[6] = dct_const_round_shift(temp2); + step2[5] = fdct_round_shift(temp1); + step2[6] = fdct_round_shift(temp2); // step 5 step1[0] = step3[0] + step2[1]; @@ -764,23 +776,23 @@ static void fdct16(const int16_t in[16], int16_t out[16]) { // step 6 temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64; temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64; - out[1] = dct_const_round_shift(temp1); - out[9] = dct_const_round_shift(temp2); + out[1] = fdct_round_shift(temp1); + out[9] = fdct_round_shift(temp2); temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64; temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64; - out[5] = dct_const_round_shift(temp1); - out[13] = dct_const_round_shift(temp2); + out[5] = fdct_round_shift(temp1); + out[13] = fdct_round_shift(temp2); temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64; temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64; - out[3] = dct_const_round_shift(temp1); - out[11] = dct_const_round_shift(temp2); + out[3] = fdct_round_shift(temp1); + out[11] = fdct_round_shift(temp2); temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64; temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64; - out[7] = dct_const_round_shift(temp1); - out[15] = dct_const_round_shift(temp2); + out[7] = fdct_round_shift(temp1); + out[15] = fdct_round_shift(temp2); } static void fadst16(const int16_t *input, int16_t *output) { @@ -821,22 +833,22 @@ static void fadst16(const int16_t *input, int16_t *output) { s14 = x14 * cospi_29_64 + x15 * cospi_3_64; s15 = x14 * cospi_3_64 - x15 * cospi_29_64; - x0 = dct_const_round_shift(s0 + s8); - x1 = dct_const_round_shift(s1 + s9); - x2 = dct_const_round_shift(s2 + s10); - x3 = dct_const_round_shift(s3 + s11); - x4 = dct_const_round_shift(s4 + s12); - x5 = dct_const_round_shift(s5 + s13); - x6 = dct_const_round_shift(s6 + s14); - x7 = dct_const_round_shift(s7 + s15); - x8 = dct_const_round_shift(s0 - s8); - x9 = dct_const_round_shift(s1 - s9); - x10 = dct_const_round_shift(s2 - s10); - x11 = dct_const_round_shift(s3 - s11); - x12 = dct_const_round_shift(s4 - s12); - x13 = dct_const_round_shift(s5 - s13); - x14 = dct_const_round_shift(s6 - s14); - x15 = dct_const_round_shift(s7 - s15); + x0 = fdct_round_shift(s0 + s8); + x1 = fdct_round_shift(s1 + s9); + x2 = fdct_round_shift(s2 + s10); + x3 = fdct_round_shift(s3 + s11); + x4 = fdct_round_shift(s4 + s12); + x5 = fdct_round_shift(s5 + s13); + x6 = fdct_round_shift(s6 + s14); + x7 = fdct_round_shift(s7 + s15); + x8 = fdct_round_shift(s0 - s8); + x9 = fdct_round_shift(s1 - s9); + x10 = fdct_round_shift(s2 - s10); + x11 = fdct_round_shift(s3 - s11); + x12 = fdct_round_shift(s4 - s12); + x13 = fdct_round_shift(s5 - s13); + x14 = fdct_round_shift(s6 - s14); + x15 = fdct_round_shift(s7 - s15); // stage 2 s0 = x0; @@ -864,14 +876,14 @@ static void fadst16(const int16_t *input, int16_t *output) { x5 = s1 - s5; x6 = s2 - s6; x7 = s3 - s7; - x8 = dct_const_round_shift(s8 + s12); - x9 = dct_const_round_shift(s9 + s13); - x10 = dct_const_round_shift(s10 + s14); - x11 = dct_const_round_shift(s11 + s15); - x12 = dct_const_round_shift(s8 - s12); - x13 = dct_const_round_shift(s9 - s13); - x14 = dct_const_round_shift(s10 - s14); - x15 = dct_const_round_shift(s11 - s15); + x8 = fdct_round_shift(s8 + s12); + x9 = 
fdct_round_shift(s9 + s13); + x10 = fdct_round_shift(s10 + s14); + x11 = fdct_round_shift(s11 + s15); + x12 = fdct_round_shift(s8 - s12); + x13 = fdct_round_shift(s9 - s13); + x14 = fdct_round_shift(s10 - s14); + x15 = fdct_round_shift(s11 - s15); // stage 3 s0 = x0; @@ -895,18 +907,18 @@ static void fadst16(const int16_t *input, int16_t *output) { x1 = s1 + s3; x2 = s0 - s2; x3 = s1 - s3; - x4 = dct_const_round_shift(s4 + s6); - x5 = dct_const_round_shift(s5 + s7); - x6 = dct_const_round_shift(s4 - s6); - x7 = dct_const_round_shift(s5 - s7); + x4 = fdct_round_shift(s4 + s6); + x5 = fdct_round_shift(s5 + s7); + x6 = fdct_round_shift(s4 - s6); + x7 = fdct_round_shift(s5 - s7); x8 = s8 + s10; x9 = s9 + s11; x10 = s8 - s10; x11 = s9 - s11; - x12 = dct_const_round_shift(s12 + s14); - x13 = dct_const_round_shift(s13 + s15); - x14 = dct_const_round_shift(s12 - s14); - x15 = dct_const_round_shift(s13 - s15); + x12 = fdct_round_shift(s12 + s14); + x13 = fdct_round_shift(s13 + s15); + x14 = fdct_round_shift(s12 - s14); + x15 = fdct_round_shift(s13 - s15); // stage 4 s2 = (- cospi_16_64) * (x2 + x3); @@ -918,14 +930,14 @@ static void fadst16(const int16_t *input, int16_t *output) { s14 = (- cospi_16_64) * (x14 + x15); s15 = cospi_16_64 * (x14 - x15); - x2 = dct_const_round_shift(s2); - x3 = dct_const_round_shift(s3); - x6 = dct_const_round_shift(s6); - x7 = dct_const_round_shift(s7); - x10 = dct_const_round_shift(s10); - x11 = dct_const_round_shift(s11); - x14 = dct_const_round_shift(s14); - x15 = dct_const_round_shift(s15); + x2 = fdct_round_shift(s2); + x3 = fdct_round_shift(s3); + x6 = fdct_round_shift(s6); + x7 = fdct_round_shift(s7); + x10 = fdct_round_shift(s10); + x11 = fdct_round_shift(s11); + x14 = fdct_round_shift(s14); + x15 = fdct_round_shift(s15); output[0] = x0; output[1] = - x8; @@ -952,31 +964,34 @@ static const transform_2d FHT_16[] = { { fadst16, fadst16 } // ADST_ADST = 3 }; -void vp9_short_fht16x16_c(const int16_t *input, int16_t *output, - int stride, int tx_type) { - int16_t out[256]; - int16_t *outptr = &out[0]; - int i, j; - int16_t temp_in[16], temp_out[16]; - const transform_2d ht = FHT_16[tx_type]; - - // Columns - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = input[j * stride + i] * 4; - ht.cols(temp_in, temp_out); - for (j = 0; j < 16; ++j) - outptr[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2; -// outptr[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2; - } +void vp9_fht16x16_c(const int16_t *input, int16_t *output, + int stride, int tx_type) { + if (tx_type == DCT_DCT) { + vp9_fdct16x16_c(input, output, stride); + } else { + int16_t out[256]; + int16_t *outptr = &out[0]; + int i, j; + int16_t temp_in[16], temp_out[16]; + const transform_2d ht = FHT_16[tx_type]; + + // Columns + for (i = 0; i < 16; ++i) { + for (j = 0; j < 16; ++j) + temp_in[j] = input[j * stride + i] * 4; + ht.cols(temp_in, temp_out); + for (j = 0; j < 16; ++j) + outptr[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2; + } - // Rows - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j + i * 16]; - ht.rows(temp_in, temp_out); - for (j = 0; j < 16; ++j) - output[j + i * 16] = temp_out[j]; + // Rows + for (i = 0; i < 16; ++i) { + for (j = 0; j < 16; ++j) + temp_in[j] = out[j + i * 16]; + ht.rows(temp_in, temp_out); + for (j = 0; j < 16; ++j) + output[j + i * 16] = temp_out[j]; + } } } @@ -991,7 +1006,7 @@ static INLINE int half_round_shift(int input) { return rv; } -static void dct32_1d(const int *input, int *output, int round) { 
+static void fdct32(const int *input, int *output, int round) { int step[32]; // Stage 1 step[0] = input[0] + input[(32 - 1)]; @@ -1323,7 +1338,7 @@ void vp9_fdct32x32_c(const int16_t *input, int16_t *out, int stride) { int temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4; - dct32_1d(temp_in, temp_out, 0); + fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2; } @@ -1333,13 +1348,13 @@ void vp9_fdct32x32_c(const int16_t *input, int16_t *out, int stride) { int temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32]; - dct32_1d(temp_in, temp_out, 0); + fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) out[j + i * 32] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2; } } -// Note that although we use dct_32_round in dct32_1d computation flow, +// Note that although we use dct_32_round in dct32 computation flow, // this 2d fdct32x32 for rate-distortion optimization loop is operating // within 16 bits precision. void vp9_fdct32x32_rd_c(const int16_t *input, int16_t *out, int stride) { @@ -1351,7 +1366,7 @@ void vp9_fdct32x32_rd_c(const int16_t *input, int16_t *out, int stride) { int temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4; - dct32_1d(temp_in, temp_out, 0); + fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) // TODO(cd): see quality impact of only doing // output[j * 32 + i] = (temp_out[j] + 1) >> 2; @@ -1364,32 +1379,8 @@ void vp9_fdct32x32_rd_c(const int16_t *input, int16_t *out, int stride) { int temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32]; - dct32_1d(temp_in, temp_out, 1); + fdct32(temp_in, temp_out, 1); for (j = 0; j < 32; ++j) out[j + i * 32] = temp_out[j]; } } - -void vp9_fht4x4(TX_TYPE tx_type, const int16_t *input, int16_t *output, - int stride) { - if (tx_type == DCT_DCT) - vp9_fdct4x4(input, output, stride); - else - vp9_short_fht4x4(input, output, stride, tx_type); -} - -void vp9_fht8x8(TX_TYPE tx_type, const int16_t *input, int16_t *output, - int stride) { - if (tx_type == DCT_DCT) - vp9_fdct8x8(input, output, stride); - else - vp9_short_fht8x8(input, output, stride, tx_type); -} - -void vp9_fht16x16(TX_TYPE tx_type, const int16_t *input, int16_t *output, - int stride) { - if (tx_type == DCT_DCT) - vp9_fdct16x16(input, output, stride); - else - vp9_short_fht16x16(input, output, stride, tx_type); -} diff --git a/libvpx/vp9/encoder/vp9_dct.h b/libvpx/vp9/encoder/vp9_dct.h deleted file mode 100644 index aaf976d..0000000 --- a/libvpx/vp9/encoder/vp9_dct.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#ifndef VP9_ENCODER_VP9_DCT_H_ -#define VP9_ENCODER_VP9_DCT_H_ - -void vp9_fht4x4(TX_TYPE tx_type, const int16_t *input, int16_t *output, - int stride); - -void vp9_fht8x8(TX_TYPE tx_type, const int16_t *input, int16_t *output, - int stride); - -void vp9_fht16x16(TX_TYPE tx_type, const int16_t *input, int16_t *output, - int stride); - -#endif // VP9_ENCODER_VP9_DCT_H_ diff --git a/libvpx/vp9/encoder/vp9_encodeframe.c b/libvpx/vp9/encoder/vp9_encodeframe.c index 3e75f3b..c7ba70a 100644 --- a/libvpx/vp9/encoder/vp9_encodeframe.c +++ b/libvpx/vp9/encoder/vp9_encodeframe.c @@ -20,8 +20,6 @@ #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_entropymode.h" -#include "vp9/common/vp9_extend.h" -#include "vp9/common/vp9_findnearmv.h" #include "vp9/common/vp9_idct.h" #include "vp9/common/vp9_mvref_common.h" #include "vp9/common/vp9_pred_common.h" @@ -29,27 +27,20 @@ #include "vp9/common/vp9_reconintra.h" #include "vp9/common/vp9_reconinter.h" #include "vp9/common/vp9_seg_common.h" +#include "vp9/common/vp9_systemdependent.h" #include "vp9/common/vp9_tile_common.h" + #include "vp9/encoder/vp9_encodeframe.h" -#include "vp9/encoder/vp9_encodeintra.h" #include "vp9/encoder/vp9_encodemb.h" #include "vp9/encoder/vp9_encodemv.h" +#include "vp9/encoder/vp9_extend.h" #include "vp9/encoder/vp9_onyx_int.h" +#include "vp9/encoder/vp9_pickmode.h" #include "vp9/encoder/vp9_rdopt.h" #include "vp9/encoder/vp9_segmentation.h" -#include "vp9/common/vp9_systemdependent.h" #include "vp9/encoder/vp9_tokenize.h" #include "vp9/encoder/vp9_vaq.h" - -#define DBG_PRNT_SEGMAP 0 - - -// #define ENC_DEBUG -#ifdef ENC_DEBUG -int enc_debug = 0; -#endif - static INLINE uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) { switch (subsize) { case BLOCK_64X64: @@ -80,21 +71,19 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x); -/* activity_avg must be positive, or flat regions could get a zero weight - * (infinite lambda), which confounds analysis. - * This also avoids the need for divide by zero checks in - * vp9_activity_masking(). - */ -#define ACTIVITY_AVG_MIN (64) +// activity_avg must be positive, or flat regions could get a zero weight +// (infinite lambda), which confounds analysis. +// This also avoids the need for divide by zero checks in +// vp9_activity_masking(). +#define ACTIVITY_AVG_MIN 64 -/* Motion vector component magnitude threshold for defining fast motion. */ -#define FAST_MOTION_MV_THRESH (24) +// Motion vector component magnitude threshold for defining fast motion. +#define FAST_MOTION_MV_THRESH 24 -/* This is used as a reference when computing the source variance for the - * purposes of activity masking. - * Eventually this should be replaced by custom no-reference routines, - * which will be faster. - */ +// This is used as a reference when computing the source variance for the +// purposes of activity masking. +// Eventually this should be replaced by custom no-reference routines, +// which will be faster. 
static const uint8_t VP9_VAR_OFFS[64] = { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, @@ -106,36 +95,75 @@ static const uint8_t VP9_VAR_OFFS[64] = { 128, 128, 128, 128, 128, 128, 128, 128 }; -static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi, MACROBLOCK *x, +static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi, + MACROBLOCK *x, BLOCK_SIZE bs) { unsigned int var, sse; + var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride, + VP9_VAR_OFFS, 0, &sse); + return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); +} + +static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi, + MACROBLOCK *x, + int mi_row, + int mi_col, + BLOCK_SIZE bs) { + const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); + int offset = (mi_row * MI_SIZE) * yv12->y_stride + (mi_col * MI_SIZE); + unsigned int var, sse; var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride, - VP9_VAR_OFFS, 0, &sse); - return (var + (1 << (num_pels_log2_lookup[bs] - 1))) >> - num_pels_log2_lookup[bs]; + yv12->y_buffer + offset, + yv12->y_stride, + &sse); + return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); +} + +static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, + int mi_row, + int mi_col) { + unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb, + mi_row, mi_col, + BLOCK_64X64); + if (var < 8) + return BLOCK_64X64; + else if (var < 128) + return BLOCK_32X32; + else if (var < 2048) + return BLOCK_16X16; + else + return BLOCK_8X8; +} + +static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi, + int mi_row, + int mi_col) { + unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb, + mi_row, mi_col, + BLOCK_64X64); + if (var < 4) + return BLOCK_64X64; + else if (var < 10) + return BLOCK_32X32; + else + return BLOCK_16X16; } // Original activity measure from Tim T's code. static unsigned int tt_activity_measure(MACROBLOCK *x) { - unsigned int act; unsigned int sse; - /* TODO: This could also be done over smaller areas (8x8), but that would - * require extensive changes elsewhere, as lambda is assumed to be fixed - * over an entire MB in most of the code. - * Another option is to compute four 8x8 variances, and pick a single - * lambda using a non-linear combination (e.g., the smallest, or second - * smallest, etc.). - */ - act = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride, - VP9_VAR_OFFS, 0, &sse); - act <<= 4; - - /* If the region is flat, lower the activity some more. */ - if (act < 8 << 12) - act = act < 5 << 12 ? act : 5 << 12; - - return act; + // TODO: This could also be done over smaller areas (8x8), but that would + // require extensive changes elsewhere, as lambda is assumed to be fixed + // over an entire MB in most of the code. + // Another option is to compute four 8x8 variances, and pick a single + // lambda using a non-linear combination (e.g., the smallest, or second + // smallest, etc.). + const unsigned int act = vp9_variance16x16(x->plane[0].src.buf, + x->plane[0].src.stride, + VP9_VAR_OFFS, 0, &sse) << 4; + // If the region is flat, lower the activity some more. + return act < (8 << 12) ? MIN(act, 5 << 12) : act; } // Stub for alternative experimental activity measures. 
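A minimal standalone sketch of the per-pixel-variance flow introduced above (helper names here are hypothetical, not libvpx API; in the real code VP9_VAR_OFFS supplies the flat 128 reference):

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

/* Per-pixel variance of a 64x64 source block against a flat reference of
 * 128, mirroring get_sby_perpixel_variance: the raw variance is divided
 * by the pixel count (2^12 pels for 64x64) with rounding. */
static unsigned int per_pixel_variance_64x64(const uint8_t *src, int stride) {
  int64_t sum = 0;
  int64_t sse = 0;
  int r, c;
  for (r = 0; r < 64; ++r) {
    for (c = 0; c < 64; ++c) {
      const int d = src[r * stride + c] - 128;
      sum += d;
      sse += d * d;
    }
  }
  /* variance = SSE - (sum^2 / N), then normalize by N = 2^12 pels */
  return (unsigned int)ROUND_POWER_OF_TWO(sse - ((sum * sum) >> 12), 12);
}

/* Threshold ladder from get_rd_var_based_fixed_partition above: calmer
 * blocks get larger fixed partitions. (The real helper measures the diff
 * against the LAST_FRAME reconstruction rather than a flat reference.) */
static int fixed_partition_width(unsigned int var) {
  if (var < 8) return 64;         /* BLOCK_64X64 */
  else if (var < 128) return 32;  /* BLOCK_32X32 */
  else if (var < 2048) return 16; /* BLOCK_16X16 */
  else return 8;                  /* BLOCK_8X8 */
}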
@@ -150,7 +178,7 @@ static unsigned int mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) { unsigned int mb_activity; if (ALT_ACT_MEASURE) { - int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); + const int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); // Or use an alternative. mb_activity = alt_activity_measure(x, use_dc_pred); @@ -159,10 +187,7 @@ static unsigned int mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) { mb_activity = tt_activity_measure(x); } - if (mb_activity < ACTIVITY_AVG_MIN) - mb_activity = ACTIVITY_AVG_MIN; - - return mb_activity; + return MAX(mb_activity, ACTIVITY_AVG_MIN); } // Calculate an "average" mb activity value for the frame @@ -277,9 +302,9 @@ static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) { // Loop through all MBs. Note activity of each, average activity and // calculate a normalized activity for each static void build_activity_map(VP9_COMP *cpi) { - MACROBLOCK * const x = &cpi->mb; + MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *xd = &x->e_mbd; - VP9_COMMON * const cm = &cpi->common; + VP9_COMMON *const cm = &cpi->common; #if ALT_ACT_MEASURE YV12_BUFFER_CONFIG *new_yv12 = get_frame_new_buffer(cm); @@ -338,19 +363,17 @@ static void build_activity_map(VP9_COMP *cpi) { } // Macroblock activity masking -void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) { +static void activity_masking(VP9_COMP *cpi, MACROBLOCK *x) { #if USE_ACT_INDEX x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2); x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); x->errorperbit += (x->errorperbit == 0); #else - int64_t a; - int64_t b; - int64_t act = *(x->mb_activity_ptr); + const int64_t act = *(x->mb_activity_ptr); // Apply the masking to the RD multiplier. - a = act + (2 * cpi->activity_avg); - b = (2 * act) + cpi->activity_avg; + const int64_t a = act + (2 * cpi->activity_avg); + const int64_t b = (2 * act) + cpi->activity_avg; x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a); x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); @@ -361,6 +384,52 @@ void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) { adjust_act_zbin(cpi, x); } +// Select a segment for the current SB64 +static void select_in_frame_q_segment(VP9_COMP *cpi, + int mi_row, int mi_col, + int output_enabled, int projected_rate) { + VP9_COMMON *const cm = &cpi->common; + + const int mi_offset = mi_row * cm->mi_cols + mi_col; + const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64]; + const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64]; + const int xmis = MIN(cm->mi_cols - mi_col, bw); + const int ymis = MIN(cm->mi_rows - mi_row, bh); + int complexity_metric = 64; + int x, y; + + unsigned char segment; + + if (!output_enabled) { + segment = 0; + } else { + // Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh).
+ // It is converted to bits * 256 units + const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) / + (bw * bh); + + if (projected_rate < (target_rate / 4)) { + segment = 1; + } else { + segment = 0; + } + + if (target_rate > 0) { + complexity_metric = + clamp((int)((projected_rate * 64) / target_rate), 16, 255); + } + } + + // Fill in the entries in the segment map corresponding to this SB64 + for (y = 0; y < ymis; y++) { + for (x = 0; x < xmis; x++) { + cpi->segmentation_map[mi_offset + y * cm->mi_cols + x] = segment; + cpi->complexity_map[mi_offset + y * cm->mi_cols + x] = + (unsigned char)complexity_metric; + } + } +} + static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize, int output_enabled) { int i, x_idx, y; @@ -373,32 +442,33 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; MODE_INFO *mi_addr = xd->mi_8x8[0]; - int mb_mode_index = ctx->best_mode_index; const int mis = cm->mode_info_stride; const int mi_width = num_8x8_blocks_wide_lookup[bsize]; const int mi_height = num_8x8_blocks_high_lookup[bsize]; int max_plane; - assert(mi->mbmi.mode < MB_MODE_COUNT); - assert(mi->mbmi.ref_frame[0] < MAX_REF_FRAMES); - assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES); assert(mi->mbmi.sb_type == bsize); + // For in-frame adaptive Q, copy over the chosen segment id into the + // mode info context for the chosen mode / partition. + if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && output_enabled) + mi->mbmi.segment_id = xd->mi_8x8[0]->mbmi.segment_id; + *mi_addr = *mi; max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1; for (i = 0; i < max_plane; ++i) { p[i].coeff = ctx->coeff_pbuf[i][1]; - pd[i].qcoeff = ctx->qcoeff_pbuf[i][1]; + p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; - pd[i].eobs = ctx->eobs_pbuf[i][1]; + p[i].eobs = ctx->eobs_pbuf[i][1]; } for (i = max_plane; i < MAX_MB_PLANE; ++i) { p[i].coeff = ctx->coeff_pbuf[i][2]; - pd[i].qcoeff = ctx->qcoeff_pbuf[i][2]; + p[i].qcoeff = ctx->qcoeff_pbuf[i][2]; pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2]; - pd[i].eobs = ctx->eobs_pbuf[i][2]; + p[i].eobs = ctx->eobs_pbuf[i][2]; } // Restore the coding context of the MB to that which was in place @@ -406,11 +476,13 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, for (y = 0; y < mi_height; y++) for (x_idx = 0; x_idx < mi_width; x_idx++) if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx - && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) + && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) { xd->mi_8x8[x_idx + y * mis] = mi_addr; + } - if (cpi->sf.variance_adaptive_quantization) { - vp9_mb_init_quantizer(cpi, x); + if ((cpi->oxcf.aq_mode == VARIANCE_AQ) || + (cpi->oxcf.aq_mode == COMPLEXITY_AQ)) { + vp9_init_plane_quantizers(cpi, x); } // FIXME(rbultje) I'm pretty sure this should go to the end of this block @@ -438,52 +510,46 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, cpi->rd_tx_select_diff[i] += ctx->tx_rd_diff[i]; } - if (frame_is_intra_only(cm)) { #if CONFIG_INTERNAL_STATS + if (frame_is_intra_only(cm)) { static const int kf_mode_index[] = { - THR_DC /*DC_PRED*/, - THR_V_PRED /*V_PRED*/, - THR_H_PRED /*H_PRED*/, - THR_D45_PRED /*D45_PRED*/, + THR_DC /*DC_PRED*/, + THR_V_PRED /*V_PRED*/, + THR_H_PRED /*H_PRED*/, + THR_D45_PRED /*D45_PRED*/, THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/, THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/, - THR_D63_PRED
/*D63_PRED*/, - THR_TM /*TM_PRED*/, + THR_D63_PRED /*D63_PRED*/, + THR_TM /*TM_PRED*/, }; - cpi->mode_chosen_counts[kf_mode_index[mi->mbmi.mode]]++; -#endif + ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]]; } else { // Note how often each mode chosen as best - cpi->mode_chosen_counts[mb_mode_index]++; - if (is_inter_block(mbmi) - && (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV)) { - int_mv best_mv[2]; - const MV_REFERENCE_FRAME rf1 = mbmi->ref_frame[0]; - const MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1]; - best_mv[0].as_int = ctx->best_ref_mv.as_int; - best_mv[1].as_int = ctx->second_best_ref_mv.as_int; - if (mbmi->mode == NEWMV) { - best_mv[0].as_int = mbmi->ref_mvs[rf1][0].as_int; - if (rf2 > 0) - best_mv[1].as_int = mbmi->ref_mvs[rf2][0].as_int; + ++cpi->mode_chosen_counts[ctx->best_mode_index]; + } +#endif + if (!frame_is_intra_only(cm)) { + if (is_inter_block(mbmi)) { + if (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV) { + MV best_mv[2]; + for (i = 0; i < 1 + has_second_ref(mbmi); ++i) + best_mv[i] = mbmi->ref_mvs[mbmi->ref_frame[i]][0].as_mv; + vp9_update_mv_count(cm, xd, best_mv); } - mbmi->best_mv[0].as_int = best_mv[0].as_int; - mbmi->best_mv[1].as_int = best_mv[1].as_int; - vp9_update_mv_count(cpi, x, best_mv); - } - if (cm->mcomp_filter_type == SWITCHABLE && is_inter_mode(mbmi->mode)) { - const int ctx = vp9_get_pred_context_switchable_interp(xd); - ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; + if (cm->interp_filter == SWITCHABLE) { + const int ctx = vp9_get_pred_context_switchable_interp(xd); + ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; + } } - cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff; - cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff; - cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff; + cpi->rd_comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff; + cpi->rd_comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff; + cpi->rd_comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff; - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) + for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) cpi->rd_filter_diff[i] += ctx->best_filter_diff[i]; } } @@ -496,6 +562,9 @@ void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src, src->alpha_stride}; int i; + // Set current frame pointer. + x->e_mbd.cur_buf = src; + for (i = 0; i < MAX_MB_PLANE; i++) setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col, NULL, x->e_mbd.plane[i].subsampling_x, @@ -508,7 +577,6 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile, VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; MB_MODE_INFO *mbmi; - const int dst_fb_idx = cm->new_fb_idx; const int idx_str = xd->mode_info_stride * mi_row + mi_col; const int mi_width = num_8x8_blocks_wide_lookup[bsize]; const int mi_height = num_8x8_blocks_high_lookup[bsize]; @@ -526,8 +594,6 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile, xd->mi_8x8 = cm->mi_grid_visible + idx_str; xd->prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str; - // Special case: if prev_mi is NULL, the previous mode info context - // cannot be used. xd->last_mi = cm->prev_mi ? 
xd->prev_mi_8x8[0] : NULL; xd->mi_8x8[0] = cm->mi + idx_str; @@ -535,7 +601,7 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile, mbmi = &xd->mi_8x8[0]->mbmi; // Set up destination pointers - setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col); + vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col); // Set up limit values for MV components // mv beyond the range do not produce new/different prediction block @@ -558,16 +624,16 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile, /* segment ID */ if (seg->enabled) { - if (!cpi->sf.variance_adaptive_quantization) { - uint8_t *map = seg->update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; + if (cpi->oxcf.aq_mode != VARIANCE_AQ) { + const uint8_t *const map = seg->update_map ? cpi->segmentation_map + : cm->last_frame_seg_map; mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); } - vp9_mb_init_quantizer(cpi, x); + vp9_init_plane_quantizers(cpi, x); - if (seg->enabled && cpi->seg0_cnt > 0 - && !vp9_segfeature_active(seg, 0, SEG_LVL_REF_FRAME) - && vp9_segfeature_active(seg, 1, SEG_LVL_REF_FRAME)) { + if (seg->enabled && cpi->seg0_cnt > 0 && + !vp9_segfeature_active(seg, 0, SEG_LVL_REF_FRAME) && + vp9_segfeature_active(seg, 1, SEG_LVL_REF_FRAME)) { cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt; } else { const int y = mb_row & ~3; @@ -584,25 +650,26 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile, x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id]; } else { mbmi->segment_id = 0; - x->encode_breakout = cpi->oxcf.encode_breakout; + x->encode_breakout = cpi->encode_breakout; } } -static void pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile, - int mi_row, int mi_col, - int *totalrate, int64_t *totaldist, - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, - int64_t best_rd) { +static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile, + int mi_row, int mi_col, + int *totalrate, int64_t *totaldist, + BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, + int64_t best_rd) { VP9_COMMON *const cm = &cpi->common; MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *const xd = &x->e_mbd; + MB_MODE_INFO *mbmi; struct macroblock_plane *const p = x->plane; struct macroblockd_plane *const pd = xd->plane; - int i; - int orig_rdmult = x->rdmult; + const AQ_MODE aq_mode = cpi->oxcf.aq_mode; + int i, orig_rdmult = x->rdmult; double rdmult_ratio; - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); rdmult_ratio = 1.0; // avoid uninitialized warnings // Use the lower precision, but faster, 32x32 fdct for mode selection. 
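The rd_pick_sb_modes() hunk that follows scales the RD multiplier per block for the two adaptive-quantization modes; a quick standalone restatement of that arithmetic (illustrative only; adjust_rdmult_for_complexity is a hypothetical name, not a libvpx function):

/* COMPLEXITY_AQ branch in the next hunk: for non-edge blocks whose stored
 * complexity exceeds the neutral value 128, inflate rdmult in proportion,
 * e.g. complexity 192 -> +25%, complexity 255 -> roughly +50%. */
static int adjust_rdmult_for_complexity(int rdmult, unsigned char complexity) {
  if (complexity > 128)
    rdmult += (rdmult * (complexity - 128)) / 256;
  return rdmult;
}

/* VARIANCE_AQ instead rescales by the energy-derived ratio,
 *   x->rdmult = (int)round(x->rdmult * rdmult_ratio);
 * and restores the original rdmult once the block is coded. */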
@@ -619,41 +686,55 @@ static void pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile, } set_offsets(cpi, tile, mi_row, mi_col, bsize); - xd->mi_8x8[0]->mbmi.sb_type = bsize; + mbmi = &xd->mi_8x8[0]->mbmi; + mbmi->sb_type = bsize; for (i = 0; i < MAX_MB_PLANE; ++i) { p[i].coeff = ctx->coeff_pbuf[i][0]; - pd[i].qcoeff = ctx->qcoeff_pbuf[i][0]; + p[i].qcoeff = ctx->qcoeff_pbuf[i][0]; pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0]; - pd[i].eobs = ctx->eobs_pbuf[i][0]; + p[i].eobs = ctx->eobs_pbuf[i][0]; } ctx->is_coded = 0; x->skip_recode = 0; // Set to zero to make sure we do not use the previous encoded frame stats - xd->mi_8x8[0]->mbmi.skip_coeff = 0; + mbmi->skip = 0; x->source_variance = get_sby_perpixel_variance(cpi, x, bsize); - if (cpi->sf.variance_adaptive_quantization) { - int energy; - if (bsize <= BLOCK_16X16) { - energy = x->mb_energy; + if (aq_mode == VARIANCE_AQ) { + const int energy = bsize <= BLOCK_16X16 ? x->mb_energy + : vp9_block_energy(cpi, x, bsize); + + if (cm->frame_type == KEY_FRAME || + cpi->refresh_alt_ref_frame || + (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { + mbmi->segment_id = vp9_vaq_segment_id(energy); } else { - energy = vp9_block_energy(cpi, x, bsize); + const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map + : cm->last_frame_seg_map; + mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); } - xd->mi_8x8[0]->mbmi.segment_id = vp9_vaq_segment_id(energy); rdmult_ratio = vp9_vaq_rdmult_ratio(energy); - vp9_mb_init_quantizer(cpi, x); + vp9_init_plane_quantizers(cpi, x); } if (cpi->oxcf.tuning == VP8_TUNE_SSIM) - vp9_activity_masking(cpi, x); + activity_masking(cpi, x); + + if (aq_mode == VARIANCE_AQ) { + vp9_clear_system_state(); + x->rdmult = (int)round(x->rdmult * rdmult_ratio); + } else if (aq_mode == COMPLEXITY_AQ) { + const int mi_offset = mi_row * cm->mi_cols + mi_col; + unsigned char complexity = cpi->complexity_map[mi_offset]; + const int is_edge = (mi_row <= 1) || (mi_row >= (cm->mi_rows - 2)) || + (mi_col <= 1) || (mi_col >= (cm->mi_cols - 2)); - if (cpi->sf.variance_adaptive_quantization) { - vp9_clear_system_state(); // __asm emms; - x->rdmult = round(x->rdmult * rdmult_ratio); + if (!is_edge && (complexity > 128)) + x->rdmult += ((x->rdmult * (complexity - 128)) / 256); } // Find best coding mode & reconstruct the MB so it is available @@ -670,47 +751,53 @@ static void pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile, totaldist, bsize, ctx, best_rd); } - if (cpi->sf.variance_adaptive_quantization) { + if (aq_mode == VARIANCE_AQ) { x->rdmult = orig_rdmult; if (*totalrate != INT_MAX) { - vp9_clear_system_state(); // __asm emms; - *totalrate = round(*totalrate * rdmult_ratio); + vp9_clear_system_state(); + *totalrate = (int)round(*totalrate * rdmult_ratio); } + } else if (aq_mode == COMPLEXITY_AQ) { + x->rdmult = orig_rdmult; } } static void update_stats(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &cpi->mb; - MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO *mi = xd->mi_8x8[0]; - MB_MODE_INFO *const mbmi = &mi->mbmi; + const MACROBLOCK *const x = &cpi->mb; + const MACROBLOCKD *const xd = &x->e_mbd; + const MODE_INFO *const mi = xd->mi_8x8[0]; + const MB_MODE_INFO *const mbmi = &mi->mbmi; if (!frame_is_intra_only(cm)) { const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_REF_FRAME); + if (!seg_ref_active) { + FRAME_COUNTS *const counts = &cm->counts; + const int inter_block = is_inter_block(mbmi); - if (!seg_ref_active) - 
cpi->intra_inter_count[vp9_get_pred_context_intra_inter(xd)] - [is_inter_block(mbmi)]++; - - // If the segment reference feature is enabled we have only a single - // reference frame allowed for the segment so exclude it from - // the reference frame counts used to work out probabilities. - if (is_inter_block(mbmi) && !seg_ref_active) { - if (cm->comp_pred_mode == HYBRID_PREDICTION) - cpi->comp_inter_count[vp9_get_pred_context_comp_inter_inter(cm, xd)] - [has_second_ref(mbmi)]++; - - if (has_second_ref(mbmi)) { - cpi->comp_ref_count[vp9_get_pred_context_comp_ref_p(cm, xd)] - [mbmi->ref_frame[0] == GOLDEN_FRAME]++; - } else { - cpi->single_ref_count[vp9_get_pred_context_single_ref_p1(xd)][0] - [mbmi->ref_frame[0] != LAST_FRAME]++; - if (mbmi->ref_frame[0] != LAST_FRAME) - cpi->single_ref_count[vp9_get_pred_context_single_ref_p2(xd)][1] - [mbmi->ref_frame[0] != GOLDEN_FRAME]++; + counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++; + + // If the segment reference feature is enabled we have only a single + // reference frame allowed for the segment so exclude it from + // the reference frame counts used to work out probabilities. + if (inter_block) { + const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0]; + + if (cm->reference_mode == REFERENCE_MODE_SELECT) + counts->comp_inter[vp9_get_reference_mode_context(cm, xd)] + [has_second_ref(mbmi)]++; + + if (has_second_ref(mbmi)) { + counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)] + [ref0 == GOLDEN_FRAME]++; + } else { + counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0] + [ref0 != LAST_FRAME]++; + if (ref0 != LAST_FRAME) + counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1] + [ref0 != GOLDEN_FRAME]++; + } } } } @@ -797,16 +884,9 @@ static void save_context(VP9_COMP *cpi, int mi_row, int mi_col, static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, TOKENEXTRA **tp, int mi_row, int mi_col, - int output_enabled, BLOCK_SIZE bsize, int sub_index) { - VP9_COMMON *const cm = &cpi->common; + int output_enabled, BLOCK_SIZE bsize) { MACROBLOCK *const x = &cpi->mb; - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; - - if (sub_index != -1) - *get_sb_index(x, bsize) = sub_index; - if (bsize < BLOCK_8X8) { // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 // there is nothing to be done. 
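The single-reference bookkeeping above walks a two-bin tree; a condensed sketch with the prediction-context index dropped for readability (count_single_ref is a hypothetical name):

/* Bin 0 records LAST vs. {GOLDEN, ALTREF}; bin 1 is reached only for
 * non-LAST blocks and records GOLDEN vs. ALTREF, matching the
 * counts->single_ref updates in update_stats(). */
static void count_single_ref(unsigned int counts[2][2],
                             int ref0_is_last, int ref0_is_golden) {
  counts[0][!ref0_is_last]++;
  if (!ref0_is_last)
    counts[1][!ref0_is_golden]++;
}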
@@ -830,64 +910,73 @@ static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile, int output_enabled, BLOCK_SIZE bsize) { VP9_COMMON *const cm = &cpi->common; MACROBLOCK *const x = &cpi->mb; - BLOCK_SIZE c1 = BLOCK_8X8; - const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4; - int pl = 0; + const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; + int ctx; PARTITION_TYPE partition; BLOCK_SIZE subsize; - int i; if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - c1 = BLOCK_4X4; if (bsize >= BLOCK_8X8) { - pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, + ctx = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, mi_row, mi_col, bsize); - c1 = *(get_sb_partitioning(x, bsize)); + subsize = *get_sb_partitioning(x, bsize); + } else { + ctx = 0; + subsize = BLOCK_4X4; } - partition = partition_lookup[bsl][c1]; + + partition = partition_lookup[bsl][subsize]; switch (partition) { case PARTITION_NONE: if (output_enabled && bsize >= BLOCK_8X8) - cpi->partition_count[pl][PARTITION_NONE]++; - encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, c1, -1); + cm->counts.partition[ctx][PARTITION_NONE]++; + encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); break; case PARTITION_VERT: if (output_enabled) - cpi->partition_count[pl][PARTITION_VERT]++; - encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, c1, 0); - encode_b(cpi, tile, tp, mi_row, mi_col + bs, output_enabled, c1, 1); + cm->counts.partition[ctx][PARTITION_VERT]++; + *get_sb_index(x, subsize) = 0; + encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); + if (mi_col + hbs < cm->mi_cols) { + *get_sb_index(x, subsize) = 1; + encode_b(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize); + } break; case PARTITION_HORZ: if (output_enabled) - cpi->partition_count[pl][PARTITION_HORZ]++; - encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, c1, 0); - encode_b(cpi, tile, tp, mi_row + bs, mi_col, output_enabled, c1, 1); + cm->counts.partition[ctx][PARTITION_HORZ]++; + *get_sb_index(x, subsize) = 0; + encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); + if (mi_row + hbs < cm->mi_rows) { + *get_sb_index(x, subsize) = 1; + encode_b(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize); + } break; case PARTITION_SPLIT: subsize = get_subsize(bsize, PARTITION_SPLIT); - if (output_enabled) - cpi->partition_count[pl][PARTITION_SPLIT]++; + cm->counts.partition[ctx][PARTITION_SPLIT]++; - for (i = 0; i < 4; i++) { - const int x_idx = i & 1, y_idx = i >> 1; - - *get_sb_index(x, subsize) = i; - encode_sb(cpi, tile, tp, mi_row + y_idx * bs, mi_col + x_idx * bs, - output_enabled, subsize); - } + *get_sb_index(x, subsize) = 0; + encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); + *get_sb_index(x, subsize) = 1; + encode_sb(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize); + *get_sb_index(x, subsize) = 2; + encode_sb(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize); + *get_sb_index(x, subsize) = 3; + encode_sb(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled, + subsize); break; default: - assert(0); - break; + assert(0 && "Invalid partition type."); } if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) update_partition_context(cpi->above_seg_context, cpi->left_seg_context, - mi_row, mi_col, c1, bsize); + mi_row, mi_col, subsize, bsize); } // Check to see if the given partition size is allowed for a specified number @@ -896,10 +985,10 @@ static void encode_sb(VP9_COMP *cpi, const TileInfo
*const tile, static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left, int cols_left, int *bh, int *bw) { - if ((rows_left <= 0) || (cols_left <= 0)) { + if (rows_left <= 0 || cols_left <= 0) { return MIN(bsize, BLOCK_8X8); } else { - for (; bsize > 0; --bsize) { + for (; bsize > 0; bsize -= 3) { *bh = num_8x8_blocks_high_lookup[bsize]; *bw = num_8x8_blocks_wide_lookup[bsize]; if ((*bh <= rows_left) && (*bw <= cols_left)) { @@ -916,14 +1005,14 @@ static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, // may not be allowed in which case this code attempts to choose the largest // allowable partition. static void set_partitioning(VP9_COMP *cpi, const TileInfo *const tile, - MODE_INFO **mi_8x8, int mi_row, int mi_col) { + MODE_INFO **mi_8x8, int mi_row, int mi_col, + BLOCK_SIZE bsize) { VP9_COMMON *const cm = &cpi->common; - BLOCK_SIZE bsize = cpi->sf.always_this_block_size; const int mis = cm->mode_info_stride; int row8x8_remaining = tile->mi_row_end - mi_row; int col8x8_remaining = tile->mi_col_end - mi_col; int block_row, block_col; - MODE_INFO * mi_upper_left = cm->mi + mi_row * mis + mi_col; + MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col; int bh = num_8x8_blocks_high_lookup[bsize]; int bw = num_8x8_blocks_wide_lookup[bsize]; @@ -945,7 +1034,7 @@ static void set_partitioning(VP9_COMP *cpi, const TileInfo *const tile, for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) { int index = block_row * mis + block_col; // Find a partition size that fits - bsize = find_partition_size(cpi->sf.always_this_block_size, + bsize = find_partition_size(bsize, (row8x8_remaining - block_row), (col8x8_remaining - block_col), &bh, &bw); mi_8x8[index] = mi_upper_left + index; @@ -955,20 +1044,17 @@ static void set_partitioning(VP9_COMP *cpi, const TileInfo *const tile, } } -static void copy_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, +static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8, MODE_INFO **prev_mi_8x8) { - VP9_COMMON *const cm = &cpi->common; const int mis = cm->mode_info_stride; int block_row, block_col; for (block_row = 0; block_row < 8; ++block_row) { for (block_col = 0; block_col < 8; ++block_col) { - MODE_INFO * prev_mi = prev_mi_8x8[block_row * mis + block_col]; - BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0; - ptrdiff_t offset; - + MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col]; + const BLOCK_SIZE sb_type = prev_mi ? 
prev_mi->mbmi.sb_type : 0; if (prev_mi) { - offset = prev_mi - cm->prev_mi; + const ptrdiff_t offset = prev_mi - cm->prev_mi; mi_8x8[block_row * mis + block_col] = cm->mi + offset; mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type; } @@ -976,15 +1062,14 @@ static void copy_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, } } -static int sb_has_motion(VP9_COMP *cpi, MODE_INFO **prev_mi_8x8) { - VP9_COMMON *const cm = &cpi->common; +static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO **prev_mi_8x8) { const int mis = cm->mode_info_stride; int block_row, block_col; if (cm->prev_mi) { for (block_row = 0; block_row < 8; ++block_row) { for (block_col = 0; block_col < 8; ++block_col) { - MODE_INFO * prev_mi = prev_mi_8x8[block_row * mis + block_col]; + const MODE_INFO *prev_mi = prev_mi_8x8[block_row * mis + block_col]; if (prev_mi) { if (abs(prev_mi->mbmi.mv[0].as_mv.row) >= 8 || abs(prev_mi->mbmi.mv[0].as_mv.col) >= 8) @@ -996,6 +1081,154 @@ static int sb_has_motion(VP9_COMP *cpi, MODE_INFO **prev_mi_8x8) { return 0; } +static void update_state_rt(VP9_COMP *cpi, const PICK_MODE_CONTEXT *ctx) { + int i; + VP9_COMMON *const cm = &cpi->common; + MACROBLOCK *const x = &cpi->mb; + MACROBLOCKD *const xd = &x->e_mbd; + MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; + + x->skip = ctx->skip; + +#if CONFIG_INTERNAL_STATS + if (frame_is_intra_only(cm)) { + static const int kf_mode_index[] = { + THR_DC /*DC_PRED*/, + THR_V_PRED /*V_PRED*/, + THR_H_PRED /*H_PRED*/, + THR_D45_PRED /*D45_PRED*/, + THR_D135_PRED /*D135_PRED*/, + THR_D117_PRED /*D117_PRED*/, + THR_D153_PRED /*D153_PRED*/, + THR_D207_PRED /*D207_PRED*/, + THR_D63_PRED /*D63_PRED*/, + THR_TM /*TM_PRED*/, + }; + ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]]; + } else { + // Note how often each mode chosen as best + ++cpi->mode_chosen_counts[ctx->best_mode_index]; + } +#endif + if (!frame_is_intra_only(cm)) { + if (is_inter_block(mbmi)) { + if (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV) { + MV best_mv[2]; + for (i = 0; i < 1 + has_second_ref(mbmi); ++i) + best_mv[i] = mbmi->ref_mvs[mbmi->ref_frame[i]][0].as_mv; + vp9_update_mv_count(cm, xd, best_mv); + } + + if (cm->interp_filter == SWITCHABLE) { + const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); + ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter]; + } + } + } +} + +static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile, + TOKENEXTRA **tp, int mi_row, int mi_col, + int output_enabled, BLOCK_SIZE bsize) { + MACROBLOCK *const x = &cpi->mb; + + if (bsize < BLOCK_8X8) { + // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 + // there is nothing to be done. 
+ if (x->ab_index > 0) + return; + } + set_offsets(cpi, tile, mi_row, mi_col, bsize); + update_state_rt(cpi, get_block_context(x, bsize)); + + encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize); + update_stats(cpi); + + (*tp)->token = EOSB_TOKEN; + (*tp)++; +} + +static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile, + TOKENEXTRA **tp, int mi_row, int mi_col, + int output_enabled, BLOCK_SIZE bsize) { + VP9_COMMON *const cm = &cpi->common; + MACROBLOCK *const x = &cpi->mb; + const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; + int ctx; + PARTITION_TYPE partition; + BLOCK_SIZE subsize; + + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) + return; + + if (bsize >= BLOCK_8X8) { + MACROBLOCKD *const xd = &cpi->mb.e_mbd; + const int idx_str = xd->mode_info_stride * mi_row + mi_col; + MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str; + ctx = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, + mi_row, mi_col, bsize); + subsize = mi_8x8[0]->mbmi.sb_type; + } else { + ctx = 0; + subsize = BLOCK_4X4; + } + + partition = partition_lookup[bsl][subsize]; + + switch (partition) { + case PARTITION_NONE: + if (output_enabled && bsize >= BLOCK_8X8) + cm->counts.partition[ctx][PARTITION_NONE]++; + encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); + break; + case PARTITION_VERT: + if (output_enabled) + cm->counts.partition[ctx][PARTITION_VERT]++; + *get_sb_index(x, subsize) = 0; + encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); + if (mi_col + hbs < cm->mi_cols) { + *get_sb_index(x, subsize) = 1; + encode_b_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, + subsize); + } + break; + case PARTITION_HORZ: + if (output_enabled) + cm->counts.partition[ctx][PARTITION_HORZ]++; + *get_sb_index(x, subsize) = 0; + encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); + if (mi_row + hbs < cm->mi_rows) { + *get_sb_index(x, subsize) = 1; + encode_b_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, + subsize); + } + break; + case PARTITION_SPLIT: + subsize = get_subsize(bsize, PARTITION_SPLIT); + if (output_enabled) + cm->counts.partition[ctx][PARTITION_SPLIT]++; + + *get_sb_index(x, subsize) = 0; + encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); + *get_sb_index(x, subsize) = 1; + encode_sb_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, + subsize); + *get_sb_index(x, subsize) = 2; + encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, + subsize); + *get_sb_index(x, subsize) = 3; + encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled, + subsize); + break; + default: + assert(0 && "Invalid partition type."); + } + + if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) + update_partition_context(cpi->above_seg_context, cpi->left_seg_context, + mi_row, mi_col, subsize, bsize); +} + static void rd_use_partition(VP9_COMP *cpi, const TileInfo *const tile, MODE_INFO **mi_8x8, @@ -1005,25 +1238,26 @@ static void rd_use_partition(VP9_COMP *cpi, VP9_COMMON *const cm = &cpi->common; MACROBLOCK *const x = &cpi->mb; const int mis = cm->mode_info_stride; - int bsl = b_width_log2(bsize); + const int bsl = b_width_log2(bsize); const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; - int ms = num_4x4_blocks_wide / 2; - int mh = num_4x4_blocks_high / 2; - int bss = (1 << bsl) / 4; + const int ms = num_4x4_blocks_wide / 2; + const int mh = num_4x4_blocks_high / 2; + const int
bss = (1 << bsl) / 4; int i, pl; PARTITION_TYPE partition = PARTITION_NONE; BLOCK_SIZE subsize; ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; PARTITION_CONTEXT sl[8], sa[8]; int last_part_rate = INT_MAX; - int64_t last_part_dist = INT_MAX; - int split_rate = INT_MAX; - int64_t split_dist = INT_MAX; + int64_t last_part_dist = INT64_MAX; + int64_t last_part_rd = INT64_MAX; int none_rate = INT_MAX; - int64_t none_dist = INT_MAX; + int64_t none_dist = INT64_MAX; + int64_t none_rd = INT64_MAX; int chosen_rate = INT_MAX; - int64_t chosen_dist = INT_MAX; + int64_t chosen_dist = INT64_MAX; + int64_t chosen_rd = INT64_MAX; BLOCK_SIZE sub_subsize = BLOCK_4X4; int splits_below = 0; BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type; @@ -1032,7 +1266,6 @@ static void rd_use_partition(VP9_COMP *cpi, return; partition = partition_lookup[bsl][bs_type]; - subsize = get_subsize(bsize, partition); if (bsize < BLOCK_8X8) { @@ -1053,10 +1286,8 @@ static void rd_use_partition(VP9_COMP *cpi, x->mb_energy = vp9_block_energy(cpi, x, bsize); } - x->fast_ms = 0; - x->subblock_ref = 0; - - if (cpi->sf.adjust_partitioning_from_last_frame) { + if (cpi->sf.partition_search_type == SEARCH_PARTITION && + cpi->sf.adjust_partitioning_from_last_frame) { // Check if any of the sub blocks are further split. if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) { sub_subsize = get_subsize(subsize, PARTITION_SPLIT); @@ -1076,13 +1307,17 @@ static void rd_use_partition(VP9_COMP *cpi, mi_row + (ms >> 1) < cm->mi_rows && mi_col + (ms >> 1) < cm->mi_cols) { *(get_sb_partitioning(x, bsize)) = bsize; - pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize, - get_block_context(x, bsize), INT64_MAX); + rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize, + get_block_context(x, bsize), INT64_MAX); pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, mi_row, mi_col, bsize); - none_rate += x->partition_cost[pl][PARTITION_NONE]; + + if (none_rate < INT_MAX) { + none_rate += x->partition_cost[pl][PARTITION_NONE]; + none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist); + } restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); mi_8x8[0]->mbmi.sb_type = bs_type; @@ -1092,13 +1327,15 @@ static void rd_use_partition(VP9_COMP *cpi, switch (partition) { case PARTITION_NONE: - pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, &last_part_dist, - bsize, get_block_context(x, bsize), INT64_MAX); + rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, + &last_part_dist, bsize, + get_block_context(x, bsize), INT64_MAX); break; case PARTITION_HORZ: *get_sb_index(x, subsize) = 0; - pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, &last_part_dist, - subsize, get_block_context(x, subsize), INT64_MAX); + rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, + &last_part_dist, subsize, + get_block_context(x, subsize), INT64_MAX); if (last_part_rate != INT_MAX && bsize >= BLOCK_8X8 && mi_row + (mh >> 1) < cm->mi_rows) { int rt = 0; @@ -1106,11 +1343,11 @@ static void rd_use_partition(VP9_COMP *cpi, update_state(cpi, get_block_context(x, subsize), subsize, 0); encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); *get_sb_index(x, subsize) = 1; - pick_sb_modes(cpi, tile, mi_row + (ms >> 1), mi_col, &rt, &dt, subsize, - get_block_context(x, subsize), INT64_MAX); - if (rt == INT_MAX || dt == INT_MAX) { + rd_pick_sb_modes(cpi, tile, mi_row + (ms >> 1), mi_col, &rt, &dt, + subsize, get_block_context(x, subsize), INT64_MAX); + if (rt == INT_MAX || dt 
== INT64_MAX) { last_part_rate = INT_MAX; - last_part_dist = INT_MAX; + last_part_dist = INT64_MAX; break; } @@ -1120,8 +1357,9 @@ static void rd_use_partition(VP9_COMP *cpi, break; case PARTITION_VERT: *get_sb_index(x, subsize) = 0; - pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, &last_part_dist, - subsize, get_block_context(x, subsize), INT64_MAX); + rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, + &last_part_dist, subsize, + get_block_context(x, subsize), INT64_MAX); if (last_part_rate != INT_MAX && bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) { int rt = 0; @@ -1129,11 +1367,11 @@ static void rd_use_partition(VP9_COMP *cpi, update_state(cpi, get_block_context(x, subsize), subsize, 0); encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); *get_sb_index(x, subsize) = 1; - pick_sb_modes(cpi, tile, mi_row, mi_col + (ms >> 1), &rt, &dt, subsize, - get_block_context(x, subsize), INT64_MAX); - if (rt == INT_MAX || dt == INT_MAX) { + rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (ms >> 1), &rt, &dt, + subsize, get_block_context(x, subsize), INT64_MAX); + if (rt == INT_MAX || dt == INT64_MAX) { last_part_rate = INT_MAX; - last_part_dist = INT_MAX; + last_part_dist = INT64_MAX; break; } last_part_rate += rt; @@ -1159,9 +1397,9 @@ static void rd_use_partition(VP9_COMP *cpi, rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp, mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt, i != 3); - if (rt == INT_MAX || dt == INT_MAX) { + if (rt == INT_MAX || dt == INT64_MAX) { last_part_rate = INT_MAX; - last_part_dist = INT_MAX; + last_part_dist = INT64_MAX; break; } last_part_rate += rt; @@ -1174,16 +1412,19 @@ static void rd_use_partition(VP9_COMP *cpi, pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, mi_row, mi_col, bsize); - if (last_part_rate < INT_MAX) + if (last_part_rate < INT_MAX) { last_part_rate += x->partition_cost[pl][partition]; + last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist); + } if (cpi->sf.adjust_partitioning_from_last_frame + && cpi->sf.partition_search_type == SEARCH_PARTITION && partition != PARTITION_SPLIT && bsize > BLOCK_8X8 && (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows) && (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) { BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT); - split_rate = 0; - split_dist = 0; + chosen_rate = 0; + chosen_dist = 0; restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); // Split partition. 
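The hunks above and below fold paired (rate, distortion) comparisons into precomputed RD costs and widen the distortion sentinels from INT_MAX to INT64_MAX. A minimal sketch of the cost combination, assuming RDCOST has its usual vp9_rdopt.h shape (the exact casts and rounding may differ):

#include <limits.h>
#include <stdint.h>

/* Assumed shape of the RDCOST macro: rate scaled by the rate multiplier,
 * distortion shifted by rddiv, combined into one 64-bit cost. */
#define RDCOST(RM, DM, R, D) \
  (((128 + (int64_t)(R) * (RM)) >> 8) + ((int64_t)(D) << (DM)))

/* Rates are 32-bit and distortions 64-bit, hence the distinct sentinels:
 * a partition that produced no valid coding is flagged with INT_MAX /
 * INT64_MAX and must not be fed to RDCOST. */
static int64_t partition_rd(int rate, int64_t dist, int rdmult, int rddiv) {
  if (rate == INT_MAX || dist == INT64_MAX)
    return INT64_MAX;
  return RDCOST(rdmult, rddiv, rate, dist);
}

This is why the patch pairs every rate guard against INT_MAX with a distortion check against INT64_MAX rather than INT_MAX.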
@@ -1204,51 +1445,50 @@ static void rd_use_partition(VP9_COMP *cpi, save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); - pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &rt, &dt, - split_subsize, get_block_context(x, split_subsize), - INT64_MAX); + rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &rt, &dt, + split_subsize, get_block_context(x, split_subsize), + INT64_MAX); restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); - if (rt == INT_MAX || dt == INT_MAX) { - split_rate = INT_MAX; - split_dist = INT_MAX; + if (rt == INT_MAX || dt == INT64_MAX) { + chosen_rate = INT_MAX; + chosen_dist = INT64_MAX; break; } + chosen_rate += rt; + chosen_dist += dt; + if (i != 3) encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0, split_subsize); - split_rate += rt; - split_dist += dt; pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, - mi_row + y_idx, mi_col + x_idx, bsize); - split_rate += x->partition_cost[pl][PARTITION_NONE]; + mi_row + y_idx, mi_col + x_idx, + split_subsize); + chosen_rate += x->partition_cost[pl][PARTITION_NONE]; } pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, mi_row, mi_col, bsize); - if (split_rate < INT_MAX) { - split_rate += x->partition_cost[pl][PARTITION_SPLIT]; - - chosen_rate = split_rate; - chosen_dist = split_dist; + if (chosen_rate < INT_MAX) { + chosen_rate += x->partition_cost[pl][PARTITION_SPLIT]; + chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist); } } // If last_part is better set the partitioning to that... - if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist) - < RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) { + if (last_part_rd < chosen_rd) { mi_8x8[0]->mbmi.sb_type = bsize; if (bsize >= BLOCK_8X8) *(get_sb_partitioning(x, bsize)) = subsize; chosen_rate = last_part_rate; chosen_dist = last_part_dist; + chosen_rd = last_part_rd; } // If none was better set the partitioning to that... - if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist) - > RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) { + if (none_rd < chosen_rd) { if (bsize >= BLOCK_8X8) *(get_sb_partitioning(x, bsize)) = bsize; chosen_rate = none_rate; @@ -1260,25 +1500,40 @@ static void rd_use_partition(VP9_COMP *cpi, // We must have chosen a partitioning and encoding or we'll fail later on. // No other opportunities for success. if ( bsize == BLOCK_64X64) - assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX); + assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX); - if (do_recon) - encode_sb(cpi, tile, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize); + if (do_recon) { + int output_enabled = (bsize == BLOCK_64X64); + + // Check the projected output rate for this SB against its target + // and if necessary apply a Q delta using segmentation to get + // closer to the target.
+ if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { + select_in_frame_q_segment(cpi, mi_row, mi_col, + output_enabled, chosen_rate); + } + + encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); + } *rate = chosen_rate; *dist = chosen_dist; } static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = { - BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, - BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8, - BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16 + BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, + BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, + BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, + BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, + BLOCK_16X16 }; static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = { - BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, - BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, BLOCK_64X64, - BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64 + BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, + BLOCK_16X16, BLOCK_32X32, BLOCK_32X32, + BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, + BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, + BLOCK_64X64 }; // Look at all the mode_info entries for blocks that are part of this @@ -1309,6 +1564,15 @@ static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8, } } +// Next square block size less than or equal to the current block size. +static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = { + BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, + BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, + BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, + BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, + BLOCK_64X64 +}; + // Look at neighboring blocks and set a min and max partition size based on // what they chose. static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, @@ -1364,104 +1628,24 @@ static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, } } - // Give a bit of leaway either side of the observed min and max - *min_block_size = min_partition_size[*min_block_size]; - *max_block_size = max_partition_size[*max_block_size]; + // adjust observed min and max + if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) { + *min_block_size = min_partition_size[*min_block_size]; + *max_block_size = max_partition_size[*max_block_size]; + } // Check border cases where max and min from neighbours may not be legal. *max_block_size = find_partition_size(*max_block_size, row8x8_remaining, col8x8_remaining, &bh, &bw); *min_block_size = MIN(*min_block_size, *max_block_size); -} - -static void compute_fast_motion_search_level(VP9_COMP *cpi, BLOCK_SIZE bsize) { - VP9_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &cpi->mb; - - // Only use 8x8 result for non HD videos. - // int use_8x8 = (MIN(cpi->common.width, cpi->common.height) < 720) ? 1 : 0; - int use_8x8 = 1; - - if (cm->frame_type && !cpi->is_src_frame_alt_ref && - ((use_8x8 && bsize == BLOCK_16X16) || - bsize == BLOCK_32X32 || bsize == BLOCK_64X64)) { - int ref0 = 0, ref1 = 0, ref2 = 0, ref3 = 0; - PICK_MODE_CONTEXT *block_context = NULL; - - if (bsize == BLOCK_16X16) { - block_context = x->sb8x8_context[x->sb_index][x->mb_index]; - } else if (bsize == BLOCK_32X32) { - block_context = x->mb_context[x->sb_index]; - } else if (bsize == BLOCK_64X64) { - block_context = x->sb32_context; - } - - if (block_context) { - ref0 = block_context[0].mic.mbmi.ref_frame[0]; - ref1 = block_context[1].mic.mbmi.ref_frame[0]; - ref2 = block_context[2].mic.mbmi.ref_frame[0]; - ref3 = block_context[3].mic.mbmi.ref_frame[0]; - } - - // Currently, only consider 4 inter reference frames.
- if (ref0 && ref1 && ref2 && ref3) { - int d01, d23, d02, d13; - - // Motion vectors for the four subblocks. - int16_t mvr0 = block_context[0].mic.mbmi.mv[0].as_mv.row; - int16_t mvc0 = block_context[0].mic.mbmi.mv[0].as_mv.col; - int16_t mvr1 = block_context[1].mic.mbmi.mv[0].as_mv.row; - int16_t mvc1 = block_context[1].mic.mbmi.mv[0].as_mv.col; - int16_t mvr2 = block_context[2].mic.mbmi.mv[0].as_mv.row; - int16_t mvc2 = block_context[2].mic.mbmi.mv[0].as_mv.col; - int16_t mvr3 = block_context[3].mic.mbmi.mv[0].as_mv.row; - int16_t mvc3 = block_context[3].mic.mbmi.mv[0].as_mv.col; - - // Adjust sign if ref is alt_ref. - if (cm->ref_frame_sign_bias[ref0]) { - mvr0 *= -1; - mvc0 *= -1; - } - - if (cm->ref_frame_sign_bias[ref1]) { - mvr1 *= -1; - mvc1 *= -1; - } - - if (cm->ref_frame_sign_bias[ref2]) { - mvr2 *= -1; - mvc2 *= -1; - } - if (cm->ref_frame_sign_bias[ref3]) { - mvr3 *= -1; - mvc3 *= -1; - } - - // Calculate mv distances. - d01 = MAX(abs(mvr0 - mvr1), abs(mvc0 - mvc1)); - d23 = MAX(abs(mvr2 - mvr3), abs(mvc2 - mvc3)); - d02 = MAX(abs(mvr0 - mvr2), abs(mvc0 - mvc2)); - d13 = MAX(abs(mvr1 - mvr3), abs(mvc1 - mvc3)); - - if (d01 < FAST_MOTION_MV_THRESH && d23 < FAST_MOTION_MV_THRESH && - d02 < FAST_MOTION_MV_THRESH && d13 < FAST_MOTION_MV_THRESH) { - // Set fast motion search level. - x->fast_ms = 1; - - if (ref0 == ref1 && ref1 == ref2 && ref2 == ref3 && - d01 < 2 && d23 < 2 && d02 < 2 && d13 < 2) { - // Set fast motion search level. - x->fast_ms = 2; - - if (!d01 && !d23 && !d02 && !d13) { - x->fast_ms = 3; - x->subblock_ref = ref0; - } - } - } - } + // When use_square_partition_only is true, make sure at least one square + // partition is allowed by selecting the next smaller square size as + // *min_block_size. + if (cpi->sf.use_square_partition_only && + (*max_block_size - *min_block_size) < 2) { + *min_block_size = next_square_size[*min_block_size]; } } @@ -1496,12 +1680,14 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, // Override skipping rectangular partition operations for edge blocks const int force_horz_split = (mi_row + ms >= cm->mi_rows); const int force_vert_split = (mi_col + ms >= cm->mi_cols); + const int xss = x->e_mbd.plane[1].subsampling_x; + const int yss = x->e_mbd.plane[1].subsampling_y; int partition_none_allowed = !force_horz_split && !force_vert_split; - int partition_horz_allowed = !force_vert_split && bsize >= BLOCK_8X8; - int partition_vert_allowed = !force_horz_split && bsize >= BLOCK_8X8; - - int partition_split_done = 0; + int partition_horz_allowed = !force_vert_split && yss <= xss && + bsize >= BLOCK_8X8; + int partition_vert_allowed = !force_horz_split && xss <= yss && + bsize >= BLOCK_8X8; (void) *tp_orig; if (bsize < BLOCK_8X8) { @@ -1554,8 +1740,8 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, // PARTITION_NONE if (partition_none_allowed) { - pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rate, &this_dist, bsize, - get_block_context(x, bsize), best_rd); + rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rate, &this_dist, bsize, + get_block_context(x, bsize), best_rd); if (this_rate != INT_MAX) { if (bsize >= BLOCK_8X8) { pl = partition_plane_context(cpi->above_seg_context, @@ -1565,7 +1751,8 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, } sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist); if (sum_rd < best_rd) { - int64_t stop_thresh = 2048; + int64_t stop_thresh = 4096; + int64_t stop_thresh_rd; best_rate = this_rate; best_dist = this_dist; @@ 
-1577,9 +1764,10 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, stop_thresh >>= 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]); + stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh); // If obtained distortion is very small, choose current partition // and stop splitting. - if (this_dist < stop_thresh) { + if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) { do_split = 0; do_rect = 0; } @@ -1608,6 +1796,10 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, *get_sb_index(x, subsize) = i; if (cpi->sf.adaptive_motion_search) load_pred_mv(x, get_block_context(x, bsize)); + if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && + partition_none_allowed) + get_block_context(x, subsize)->pred_interp_filter = + get_block_context(x, bsize)->mic.mbmi.interp_filter; rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, subsize, &this_rate, &this_dist, i != 3, best_rd - sum_rd); @@ -1637,26 +1829,21 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, if (cpi->sf.less_rectangular_check) do_rect &= !partition_none_allowed; } - partition_split_done = 1; restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); } - x->fast_ms = 0; - x->subblock_ref = 0; - - if (partition_split_done && - cpi->sf.using_small_partition_info) { - compute_fast_motion_search_level(cpi, bsize); - } - // PARTITION_HORZ if (partition_horz_allowed && do_rect) { subsize = get_subsize(bsize, PARTITION_HORZ); *get_sb_index(x, subsize) = 0; if (cpi->sf.adaptive_motion_search) load_pred_mv(x, get_block_context(x, bsize)); - pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, - get_block_context(x, subsize), best_rd); + if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && + partition_none_allowed) + get_block_context(x, subsize)->pred_interp_filter = + get_block_context(x, bsize)->mic.mbmi.interp_filter; + rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, + get_block_context(x, subsize), best_rd); sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) { @@ -1666,9 +1853,13 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, *get_sb_index(x, subsize) = 1; if (cpi->sf.adaptive_motion_search) load_pred_mv(x, get_block_context(x, bsize)); - pick_sb_modes(cpi, tile, mi_row + ms, mi_col, &this_rate, - &this_dist, subsize, get_block_context(x, subsize), - best_rd - sum_rd); + if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && + partition_none_allowed) + get_block_context(x, subsize)->pred_interp_filter = + get_block_context(x, bsize)->mic.mbmi.interp_filter; + rd_pick_sb_modes(cpi, tile, mi_row + ms, mi_col, &this_rate, + &this_dist, subsize, get_block_context(x, subsize), + best_rd - sum_rd); if (this_rate == INT_MAX) { sum_rd = INT64_MAX; } else { @@ -1700,8 +1891,12 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, *get_sb_index(x, subsize) = 0; if (cpi->sf.adaptive_motion_search) load_pred_mv(x, get_block_context(x, bsize)); - pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, - get_block_context(x, subsize), best_rd); + if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && + partition_none_allowed) + get_block_context(x, subsize)->pred_interp_filter = + get_block_context(x, bsize)->mic.mbmi.interp_filter; + rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, + get_block_context(x, subsize), 
best_rd); sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) { update_state(cpi, get_block_context(x, subsize), subsize, 0); @@ -1710,9 +1905,13 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, *get_sb_index(x, subsize) = 1; if (cpi->sf.adaptive_motion_search) load_pred_mv(x, get_block_context(x, bsize)); - pick_sb_modes(cpi, tile, mi_row, mi_col + ms, &this_rate, - &this_dist, subsize, get_block_context(x, subsize), - best_rd - sum_rd); + if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && + partition_none_allowed) + get_block_context(x, subsize)->pred_interp_filter = + get_block_context(x, bsize)->mic.mbmi.interp_filter; + rd_pick_sb_modes(cpi, tile, mi_row, mi_col + ms, &this_rate, + &this_dist, subsize, get_block_context(x, subsize), + best_rd - sum_rd); if (this_rate == INT_MAX) { sum_rd = INT64_MAX; } else { @@ -1737,59 +1936,37 @@ static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); } - + // TODO(jbb): This code was added so that we avoid a static analysis + // warning related to the fact that best_rd isn't used after this + // point. This code should be refactored so that the duplicate + // checks occur in some sub function and thus are used... + (void) best_rd; *rate = best_rate; *dist = best_dist; - if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) - encode_sb(cpi, tile, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize); + if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) { + int output_enabled = (bsize == BLOCK_64X64); + + // Check the projected output rate for this SB against its target + // and if necessary apply a Q delta using segmentation to get + // closer to the target. + if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { + select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, best_rate); + } + encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); + } if (bsize == BLOCK_64X64) { assert(tp_orig < *tp); assert(best_rate < INT_MAX); - assert(best_dist < INT_MAX); + assert(best_dist < INT64_MAX); } else { assert(tp_orig == *tp); } } -// Examines 64x64 block and chooses a best reference frame -static void rd_pick_reference_frame(VP9_COMP *cpi, const TileInfo *const tile, - int mi_row, int mi_col) { - VP9_COMMON * const cm = &cpi->common; - MACROBLOCK * const x = &cpi->mb; - int bsl = b_width_log2(BLOCK_64X64), bs = 1 << bsl; - int ms = bs / 2; - ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; - PARTITION_CONTEXT sl[8], sa[8]; - int pl; - int r; - int64_t d; - - save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64); - - // Default is non mask (all reference frames allowed. - cpi->ref_frame_mask = 0; - - // Do RD search for 64x64.
- if ((mi_row + (ms >> 1) < cm->mi_rows) && - (mi_col + (ms >> 1) < cm->mi_cols)) { - cpi->set_ref_frame_mask = 1; - pick_sb_modes(cpi, tile, mi_row, mi_col, &r, &d, BLOCK_64X64, - get_block_context(x, BLOCK_64X64), INT64_MAX); - pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, - mi_row, mi_col, BLOCK_64X64); - r += x->partition_cost[pl][PARTITION_NONE]; - - *(get_sb_partitioning(x, BLOCK_64X64)) = BLOCK_64X64; - cpi->set_ref_frame_mask = 0; - } - - restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64); -} - -static void encode_sb_row(VP9_COMP *cpi, const TileInfo *const tile, - int mi_row, TOKENEXTRA **tp) { - VP9_COMMON * const cm = &cpi->common; +static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile, + int mi_row, TOKENEXTRA **tp) { + VP9_COMMON *const cm = &cpi->common; int mi_col; // Initialize the left context for the new SB row @@ -1802,33 +1979,58 @@ static void encode_sb_row(VP9_COMP *cpi, const TileInfo *const tile, int dummy_rate; int64_t dummy_dist; - vp9_zero(cpi->mb.pred_mv); + BLOCK_SIZE i; + MACROBLOCK *x = &cpi->mb; + + if (cpi->sf.adaptive_pred_interp_filter) { + for (i = BLOCK_4X4; i < BLOCK_8X8; ++i) { + const int num_4x4_w = num_4x4_blocks_wide_lookup[i]; + const int num_4x4_h = num_4x4_blocks_high_lookup[i]; + const int num_4x4_blk = MAX(4, num_4x4_w * num_4x4_h); + for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) + for (x->mb_index = 0; x->mb_index < 4; ++x->mb_index) + for (x->b_index = 0; x->b_index < 16 / num_4x4_blk; ++x->b_index) + get_block_context(x, i)->pred_interp_filter = SWITCHABLE; + } + } - if (cpi->sf.reference_masking) - rd_pick_reference_frame(cpi, tile, mi_row, mi_col); + vp9_zero(cpi->mb.pred_mv); - if (cpi->sf.use_lastframe_partitioning || - cpi->sf.use_one_partition_size_always ) { + if ((cpi->sf.partition_search_type == SEARCH_PARTITION && + cpi->sf.use_lastframe_partitioning) || + cpi->sf.partition_search_type == FIXED_PARTITION || + cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION) { const int idx_str = cm->mode_info_stride * mi_row + mi_col; MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str; MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str; - cpi->mb.source_variance = UINT_MAX; - if (cpi->sf.use_one_partition_size_always) { + if (cpi->sf.partition_search_type == FIXED_PARTITION) { + set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); + set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, + cpi->sf.always_this_block_size); + rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, + &dummy_rate, &dummy_dist, 1); + } else if (cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION || + cpi->sf.partition_search_type == VAR_BASED_PARTITION) { + // TODO(debargha): Implement VAR_BASED_PARTITION as a separate case. + // Currently both VAR_BASED_FIXED_PARTITION/VAR_BASED_PARTITION + // map to the same thing. 
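// (A sketch of the assumed behavior, not the actual helper:
// get_rd_var_based_fixed_partition() is taken here to map the measured
// source variance of the 64x64 SB to a single square partition size,
// along the lines of
//   if (var64 < low_thresh) bsize = BLOCK_64X64;      /* flat area */
//   else if (var64 < mid_thresh) bsize = BLOCK_32X32;
//   else bsize = BLOCK_16X16;                         /* detailed area */
// where var64, low_thresh and mid_thresh are hypothetical names.)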
+ BLOCK_SIZE bsize; set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); - set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col); + bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col); + set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize); rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rate, &dummy_dist, 1); } else { - if ((cpi->common.current_video_frame + if ((cm->current_video_frame % cpi->sf.last_partitioning_redo_frequency) == 0 || cm->prev_mi == 0 - || cpi->common.show_frame == 0 - || cpi->common.frame_type == KEY_FRAME - || cpi->is_src_frame_alt_ref + || cm->show_frame == 0 + || cm->frame_type == KEY_FRAME + || cpi->rc.is_src_frame_alt_ref || ((cpi->sf.use_lastframe_partitioning == LAST_FRAME_PARTITION_LOW_MOTION) && - sb_has_motion(cpi, prev_mi_8x8))) { + sb_has_motion(cm, prev_mi_8x8))) { // If required set upper and lower partition size limits if (cpi->sf.auto_min_max_partition_size) { set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); @@ -1839,7 +2041,7 @@ static void encode_sb_row(VP9_COMP *cpi, const TileInfo *const tile, rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rate, &dummy_dist, 1, INT64_MAX); } else { - copy_partitioning(cpi, mi_8x8, prev_mi_8x8); + copy_partitioning(cm, mi_8x8, prev_mi_8x8); rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rate, &dummy_dist, 1); } @@ -1869,33 +2071,29 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) { xd->mode_info_stride = cm->mode_info_stride; - // reset intra mode contexts - if (frame_is_intra_only(cm)) - vp9_init_mbmode_probs(cm); - // Copy data over into macro block data structures. vp9_setup_src_planes(x, cpi->Source, 0, 0); // TODO(jkoleszar): are these initializations required? - setup_pre_planes(xd, 0, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], - 0, 0, NULL); - setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0); + vp9_setup_pre_planes(xd, 0, get_ref_frame_buffer(cpi, LAST_FRAME), 0, 0, + NULL); + vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0); - setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); + vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); xd->mi_8x8[0]->mbmi.mode = DC_PRED; xd->mi_8x8[0]->mbmi.uv_mode = DC_PRED; - vp9_zero(cpi->y_mode_count); - vp9_zero(cpi->y_uv_mode_count); + vp9_zero(cm->counts.y_mode); + vp9_zero(cm->counts.uv_mode); vp9_zero(cm->counts.inter_mode); - vp9_zero(cpi->partition_count); - vp9_zero(cpi->intra_inter_count); - vp9_zero(cpi->comp_inter_count); - vp9_zero(cpi->single_ref_count); - vp9_zero(cpi->comp_ref_count); + vp9_zero(cm->counts.partition); + vp9_zero(cm->counts.intra_inter); + vp9_zero(cm->counts.comp_inter); + vp9_zero(cm->counts.single_ref); + vp9_zero(cm->counts.comp_ref); vp9_zero(cm->counts.tx); - vp9_zero(cm->counts.mbskip); + vp9_zero(cm->counts.skip); // Note: this memset assumes above_context[0], [1] and [2] // are allocated as part of the same buffer. 
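The memset note closing the hunk above relies on a layout guarantee rather than on the type system. A minimal sketch of that pattern, with illustrative names in place of the encoder's actual fields:

#include <stdlib.h>
#include <string.h>

typedef char ENTROPY_CONTEXT;

typedef struct {
  ENTROPY_CONTEXT *plane[3];  /* one row of above contexts per plane */
} AboveCtx;

static void above_alloc(AboveCtx *ac, size_t row_bytes) {
  /* carve all three rows out of one contiguous allocation */
  ENTROPY_CONTEXT *base = calloc(3, row_bytes);
  ac->plane[0] = base;
  ac->plane[1] = base + row_bytes;
  ac->plane[2] = base + 2 * row_bytes;
}

static void above_clear(const AboveCtx *ac, size_t row_bytes) {
  /* valid only because plane[0..2] share one buffer, as the comment warns */
  memset(ac->plane[0], 0, 3 * row_bytes);
}

If the three rows were ever allocated separately, the single memset over plane[0] would write past the end of the first row's buffer.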
@@ -1922,125 +2120,6 @@ static void switch_lossless_mode(VP9_COMP *cpi, int lossless) { } } -static void switch_tx_mode(VP9_COMP *cpi) { - if (cpi->sf.tx_size_search_method == USE_LARGESTALL && - cpi->common.tx_mode >= ALLOW_32X32) - cpi->common.tx_mode = ALLOW_32X32; -} - -static void encode_frame_internal(VP9_COMP *cpi) { - int mi_row; - MACROBLOCK * const x = &cpi->mb; - VP9_COMMON * const cm = &cpi->common; - MACROBLOCKD * const xd = &x->e_mbd; - -// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n", -// cpi->common.current_video_frame, cpi->common.show_frame, -// cm->frame_type); - -// debug output -#if DBG_PRNT_SEGMAP - { - FILE *statsfile; - statsfile = fopen("segmap2.stt", "a"); - fprintf(statsfile, "\n"); - fclose(statsfile); - } -#endif - - vp9_zero(cm->counts.switchable_interp); - vp9_zero(cpi->tx_stepdown_count); - - xd->mi_8x8 = cm->mi_grid_visible; - // required for vp9_frame_init_quantizer - xd->mi_8x8[0] = cm->mi; - - xd->last_mi = cm->prev_mi; - - vp9_zero(cpi->NMVcount); - vp9_zero(cpi->coef_counts); - vp9_zero(cm->counts.eob_branch); - - cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 - && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0; - switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless); - - vp9_frame_init_quantizer(cpi); - - vp9_initialize_rd_consts(cpi); - vp9_initialize_me_consts(cpi, cm->base_qindex); - switch_tx_mode(cpi); - - if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { - // Initialize encode frame context. - init_encode_frame_mb_context(cpi); - - // Build a frame level activity map - build_activity_map(cpi); - } - - // Re-initialize encode frame context. - init_encode_frame_mb_context(cpi); - - vp9_zero(cpi->rd_comp_pred_diff); - vp9_zero(cpi->rd_filter_diff); - vp9_zero(cpi->rd_tx_select_diff); - vp9_zero(cpi->rd_tx_select_threshes); - - set_prev_mi(cm); - - { - struct vpx_usec_timer emr_timer; - vpx_usec_timer_start(&emr_timer); - - { - // Take tiles into account and give start/end MB - int tile_col, tile_row; - TOKENEXTRA *tp = cpi->tok; - const int tile_cols = 1 << cm->log2_tile_cols; - const int tile_rows = 1 << cm->log2_tile_rows; - - for (tile_row = 0; tile_row < tile_rows; tile_row++) { - for (tile_col = 0; tile_col < tile_cols; tile_col++) { - TileInfo tile; - TOKENEXTRA *tp_old = tp; - - // For each row of SBs in the frame - vp9_tile_init(&tile, cm, tile_row, tile_col); - for (mi_row = tile.mi_row_start; - mi_row < tile.mi_row_end; mi_row += 8) - encode_sb_row(cpi, &tile, mi_row, &tp); - - cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old); - assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols)); - } - } - } - - vpx_usec_timer_mark(&emr_timer); - cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer); - } - - if (cpi->sf.skip_encode_sb) { - int j; - unsigned int intra_count = 0, inter_count = 0; - for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) { - intra_count += cpi->intra_inter_count[j][0]; - inter_count += cpi->intra_inter_count[j][1]; - } - cpi->sf.skip_encode_frame = ((intra_count << 2) < inter_count); - cpi->sf.skip_encode_frame &= (cm->frame_type != KEY_FRAME); - cpi->sf.skip_encode_frame &= cm->show_frame; - } else { - cpi->sf.skip_encode_frame = 0; - } - -#if 0 - // Keep record of the total distortion this time around for future use - cpi->last_frame_distortion = cpi->frame_distortion; -#endif -} - static int check_dual_ref_flags(VP9_COMP *cpi) { const int ref_flags = cpi->ref_frame_flags; @@ -2057,7 +2136,7 @@ static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int 
xmbs) { for (y = 0; y < ymbs; y++) { for (x = 0; x < xmbs; x++) { - if (!mi_8x8[y * mis + x]->mbmi.skip_coeff) + if (!mi_8x8[y * mis + x]->mbmi.skip) return 0; } } @@ -2075,15 +2154,14 @@ static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs, } } -static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, - int mis, TX_SIZE max_tx_size, int bw, int bh, - int mi_row, int mi_col, BLOCK_SIZE bsize) { - VP9_COMMON * const cm = &cpi->common; - +static void reset_skip_txfm_size_b(const VP9_COMMON *cm, int mis, + TX_SIZE max_tx_size, int bw, int bh, + int mi_row, int mi_col, + MODE_INFO **mi_8x8) { if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) { return; } else { - MB_MODE_INFO * const mbmi = &mi_8x8[0]->mbmi; + const MB_MODE_INFO *const mbmi = &mi_8x8[0]->mbmi; if (mbmi->tx_size > max_tx_size) { const int ymbs = MIN(bh, cm->mi_rows - mi_row); const int xmbs = MIN(bw, cm->mi_cols - mi_col); @@ -2095,10 +2173,9 @@ static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, } } -static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, +static void reset_skip_txfm_size_sb(VP9_COMMON *cm, MODE_INFO **mi_8x8, TX_SIZE max_tx_size, int mi_row, int mi_col, BLOCK_SIZE bsize) { - VP9_COMMON * const cm = &cpi->common; const int mis = cm->mode_info_stride; int bw, bh; const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2; @@ -2110,19 +2187,18 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type]; if (bw == bs && bh == bs) { - reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, bs, mi_row, - mi_col, bsize); + reset_skip_txfm_size_b(cm, mis, max_tx_size, bs, bs, mi_row, mi_col, + mi_8x8); } else if (bw == bs && bh < bs) { - reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, hbs, mi_row, - mi_col, bsize); - reset_skip_txfm_size_b(cpi, mi_8x8 + hbs * mis, mis, max_tx_size, bs, hbs, - mi_row + hbs, mi_col, bsize); + reset_skip_txfm_size_b(cm, mis, max_tx_size, bs, hbs, mi_row, mi_col, + mi_8x8); + reset_skip_txfm_size_b(cm, mis, max_tx_size, bs, hbs, mi_row + hbs, + mi_col, mi_8x8 + hbs * mis); } else if (bw < bs && bh == bs) { - reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, hbs, bs, mi_row, - mi_col, bsize); - reset_skip_txfm_size_b(cpi, mi_8x8 + hbs, mis, max_tx_size, hbs, bs, mi_row, - mi_col + hbs, bsize); - + reset_skip_txfm_size_b(cm, mis, max_tx_size, hbs, bs, mi_row, mi_col, + mi_8x8); + reset_skip_txfm_size_b(cm, mis, max_tx_size, hbs, bs, mi_row, + mi_col + hbs, mi_8x8 + hbs); } else { const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize]; int n; @@ -2133,71 +2209,302 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, const int mi_dc = hbs * (n & 1); const int mi_dr = hbs * (n >> 1); - reset_skip_txfm_size_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size, + reset_skip_txfm_size_sb(cm, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size, mi_row + mi_dr, mi_col + mi_dc, subsize); } } } -static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) { - VP9_COMMON * const cm = &cpi->common; +static void reset_skip_txfm_size(VP9_COMMON *cm, TX_SIZE txfm_max) { int mi_row, mi_col; const int mis = cm->mode_info_stride; -// MODE_INFO *mi, *mi_ptr = cm->mi; MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible; for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) { mi_8x8 = mi_ptr; for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) { - reset_skip_txfm_size_sb(cpi, mi_8x8, txfm_max, mi_row, 
mi_col, + reset_skip_txfm_size_sb(cm, mi_8x8, txfm_max, mi_row, mi_col, BLOCK_64X64); } } } -static int get_frame_type(VP9_COMP *cpi) { - int frame_type; +static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) { if (frame_is_intra_only(&cpi->common)) - frame_type = 0; - else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame) - frame_type = 3; + return INTRA_FRAME; + else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame) + return ALTREF_FRAME; else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) - frame_type = 1; + return LAST_FRAME; else - frame_type = 2; - return frame_type; + return GOLDEN_FRAME; } -static void select_tx_mode(VP9_COMP *cpi) { +static TX_MODE select_tx_mode(const VP9_COMP *cpi) { if (cpi->oxcf.lossless) { - cpi->common.tx_mode = ONLY_4X4; + return ONLY_4X4; } else if (cpi->common.current_video_frame == 0) { - cpi->common.tx_mode = TX_MODE_SELECT; + return TX_MODE_SELECT; } else { if (cpi->sf.tx_size_search_method == USE_LARGESTALL) { - cpi->common.tx_mode = ALLOW_32X32; + return ALLOW_32X32; } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) { - int frame_type = get_frame_type(cpi); - cpi->common.tx_mode = - cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] - > cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ? - ALLOW_32X32 : TX_MODE_SELECT; + const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi); + return cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] > + cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ? + ALLOW_32X32 : TX_MODE_SELECT; } else { unsigned int total = 0; int i; for (i = 0; i < TX_SIZES; ++i) total += cpi->tx_stepdown_count[i]; + if (total) { - double fraction = (double)cpi->tx_stepdown_count[0] / total; - cpi->common.tx_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT; - // printf("fraction = %f\n", fraction); - } // else keep unchanged + const double fraction = (double)cpi->tx_stepdown_count[0] / total; + return fraction > 0.90 ? 
ALLOW_32X32 : TX_MODE_SELECT; + } else { + return cpi->common.tx_mode; + } + } + } +} + +// Start RTC Exploration +typedef enum { + BOTH_ZERO = 0, + ZERO_PLUS_PREDICTED = 1, + BOTH_PREDICTED = 2, + NEW_PLUS_NON_INTRA = 3, + BOTH_NEW = 4, + INTRA_PLUS_NON_INTRA = 5, + BOTH_INTRA = 6, + INVALID_CASE = 9 +} motion_vector_context; + +static void set_mode_info(MB_MODE_INFO *mbmi, BLOCK_SIZE bsize, + MB_PREDICTION_MODE mode) { + mbmi->mode = mode; + mbmi->uv_mode = mode; + mbmi->mv[0].as_int = 0; + mbmi->mv[1].as_int = 0; + mbmi->ref_frame[0] = INTRA_FRAME; + mbmi->ref_frame[1] = NONE; + mbmi->tx_size = max_txsize_lookup[bsize]; + mbmi->skip = 0; + mbmi->sb_type = bsize; + mbmi->segment_id = 0; +} + +static void nonrd_use_partition(VP9_COMP *cpi, const TileInfo *const tile, + TOKENEXTRA **tp, int mi_row, int mi_col, + BLOCK_SIZE bsize, int *rate, int64_t *dist) { + VP9_COMMON *const cm = &cpi->common; + MACROBLOCK *const x = &cpi->mb; + MACROBLOCKD *const xd = &cpi->mb.e_mbd; + int br, bc; + MB_PREDICTION_MODE mode = DC_PRED; + int rows = MIN(MI_BLOCK_SIZE, tile->mi_row_end - mi_row); + int cols = MIN(MI_BLOCK_SIZE, tile->mi_col_end - mi_col); + + int bw = num_8x8_blocks_wide_lookup[bsize]; + int bh = num_8x8_blocks_high_lookup[bsize]; + + int brate = 0; + int64_t bdist = 0; + *rate = 0; + *dist = 0; + + // find prediction mode for each 8x8 block + for (br = 0; br < rows; br += bh) { + for (bc = 0; bc < cols; bc += bw) { + const int row = mi_row + br; + const int col = mi_col + bc; + const BLOCK_SIZE bs = find_partition_size(bsize, rows - br, cols - bc, + &bh, &bw); + int i, j; + + set_offsets(cpi, tile, row, col, bs); + + if (cm->frame_type != KEY_FRAME) + vp9_pick_inter_mode(cpi, x, tile, row, col, &brate, &bdist, bs); + else + set_mode_info(&xd->mi_8x8[0]->mbmi, bs, mode); + + *rate += brate; + *dist += bdist; + + for (j = 0; j < bh; ++j) + for (i = 0; i < bw; ++i) + xd->mi_8x8[j * cm->mode_info_stride + i] = xd->mi_8x8[0]; + } + } +} + +static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile, + int mi_row, TOKENEXTRA **tp) { + int mi_col; + + // Initialize the left context for the new SB row + vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context)); + vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context)); + + // Code each SB in the row + for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; + mi_col += MI_BLOCK_SIZE) { + int dummy_rate; + int64_t dummy_dist; + + cpi->mb.source_variance = UINT_MAX; + + if (cpi->sf.partition_search_type == FIXED_PARTITION) { + nonrd_use_partition(cpi, tile, tp, mi_row, mi_col, + cpi->sf.always_this_block_size, + &dummy_rate, &dummy_dist); + encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, BLOCK_64X64); + } else if (cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION || + cpi->sf.partition_search_type == VAR_BASED_PARTITION) { + // TODO(debargha): Implement VAR_BASED_PARTITION as a separate case. + // Currently both VAR_BASED_FIXED_PARTITION/VAR_BASED_PARTITION + // map to the same thing. 
+ BLOCK_SIZE bsize = get_nonrd_var_based_fixed_partition(cpi, + mi_row, + mi_col); + nonrd_use_partition(cpi, tile, tp, mi_row, mi_col, + bsize, &dummy_rate, &dummy_dist); + encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, BLOCK_64X64); + } else { + assert(0); } } } +// end RTC play code + +static void encode_frame_internal(VP9_COMP *cpi) { + int mi_row; + MACROBLOCK *const x = &cpi->mb; + VP9_COMMON *const cm = &cpi->common; + MACROBLOCKD *const xd = &x->e_mbd; + +// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n", +// cpi->common.current_video_frame, cpi->common.show_frame, +// cm->frame_type); + + vp9_zero(cm->counts.switchable_interp); + vp9_zero(cpi->tx_stepdown_count); + + xd->mi_8x8 = cm->mi_grid_visible; + // required for vp9_frame_init_quantizer + xd->mi_8x8[0] = cm->mi; + + xd->last_mi = cm->prev_mi; + + vp9_zero(cm->counts.mv); + vp9_zero(cpi->coef_counts); + vp9_zero(cm->counts.eob_branch); + + // Set frame level transform size use case + cm->tx_mode = select_tx_mode(cpi); + + cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 + && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0; + switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless); + + vp9_frame_init_quantizer(cpi); + + vp9_initialize_rd_consts(cpi); + vp9_initialize_me_consts(cpi, cm->base_qindex); + + if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { + // Initialize encode frame context. + init_encode_frame_mb_context(cpi); + + // Build a frame level activity map + build_activity_map(cpi); + } + + // Re-initialize encode frame context. + init_encode_frame_mb_context(cpi); + + vp9_zero(cpi->rd_comp_pred_diff); + vp9_zero(cpi->rd_filter_diff); + vp9_zero(cpi->rd_tx_select_diff); + vp9_zero(cpi->rd_tx_select_threshes); + + set_prev_mi(cm); + + if (cpi->sf.use_nonrd_pick_mode) { + // Initialize internal buffer pointers for rtc coding, where non-RD + // mode decision is used and hence no buffer pointer swap needed. 
+ int i; + struct macroblock_plane *const p = x->plane; + struct macroblockd_plane *const pd = xd->plane; + PICK_MODE_CONTEXT *ctx = &cpi->mb.sb64_context; + + for (i = 0; i < MAX_MB_PLANE; ++i) { + p[i].coeff = ctx->coeff_pbuf[i][0]; + p[i].qcoeff = ctx->qcoeff_pbuf[i][0]; + pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0]; + p[i].eobs = ctx->eobs_pbuf[i][0]; + } + } + + { + struct vpx_usec_timer emr_timer; + vpx_usec_timer_start(&emr_timer); + + { + // Take tiles into account and give start/end MB + int tile_col, tile_row; + TOKENEXTRA *tp = cpi->tok; + const int tile_cols = 1 << cm->log2_tile_cols; + const int tile_rows = 1 << cm->log2_tile_rows; + + for (tile_row = 0; tile_row < tile_rows; tile_row++) { + for (tile_col = 0; tile_col < tile_cols; tile_col++) { + TileInfo tile; + TOKENEXTRA *tp_old = tp; + + // For each row of SBs in the frame + vp9_tile_init(&tile, cm, tile_row, tile_col); + for (mi_row = tile.mi_row_start; + mi_row < tile.mi_row_end; mi_row += MI_BLOCK_SIZE) { + if (cpi->sf.use_nonrd_pick_mode && cm->frame_type != KEY_FRAME) + encode_nonrd_sb_row(cpi, &tile, mi_row, &tp); + else + encode_rd_sb_row(cpi, &tile, mi_row, &tp); + } + cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old); + assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols)); + } + } + } + + vpx_usec_timer_mark(&emr_timer); + cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer); + } + + if (cpi->sf.skip_encode_sb) { + int j; + unsigned int intra_count = 0, inter_count = 0; + for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) { + intra_count += cm->counts.intra_inter[j][0]; + inter_count += cm->counts.intra_inter[j][1]; + } + cpi->sf.skip_encode_frame = (intra_count << 2) < inter_count && + cm->frame_type != KEY_FRAME && + cm->show_frame; + } else { + cpi->sf.skip_encode_frame = 0; + } + +#if 0 + // Keep record of the total distortion this time around for future use + cpi->last_frame_distortion = cpi->frame_distortion; +#endif +} void vp9_encode_frame(VP9_COMP *cpi) { - VP9_COMMON * const cm = &cpi->common; + VP9_COMMON *const cm = &cpi->common; // In the longer term the encoder should be generalized to match the // decoder such that we allow compound where one of the 3 buffers has a @@ -2206,10 +2513,10 @@ void vp9_encode_frame(VP9_COMP *cpi) { // side behavior is where the ALT ref buffer has opposite sign bias to // the other two. if (!frame_is_intra_only(cm)) { - if ((cm->ref_frame_sign_bias[ALTREF_FRAME] - == cm->ref_frame_sign_bias[GOLDEN_FRAME]) - || (cm->ref_frame_sign_bias[ALTREF_FRAME] - == cm->ref_frame_sign_bias[LAST_FRAME])) { + if ((cm->ref_frame_sign_bias[ALTREF_FRAME] == + cm->ref_frame_sign_bias[GOLDEN_FRAME]) || + (cm->ref_frame_sign_bias[ALTREF_FRAME] == + cm->ref_frame_sign_bias[LAST_FRAME])) { cm->allow_comp_inter_inter = 0; } else { cm->allow_comp_inter_inter = 1; @@ -2219,9 +2526,9 @@ void vp9_encode_frame(VP9_COMP *cpi) { } } - if (cpi->sf.RD) { - int i, pred_type; - INTERPOLATION_TYPE filter_type; + if (cpi->sf.frame_parameter_update) { + int i; + REFERENCE_MODE reference_mode; /* * This code does a single RD pass over the whole frame assuming * either compound, single or hybrid prediction as per whatever has @@ -2231,66 +2538,51 @@ void vp9_encode_frame(VP9_COMP *cpi) { * that for subsequent frames. * It does the same analysis for transform size selection also. 
*/ - int frame_type = get_frame_type(cpi); + const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi); + const int64_t *mode_thresh = cpi->rd_prediction_type_threshes[frame_type]; + const int64_t *filter_thresh = cpi->rd_filter_threshes[frame_type]; /* prediction (compound, single or hybrid) mode selection */ if (frame_type == 3 || !cm->allow_comp_inter_inter) - pred_type = SINGLE_PREDICTION_ONLY; - else if (cpi->rd_prediction_type_threshes[frame_type][1] - > cpi->rd_prediction_type_threshes[frame_type][0] - && cpi->rd_prediction_type_threshes[frame_type][1] - > cpi->rd_prediction_type_threshes[frame_type][2] - && check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100) - pred_type = COMP_PREDICTION_ONLY; - else if (cpi->rd_prediction_type_threshes[frame_type][0] - > cpi->rd_prediction_type_threshes[frame_type][2]) - pred_type = SINGLE_PREDICTION_ONLY; + reference_mode = SINGLE_REFERENCE; + else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] && + mode_thresh[COMPOUND_REFERENCE] > + mode_thresh[REFERENCE_MODE_SELECT] && + check_dual_ref_flags(cpi) && + cpi->static_mb_pct == 100) + reference_mode = COMPOUND_REFERENCE; + else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT]) + reference_mode = SINGLE_REFERENCE; else - pred_type = HYBRID_PREDICTION; - - /* filter type selection */ - // FIXME(rbultje) for some odd reason, we often select smooth_filter - // as default filter for ARF overlay frames. This is a REALLY BAD - // IDEA so we explicitly disable it here. - if (frame_type != 3 && - cpi->rd_filter_threshes[frame_type][1] > - cpi->rd_filter_threshes[frame_type][0] && - cpi->rd_filter_threshes[frame_type][1] > - cpi->rd_filter_threshes[frame_type][2] && - cpi->rd_filter_threshes[frame_type][1] > - cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) { - filter_type = EIGHTTAP_SMOOTH; - } else if (cpi->rd_filter_threshes[frame_type][2] > - cpi->rd_filter_threshes[frame_type][0] && - cpi->rd_filter_threshes[frame_type][2] > - cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) { - filter_type = EIGHTTAP_SHARP; - } else if (cpi->rd_filter_threshes[frame_type][0] > - cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) { - filter_type = EIGHTTAP; - } else { - filter_type = SWITCHABLE; + reference_mode = REFERENCE_MODE_SELECT; + + if (cm->interp_filter == SWITCHABLE) { + if (frame_type != ALTREF_FRAME && + filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP] && + filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP_SHARP] && + filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[SWITCHABLE - 1]) { + cm->interp_filter = EIGHTTAP_SMOOTH; + } else if (filter_thresh[EIGHTTAP_SHARP] > filter_thresh[EIGHTTAP] && + filter_thresh[EIGHTTAP_SHARP] > filter_thresh[SWITCHABLE - 1]) { + cm->interp_filter = EIGHTTAP_SHARP; + } else if (filter_thresh[EIGHTTAP] > filter_thresh[SWITCHABLE - 1]) { + cm->interp_filter = EIGHTTAP; + } } - cpi->mb.e_mbd.lossless = 0; - if (cpi->oxcf.lossless) { - cpi->mb.e_mbd.lossless = 1; - } + cpi->mb.e_mbd.lossless = cpi->oxcf.lossless; + cm->reference_mode = reference_mode; - /* transform size selection (4x4, 8x8, 16x16 or select-per-mb) */ - select_tx_mode(cpi); - cpi->common.comp_pred_mode = pred_type; - cpi->common.mcomp_filter_type = filter_type; encode_frame_internal(cpi); - for (i = 0; i < NB_PREDICTION_TYPES; ++i) { - const int diff = (int) (cpi->rd_comp_pred_diff[i] / cpi->common.MBs); + for (i = 0; i < REFERENCE_MODES; ++i) { + const int diff = (int) (cpi->rd_comp_pred_diff[i] / cm->MBs); 
cpi->rd_prediction_type_threshes[frame_type][i] += diff; cpi->rd_prediction_type_threshes[frame_type][i] >>= 1; } for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { - const int64_t diff = cpi->rd_filter_diff[i] / cpi->common.MBs; + const int64_t diff = cpi->rd_filter_diff[i] / cm->MBs; cpi->rd_filter_threshes[frame_type][i] = (cpi->rd_filter_threshes[frame_type][i] + diff) / 2; } @@ -2299,32 +2591,31 @@ void vp9_encode_frame(VP9_COMP *cpi) { int64_t pd = cpi->rd_tx_select_diff[i]; int diff; if (i == TX_MODE_SELECT) - pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, - 2048 * (TX_SIZES - 1), 0); - diff = (int) (pd / cpi->common.MBs); + pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZES - 1), 0); + diff = (int) (pd / cm->MBs); cpi->rd_tx_select_threshes[frame_type][i] += diff; cpi->rd_tx_select_threshes[frame_type][i] /= 2; } - if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) { + if (cm->reference_mode == REFERENCE_MODE_SELECT) { int single_count_zero = 0; int comp_count_zero = 0; for (i = 0; i < COMP_INTER_CONTEXTS; i++) { - single_count_zero += cpi->comp_inter_count[i][0]; - comp_count_zero += cpi->comp_inter_count[i][1]; + single_count_zero += cm->counts.comp_inter[i][0]; + comp_count_zero += cm->counts.comp_inter[i][1]; } if (comp_count_zero == 0) { - cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY; - vp9_zero(cpi->comp_inter_count); + cm->reference_mode = SINGLE_REFERENCE; + vp9_zero(cm->counts.comp_inter); } else if (single_count_zero == 0) { - cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY; - vp9_zero(cpi->comp_inter_count); + cm->reference_mode = COMPOUND_REFERENCE; + vp9_zero(cm->counts.comp_inter); } } - if (cpi->common.tx_mode == TX_MODE_SELECT) { + if (cm->tx_mode == TX_MODE_SELECT) { int count4x4 = 0; int count8x8_lp = 0, count8x8_8x8p = 0; int count16x16_16x16p = 0, count16x16_lp = 0; @@ -2344,43 +2635,47 @@ void vp9_encode_frame(VP9_COMP *cpi) { count32x32 += cm->counts.tx.p32x32[i][TX_32X32]; } - if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 - && count32x32 == 0) { - cpi->common.tx_mode = ALLOW_8X8; - reset_skip_txfm_size(cpi, TX_8X8); - } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 - && count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) { - cpi->common.tx_mode = ONLY_4X4; - reset_skip_txfm_size(cpi, TX_4X4); + if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 && + count32x32 == 0) { + cm->tx_mode = ALLOW_8X8; + reset_skip_txfm_size(cm, TX_8X8); + } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 && + count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) { + cm->tx_mode = ONLY_4X4; + reset_skip_txfm_size(cm, TX_4X4); } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { - cpi->common.tx_mode = ALLOW_32X32; + cm->tx_mode = ALLOW_32X32; } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) { - cpi->common.tx_mode = ALLOW_16X16; - reset_skip_txfm_size(cpi, TX_16X16); + cm->tx_mode = ALLOW_16X16; + reset_skip_txfm_size(cm, TX_16X16); } } } else { + cpi->mb.e_mbd.lossless = cpi->oxcf.lossless; + cm->reference_mode = SINGLE_REFERENCE; + // Force the usage of the BILINEAR interp_filter. 
+ cm->interp_filter = BILINEAR; encode_frame_internal(cpi); } } -static void sum_intra_stats(VP9_COMP *cpi, const MODE_INFO *mi) { +static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) { const MB_PREDICTION_MODE y_mode = mi->mbmi.mode; const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode; const BLOCK_SIZE bsize = mi->mbmi.sb_type; - ++cpi->y_uv_mode_count[y_mode][uv_mode]; - if (bsize < BLOCK_8X8) { int idx, idy; - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; - for (idy = 0; idy < 2; idy += num_4x4_blocks_high) - for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) - ++cpi->y_mode_count[0][mi->bmi[idy * 2 + idx].as_mode]; + const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; + const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; + for (idy = 0; idy < 2; idy += num_4x4_h) + for (idx = 0; idx < 2; idx += num_4x4_w) + ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode]; } else { - ++cpi->y_mode_count[size_group_lookup[bsize]][y_mode]; + ++counts->y_mode[size_group_lookup[bsize]][y_mode]; } + + ++counts->uv_mode[y_mode][uv_mode]; } // Experimental stub function to create a per MB zbin adjustment based on @@ -2389,13 +2684,10 @@ static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) { #if USE_ACT_INDEX x->act_zbin_adj = *(x->mb_activity_ptr); #else - int64_t a; - int64_t b; - int64_t act = *(x->mb_activity_ptr); - // Apply the masking to the RD multiplier. - a = act + 4 * cpi->activity_avg; - b = 4 * act + cpi->activity_avg; + const int64_t act = *(x->mb_activity_ptr); + const int64_t a = act + 4 * cpi->activity_avg; + const int64_t b = 4 * act + cpi->activity_avg; if (act > cpi->activity_avg) x->act_zbin_adj = (int) (((int64_t) b + (a >> 1)) / a) - 1; @@ -2403,11 +2695,30 @@ static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) { x->act_zbin_adj = 1 - (int) (((int64_t) a + (b >> 1)) / b); #endif } + +static int get_zbin_mode_boost(const MB_MODE_INFO *mbmi, int enabled) { + if (enabled) { + if (is_inter_block(mbmi)) { + if (mbmi->mode == ZEROMV) { + return mbmi->ref_frame[0] != LAST_FRAME ? GF_ZEROMV_ZBIN_BOOST + : LF_ZEROMV_ZBIN_BOOST; + } else { + return mbmi->sb_type < BLOCK_8X8 ? 
SPLIT_MV_ZBIN_BOOST + : MV_ZBIN_BOOST; + } + } else { + return INTRA_ZBIN_BOOST; + } + } else { + return 0; + } +} + static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, int mi_row, int mi_col, BLOCK_SIZE bsize) { - VP9_COMMON * const cm = &cpi->common; - MACROBLOCK * const x = &cpi->mb; - MACROBLOCKD * const xd = &x->e_mbd; + VP9_COMMON *const cm = &cpi->common; + MACROBLOCK *const x = &cpi->mb; + MACROBLOCKD *const xd = &x->e_mbd; MODE_INFO **mi_8x8 = xd->mi_8x8; MODE_INFO *mi = mi_8x8[0]; MB_MODE_INFO *mbmi = &mi->mbmi; @@ -2416,7 +2727,10 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, const int mis = cm->mode_info_stride; const int mi_width = num_8x8_blocks_wide_lookup[bsize]; const int mi_height = num_8x8_blocks_high_lookup[bsize]; - x->skip_recode = !x->select_txfm_size && mbmi->sb_type >= BLOCK_8X8; + + x->skip_recode = !x->select_txfm_size && mbmi->sb_type >= BLOCK_8X8 && + (cpi->oxcf.aq_mode != COMPLEXITY_AQ) && + !cpi->sf.use_nonrd_pick_mode; x->skip_optimize = ctx->is_coded; ctx->is_coded = 1; x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct; @@ -2431,7 +2745,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, vp9_update_zbin_extra(cpi, x); } } else { - vp9_setup_interp_filters(xd, mbmi->interp_filter, cm); + set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); + xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { // Adjust the zbin based on this MB rate. @@ -2440,98 +2755,65 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, // Experimental code. Special case for gf and arf zeromv modes. // Increase zbin size to suppress noise - cpi->zbin_mode_boost = 0; - if (cpi->zbin_mode_boost_enabled) { - if (is_inter_block(mbmi)) { - if (mbmi->mode == ZEROMV) { - if (mbmi->ref_frame[0] != LAST_FRAME) - cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; - else - cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; - } else if (mbmi->sb_type < BLOCK_8X8) { - cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST; - } else { - cpi->zbin_mode_boost = MV_ZBIN_BOOST; - } - } else { - cpi->zbin_mode_boost = INTRA_ZBIN_BOOST; - } - } - + cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi, + cpi->zbin_mode_boost_enabled); vp9_update_zbin_extra(cpi, x); } if (!is_inter_block(mbmi)) { - vp9_encode_intra_block_y(x, MAX(bsize, BLOCK_8X8)); - vp9_encode_intra_block_uv(x, MAX(bsize, BLOCK_8X8)); + int plane; + mbmi->skip = 1; + for (plane = 0; plane < MAX_MB_PLANE; ++plane) + vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane); if (output_enabled) - sum_intra_stats(cpi, mi); + sum_intra_stats(&cm->counts, mi); + vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); } else { - int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->ref_frame[0])]; - YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx]; - YV12_BUFFER_CONFIG *second_ref_fb = NULL; - if (has_second_ref(mbmi)) { - idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->ref_frame[1])]; - second_ref_fb = &cm->yv12_fb[idx]; + int ref; + const int is_compound = has_second_ref(mbmi); + for (ref = 0; ref < 1 + is_compound; ++ref) { + YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, + mbmi->ref_frame[ref]); + vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col, + &xd->block_refs[ref]->sf); } - - assert(cm->frame_type != KEY_FRAME); - - setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col, - &xd->scale_factor[0]); - setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col, - 
&xd->scale_factor[1]); - vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8)); - } - if (!is_inter_block(mbmi)) { - vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); - } else if (!x->skip) { - vp9_encode_sb(x, MAX(bsize, BLOCK_8X8)); - vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); - } else { - int mb_skip_context = xd->left_available ? mi_8x8[-1]->mbmi.skip_coeff : 0; - mb_skip_context += mi_8x8[-mis] ? mi_8x8[-mis]->mbmi.skip_coeff : 0; - - mbmi->skip_coeff = 1; - if (output_enabled) - cm->counts.mbskip[mb_skip_context][1]++; - reset_skip_context(xd, MAX(bsize, BLOCK_8X8)); + if (!x->skip) { + mbmi->skip = 1; + vp9_encode_sb(x, MAX(bsize, BLOCK_8X8)); + vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); + } else { + mbmi->skip = 1; + if (output_enabled) + cm->counts.skip[vp9_get_skip_context(xd)][1]++; + reset_skip_context(xd, MAX(bsize, BLOCK_8X8)); + } } if (output_enabled) { if (cm->tx_mode == TX_MODE_SELECT && mbmi->sb_type >= BLOCK_8X8 && !(is_inter_block(mbmi) && - (mbmi->skip_coeff || + (mbmi->skip || vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)))) { - const uint8_t context = vp9_get_pred_context_tx_size(xd); - ++get_tx_counts(max_txsize_lookup[bsize], - context, &cm->counts.tx)[mbmi->tx_size]; + ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd), + &cm->counts.tx)[mbmi->tx_size]; } else { int x, y; - TX_SIZE sz = tx_mode_to_biggest_tx_size[cm->tx_mode]; - assert(sizeof(tx_mode_to_biggest_tx_size) / - sizeof(tx_mode_to_biggest_tx_size[0]) == TX_MODES); + TX_SIZE tx_size; // The new intra coding scheme requires no change of transform size if (is_inter_block(&mi->mbmi)) { - if (sz == TX_32X32 && bsize < BLOCK_32X32) - sz = TX_16X16; - if (sz == TX_16X16 && bsize < BLOCK_16X16) - sz = TX_8X8; - if (sz == TX_8X8 && bsize < BLOCK_8X8) - sz = TX_4X4; - } else if (bsize >= BLOCK_8X8) { - sz = mbmi->tx_size; + tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode], + max_txsize_lookup[bsize]); } else { - sz = TX_4X4; + tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; } for (y = 0; y < mi_height; y++) for (x = 0; x < mi_width; x++) if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) - mi_8x8[mis * y + x]->mbmi.tx_size = sz; + mi_8x8[mis * y + x]->mbmi.tx_size = tx_size; } } } diff --git a/libvpx/vp9/encoder/vp9_encodeframe.h b/libvpx/vp9/encoder/vp9_encodeframe.h index 3e9f538..f7d17c3 100644 --- a/libvpx/vp9/encoder/vp9_encodeframe.h +++ b/libvpx/vp9/encoder/vp9_encodeframe.h @@ -12,6 +12,10 @@ #ifndef VP9_ENCODER_VP9_ENCODEFRAME_H_ #define VP9_ENCODER_VP9_ENCODEFRAME_H_ +#ifdef __cplusplus +extern "C" { +#endif + struct macroblock; struct yv12_buffer_config; @@ -19,4 +23,8 @@ void vp9_setup_src_planes(struct macroblock *x, const struct yv12_buffer_config *src, int mi_row, int mi_col); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_ENCODER_VP9_ENCODEFRAME_H_ diff --git a/libvpx/vp9/encoder/vp9_encodeintra.c b/libvpx/vp9/encoder/vp9_encodeintra.c deleted file mode 100644 index 32b4593..0000000 --- a/libvpx/vp9/encoder/vp9_encodeintra.c +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
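[Editor's note: the encode_superblock tx-size hunk a little further up replaces the removed 32->16->8->4 step-down ladder with a single MIN() of two table lookups. The sketch below checks that the two forms agree, under the assumption (suggested but not proven by this hunk) that max_txsize_lookup[] encodes the same size thresholds the ladder tested. TxSize, ladder() and clamped() are stand-in names, not libvpx symbols.]

    #include <stdio.h>

    typedef enum { TX_4X4, TX_8X8, TX_16X16, TX_32X32 } TxSize;  /* vp9 enum order */

    /* The removed ladder, with "largest transform the block admits"
     * standing in for the bsize comparisons. */
    static TxSize ladder(TxSize sz, TxSize block_max) {
      if (sz == TX_32X32 && block_max < TX_32X32) sz = TX_16X16;
      if (sz == TX_16X16 && block_max < TX_16X16) sz = TX_8X8;
      if (sz == TX_8X8 && block_max < TX_8X8) sz = TX_4X4;
      return sz;
    }

    /* The replacement: clamp to the block's largest admissible transform. */
    static TxSize clamped(TxSize sz, TxSize block_max) {
      return sz < block_max ? sz : block_max;  /* MIN() */
    }

    int main(void) {
      int sz, bm;
      for (sz = TX_4X4; sz <= TX_32X32; ++sz)
        for (bm = TX_4X4; bm <= TX_32X32; ++bm)
          if (ladder((TxSize)sz, (TxSize)bm) != clamped((TxSize)sz, (TxSize)bm))
            printf("mismatch at sz=%d block_max=%d\n", sz, bm);
      printf("checked all 16 combinations\n");  /* prints no mismatches */
      return 0;
    }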
- */ - -#include "./vpx_config.h" -#include "./vp9_rtcd.h" -#include "vp9/encoder/vp9_quantize.h" -#include "vp9/common/vp9_reconintra.h" -#include "vp9/encoder/vp9_encodemb.h" -#include "vp9/encoder/vp9_encodeintra.h" - -int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred) { - MB_MODE_INFO * mbmi = &x->e_mbd.mi_8x8[0]->mbmi; - x->skip_encode = 0; - mbmi->mode = DC_PRED; - mbmi->ref_frame[0] = INTRA_FRAME; - mbmi->tx_size = use_16x16_pred ? (mbmi->sb_type >= BLOCK_16X16 ? TX_16X16 - : TX_8X8) - : TX_4X4; - vp9_encode_intra_block_y(x, mbmi->sb_type); - return vp9_get_mb_ss(x->plane[0].src_diff); -} diff --git a/libvpx/vp9/encoder/vp9_encodeintra.h b/libvpx/vp9/encoder/vp9_encodeintra.h deleted file mode 100644 index e217924..0000000 --- a/libvpx/vp9/encoder/vp9_encodeintra.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP9_ENCODER_VP9_ENCODEINTRA_H_ -#define VP9_ENCODER_VP9_ENCODEINTRA_H_ - -#include "vp9/encoder/vp9_onyx_int.h" - -int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred); -void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg); - -#endif // VP9_ENCODER_VP9_ENCODEINTRA_H_ diff --git a/libvpx/vp9/encoder/vp9_encodemb.c b/libvpx/vp9/encoder/vp9_encodemb.c index a85ddee..8dbd1a4 100644 --- a/libvpx/vp9/encoder/vp9_encodemb.c +++ b/libvpx/vp9/encoder/vp9_encodemb.c @@ -19,56 +19,49 @@ #include "vp9/common/vp9_reconintra.h" #include "vp9/common/vp9_systemdependent.h" -#include "vp9/encoder/vp9_dct.h" #include "vp9/encoder/vp9_encodemb.h" #include "vp9/encoder/vp9_quantize.h" #include "vp9/encoder/vp9_rdopt.h" #include "vp9/encoder/vp9_tokenize.h" +struct optimize_ctx { + ENTROPY_CONTEXT ta[MAX_MB_PLANE][16]; + ENTROPY_CONTEXT tl[MAX_MB_PLANE][16]; +}; + +struct encode_b_args { + MACROBLOCK *x; + struct optimize_ctx *ctx; + unsigned char *skip; +}; + void vp9_subtract_block_c(int rows, int cols, - int16_t *diff_ptr, ptrdiff_t diff_stride, - const uint8_t *src_ptr, ptrdiff_t src_stride, - const uint8_t *pred_ptr, ptrdiff_t pred_stride) { + int16_t *diff, ptrdiff_t diff_stride, + const uint8_t *src, ptrdiff_t src_stride, + const uint8_t *pred, ptrdiff_t pred_stride) { int r, c; for (r = 0; r < rows; r++) { for (c = 0; c < cols; c++) - diff_ptr[c] = src_ptr[c] - pred_ptr[c]; + diff[c] = src[c] - pred[c]; - diff_ptr += diff_stride; - pred_ptr += pred_stride; - src_ptr += src_stride; + diff += diff_stride; + pred += pred_stride; + src += src_stride; } } -static void subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { +void vp9_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { struct macroblock_plane *const p = &x->plane[plane]; - const MACROBLOCKD *const xd = &x->e_mbd; - const struct macroblockd_plane *const pd = &xd->plane[plane]; - const int bw = plane_block_width(bsize, pd); - const int bh = plane_block_height(bsize, pd); + const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane]; + const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); + const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; + const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize]; - vp9_subtract_block(bh, bw, 
p->src_diff, bw, - p->src.buf, p->src.stride, + vp9_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride); } -void vp9_subtract_sby(MACROBLOCK *x, BLOCK_SIZE bsize) { - subtract_plane(x, bsize, 0); -} - -void vp9_subtract_sbuv(MACROBLOCK *x, BLOCK_SIZE bsize) { - int i; - - for (i = 1; i < MAX_MB_PLANE; i++) - subtract_plane(x, bsize, i); -} - -void vp9_subtract_sb(MACROBLOCK *x, BLOCK_SIZE bsize) { - vp9_subtract_sby(x, bsize); - vp9_subtract_sbuv(x, bsize); -} - #define RDTRUNC(RM, DM, R, D) ((128 + (R) * (RM)) & 0xFF) typedef struct vp9_token_state vp9_token_state; @@ -112,19 +105,19 @@ static int trellis_get_coeff_context(const int16_t *scan, return pt; } -static void optimize_b(MACROBLOCK *mb, - int plane, int block, BLOCK_SIZE plane_bsize, - ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, - TX_SIZE tx_size) { +static void optimize_b(int plane, int block, BLOCK_SIZE plane_bsize, + TX_SIZE tx_size, MACROBLOCK *mb, + ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) { MACROBLOCKD *const xd = &mb->e_mbd; + struct macroblock_plane *p = &mb->plane[plane]; struct macroblockd_plane *pd = &xd->plane[plane]; const int ref = is_inter_block(&xd->mi_8x8[0]->mbmi); vp9_token_state tokens[1025][2]; unsigned best_index[1025][2]; - const int16_t *coeff_ptr = BLOCK_OFFSET(mb->plane[plane].coeff, block); - int16_t *qcoeff_ptr; - int16_t *dqcoeff_ptr; - int eob = pd->eobs[block], final_eob, sz = 0; + const int16_t *coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block); + int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); + int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); + int eob = p->eobs[block], final_eob, sz = 0; const int i0 = 0; int rc, x, next, i; int64_t rdmult, rddiv, rd_cost0, rd_cost1; @@ -133,40 +126,39 @@ static void optimize_b(MACROBLOCK *mb, PLANE_TYPE type = pd->plane_type; int err_mult = plane_rd_mult[type]; const int default_eob = 16 << (tx_size << 1); - const int16_t *scan, *nb; const int mul = 1 + (tx_size == TX_32X32); uint8_t token_cache[1024]; const int16_t *dequant_ptr = pd->dequant; const uint8_t *const band_translate = get_band_translate(tx_size); + const scan_order *so = get_scan(xd, tx_size, type, block); + const int16_t *scan = so->scan; + const int16_t *nb = so->neighbors; assert((!type && !plane) || (type && plane)); - dqcoeff_ptr = BLOCK_OFFSET(pd->dqcoeff, block); - qcoeff_ptr = BLOCK_OFFSET(pd->qcoeff, block); - get_scan(xd, tx_size, type, block, &scan, &nb); assert(eob <= default_eob); /* Now set up a Viterbi trellis to evaluate alternative roundings. */ rdmult = mb->rdmult * err_mult; - if (mb->e_mbd.mi_8x8[0]->mbmi.ref_frame[0] == INTRA_FRAME) + if (!is_inter_block(&mb->e_mbd.mi_8x8[0]->mbmi)) rdmult = (rdmult * 9) >> 4; rddiv = mb->rddiv; /* Initialize the sentinel node of the trellis. */ tokens[eob][0].rate = 0; tokens[eob][0].error = 0; tokens[eob][0].next = default_eob; - tokens[eob][0].token = DCT_EOB_TOKEN; + tokens[eob][0].token = EOB_TOKEN; tokens[eob][0].qc = 0; *(tokens[eob] + 1) = *(tokens[eob] + 0); next = eob; for (i = 0; i < eob; i++) token_cache[scan[i]] = vp9_pt_energy_class[vp9_dct_value_tokens_ptr[ - qcoeff_ptr[scan[i]]].token]; + qcoeff[scan[i]]].token]; for (i = eob; i-- > i0;) { int base_bits, d2, dx; rc = scan[i]; - x = qcoeff_ptr[rc]; + x = qcoeff[rc]; /* Only add a trellis state for non-zero coefficients. */ if (x) { int shortcut = 0; @@ -191,7 +183,7 @@ static void optimize_b(MACROBLOCK *mb, /* And pick the best. 
*/ best = rd_cost1 < rd_cost0; base_bits = *(vp9_dct_value_cost_ptr + x); - dx = mul * (dqcoeff_ptr[rc] - coeff_ptr[rc]); + dx = mul * (dqcoeff[rc] - coeff[rc]); d2 = dx * dx; tokens[i][0].rate = base_bits + (best ? rate1 : rate0); tokens[i][0].error = d2 + (best ? error1 : error0); @@ -204,8 +196,8 @@ static void optimize_b(MACROBLOCK *mb, rate0 = tokens[next][0].rate; rate1 = tokens[next][1].rate; - if ((abs(x)*dequant_ptr[rc != 0] > abs(coeff_ptr[rc]) * mul) && - (abs(x)*dequant_ptr[rc != 0] < abs(coeff_ptr[rc]) * mul + + if ((abs(x)*dequant_ptr[rc != 0] > abs(coeff[rc]) * mul) && + (abs(x)*dequant_ptr[rc != 0] < abs(coeff[rc]) * mul + dequant_ptr[rc != 0])) shortcut = 1; else @@ -221,21 +213,19 @@ static void optimize_b(MACROBLOCK *mb, /* If we reduced this coefficient to zero, check to see if * we need to move the EOB back here. */ - t0 = tokens[next][0].token == DCT_EOB_TOKEN ? - DCT_EOB_TOKEN : ZERO_TOKEN; - t1 = tokens[next][1].token == DCT_EOB_TOKEN ? - DCT_EOB_TOKEN : ZERO_TOKEN; + t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN; + t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN; } else { t0 = t1 = (vp9_dct_value_tokens_ptr + x)->token; } if (next < default_eob) { band = band_translate[i + 1]; - if (t0 != DCT_EOB_TOKEN) { + if (t0 != EOB_TOKEN) { pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache); rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt] [tokens[next][0].token]; } - if (t1 != DCT_EOB_TOKEN) { + if (t1 != EOB_TOKEN) { pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache); rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt] [tokens[next][1].token]; @@ -267,12 +257,12 @@ static void optimize_b(MACROBLOCK *mb, t0 = tokens[next][0].token; t1 = tokens[next][1].token; /* Update the cost of each path if we're past the EOB token. 
*/ - if (t0 != DCT_EOB_TOKEN) { + if (t0 != EOB_TOKEN) { tokens[next][0].rate += mb->token_costs[tx_size][type][ref][band][1][0][t0]; tokens[next][0].token = ZERO_TOKEN; } - if (t1 != DCT_EOB_TOKEN) { + if (t1 != EOB_TOKEN) { tokens[next][1].rate += mb->token_costs[tx_size][type][ref][band][1][0][t1]; tokens[next][1].token = ZERO_TOKEN; @@ -296,116 +286,78 @@ static void optimize_b(MACROBLOCK *mb, UPDATE_RD_COST(); best = rd_cost1 < rd_cost0; final_eob = i0 - 1; - vpx_memset(qcoeff_ptr, 0, sizeof(*qcoeff_ptr) * (16 << (tx_size * 2))); - vpx_memset(dqcoeff_ptr, 0, sizeof(*dqcoeff_ptr) * (16 << (tx_size * 2))); + vpx_memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2))); + vpx_memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2))); for (i = next; i < eob; i = next) { x = tokens[i][best].qc; if (x) { final_eob = i; } rc = scan[i]; - qcoeff_ptr[rc] = x; - dqcoeff_ptr[rc] = (x * dequant_ptr[rc != 0]) / mul; + qcoeff[rc] = x; + dqcoeff[rc] = (x * dequant_ptr[rc != 0]) / mul; next = tokens[i][best].next; best = best_index[i][best]; } final_eob++; - xd->plane[plane].eobs[block] = final_eob; + mb->plane[plane].eobs[block] = final_eob; *a = *l = (final_eob > 0); } -void vp9_optimize_b(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, MACROBLOCK *mb, struct optimize_ctx *ctx) { - int x, y; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); - optimize_b(mb, plane, block, plane_bsize, - &ctx->ta[plane][x], &ctx->tl[plane][y], tx_size); -} - -static void optimize_init_b(int plane, BLOCK_SIZE bsize, - struct encode_b_args *args) { - const MACROBLOCKD *xd = &args->x->e_mbd; - const struct macroblockd_plane* const pd = &xd->plane[plane]; - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); - const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; - const MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; - const TX_SIZE tx_size = plane ? 
get_uv_tx_size(mbmi) : mbmi->tx_size; - - vp9_get_entropy_contexts(tx_size, args->ctx->ta[plane], args->ctx->tl[plane], - pd->above_context, pd->left_context, - num_4x4_w, num_4x4_h); +static INLINE void fdct32x32(int rd_transform, + const int16_t *src, int16_t *dst, int src_stride) { + if (rd_transform) + vp9_fdct32x32_rd(src, dst, src_stride); + else + vp9_fdct32x32(src, dst, src_stride); } -void vp9_xform_quant(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct encode_b_args* const args = arg; - MACROBLOCK* const x = args->x; - MACROBLOCKD* const xd = &x->e_mbd; - struct macroblock_plane *const p = &x->plane[plane]; - struct macroblockd_plane *const pd = &xd->plane[plane]; - int16_t *coeff = BLOCK_OFFSET(p->coeff, block); - int16_t *qcoeff = BLOCK_OFFSET(pd->qcoeff, block); - int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - const int16_t *scan, *iscan; - uint16_t *eob = &pd->eobs[block]; - const int bwl = b_width_log2(plane_bsize), bw = 1 << bwl; - const int twl = bwl - tx_size, twmask = (1 << twl) - 1; - int xoff, yoff; - int16_t *src_diff; +void vp9_xform_quant(MACROBLOCK *x, int plane, int block, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { + MACROBLOCKD *const xd = &x->e_mbd; + const struct macroblock_plane *const p = &x->plane[plane]; + const struct macroblockd_plane *const pd = &xd->plane[plane]; + const scan_order *const scan_order = &vp9_default_scan_orders[tx_size]; + int16_t *const coeff = BLOCK_OFFSET(p->coeff, block); + int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); + int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); + uint16_t *const eob = &p->eobs[block]; + const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; + int i, j; + const int16_t *src_diff; + txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); + src_diff = &p->src_diff[4 * (j * diff_stride + i)]; switch (tx_size) { case TX_32X32: - scan = vp9_default_scan_32x32; - iscan = vp9_default_iscan_32x32; - block >>= 6; - xoff = 32 * (block & twmask); - yoff = 32 * (block >> twl); - src_diff = p->src_diff + 4 * bw * yoff + xoff; - if (x->use_lp32x32fdct) - vp9_fdct32x32_rd(src_diff, coeff, bw * 4); - else - vp9_fdct32x32(src_diff, coeff, bw * 4); + fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride); vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, p->zbin_extra, eob, scan, iscan); + pd->dequant, p->zbin_extra, eob, scan_order->scan, + scan_order->iscan); break; case TX_16X16: - scan = vp9_default_scan_16x16; - iscan = vp9_default_iscan_16x16; - block >>= 4; - xoff = 16 * (block & twmask); - yoff = 16 * (block >> twl); - src_diff = p->src_diff + 4 * bw * yoff + xoff; - vp9_fdct16x16(src_diff, coeff, bw * 4); + vp9_fdct16x16(src_diff, coeff, diff_stride); vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, p->zbin_extra, eob, scan, iscan); + pd->dequant, p->zbin_extra, eob, + scan_order->scan, scan_order->iscan); break; case TX_8X8: - scan = vp9_default_scan_8x8; - iscan = vp9_default_iscan_8x8; - block >>= 2; - xoff = 8 * (block & twmask); - yoff = 8 * (block >> twl); - src_diff = p->src_diff + 4 * bw * yoff + xoff; - vp9_fdct8x8(src_diff, coeff, bw * 4); + vp9_fdct8x8(src_diff, coeff, diff_stride); vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, p->zbin_extra, eob, scan, iscan); + pd->dequant, p->zbin_extra, eob, + 
scan_order->scan, scan_order->iscan); break; case TX_4X4: - scan = vp9_default_scan_4x4; - iscan = vp9_default_iscan_4x4; - xoff = 4 * (block & twmask); - yoff = 4 * (block >> twl); - src_diff = p->src_diff + 4 * bw * yoff + xoff; - x->fwd_txm4x4(src_diff, coeff, bw * 4); + x->fwd_txm4x4(src_diff, coeff, diff_stride); vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, p->zbin_extra, eob, scan, iscan); + pd->dequant, p->zbin_extra, eob, + scan_order->scan, scan_order->iscan); break; default: assert(0); @@ -418,108 +370,109 @@ static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize, MACROBLOCK *const x = args->x; MACROBLOCKD *const xd = &x->e_mbd; struct optimize_ctx *const ctx = args->ctx; + struct macroblock_plane *const p = &x->plane[plane]; struct macroblockd_plane *const pd = &xd->plane[plane]; int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); int i, j; uint8_t *dst; + ENTROPY_CONTEXT *a, *l; txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i]; + a = &ctx->ta[plane][i]; + l = &ctx->tl[plane][j]; // TODO(jingning): per transformed block zero forcing only enabled for // luma component. will integrate chroma components as well. if (x->zcoeff_blk[tx_size][block] && plane == 0) { - pd->eobs[block] = 0; - ctx->ta[plane][i] = 0; - ctx->tl[plane][j] = 0; + p->eobs[block] = 0; + *a = *l = 0; return; } if (!x->skip_recode) - vp9_xform_quant(plane, block, plane_bsize, tx_size, arg); + vp9_xform_quant(x, plane, block, plane_bsize, tx_size); if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { - vp9_optimize_b(plane, block, plane_bsize, tx_size, x, ctx); + optimize_b(plane, block, plane_bsize, tx_size, x, a, l); } else { - ctx->ta[plane][i] = pd->eobs[block] > 0; - ctx->tl[plane][j] = pd->eobs[block] > 0; + *a = *l = p->eobs[block] > 0; } - if (x->skip_encode || pd->eobs[block] == 0) + if (p->eobs[block]) + *(args->skip) = 0; + + if (x->skip_encode || p->eobs[block] == 0) return; switch (tx_size) { case TX_32X32: - vp9_idct32x32_add(dqcoeff, dst, pd->dst.stride, pd->eobs[block]); + vp9_idct32x32_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); break; case TX_16X16: - vp9_idct16x16_add(dqcoeff, dst, pd->dst.stride, pd->eobs[block]); + vp9_idct16x16_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); break; case TX_8X8: - vp9_idct8x8_add(dqcoeff, dst, pd->dst.stride, pd->eobs[block]); + vp9_idct8x8_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); break; case TX_4X4: // this is like vp9_short_idct4x4 but has a special case around eob<=1 // which is significant (not just an optimization) for the lossless // case. 
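[Editor's note: this hunk threads a skip pointer through encode_b_args and has encode_block clear it whenever a block keeps coefficients ("if (p->eobs[block]) *(args->skip) = 0;" above). The pattern is start-optimistic-then-veto; a runnable miniature with made-up eob values:]

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const uint16_t eobs[4] = { 0, 0, 3, 0 };  /* made-up per-block eobs */
      unsigned char skip = 1;  /* mbmi->skip starts at 1, as in the hunk */
      int i;
      for (i = 0; i < 4; ++i)
        if (eobs[i])   /* any surviving coefficient vetoes the skip */
          skip = 0;
      printf("skip = %d\n", skip);  /* prints 0: block 2 kept coefficients */
      return 0;
    }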
- xd->itxm_add(dqcoeff, dst, pd->dst.stride, pd->eobs[block]); + xd->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); break; default: - assert(!"Invalid transform size"); + assert(0 && "Invalid transform size"); } } - static void encode_block_pass1(int plane, int block, BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) { - struct encode_b_args *const args = arg; - MACROBLOCK *const x = args->x; + MACROBLOCK *const x = (MACROBLOCK *)arg; MACROBLOCKD *const xd = &x->e_mbd; + struct macroblock_plane *const p = &x->plane[plane]; struct macroblockd_plane *const pd = &xd->plane[plane]; - const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size, - block); - int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - uint8_t *const dst = raster_block_offset_uint8(plane_bsize, raster_block, - pd->dst.buf, pd->dst.stride); - - vp9_xform_quant(plane, block, plane_bsize, tx_size, arg); + int i, j; + uint8_t *dst; + txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); + dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i]; - if (pd->eobs[block] == 0) - return; + vp9_xform_quant(x, plane, block, plane_bsize, tx_size); - xd->itxm_add(dqcoeff, dst, pd->dst.stride, pd->eobs[block]); + if (p->eobs[block] > 0) + xd->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); } -void vp9_encode_sby(MACROBLOCK *x, BLOCK_SIZE bsize) { - MACROBLOCKD *const xd = &x->e_mbd; - struct optimize_ctx ctx; - struct encode_b_args arg = {x, &ctx}; - - vp9_subtract_sby(x, bsize); - if (x->optimize) - optimize_init_b(0, bsize, &arg); - - foreach_transformed_block_in_plane(xd, bsize, 0, encode_block_pass1, &arg); +void vp9_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) { + vp9_subtract_plane(x, bsize, 0); + vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0, + encode_block_pass1, x); } void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) { MACROBLOCKD *const xd = &x->e_mbd; struct optimize_ctx ctx; - struct encode_b_args arg = {x, &ctx}; - - if (!x->skip_recode) - vp9_subtract_sb(x, bsize); + MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; + struct encode_b_args arg = {x, &ctx, &mbmi->skip}; + int plane; + + for (plane = 0; plane < MAX_MB_PLANE; ++plane) { + if (!x->skip_recode) + vp9_subtract_plane(x, bsize, plane); + + if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { + const struct macroblockd_plane* const pd = &xd->plane[plane]; + const TX_SIZE tx_size = plane ? 
get_uv_tx_size(mbmi) : mbmi->tx_size; + vp9_get_entropy_contexts(bsize, tx_size, pd, + ctx.ta[plane], ctx.tl[plane]); + } - if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { - int i; - for (i = 0; i < MAX_MB_PLANE; ++i) - optimize_init_b(i, bsize, &arg); + vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block, + &arg); } - - foreach_transformed_block(xd, bsize, encode_block, &arg); } -void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { +static void encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, + TX_SIZE tx_size, void *arg) { struct encode_b_args* const args = arg; MACROBLOCK *const x = args->x; MACROBLOCKD *const xd = &x->e_mbd; @@ -527,127 +480,104 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, struct macroblock_plane *const p = &x->plane[plane]; struct macroblockd_plane *const pd = &xd->plane[plane]; int16_t *coeff = BLOCK_OFFSET(p->coeff, block); - int16_t *qcoeff = BLOCK_OFFSET(pd->qcoeff, block); + int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - const int16_t *scan, *iscan; + const scan_order *scan_order; TX_TYPE tx_type; MB_PREDICTION_MODE mode; - const int bwl = b_width_log2(plane_bsize), bw = 1 << bwl; - const int twl = bwl - tx_size, twmask = (1 << twl) - 1; - int xoff, yoff; + const int bwl = b_width_log2(plane_bsize); + const int diff_stride = 4 * (1 << bwl); uint8_t *src, *dst; int16_t *src_diff; - uint16_t *eob = &pd->eobs[block]; - - if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) - extend_for_intra(xd, plane_bsize, plane, block, tx_size); - - // if (x->optimize) - // vp9_optimize_b(plane, block, plane_bsize, tx_size, x, args->ctx); + uint16_t *eob = &p->eobs[block]; + const int src_stride = p->src.stride; + const int dst_stride = pd->dst.stride; + int i, j; + txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); + dst = &pd->dst.buf[4 * (j * dst_stride + i)]; + src = &p->src.buf[4 * (j * src_stride + i)]; + src_diff = &p->src_diff[4 * (j * diff_stride + i)]; switch (tx_size) { case TX_32X32: - scan = vp9_default_scan_32x32; - iscan = vp9_default_iscan_32x32; + scan_order = &vp9_default_scan_orders[TX_32X32]; mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; - block >>= 6; - xoff = 32 * (block & twmask); - yoff = 32 * (block >> twl); - dst = pd->dst.buf + yoff * pd->dst.stride + xoff; - vp9_predict_intra_block(xd, block, bwl, TX_32X32, mode, - dst, pd->dst.stride, dst, pd->dst.stride); - + vp9_predict_intra_block(xd, block >> 6, bwl, TX_32X32, mode, + x->skip_encode ? src : dst, + x->skip_encode ? 
src_stride : dst_stride, + dst, dst_stride, i, j, plane); if (!x->skip_recode) { - src = p->src.buf + yoff * p->src.stride + xoff; - src_diff = p->src_diff + 4 * bw * yoff + xoff; - vp9_subtract_block(32, 32, src_diff, bw * 4, - src, p->src.stride, dst, pd->dst.stride); - if (x->use_lp32x32fdct) - vp9_fdct32x32_rd(src_diff, coeff, bw * 4); - else - vp9_fdct32x32(src_diff, coeff, bw * 4); + vp9_subtract_block(32, 32, src_diff, diff_stride, + src, src_stride, dst, dst_stride); + fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride); vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, p->zbin_extra, eob, scan, iscan); + pd->dequant, p->zbin_extra, eob, scan_order->scan, + scan_order->iscan); } if (!x->skip_encode && *eob) - vp9_idct32x32_add(dqcoeff, dst, pd->dst.stride, *eob); + vp9_idct32x32_add(dqcoeff, dst, dst_stride, *eob); break; case TX_16X16: - tx_type = get_tx_type_16x16(pd->plane_type, xd); - scan = get_scan_16x16(tx_type); - iscan = get_iscan_16x16(tx_type); + tx_type = get_tx_type(pd->plane_type, xd); + scan_order = &vp9_scan_orders[TX_16X16][tx_type]; mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; - block >>= 4; - xoff = 16 * (block & twmask); - yoff = 16 * (block >> twl); - dst = pd->dst.buf + yoff * pd->dst.stride + xoff; - vp9_predict_intra_block(xd, block, bwl, TX_16X16, mode, - dst, pd->dst.stride, dst, pd->dst.stride); + vp9_predict_intra_block(xd, block >> 4, bwl, TX_16X16, mode, + x->skip_encode ? src : dst, + x->skip_encode ? src_stride : dst_stride, + dst, dst_stride, i, j, plane); if (!x->skip_recode) { - src = p->src.buf + yoff * p->src.stride + xoff; - src_diff = p->src_diff + 4 * bw * yoff + xoff; - vp9_subtract_block(16, 16, src_diff, bw * 4, - src, p->src.stride, dst, pd->dst.stride); - vp9_fht16x16(tx_type, src_diff, coeff, bw * 4); + vp9_subtract_block(16, 16, src_diff, diff_stride, + src, src_stride, dst, dst_stride); + vp9_fht16x16(src_diff, coeff, diff_stride, tx_type); vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, p->zbin_extra, eob, scan, iscan); + pd->dequant, p->zbin_extra, eob, scan_order->scan, + scan_order->iscan); } if (!x->skip_encode && *eob) - vp9_iht16x16_add(tx_type, dqcoeff, dst, pd->dst.stride, *eob); + vp9_iht16x16_add(tx_type, dqcoeff, dst, dst_stride, *eob); break; case TX_8X8: - tx_type = get_tx_type_8x8(pd->plane_type, xd); - scan = get_scan_8x8(tx_type); - iscan = get_iscan_8x8(tx_type); + tx_type = get_tx_type(pd->plane_type, xd); + scan_order = &vp9_scan_orders[TX_8X8][tx_type]; mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; - block >>= 2; - xoff = 8 * (block & twmask); - yoff = 8 * (block >> twl); - dst = pd->dst.buf + yoff * pd->dst.stride + xoff; - vp9_predict_intra_block(xd, block, bwl, TX_8X8, mode, - dst, pd->dst.stride, dst, pd->dst.stride); + vp9_predict_intra_block(xd, block >> 2, bwl, TX_8X8, mode, + x->skip_encode ? src : dst, + x->skip_encode ? 
src_stride : dst_stride, + dst, dst_stride, i, j, plane); if (!x->skip_recode) { - src = p->src.buf + yoff * p->src.stride + xoff; - src_diff = p->src_diff + 4 * bw * yoff + xoff; - vp9_subtract_block(8, 8, src_diff, bw * 4, - src, p->src.stride, dst, pd->dst.stride); - vp9_fht8x8(tx_type, src_diff, coeff, bw * 4); + vp9_subtract_block(8, 8, src_diff, diff_stride, + src, src_stride, dst, dst_stride); + vp9_fht8x8(src_diff, coeff, diff_stride, tx_type); vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, p->zbin_extra, eob, scan, iscan); + pd->dequant, p->zbin_extra, eob, scan_order->scan, + scan_order->iscan); } if (!x->skip_encode && *eob) - vp9_iht8x8_add(tx_type, dqcoeff, dst, pd->dst.stride, *eob); + vp9_iht8x8_add(tx_type, dqcoeff, dst, dst_stride, *eob); break; case TX_4X4: tx_type = get_tx_type_4x4(pd->plane_type, xd, block); - scan = get_scan_4x4(tx_type); - iscan = get_iscan_4x4(tx_type); - if (mbmi->sb_type < BLOCK_8X8 && plane == 0) - mode = xd->mi_8x8[0]->bmi[block].as_mode; - else - mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; - - xoff = 4 * (block & twmask); - yoff = 4 * (block >> twl); - dst = pd->dst.buf + yoff * pd->dst.stride + xoff; + scan_order = &vp9_scan_orders[TX_4X4][tx_type]; + mode = plane == 0 ? get_y_mode(xd->mi_8x8[0], block) : mbmi->uv_mode; vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode, - dst, pd->dst.stride, dst, pd->dst.stride); + x->skip_encode ? src : dst, + x->skip_encode ? src_stride : dst_stride, + dst, dst_stride, i, j, plane); if (!x->skip_recode) { - src = p->src.buf + yoff * p->src.stride + xoff; - src_diff = p->src_diff + 4 * bw * yoff + xoff; - vp9_subtract_block(4, 4, src_diff, bw * 4, - src, p->src.stride, dst, pd->dst.stride); + vp9_subtract_block(4, 4, src_diff, diff_stride, + src, src_stride, dst, dst_stride); if (tx_type != DCT_DCT) - vp9_short_fht4x4(src_diff, coeff, bw * 4, tx_type); + vp9_fht4x4(src_diff, coeff, diff_stride, tx_type); else - x->fwd_txm4x4(src_diff, coeff, bw * 4); + x->fwd_txm4x4(src_diff, coeff, diff_stride); vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, p->zbin_extra, eob, scan, iscan); + pd->dequant, p->zbin_extra, eob, scan_order->scan, + scan_order->iscan); } if (!x->skip_encode && *eob) { @@ -655,28 +585,42 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, // this is like vp9_short_idct4x4 but has a special case around eob<=1 // which is significant (not just an optimization) for the lossless // case. 
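[Editor's note: the coefficient counts passed to these quantize calls (16, 64, 256 and 1024 for TX_4X4 through TX_32X32) are all instances of the relation optimize_b uses earlier for default_eob, 16 << (tx_size << 1). A quick standalone check:]

    #include <stdio.h>

    int main(void) {
      const char *name[4] = { "TX_4X4", "TX_8X8", "TX_16X16", "TX_32X32" };
      int tx;
      for (tx = 0; tx < 4; ++tx)  /* reproduces the 16/64/256/1024 literals */
        printf("%-8s -> %4d coefficients\n", name[tx], 16 << (tx << 1));
      return 0;
    }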
- xd->itxm_add(dqcoeff, dst, pd->dst.stride, *eob); + xd->itxm_add(dqcoeff, dst, dst_stride, *eob); else - vp9_iht4x4_16_add(dqcoeff, dst, pd->dst.stride, tx_type); + vp9_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type); } break; default: assert(0); } + if (*eob) + *(args->skip) = 0; } -void vp9_encode_intra_block_y(MACROBLOCK *x, BLOCK_SIZE bsize) { - MACROBLOCKD* const xd = &x->e_mbd; - struct optimize_ctx ctx; - struct encode_b_args arg = {x, &ctx}; - - foreach_transformed_block_in_plane(xd, bsize, 0, vp9_encode_block_intra, - &arg); +void vp9_encode_block_intra(MACROBLOCK *x, int plane, int block, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, + unsigned char *skip) { + struct encode_b_args arg = {x, NULL, skip}; + encode_block_intra(plane, block, plane_bsize, tx_size, &arg); } -void vp9_encode_intra_block_uv(MACROBLOCK *x, BLOCK_SIZE bsize) { - MACROBLOCKD* const xd = &x->e_mbd; - struct optimize_ctx ctx; - struct encode_b_args arg = {x, &ctx}; - foreach_transformed_block_uv(xd, bsize, vp9_encode_block_intra, &arg); + + +void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { + const MACROBLOCKD *const xd = &x->e_mbd; + struct encode_b_args arg = {x, NULL, &xd->mi_8x8[0]->mbmi.skip}; + + vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block_intra, + &arg); } +int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred) { + MB_MODE_INFO * mbmi = &x->e_mbd.mi_8x8[0]->mbmi; + x->skip_encode = 0; + mbmi->mode = DC_PRED; + mbmi->ref_frame[0] = INTRA_FRAME; + mbmi->tx_size = use_16x16_pred ? (mbmi->sb_type >= BLOCK_16X16 ? TX_16X16 + : TX_8X8) + : TX_4X4; + vp9_encode_intra_block_plane(x, mbmi->sb_type, 0); + return vp9_get_mb_ss(x->plane[0].src_diff); +} diff --git a/libvpx/vp9/encoder/vp9_encodemb.h b/libvpx/vp9/encoder/vp9_encodemb.h index 61dd735..dcf6e87 100644 --- a/libvpx/vp9/encoder/vp9_encodemb.h +++ b/libvpx/vp9/encoder/vp9_encodemb.h @@ -16,39 +16,28 @@ #include "vp9/encoder/vp9_onyx_int.h" #include "vp9/common/vp9_onyxc_int.h" -typedef struct { - MB_PREDICTION_MODE mode; - MV_REFERENCE_FRAME ref_frame; - MV_REFERENCE_FRAME second_ref_frame; -} MODE_DEFINITION; - -typedef struct { - MV_REFERENCE_FRAME ref_frame; - MV_REFERENCE_FRAME second_ref_frame; -} REF_DEFINITION; - -struct optimize_ctx { - ENTROPY_CONTEXT ta[MAX_MB_PLANE][16]; - ENTROPY_CONTEXT tl[MAX_MB_PLANE][16]; -}; - -struct encode_b_args { - MACROBLOCK *x; - struct optimize_ctx *ctx; -}; +#ifdef __cplusplus +extern "C" { +#endif void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize); -void vp9_encode_sby(MACROBLOCK *x, BLOCK_SIZE bsize); +void vp9_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize); -void vp9_xform_quant(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg); +void vp9_xform_quant(MACROBLOCK *x, int plane, int block, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size); -void vp9_subtract_sby(MACROBLOCK *x, BLOCK_SIZE bsize); -void vp9_subtract_sbuv(MACROBLOCK *x, BLOCK_SIZE bsize); -void vp9_subtract_sb(MACROBLOCK *x, BLOCK_SIZE bsize); +void vp9_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane); -void vp9_encode_intra_block_y(MACROBLOCK *x, BLOCK_SIZE bsize); -void vp9_encode_intra_block_uv(MACROBLOCK *x, BLOCK_SIZE bsize); +void vp9_encode_block_intra(MACROBLOCK *x, int plane, int block, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, + unsigned char *skip); +void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane); + +int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred); + +#ifdef __cplusplus +} // extern "C" +#endif #endif // 
VP9_ENCODER_VP9_ENCODEMB_H_ diff --git a/libvpx/vp9/encoder/vp9_encodemv.c b/libvpx/vp9/encoder/vp9_encodemv.c index 030ca64..703dde3 100644 --- a/libvpx/vp9/encoder/vp9_encodemv.c +++ b/libvpx/vp9/encoder/vp9_encodemv.c @@ -13,12 +13,21 @@ #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_entropymode.h" #include "vp9/common/vp9_systemdependent.h" + +#include "vp9/encoder/vp9_cost.h" #include "vp9/encoder/vp9_encodemv.h" +static struct vp9_token mv_joint_encodings[MV_JOINTS]; +static struct vp9_token mv_class_encodings[MV_CLASSES]; +static struct vp9_token mv_fp_encodings[MV_FP_SIZE]; +static struct vp9_token mv_class0_encodings[CLASS0_SIZE]; -#ifdef ENTROPY_STATS -extern unsigned int active_section; -#endif +void vp9_entropy_mv_init() { + vp9_tokens_from_tree(mv_joint_encodings, vp9_mv_joint_tree); + vp9_tokens_from_tree(mv_class_encodings, vp9_mv_class_tree); + vp9_tokens_from_tree(mv_class0_encodings, vp9_mv_class0_tree); + vp9_tokens_from_tree(mv_fp_encodings, vp9_mv_fp_tree); +} static void encode_mv_component(vp9_writer* w, int comp, const nmv_component* mvcomp, int usehp) { @@ -36,13 +45,13 @@ static void encode_mv_component(vp9_writer* w, int comp, vp9_write(w, sign, mvcomp->sign); // Class - write_token(w, vp9_mv_class_tree, mvcomp->classes, - &vp9_mv_class_encodings[mv_class]); + vp9_write_token(w, vp9_mv_class_tree, mvcomp->classes, + &mv_class_encodings[mv_class]); // Integer bits if (mv_class == MV_CLASS_0) { - write_token(w, vp9_mv_class0_tree, mvcomp->class0, - &vp9_mv_class0_encodings[d]); + vp9_write_token(w, vp9_mv_class0_tree, mvcomp->class0, + &mv_class0_encodings[d]); } else { int i; const int n = mv_class + CLASS0_BITS - 1; // number of bits @@ -51,9 +60,9 @@ static void encode_mv_component(vp9_writer* w, int comp, } // Fractional bits - write_token(w, vp9_mv_fp_tree, - mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp, - &vp9_mv_fp_encodings[fr]); + vp9_write_token(w, vp9_mv_fp_tree, + mv_class == MV_CLASS_0 ? 
mvcomp->class0_fp[d] : mvcomp->fp, + &mv_fp_encodings[fr]); // High precision bit if (usehp) @@ -68,7 +77,7 @@ static void build_nmv_component_cost_table(int *mvcost, int i, v; int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE]; int bits_cost[MV_OFFSET_BITS][2]; - int class0_fp_cost[CLASS0_SIZE][4], fp_cost[4]; + int class0_fp_cost[CLASS0_SIZE][MV_FP_SIZE], fp_cost[MV_FP_SIZE]; int class0_hp_cost[2], hp_cost[2]; sign_cost[0] = vp9_cost_zero(mvcomp->sign); @@ -126,138 +135,66 @@ static void build_nmv_component_cost_table(int *mvcost, static int update_mv(vp9_writer *w, const unsigned int ct[2], vp9_prob *cur_p, vp9_prob upd_p) { - const vp9_prob new_p = get_binary_prob(ct[0], ct[1]); - vp9_prob mod_p = new_p | 1; - const int cur_b = cost_branch256(ct, *cur_p); - const int mod_b = cost_branch256(ct, mod_p); - const int cost = 7 * 256 + (vp9_cost_one(upd_p) - vp9_cost_zero(upd_p)); - if (cur_b - mod_b > cost) { - *cur_p = mod_p; - vp9_write(w, 1, upd_p); - vp9_write_literal(w, mod_p >> 1, 7); - return 1; - } else { - vp9_write(w, 0, upd_p); - return 0; + const vp9_prob new_p = get_binary_prob(ct[0], ct[1]) | 1; + const int update = cost_branch256(ct, *cur_p) + vp9_cost_zero(upd_p) > + cost_branch256(ct, new_p) + vp9_cost_one(upd_p) + 7 * 256; + vp9_write(w, update, upd_p); + if (update) { + *cur_p = new_p; + vp9_write_literal(w, new_p >> 1, 7); } + return update; } -static void counts_to_nmv_context( - nmv_context_counts *nmv_count, - int usehp, - unsigned int (*branch_ct_joint)[2], - unsigned int (*branch_ct_sign)[2], - unsigned int (*branch_ct_classes)[MV_CLASSES - 1][2], - unsigned int (*branch_ct_class0)[CLASS0_SIZE - 1][2], - unsigned int (*branch_ct_bits)[MV_OFFSET_BITS][2], - unsigned int (*branch_ct_class0_fp)[CLASS0_SIZE][4 - 1][2], - unsigned int (*branch_ct_fp)[4 - 1][2], - unsigned int (*branch_ct_class0_hp)[2], - unsigned int (*branch_ct_hp)[2]) { - int i, j, k; - vp9_tree_probs_from_distribution(vp9_mv_joint_tree, branch_ct_joint, - nmv_count->joints); - for (i = 0; i < 2; ++i) { - const uint32_t s0 = nmv_count->comps[i].sign[0]; - const uint32_t s1 = nmv_count->comps[i].sign[1]; - - branch_ct_sign[i][0] = s0; - branch_ct_sign[i][1] = s1; - vp9_tree_probs_from_distribution(vp9_mv_class_tree, - branch_ct_classes[i], - nmv_count->comps[i].classes); - vp9_tree_probs_from_distribution(vp9_mv_class0_tree, - branch_ct_class0[i], - nmv_count->comps[i].class0); - for (j = 0; j < MV_OFFSET_BITS; ++j) { - const uint32_t b0 = nmv_count->comps[i].bits[j][0]; - const uint32_t b1 = nmv_count->comps[i].bits[j][1]; - - branch_ct_bits[i][j][0] = b0; - branch_ct_bits[i][j][1] = b1; - } - } - for (i = 0; i < 2; ++i) { - for (k = 0; k < CLASS0_SIZE; ++k) { - vp9_tree_probs_from_distribution(vp9_mv_fp_tree, - branch_ct_class0_fp[i][k], - nmv_count->comps[i].class0_fp[k]); - } - vp9_tree_probs_from_distribution(vp9_mv_fp_tree, - branch_ct_fp[i], - nmv_count->comps[i].fp); - } - if (usehp) { - for (i = 0; i < 2; ++i) { - const uint32_t c0_hp0 = nmv_count->comps[i].class0_hp[0]; - const uint32_t c0_hp1 = nmv_count->comps[i].class0_hp[1]; - const uint32_t hp0 = nmv_count->comps[i].hp[0]; - const uint32_t hp1 = nmv_count->comps[i].hp[1]; +static void write_mv_update(const vp9_tree_index *tree, + vp9_prob probs[/*n - 1*/], + const unsigned int counts[/*n - 1*/], + int n, vp9_writer *w) { + int i; + unsigned int branch_ct[32][2]; - branch_ct_class0_hp[i][0] = c0_hp0; - branch_ct_class0_hp[i][1] = c0_hp1; + // Assuming max number of probabilities <= 32 + assert(n <= 32); - branch_ct_hp[i][0] 
= hp0; - branch_ct_hp[i][1] = hp1; - } - } + vp9_tree_probs_from_distribution(tree, branch_ct, counts); + for (i = 0; i < n - 1; ++i) + update_mv(w, branch_ct[i], &probs[i], MV_UPDATE_PROB); } -void vp9_write_nmv_probs(VP9_COMP* const cpi, int usehp, vp9_writer* const bc) { +void vp9_write_nmv_probs(VP9_COMMON *cm, int usehp, vp9_writer *w) { int i, j; - unsigned int branch_ct_joint[MV_JOINTS - 1][2]; - unsigned int branch_ct_sign[2][2]; - unsigned int branch_ct_classes[2][MV_CLASSES - 1][2]; - unsigned int branch_ct_class0[2][CLASS0_SIZE - 1][2]; - unsigned int branch_ct_bits[2][MV_OFFSET_BITS][2]; - unsigned int branch_ct_class0_fp[2][CLASS0_SIZE][4 - 1][2]; - unsigned int branch_ct_fp[2][4 - 1][2]; - unsigned int branch_ct_class0_hp[2][2]; - unsigned int branch_ct_hp[2][2]; - nmv_context *mvc = &cpi->common.fc.nmvc; - - counts_to_nmv_context(&cpi->NMVcount, usehp, - branch_ct_joint, branch_ct_sign, branch_ct_classes, - branch_ct_class0, branch_ct_bits, - branch_ct_class0_fp, branch_ct_fp, - branch_ct_class0_hp, branch_ct_hp); - - for (j = 0; j < MV_JOINTS - 1; ++j) - update_mv(bc, branch_ct_joint[j], &mvc->joints[j], NMV_UPDATE_PROB); + nmv_context *const mvc = &cm->fc.nmvc; + nmv_context_counts *const counts = &cm->counts.mv; - for (i = 0; i < 2; ++i) { - update_mv(bc, branch_ct_sign[i], &mvc->comps[i].sign, NMV_UPDATE_PROB); - for (j = 0; j < MV_CLASSES - 1; ++j) - update_mv(bc, branch_ct_classes[i][j], &mvc->comps[i].classes[j], - NMV_UPDATE_PROB); - - for (j = 0; j < CLASS0_SIZE - 1; ++j) - update_mv(bc, branch_ct_class0[i][j], &mvc->comps[i].class0[j], - NMV_UPDATE_PROB); + write_mv_update(vp9_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w); + for (i = 0; i < 2; ++i) { + nmv_component *comp = &mvc->comps[i]; + nmv_component_counts *comp_counts = &counts->comps[i]; + + update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB); + write_mv_update(vp9_mv_class_tree, comp->classes, comp_counts->classes, + MV_CLASSES, w); + write_mv_update(vp9_mv_class0_tree, comp->class0, comp_counts->class0, + CLASS0_SIZE, w); for (j = 0; j < MV_OFFSET_BITS; ++j) - update_mv(bc, branch_ct_bits[i][j], &mvc->comps[i].bits[j], - NMV_UPDATE_PROB); + update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB); } for (i = 0; i < 2; ++i) { - for (j = 0; j < CLASS0_SIZE; ++j) { - int k; - for (k = 0; k < 3; ++k) - update_mv(bc, branch_ct_class0_fp[i][j][k], - &mvc->comps[i].class0_fp[j][k], NMV_UPDATE_PROB); - } + for (j = 0; j < CLASS0_SIZE; ++j) + write_mv_update(vp9_mv_fp_tree, mvc->comps[i].class0_fp[j], + counts->comps[i].class0_fp[j], MV_FP_SIZE, w); - for (j = 0; j < 3; ++j) - update_mv(bc, branch_ct_fp[i][j], &mvc->comps[i].fp[j], NMV_UPDATE_PROB); + write_mv_update(vp9_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp, + MV_FP_SIZE, w); } if (usehp) { for (i = 0; i < 2; ++i) { - update_mv(bc, branch_ct_class0_hp[i], &mvc->comps[i].class0_hp, - NMV_UPDATE_PROB); - update_mv(bc, branch_ct_hp[i], &mvc->comps[i].hp, - NMV_UPDATE_PROB); + update_mv(w, counts->comps[i].class0_hp, &mvc->comps[i].class0_hp, + MV_UPDATE_PROB); + update_mv(w, counts->comps[i].hp, &mvc->comps[i].hp, MV_UPDATE_PROB); } } } @@ -270,7 +207,7 @@ void vp9_encode_mv(VP9_COMP* cpi, vp9_writer* w, const MV_JOINT_TYPE j = vp9_get_mv_joint(&diff); usehp = usehp && vp9_use_mv_hp(ref); - write_token(w, vp9_mv_joint_tree, mvctx->joints, &vp9_mv_joint_encodings[j]); + vp9_write_token(w, vp9_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]); if (mv_joint_vertical(j)) encode_mv_component(w, diff.row, &mvctx->comps[0], 
usehp); @@ -285,34 +222,29 @@ void vp9_encode_mv(VP9_COMP* cpi, vp9_writer* w, } } -void vp9_build_nmv_cost_table(int *mvjoint, - int *mvcost[2], - const nmv_context* const mvctx, - int usehp, - int mvc_flag_v, - int mvc_flag_h) { - vp9_clear_system_state(); - vp9_cost_tokens(mvjoint, mvctx->joints, vp9_mv_joint_tree); - if (mvc_flag_v) - build_nmv_component_cost_table(mvcost[0], &mvctx->comps[0], usehp); - if (mvc_flag_h) - build_nmv_component_cost_table(mvcost[1], &mvctx->comps[1], usehp); +void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2], + const nmv_context* ctx, int usehp) { + vp9_cost_tokens(mvjoint, ctx->joints, vp9_mv_joint_tree); + build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp); + build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp); } -static void inc_mvs(int_mv mv[2], int_mv ref[2], int is_compound, +static void inc_mvs(const int_mv mv[2], const MV ref[2], int is_compound, nmv_context_counts *counts) { int i; for (i = 0; i < 1 + is_compound; ++i) { - const MV diff = { mv[i].as_mv.row - ref[i].as_mv.row, - mv[i].as_mv.col - ref[i].as_mv.col }; + const MV diff = { mv[i].as_mv.row - ref[i].row, + mv[i].as_mv.col - ref[i].col }; vp9_inc_mv(&diff, counts); } } -void vp9_update_mv_count(VP9_COMP *cpi, MACROBLOCK *x, int_mv best_ref_mv[2]) { - MODE_INFO *mi = x->e_mbd.mi_8x8[0]; - MB_MODE_INFO *const mbmi = &mi->mbmi; +void vp9_update_mv_count(VP9_COMMON *cm, const MACROBLOCKD *xd, + const MV best_ref_mv[2]) { + const MODE_INFO *mi = xd->mi_8x8[0]; + const MB_MODE_INFO *const mbmi = &mi->mbmi; const int is_compound = has_second_ref(mbmi); + nmv_context_counts *counts = &cm->counts.mv; if (mbmi->sb_type < BLOCK_8X8) { const int num_4x4_w = num_4x4_blocks_wide_lookup[mbmi->sb_type]; @@ -323,10 +255,11 @@ void vp9_update_mv_count(VP9_COMP *cpi, MACROBLOCK *x, int_mv best_ref_mv[2]) { for (idx = 0; idx < 2; idx += num_4x4_w) { const int i = idy * 2 + idx; if (mi->bmi[i].as_mode == NEWMV) - inc_mvs(mi->bmi[i].as_mv, best_ref_mv, is_compound, &cpi->NMVcount); + inc_mvs(mi->bmi[i].as_mv, best_ref_mv, is_compound, counts); } } } else if (mbmi->mode == NEWMV) { - inc_mvs(mbmi->mv, best_ref_mv, is_compound, &cpi->NMVcount); + inc_mvs(mbmi->mv, best_ref_mv, is_compound, counts); } } + diff --git a/libvpx/vp9/encoder/vp9_encodemv.h b/libvpx/vp9/encoder/vp9_encodemv.h index 6331778..f16b2c1 100644 --- a/libvpx/vp9/encoder/vp9_encodemv.h +++ b/libvpx/vp9/encoder/vp9_encodemv.h @@ -14,18 +14,25 @@ #include "vp9/encoder/vp9_onyx_int.h" -void vp9_write_nmv_probs(VP9_COMP* const, int usehp, vp9_writer* const); +#ifdef __cplusplus +extern "C" { +#endif + +void vp9_entropy_mv_init(); + +void vp9_write_nmv_probs(VP9_COMMON *cm, int usehp, vp9_writer *w); void vp9_encode_mv(VP9_COMP *cpi, vp9_writer* w, const MV* mv, const MV* ref, const nmv_context* mvctx, int usehp); -void vp9_build_nmv_cost_table(int *mvjoint, - int *mvcost[2], - const nmv_context* const mvctx, - int usehp, - int mvc_flag_v, - int mvc_flag_h); +void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2], + const nmv_context* mvctx, int usehp); + +void vp9_update_mv_count(VP9_COMMON *cm, const MACROBLOCKD *xd, + const MV best_ref_mv[2]); -void vp9_update_mv_count(VP9_COMP *cpi, MACROBLOCK *x, int_mv best_ref_mv[2]); +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_ENCODER_VP9_ENCODEMV_H_ diff --git a/libvpx/vp9/common/vp9_extend.c b/libvpx/vp9/encoder/vp9_extend.c index 836bf0e..dcbb5ac 100644 --- a/libvpx/vp9/common/vp9_extend.c +++ b/libvpx/vp9/encoder/vp9_extend.c @@ -11,7 +11,7 @@ 
#include "vpx_mem/vpx_mem.h" #include "vp9/common/vp9_common.h" -#include "vp9/common/vp9_extend.h" +#include "vp9/encoder/vp9_extend.h" static void copy_and_extend_plane(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch, diff --git a/libvpx/vp9/common/vp9_extend.h b/libvpx/vp9/encoder/vp9_extend.h index 7ff79b7..058fe09 100644 --- a/libvpx/vp9/common/vp9_extend.h +++ b/libvpx/vp9/encoder/vp9_extend.h @@ -8,12 +8,16 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef VP9_COMMON_VP9_EXTEND_H_ -#define VP9_COMMON_VP9_EXTEND_H_ +#ifndef VP9_ENCODER_VP9_EXTEND_H_ +#define VP9_ENCODER_VP9_EXTEND_H_ #include "vpx_scale/yv12config.h" #include "vpx/vpx_integer.h" +#ifdef __cplusplus +extern "C" { +#endif + void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst); @@ -22,4 +26,8 @@ void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int srcy, int srcx, int srch, int srcw); -#endif // VP9_COMMON_VP9_EXTEND_H_ +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_ENCODER_VP9_EXTEND_H_ diff --git a/libvpx/vp9/encoder/vp9_firstpass.c b/libvpx/vp9/encoder/vp9_firstpass.c index 974c300..acb5a09 100644 --- a/libvpx/vp9/encoder/vp9_firstpass.c +++ b/libvpx/vp9/encoder/vp9_firstpass.c @@ -8,32 +8,34 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include <math.h> #include <limits.h> +#include <math.h> #include <stdio.h> -#include "vp9/encoder/vp9_block.h" -#include "vp9/encoder/vp9_onyx_int.h" -#include "vp9/encoder/vp9_variance.h" -#include "vp9/encoder/vp9_encodeintra.h" -#include "vp9/encoder/vp9_mcomp.h" -#include "vp9/encoder/vp9_firstpass.h" + +#include "./vpx_scale_rtcd.h" + +#include "vpx_mem/vpx_mem.h" #include "vpx_scale/vpx_scale.h" +#include "vpx_scale/yv12config.h" + +#include "vp9/common/vp9_entropymv.h" +#include "vp9/common/vp9_quant_common.h" +#include "vp9/common/vp9_reconinter.h" // vp9_setup_dst_planes() +#include "vp9/common/vp9_systemdependent.h" + +#include "vp9/encoder/vp9_block.h" #include "vp9/encoder/vp9_encodeframe.h" #include "vp9/encoder/vp9_encodemb.h" -#include "vp9/common/vp9_extend.h" -#include "vp9/common/vp9_systemdependent.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_scale/yv12config.h" +#include "vp9/encoder/vp9_encodemv.h" +#include "vp9/encoder/vp9_extend.h" +#include "vp9/encoder/vp9_firstpass.h" +#include "vp9/encoder/vp9_mcomp.h" +#include "vp9/encoder/vp9_onyx_int.h" #include "vp9/encoder/vp9_quantize.h" -#include "vp9/encoder/vp9_rdopt.h" #include "vp9/encoder/vp9_ratectrl.h" -#include "vp9/common/vp9_quant_common.h" -#include "vp9/common/vp9_entropymv.h" -#include "vp9/encoder/vp9_encodemv.h" +#include "vp9/encoder/vp9_rdopt.h" #include "vp9/encoder/vp9_vaq.h" -#include "./vpx_scale_rtcd.h" -// TODO(jkoleszar): for setup_dst_planes -#include "vp9/common/vp9_reconinter.h" +#include "vp9/encoder/vp9_variance.h" #define OUTPUT_FPF 0 @@ -50,8 +52,9 @@ #define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? 
(x) - 0.000001 : (x) + 0.000001) -#define POW1 (double)cpi->oxcf.two_pass_vbrbias/100.0 -#define POW2 (double)cpi->oxcf.two_pass_vbrbias/100.0 +#define MIN_KF_BOOST 300 + +#define DISABLE_RC_LONG_TERM_MEM 0 static void swap_yv12(YV12_BUFFER_CONFIG *a, YV12_BUFFER_CONFIG *b) { YV12_BUFFER_CONFIG temp = *a; @@ -59,15 +62,13 @@ static void swap_yv12(YV12_BUFFER_CONFIG *a, YV12_BUFFER_CONFIG *b) { *b = temp; } -static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame); - static int select_cq_level(int qindex) { int ret_val = QINDEX_RANGE - 1; int i; double target_q = (vp9_convert_qindex_to_q(qindex) * 0.5847) + 1.0; - for (i = 0; i < QINDEX_RANGE; i++) { + for (i = 0; i < QINDEX_RANGE; ++i) { if (target_q <= vp9_convert_qindex_to_q(i)) { ret_val = i; break; @@ -77,33 +78,48 @@ static int select_cq_level(int qindex) { return ret_val; } +static int gfboost_qadjust(int qindex) { + const double q = vp9_convert_qindex_to_q(qindex); + return (int)((0.00000828 * q * q * q) + + (-0.0055 * q * q) + + (1.32 * q) + 79.3); +} + +static int kfboost_qadjust(int qindex) { + const double q = vp9_convert_qindex_to_q(qindex); + return (int)((0.00000973 * q * q * q) + + (-0.00613 * q * q) + + (1.316 * q) + 121.2); +} // Resets the first pass file to the given position using a relative seek from // the current position. -static void reset_fpf_position(VP9_COMP *cpi, FIRSTPASS_STATS *position) { - cpi->twopass.stats_in = position; +static void reset_fpf_position(struct twopass_rc *p, + const FIRSTPASS_STATS *position) { + p->stats_in = position; } -static int lookup_next_frame_stats(VP9_COMP *cpi, FIRSTPASS_STATS *next_frame) { - if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) +static int lookup_next_frame_stats(const struct twopass_rc *p, + FIRSTPASS_STATS *next_frame) { + if (p->stats_in >= p->stats_in_end) return EOF; - *next_frame = *cpi->twopass.stats_in; + *next_frame = *p->stats_in; return 1; } -// Read frame stats at an offset from the current position -static int read_frame_stats(VP9_COMP *cpi, - FIRSTPASS_STATS *frame_stats, - int offset) { - FIRSTPASS_STATS *fps_ptr = cpi->twopass.stats_in; - // Check legality of offset +// Read frame stats at an offset from the current position. +static int read_frame_stats(const struct twopass_rc *p, + FIRSTPASS_STATS *frame_stats, int offset) { + const FIRSTPASS_STATS *fps_ptr = p->stats_in; + + // Check legality of offset. 
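[Editor's note: calculate_modified_err() earlier in this hunk folds the old POW1/POW2 macros into one pow() call and clamps the result to the new modified_error_min/max fields. A standalone numeric check with made-up inputs; the clamp bounds here are invented, the real ones live in struct twopass_rc:]

    #include <math.h>
    #include <stdio.h>

    static double fclamp_sketch(double v, double lo, double hi) {
      return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void) {
      const double av_err = 1000.0;    /* made-up section average error  */
      const double this_err = 4000.0;  /* made-up frame error            */
      const double vbrbias = 50.0;     /* oxcf.two_pass_vbrbias, percent */
      const double m = av_err * pow(this_err / av_err, vbrbias / 100.0);
      /* 1000 * 4^0.5 = 2000: a 4x-harder frame gets only 2x the weight
       * when the bias is 50, i.e. the boost is sublinear. */
      printf("modified error = %.1f\n", fclamp_sketch(m, 0.0, 10000.0));
      return 0;
    }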
if (offset >= 0) { - if (&fps_ptr[offset] >= cpi->twopass.stats_in_end) + if (&fps_ptr[offset] >= p->stats_in_end) return EOF; } else if (offset < 0) { - if (&fps_ptr[offset] < cpi->twopass.stats_in_start) + if (&fps_ptr[offset] < p->stats_in_start) return EOF; } @@ -111,19 +127,17 @@ static int read_frame_stats(VP9_COMP *cpi, return 1; } -static int input_stats(VP9_COMP *cpi, FIRSTPASS_STATS *fps) { - if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) +static int input_stats(struct twopass_rc *p, FIRSTPASS_STATS *fps) { + if (p->stats_in >= p->stats_in_end) return EOF; - *fps = *cpi->twopass.stats_in; - cpi->twopass.stats_in = - (void *)((char *)cpi->twopass.stats_in + sizeof(FIRSTPASS_STATS)); + *fps = *p->stats_in; + ++p->stats_in; return 1; } -static void output_stats(const VP9_COMP *cpi, - struct vpx_codec_pkt_list *pktlist, - FIRSTPASS_STATS *stats) { +static void output_stats(FIRSTPASS_STATS *stats, + struct vpx_codec_pkt_list *pktlist) { struct vpx_codec_cx_pkt pkt; pkt.kind = VPX_CODEC_STATS_PKT; pkt.data.twopass_stats.buf = stats; @@ -132,12 +146,11 @@ static void output_stats(const VP9_COMP *cpi, // TEMP debug code #if OUTPUT_FPF - { FILE *fpfile; fpfile = fopen("firstpass.stt", "a"); - fprintf(stdout, "%12.0f %12.0f %12.0f %12.0f %12.0f %12.4f %12.4f" + fprintf(fpfile, "%12.0f %12.0f %12.0f %12.0f %12.0f %12.4f %12.4f" "%12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f" "%12.0f %12.0f %12.4f %12.0f %12.0f %12.4f\n", stats->frame, @@ -254,13 +267,17 @@ static void avg_stats(FIRSTPASS_STATS *section) { // Calculate a modified Error used in distributing bits between easier and // harder frames. -static double calculate_modified_err(VP9_COMP *cpi, - FIRSTPASS_STATS *this_frame) { - const FIRSTPASS_STATS *const stats = &cpi->twopass.total_stats; +static double calculate_modified_err(const VP9_COMP *cpi, + const FIRSTPASS_STATS *this_frame) { + const struct twopass_rc *const twopass = &cpi->twopass; + const FIRSTPASS_STATS *const stats = &twopass->total_stats; const double av_err = stats->ssim_weighted_pred_err / stats->count; - const double this_err = this_frame->ssim_weighted_pred_err; - return av_err * pow(this_err / DOUBLE_DIVIDE_CHECK(av_err), - this_err > av_err ? POW1 : POW2); + double modified_error = av_err * pow(this_frame->ssim_weighted_pred_err / + DOUBLE_DIVIDE_CHECK(av_err), + cpi->oxcf.two_pass_vbrbias / 100.0); + + return fclamp(modified_error, + twopass->modified_error_min, twopass->modified_error_max); } static const double weight_table[256] = { @@ -303,43 +320,35 @@ static const double weight_table[256] = { 1.000000, 1.000000, 1.000000, 1.000000 }; -static double simple_weight(YV12_BUFFER_CONFIG *source) { +static double simple_weight(const YV12_BUFFER_CONFIG *buf) { int i, j; + double sum = 0.0; + const int w = buf->y_crop_width; + const int h = buf->y_crop_height; + const uint8_t *row = buf->y_buffer; + + for (i = 0; i < h; ++i) { + const uint8_t *pixel = row; + for (j = 0; j < w; ++j) + sum += weight_table[*pixel++]; + row += buf->y_stride; + } - uint8_t *src = source->y_buffer; - double sum_weights = 0.0; - - // Loop through the Y plane examining levels and creating a weight for - // the image. 
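[Editor's note: the simple_weight() rewrite in progress here walks the cropped luma plane with plain row/column loops and, unlike the version being deleted just below, floors the result at 0.1 via MAX(). A compilable miniature showing the floor taking effect; the 4x4 plane and weight table are invented, the real table has 256 tuned entries:]

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t plane[4][4];
      double table[256], sum = 0.0;
      int i, j;
      for (i = 0; i < 256; ++i)
        table[i] = i / 255.0;  /* invented weights */
      for (i = 0; i < 4; ++i)
        for (j = 0; j < 4; ++j)
          plane[i][j] = 10;    /* a uniformly dark plane */
      for (i = 0; i < 4; ++i)
        for (j = 0; j < 4; ++j)
          sum += table[plane[i][j]];
      sum /= 4 * 4;  /* average over w * h, here the whole plane */
      printf("weight = %.3f\n", sum > 0.1 ? sum : 0.1);  /* floor: 0.100 */
      return 0;
    }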
- i = source->y_height; - do { - j = source->y_width; - do { - sum_weights += weight_table[ *src]; - src++; - } while (--j); - src -= source->y_width; - src += source->y_stride; - } while (--i); - - sum_weights /= (source->y_height * source->y_width); - - return sum_weights; + return MAX(0.1, sum / (w * h)); } +// This function returns the maximum target rate per frame. +static int frame_max_bits(const VP9_COMP *cpi) { + int64_t max_bits = + ((int64_t)cpi->rc.av_per_frame_bandwidth * + (int64_t)cpi->oxcf.two_pass_vbrmax_section) / 100; -// This function returns the current per frame maximum bitrate target. -static int frame_max_bits(VP9_COMP *cpi) { - // Max allocation for a single frame based on the max section guidelines - // passed in and how many bits are left. - // For VBR base this on the bits and frames left plus the - // two_pass_vbrmax_section rate passed in by the user. - const double max_bits = (1.0 * cpi->twopass.bits_left / - (cpi->twopass.total_stats.count - cpi->common.current_video_frame)) * - (cpi->oxcf.two_pass_vbrmax_section / 100.0); + if (max_bits < 0) + max_bits = 0; + else if (max_bits > cpi->rc.max_frame_bandwidth) + max_bits = cpi->rc.max_frame_bandwidth; - // Trap case where we are out of bits. - return MAX((int)max_bits, 0); + return (int)max_bits; } void vp9_init_first_pass(VP9_COMP *cpi) { @@ -347,135 +356,115 @@ void vp9_init_first_pass(VP9_COMP *cpi) { } void vp9_end_first_pass(VP9_COMP *cpi) { - output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.total_stats); + output_stats(&cpi->twopass.total_stats, cpi->output_pkt_list); } -static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x, - YV12_BUFFER_CONFIG *recon_buffer, - int *best_motion_err, int recon_yoffset) { - MACROBLOCKD *const xd = &x->e_mbd; - - // Set up pointers for this macro block recon buffer - xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset; - - switch (xd->mi_8x8[0]->mbmi.sb_type) { +static vp9_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) { + switch (bsize) { case BLOCK_8X8: - vp9_mse8x8(x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride, - (unsigned int *)(best_motion_err)); - break; + return vp9_mse8x8; case BLOCK_16X8: - vp9_mse16x8(x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride, - (unsigned int *)(best_motion_err)); - break; + return vp9_mse16x8; case BLOCK_8X16: - vp9_mse8x16(x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride, - (unsigned int *)(best_motion_err)); - break; + return vp9_mse8x16; default: - vp9_mse16x16(x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride, - (unsigned int *)(best_motion_err)); - break; + return vp9_mse16x16; } } +static unsigned int zz_motion_search(const MACROBLOCK *x) { + const MACROBLOCKD *const xd = &x->e_mbd; + const uint8_t *const src = x->plane[0].src.buf; + const int src_stride = x->plane[0].src.stride; + const uint8_t *const ref = xd->plane[0].pre[0].buf; + const int ref_stride = xd->plane[0].pre[0].stride; + unsigned int sse; + vp9_variance_fn_t fn = get_block_variance_fn(xd->mi_8x8[0]->mbmi.sb_type); + fn(src, src_stride, ref, ref_stride, &sse); + return sse; +} + static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x, - int_mv *ref_mv, MV *best_mv, - YV12_BUFFER_CONFIG *recon_buffer, - int *best_motion_err, int recon_yoffset) { + const MV *ref_mv, MV *best_mv, + int *best_motion_err) { MACROBLOCKD *const xd = 
&x->e_mbd; - int num00; - - int_mv tmp_mv; - int_mv ref_mv_full; - - int tmp_err; + MV tmp_mv = {0, 0}; + MV ref_mv_full = {ref_mv->row >> 3, ref_mv->col >> 3}; + int num00, tmp_err, n, sr = 0; int step_param = 3; int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; - int n; - vp9_variance_fn_ptr_t v_fn_ptr = - cpi->fn_ptr[xd->mi_8x8[0]->mbmi.sb_type]; + const BLOCK_SIZE bsize = xd->mi_8x8[0]->mbmi.sb_type; + vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize]; int new_mv_mode_penalty = 256; + const int quart_frm = MIN(cpi->common.width, cpi->common.height); - int sr = 0; - int quart_frm = MIN(cpi->common.width, cpi->common.height); - - // refine the motion search range accroding to the frame dimension - // for first pass test + // Refine the motion search range according to the frame dimension + // for first pass test. while ((quart_frm << sr) < MAX_FULL_PEL_VAL) - sr++; - if (sr) - sr--; + ++sr; - step_param += sr; + step_param += sr; further_steps -= sr; - // override the default variance function to use MSE - switch (xd->mi_8x8[0]->mbmi.sb_type) { - case BLOCK_8X8: - v_fn_ptr.vf = vp9_mse8x8; - break; - case BLOCK_16X8: - v_fn_ptr.vf = vp9_mse16x8; - break; - case BLOCK_8X16: - v_fn_ptr.vf = vp9_mse8x16; - break; - default: - v_fn_ptr.vf = vp9_mse16x16; - break; - } + // Override the default variance function to use MSE. + v_fn_ptr.vf = get_block_variance_fn(bsize); - // Set up pointers for this macro block recon buffer - xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset; - - // Initial step/diamond search centred on best mv - tmp_mv.as_int = 0; - ref_mv_full.as_mv.col = ref_mv->as_mv.col >> 3; - ref_mv_full.as_mv.row = ref_mv->as_mv.row >> 3; - tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv, step_param, + // Center the initial step/diamond search on best mv. + tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv, + step_param, x->sadperbit16, &num00, &v_fn_ptr, x->nmvjointcost, x->mvcost, ref_mv); + if (tmp_err < INT_MAX) + tmp_err = vp9_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1); if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty; if (tmp_err < *best_motion_err) { *best_motion_err = tmp_err; - best_mv->row = tmp_mv.as_mv.row; - best_mv->col = tmp_mv.as_mv.col; + best_mv->row = tmp_mv.row; + best_mv->col = tmp_mv.col; } - // Further step/diamond searches as necessary + // Carry out further step/diamond searches as necessary. n = num00; num00 = 0; while (n < further_steps) { - n++; + ++n; if (num00) { - num00--; + --num00; } else { tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv, step_param + n, x->sadperbit16, &num00, &v_fn_ptr, x->nmvjointcost, x->mvcost, ref_mv); + if (tmp_err < INT_MAX) + tmp_err = vp9_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1); if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty; if (tmp_err < *best_motion_err) { *best_motion_err = tmp_err; - best_mv->row = tmp_mv.as_mv.row; - best_mv->col = tmp_mv.as_mv.col; + best_mv->row = tmp_mv.row; + best_mv->col = tmp_mv.col; } } } } +static BLOCK_SIZE get_bsize(const VP9_COMMON *cm, int mb_row, int mb_col) { + if (2 * mb_col + 1 < cm->mi_cols) { + return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 + : BLOCK_16X8; + } else { + return 2 * mb_row + 1 < cm->mi_rows ? 
BLOCK_8X16 + : BLOCK_8X8; + } +} + void vp9_first_pass(VP9_COMP *cpi) { int mb_row, mb_col; MACROBLOCK *const x = &cpi->mb; @@ -484,24 +473,23 @@ void vp9_first_pass(VP9_COMP *cpi) { TileInfo tile; struct macroblock_plane *const p = x->plane; struct macroblockd_plane *const pd = xd->plane; - PICK_MODE_CONTEXT *ctx = &x->sb64_context; + const PICK_MODE_CONTEXT *ctx = &x->sb64_context; int i; int recon_yoffset, recon_uvoffset; - const int lst_yv12_idx = cm->ref_frame_map[cpi->lst_fb_idx]; - const int gld_yv12_idx = cm->ref_frame_map[cpi->gld_fb_idx]; - YV12_BUFFER_CONFIG *const lst_yv12 = &cm->yv12_fb[lst_yv12_idx]; - YV12_BUFFER_CONFIG *const gld_yv12 = &cm->yv12_fb[gld_yv12_idx]; + YV12_BUFFER_CONFIG *const lst_yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); + YV12_BUFFER_CONFIG *const gld_yv12 = get_ref_frame_buffer(cpi, GOLDEN_FRAME); YV12_BUFFER_CONFIG *const new_yv12 = get_frame_new_buffer(cm); const int recon_y_stride = lst_yv12->y_stride; const int recon_uv_stride = lst_yv12->uv_stride; + const int uv_mb_height = 16 >> (lst_yv12->y_height > lst_yv12->uv_height); int64_t intra_error = 0; int64_t coded_error = 0; int64_t sr_coded_error = 0; int sum_mvr = 0, sum_mvc = 0; int sum_mvr_abs = 0, sum_mvc_abs = 0; - int sum_mvrs = 0, sum_mvcs = 0; + int64_t sum_mvrs = 0, sum_mvcs = 0; int mvcount = 0; int intercount = 0; int second_ref_count = 0; @@ -510,112 +498,85 @@ void vp9_first_pass(VP9_COMP *cpi) { int new_mv_count = 0; int sum_in_vectors = 0; uint32_t lastmv_as_int = 0; + struct twopass_rc *const twopass = &cpi->twopass; + const MV zero_mv = {0, 0}; - int_mv zero_ref_mv; - - zero_ref_mv.as_int = 0; - - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); vp9_setup_src_planes(x, cpi->Source, 0, 0); - setup_pre_planes(xd, 0, lst_yv12, 0, 0, NULL); - setup_dst_planes(xd, new_yv12, 0, 0); + vp9_setup_pre_planes(xd, 0, lst_yv12, 0, 0, NULL); + vp9_setup_dst_planes(xd, new_yv12, 0, 0); xd->mi_8x8 = cm->mi_grid_visible; - // required for vp9_frame_init_quantizer xd->mi_8x8[0] = cm->mi; - setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); + vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); vp9_frame_init_quantizer(cpi); for (i = 0; i < MAX_MB_PLANE; ++i) { p[i].coeff = ctx->coeff_pbuf[i][1]; - pd[i].qcoeff = ctx->qcoeff_pbuf[i][1]; + p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; - pd[i].eobs = ctx->eobs_pbuf[i][1]; + p[i].eobs = ctx->eobs_pbuf[i][1]; } x->skip_recode = 0; + vp9_init_mv_probs(cm); + vp9_initialize_rd_consts(cpi); - // Initialise the MV cost table to the defaults - // if( cm->current_video_frame == 0) - // if ( 0 ) - { - vp9_init_mv_probs(cm); - vp9_initialize_rd_consts(cpi); - } - - // tiling is ignored in the first pass + // Tiling is ignored in the first pass. vp9_tile_init(&tile, cm, 0, 0); - // for each macroblock row in image - for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) { + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { int_mv best_ref_mv; best_ref_mv.as_int = 0; - // reset above block coeffs + // Reset above block coeffs. xd->up_available = (mb_row != 0); recon_yoffset = (mb_row * recon_y_stride * 16); - recon_uvoffset = (mb_row * recon_uv_stride * 8); + recon_uvoffset = (mb_row * recon_uv_stride * uv_mb_height); // Set up limit values for motion vectors to prevent them extending - // outside the UMV borders + // outside the UMV borders. 
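// Sketch of how the new uv_mb_height expression above behaves: the
// comparison (y_height > uv_height) is 1 when the chroma plane is
// vertically subsampled and 0 when it matches luma height, so a 16-row
// luma macroblock spans 8 or 16 chroma rows. This lets the first pass
// step its U/V pointers correctly for non-4:2:0 input instead of
// hard-coding 8 as the old code did.
static int uv_mb_height_sketch(int y_height, int uv_height) {
  return 16 >> (y_height > uv_height);
}
// uv_mb_height_sketch(720, 360) == 8   (4:2:0)
// uv_mb_height_sketch(720, 720) == 16  (chroma at full height)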
x->mv_row_min = -((mb_row * 16) + BORDER_MV_PIXELS_B16); x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + BORDER_MV_PIXELS_B16; - // for each macroblock col in image - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) { + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { int this_error; - int gf_motion_error = INT_MAX; - int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); - double error_weight; + const int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); + double error_weight = 1.0; + const BLOCK_SIZE bsize = get_bsize(cm, mb_row, mb_col); - vp9_clear_system_state(); // __asm emms; - error_weight = 1.0; // avoid uninitialized warnings + vp9_clear_system_state(); xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset; xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset; xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset; xd->left_available = (mb_col != 0); - - if (mb_col * 2 + 1 < cm->mi_cols) { - if (mb_row * 2 + 1 < cm->mi_rows) { - xd->mi_8x8[0]->mbmi.sb_type = BLOCK_16X16; - } else { - xd->mi_8x8[0]->mbmi.sb_type = BLOCK_16X8; - } - } else { - if (mb_row * 2 + 1 < cm->mi_rows) { - xd->mi_8x8[0]->mbmi.sb_type = BLOCK_8X16; - } else { - xd->mi_8x8[0]->mbmi.sb_type = BLOCK_8X8; - } - } + xd->mi_8x8[0]->mbmi.sb_type = bsize; xd->mi_8x8[0]->mbmi.ref_frame[0] = INTRA_FRAME; set_mi_row_col(xd, &tile, - mb_row << 1, - num_8x8_blocks_high_lookup[xd->mi_8x8[0]->mbmi.sb_type], - mb_col << 1, - num_8x8_blocks_wide_lookup[xd->mi_8x8[0]->mbmi.sb_type], + mb_row << 1, num_8x8_blocks_high_lookup[bsize], + mb_col << 1, num_8x8_blocks_wide_lookup[bsize], cm->mi_rows, cm->mi_cols); - if (cpi->sf.variance_adaptive_quantization) { - int energy = vp9_block_energy(cpi, x, xd->mi_8x8[0]->mbmi.sb_type); + if (cpi->oxcf.aq_mode == VARIANCE_AQ) { + const int energy = vp9_block_energy(cpi, x, bsize); error_weight = vp9_vaq_inv_q_ratio(energy); } - // do intra 16x16 prediction + // Do intra 16x16 prediction. this_error = vp9_encode_intra(x, use_dc_pred); - if (cpi->sf.variance_adaptive_quantization) { - vp9_clear_system_state(); // __asm emms; - this_error *= error_weight; + if (cpi->oxcf.aq_mode == VARIANCE_AQ) { + vp9_clear_system_state(); + this_error = (int)(this_error * error_weight); } - // intrapenalty below deals with situations where the intra and inter - // error scores are very low (eg a plain black frame). + // Intrapenalty below deals with situations where the intra and inter + // error scores are very low (e.g. a plain black frame). // We do not have special cases in first pass for 0,0 and nearest etc so // all inter modes carry an overhead cost estimate for the mv. // When the error score is very low this causes us to pick all or lots of @@ -623,44 +584,42 @@ void vp9_first_pass(VP9_COMP *cpi) { // This penalty adds a cost matching that of a 0,0 mv to the intra case. this_error += intrapenalty; - // Cumulative intra error total + // Accumulate the intra error. intra_error += (int64_t)this_error; // Set up limit values for motion vectors to prevent them extending // outside the UMV borders. x->mv_col_min = -((mb_col * 16) + BORDER_MV_PIXELS_B16); - x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) - + BORDER_MV_PIXELS_B16; + x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + BORDER_MV_PIXELS_B16; - // Other than for the first frame do a motion search + // Other than for the first frame do a motion search. 
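// The use_dc_pred expression above is effectively an exclusive-or on the
// frame edges: DC prediction is forced only along the top macroblock row
// or the left macroblock column, where one prediction edge is missing,
// and not at the top-left macroblock or in the frame interior.
static int use_dc_pred_sketch(int mb_row, int mb_col) {
  return (mb_col || mb_row) && (!mb_col || !mb_row);
}
// (0,0) -> 0   (0,3) -> 1   (3,0) -> 1   (3,3) -> 0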
if (cm->current_video_frame > 0) { - int tmp_err; - int motion_error = INT_MAX; + int tmp_err, motion_error; int_mv mv, tmp_mv; - // Simple 0,0 motion with no mv overhead - zz_motion_search(cpi, x, lst_yv12, &motion_error, recon_yoffset); + xd->plane[0].pre[0].buf = lst_yv12->y_buffer + recon_yoffset; + motion_error = zz_motion_search(x); + // Assume 0,0 motion with no mv overhead. mv.as_int = tmp_mv.as_int = 0; // Test last reference frame using the previous best mv as the - // starting point (best reference) for the search - first_pass_motion_search(cpi, x, &best_ref_mv, - &mv.as_mv, lst_yv12, - &motion_error, recon_yoffset); - if (cpi->sf.variance_adaptive_quantization) { - vp9_clear_system_state(); // __asm emms; - motion_error *= error_weight; + // starting point (best reference) for the search. + first_pass_motion_search(cpi, x, &best_ref_mv.as_mv, &mv.as_mv, + &motion_error); + if (cpi->oxcf.aq_mode == VARIANCE_AQ) { + vp9_clear_system_state(); + motion_error = (int)(motion_error * error_weight); } // If the current best reference mv is not centered on 0,0 then do a 0,0 // based search as well. if (best_ref_mv.as_int) { tmp_err = INT_MAX; - first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv, - lst_yv12, &tmp_err, recon_yoffset); - if (cpi->sf.variance_adaptive_quantization) { - vp9_clear_system_state(); // __asm emms; - tmp_err *= error_weight; + first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv.as_mv, + &tmp_err); + if (cpi->oxcf.aq_mode == VARIANCE_AQ) { + vp9_clear_system_state(); + tmp_err = (int)(tmp_err * error_weight); } if (tmp_err < motion_error) { @@ -669,34 +628,33 @@ void vp9_first_pass(VP9_COMP *cpi) { } } - // Experimental search in an older reference frame + // Search in an older reference frame. if (cm->current_video_frame > 1) { - // Simple 0,0 motion with no mv overhead - zz_motion_search(cpi, x, gld_yv12, - &gf_motion_error, recon_yoffset); - - first_pass_motion_search(cpi, x, &zero_ref_mv, - &tmp_mv.as_mv, gld_yv12, - &gf_motion_error, recon_yoffset); - if (cpi->sf.variance_adaptive_quantization) { - vp9_clear_system_state(); // __asm emms; - gf_motion_error *= error_weight; - } + // Assume 0,0 motion with no mv overhead. + int gf_motion_error; + + xd->plane[0].pre[0].buf = gld_yv12->y_buffer + recon_yoffset; + gf_motion_error = zz_motion_search(x); - if ((gf_motion_error < motion_error) && - (gf_motion_error < this_error)) { - second_ref_count++; + first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv.as_mv, + &gf_motion_error); + if (cpi->oxcf.aq_mode == VARIANCE_AQ) { + vp9_clear_system_state(); + gf_motion_error = (int)(gf_motion_error * error_weight); } - // Reset to last frame as reference buffer + if (gf_motion_error < motion_error && gf_motion_error < this_error) + ++second_ref_count; + + // Reset to last frame as reference buffer. xd->plane[0].pre[0].buf = lst_yv12->y_buffer + recon_yoffset; xd->plane[1].pre[0].buf = lst_yv12->u_buffer + recon_uvoffset; xd->plane[2].pre[0].buf = lst_yv12->v_buffer + recon_uvoffset; - // In accumulating a score for the older reference frame - // take the best of the motion predicted score and - // the intra coded error (just as will be done for) - // accumulation of "coded_error" for the last frame. + // In accumulating a score for the older reference frame take the + // best of the motion predicted score and the intra coded error + // (just as will be done for) accumulation of "coded_error" for + // the last frame. 
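// Condensed sketch of the candidate order used in the inter path above.
// The _sketch names are illustrative stand-ins for the encoder calls; each
// argument is the error returned by the corresponding search.
static int best_first_pass_error_sketch(int zz_err, int refined_err,
                                        int zero_centred_err) {
  // 1. Raw 0,0 MSE against the last frame (zz_motion_search).
  int motion_error = zz_err;
  // 2. Diamond search seeded from the previous block's best mv.
  if (refined_err < motion_error)
    motion_error = refined_err;
  // 3. If that seed was non-zero, a second search centred on 0,0.
  if (zero_centred_err < motion_error)
    motion_error = zero_centred_err;
  // The golden-frame pass repeats steps 1-2 to feed sr_coded_error.
  return motion_error;
}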
if (gf_motion_error < this_error) sr_coded_error += gf_motion_error; else @@ -704,74 +662,69 @@ void vp9_first_pass(VP9_COMP *cpi) { } else { sr_coded_error += motion_error; } - /* Intra assumed best */ + // Start by assuming that intra mode is best. best_ref_mv.as_int = 0; if (motion_error <= this_error) { - // Keep a count of cases where the inter and intra were - // very close and very low. This helps with scene cut - // detection for example in cropped clips with black bars - // at the sides or top and bottom. - if ((((this_error - intrapenalty) * 9) <= - (motion_error * 10)) && - (this_error < (2 * intrapenalty))) { - neutral_count++; - } + // Keep a count of cases where the inter and intra were very close + // and very low. This helps with scene cut detection for example in + // cropped clips with black bars at the sides or top and bottom. + if (((this_error - intrapenalty) * 9 <= motion_error * 10) && + this_error < 2 * intrapenalty) + ++neutral_count; mv.as_mv.row *= 8; mv.as_mv.col *= 8; this_error = motion_error; - vp9_set_mbmode_and_mvs(x, NEWMV, &mv); + xd->mi_8x8[0]->mbmi.mode = NEWMV; + xd->mi_8x8[0]->mbmi.mv[0] = mv; xd->mi_8x8[0]->mbmi.tx_size = TX_4X4; xd->mi_8x8[0]->mbmi.ref_frame[0] = LAST_FRAME; xd->mi_8x8[0]->mbmi.ref_frame[1] = NONE; - vp9_build_inter_predictors_sby(xd, mb_row << 1, - mb_col << 1, - xd->mi_8x8[0]->mbmi.sb_type); - vp9_encode_sby(x, xd->mi_8x8[0]->mbmi.sb_type); + vp9_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize); + vp9_encode_sby_pass1(x, bsize); sum_mvr += mv.as_mv.row; sum_mvr_abs += abs(mv.as_mv.row); sum_mvc += mv.as_mv.col; sum_mvc_abs += abs(mv.as_mv.col); sum_mvrs += mv.as_mv.row * mv.as_mv.row; sum_mvcs += mv.as_mv.col * mv.as_mv.col; - intercount++; + ++intercount; best_ref_mv.as_int = mv.as_int; - // Was the vector non-zero if (mv.as_int) { - mvcount++; + ++mvcount; - // Was it different from the last non zero vector + // Non-zero vector, was it different from the last non zero vector? if (mv.as_int != lastmv_as_int) - new_mv_count++; + ++new_mv_count; lastmv_as_int = mv.as_int; - // Does the Row vector point inwards or outwards + // Does the row vector point inwards or outwards? if (mb_row < cm->mb_rows / 2) { if (mv.as_mv.row > 0) - sum_in_vectors--; + --sum_in_vectors; else if (mv.as_mv.row < 0) - sum_in_vectors++; + ++sum_in_vectors; } else if (mb_row > cm->mb_rows / 2) { if (mv.as_mv.row > 0) - sum_in_vectors++; + ++sum_in_vectors; else if (mv.as_mv.row < 0) - sum_in_vectors--; + --sum_in_vectors; } - // Does the Row vector point inwards or outwards + // Does the col vector point inwards or outwards? if (mb_col < cm->mb_cols / 2) { if (mv.as_mv.col > 0) - sum_in_vectors--; + --sum_in_vectors; else if (mv.as_mv.col < 0) - sum_in_vectors++; + ++sum_in_vectors; } else if (mb_col > cm->mb_cols / 2) { if (mv.as_mv.col > 0) - sum_in_vectors++; + ++sum_in_vectors; else if (mv.as_mv.col < 0) - sum_in_vectors--; + --sum_in_vectors; } } } @@ -780,108 +733,95 @@ void vp9_first_pass(VP9_COMP *cpi) { } coded_error += (int64_t)this_error; - // adjust to the next column of macroblocks + // Adjust to the next column of MBs. x->plane[0].src.buf += 16; - x->plane[1].src.buf += 8; - x->plane[2].src.buf += 8; + x->plane[1].src.buf += uv_mb_height; + x->plane[2].src.buf += uv_mb_height; recon_yoffset += 16; - recon_uvoffset += 8; + recon_uvoffset += uv_mb_height; } - // adjust to the next row of mbs + // Adjust to the next row of MBs. 
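// Sketch of the "neutral" block test above: a block is counted when its
// intra cost, net of the synthetic mv penalty, is no more than roughly
// 11% above the inter cost (the 9/10 ratio) and the absolute error is
// tiny, which is typical of flat content such as black borders.
static int is_neutral_sketch(int this_error, int motion_error,
                             int intrapenalty) {
  return (this_error - intrapenalty) * 9 <= motion_error * 10 &&
         this_error < 2 * intrapenalty;
}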
x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols; - x->plane[1].src.buf += 8 * x->plane[1].src.stride - 8 * cm->mb_cols; - x->plane[2].src.buf += 8 * x->plane[1].src.stride - 8 * cm->mb_cols; + x->plane[1].src.buf += uv_mb_height * x->plane[1].src.stride - + uv_mb_height * cm->mb_cols; + x->plane[2].src.buf += uv_mb_height * x->plane[1].src.stride - + uv_mb_height * cm->mb_cols; - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); } - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); { - double weight = 0.0; - FIRSTPASS_STATS fps; - fps.frame = cm->current_video_frame; + fps.frame = cm->current_video_frame; fps.intra_error = (double)(intra_error >> 8); fps.coded_error = (double)(coded_error >> 8); fps.sr_coded_error = (double)(sr_coded_error >> 8); - weight = simple_weight(cpi->Source); - - - if (weight < 0.1) - weight = 0.1; - - fps.ssim_weighted_pred_err = fps.coded_error * weight; - - fps.pcnt_inter = 0.0; - fps.pcnt_motion = 0.0; - fps.MVr = 0.0; - fps.mvr_abs = 0.0; - fps.MVc = 0.0; - fps.mvc_abs = 0.0; - fps.MVrv = 0.0; - fps.MVcv = 0.0; - fps.mv_in_out_count = 0.0; - fps.new_mv_count = 0.0; - fps.count = 1.0; - - fps.pcnt_inter = 1.0 * (double)intercount / cm->MBs; - fps.pcnt_second_ref = 1.0 * (double)second_ref_count / cm->MBs; - fps.pcnt_neutral = 1.0 * (double)neutral_count / cm->MBs; + fps.ssim_weighted_pred_err = fps.coded_error * simple_weight(cpi->Source); + fps.count = 1.0; + fps.pcnt_inter = (double)intercount / cm->MBs; + fps.pcnt_second_ref = (double)second_ref_count / cm->MBs; + fps.pcnt_neutral = (double)neutral_count / cm->MBs; if (mvcount > 0) { - fps.MVr = (double)sum_mvr / (double)mvcount; - fps.mvr_abs = (double)sum_mvr_abs / (double)mvcount; - fps.MVc = (double)sum_mvc / (double)mvcount; - fps.mvc_abs = (double)sum_mvc_abs / (double)mvcount; - fps.MVrv = ((double)sum_mvrs - (fps.MVr * fps.MVr / (double)mvcount)) / - (double)mvcount; - fps.MVcv = ((double)sum_mvcs - (fps.MVc * fps.MVc / (double)mvcount)) / - (double)mvcount; - fps.mv_in_out_count = (double)sum_in_vectors / (double)(mvcount * 2); + fps.MVr = (double)sum_mvr / mvcount; + fps.mvr_abs = (double)sum_mvr_abs / mvcount; + fps.MVc = (double)sum_mvc / mvcount; + fps.mvc_abs = (double)sum_mvc_abs / mvcount; + fps.MVrv = ((double)sum_mvrs - (fps.MVr * fps.MVr / mvcount)) / mvcount; + fps.MVcv = ((double)sum_mvcs - (fps.MVc * fps.MVc / mvcount)) / mvcount; + fps.mv_in_out_count = (double)sum_in_vectors / (mvcount * 2); fps.new_mv_count = new_mv_count; - - fps.pcnt_motion = 1.0 * (double)mvcount / cpi->common.MBs; + fps.pcnt_motion = (double)mvcount / cm->MBs; + } else { + fps.MVr = 0.0; + fps.mvr_abs = 0.0; + fps.MVc = 0.0; + fps.mvc_abs = 0.0; + fps.MVrv = 0.0; + fps.MVcv = 0.0; + fps.mv_in_out_count = 0.0; + fps.new_mv_count = 0.0; + fps.pcnt_motion = 0.0; } // TODO(paulwilkins): Handle the case when duration is set to 0, or // something less than the full time between subsequent values of // cpi->source_time_stamp. - fps.duration = (double)(cpi->source->ts_end - - cpi->source->ts_start); + fps.duration = (double)(cpi->source->ts_end - cpi->source->ts_start); - // don't want to do output stats with a stack variable! - cpi->twopass.this_frame_stats = fps; - output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.this_frame_stats); - accumulate_stats(&cpi->twopass.total_stats, &fps); + // Don't want to do output stats with a stack variable! 
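// The raw macroblock counters accumulated in the loops above become
// per-frame fractions by dividing through cm->MBs; illustrative numbers
// for a 1280x720 frame (80 x 45 = 3600 macroblocks):
//   intercount       = 3000 -> fps.pcnt_inter      ~= 0.833
//   second_ref_count =  900 -> fps.pcnt_second_ref  = 0.250
//   mvcount          = 1200 -> fps.pcnt_motion     ~= 0.333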
+ twopass->this_frame_stats = fps; + output_stats(&twopass->this_frame_stats, cpi->output_pkt_list); + accumulate_stats(&twopass->total_stats, &fps); } // Copy the previous Last Frame back into gf and and arf buffers if - // the prediction is good enough... but also dont allow it to lag too far - if ((cpi->twopass.sr_update_lag > 3) || + // the prediction is good enough... but also don't allow it to lag too far. + if ((twopass->sr_update_lag > 3) || ((cm->current_video_frame > 0) && - (cpi->twopass.this_frame_stats.pcnt_inter > 0.20) && - ((cpi->twopass.this_frame_stats.intra_error / - DOUBLE_DIVIDE_CHECK(cpi->twopass.this_frame_stats.coded_error)) > - 2.0))) { + (twopass->this_frame_stats.pcnt_inter > 0.20) && + ((twopass->this_frame_stats.intra_error / + DOUBLE_DIVIDE_CHECK(twopass->this_frame_stats.coded_error)) > 2.0))) { vp8_yv12_copy_frame(lst_yv12, gld_yv12); - cpi->twopass.sr_update_lag = 1; + twopass->sr_update_lag = 1; } else { - cpi->twopass.sr_update_lag++; + ++twopass->sr_update_lag; } - // swap frame pointers so last frame refers to the frame we just compressed + // Swap frame pointers so last frame refers to the frame we just compressed. swap_yv12(lst_yv12, new_yv12); - vp9_extend_frame_borders(lst_yv12, cm->subsampling_x, cm->subsampling_y); + vp9_extend_frame_borders(lst_yv12); // Special case for the first frame. Copy into the GF buffer as a second // reference. if (cm->current_video_frame == 0) vp8_yv12_copy_frame(lst_yv12, gld_yv12); - // use this to see what the first pass reconstruction looks like + // Use this to see what the first pass reconstruction looks like. if (0) { char filename[512]; FILE *recon_file; @@ -897,54 +837,15 @@ void vp9_first_pass(VP9_COMP *cpi) { fclose(recon_file); } - cm->current_video_frame++; + ++cm->current_video_frame; } -// Estimate a cost per mb attributable to overheads such as the coding of -// modes and motion vectors. -// Currently simplistic in its assumptions for testing. -// - - +// Estimate a cost per mb attributable to overheads such as the coding of modes +// and motion vectors. This currently makes simplistic assumptions for testing. static double bitcost(double prob) { return -(log(prob) / log(2.0)); } -static int64_t estimate_modemvcost(VP9_COMP *cpi, - FIRSTPASS_STATS *fpstats) { -#if 0 - int mv_cost; - int mode_cost; - - double av_pct_inter = fpstats->pcnt_inter / fpstats->count; - double av_pct_motion = fpstats->pcnt_motion / fpstats->count; - double av_intra = (1.0 - av_pct_inter); - - double zz_cost; - double motion_cost; - double intra_cost; - - zz_cost = bitcost(av_pct_inter - av_pct_motion); - motion_cost = bitcost(av_pct_motion); - intra_cost = bitcost(av_intra); - - // Estimate of extra bits per mv overhead for mbs - // << 9 is the normalization to the (bits * 512) used in vp9_bits_per_mb - mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9; - - // Crude estimate of overhead cost from modes - // << 9 is the normalization to (bits * 512) used in vp9_bits_per_mb - mode_cost = - (int)((((av_pct_inter - av_pct_motion) * zz_cost) + - (av_pct_motion * motion_cost) + - (av_intra * intra_cost)) * cpi->common.MBs) << 9; - - // return mv_cost + mode_cost; - // TODO(paulwilkins): Fix overhead costs for extended Q range. 
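// bitcost() above is the Shannon self-information of a symbol, in bits;
// a standalone copy with spot values as a sanity check:
#include <math.h>
static double bitcost_sketch(double prob) {
  return -(log(prob) / log(2.0));  // == -log2(prob)
}
// bitcost_sketch(0.5)  == 1.0 bit
// bitcost_sketch(0.25) == 2.0 bits
// bitcost_sketch(1.0)  == 0.0 bits: a certain symbol costs nothing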
-#endif - return 0; -} - static double calc_correction_factor(double err_per_mb, double err_divisor, double pt_low, @@ -953,206 +854,47 @@ static double calc_correction_factor(double err_per_mb, const double error_term = err_per_mb / err_divisor; // Adjustment based on actual quantizer to power term. - const double power_term = MIN(vp9_convert_qindex_to_q(q) * 0.01 + pt_low, + const double power_term = MIN(vp9_convert_qindex_to_q(q) * 0.0125 + pt_low, pt_high); - // Calculate correction factor + // Calculate correction factor. if (power_term < 1.0) assert(error_term >= 0.0); return fclamp(pow(error_term, power_term), 0.05, 5.0); } -// Given a current maxQ value sets a range for future values. -// PGW TODO.. -// This code removes direct dependency on QIndex to determine the range -// (now uses the actual quantizer) but has not been tuned. -static void adjust_maxq_qrange(VP9_COMP *cpi) { - int i; - // Set the max corresponding to cpi->avg_q * 2.0 - double q = cpi->avg_q * 2.0; - cpi->twopass.maxq_max_limit = cpi->worst_quality; - for (i = cpi->best_quality; i <= cpi->worst_quality; i++) { - cpi->twopass.maxq_max_limit = i; - if (vp9_convert_qindex_to_q(i) >= q) - break; - } - - // Set the min corresponding to cpi->avg_q * 0.5 - q = cpi->avg_q * 0.5; - cpi->twopass.maxq_min_limit = cpi->best_quality; - for (i = cpi->worst_quality; i >= cpi->best_quality; i--) { - cpi->twopass.maxq_min_limit = i; - if (vp9_convert_qindex_to_q(i) <= q) - break; - } -} - -static int estimate_max_q(VP9_COMP *cpi, - FIRSTPASS_STATS *fpstats, - int section_target_bandwitdh) { +int vp9_twopass_worst_quality(VP9_COMP *cpi, FIRSTPASS_STATS *fpstats, + int section_target_bandwitdh) { int q; - int num_mbs = cpi->common.MBs; + const int num_mbs = cpi->common.MBs; int target_norm_bits_per_mb; + const RATE_CONTROL *const rc = &cpi->rc; - double section_err = fpstats->coded_error / fpstats->count; - double sr_correction; - double err_per_mb = section_err / num_mbs; - double err_correction_factor; - double speed_correction = 1.0; + const double section_err = fpstats->coded_error / fpstats->count; + const double err_per_mb = section_err / num_mbs; if (section_target_bandwitdh <= 0) - return cpi->twopass.maxq_max_limit; // Highest value allowed + return rc->worst_quality; // Highest value allowed target_norm_bits_per_mb = section_target_bandwitdh < (1 << 20) ? (512 * section_target_bandwitdh) / num_mbs : 512 * (section_target_bandwitdh / num_mbs); - // Look at the drop in prediction quality between the last frame - // and the GF buffer (which contained an older frame). 
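// Standalone sketch of calc_correction_factor() as rewritten above. The
// power-term slope moves from 0.01 to 0.0125 per unit of real Q, so the
// factor reacts somewhat more strongly to the quantizer; the result feeds
// the q search loop that follows, which walks q upward until the
// predicted bits per MB fit under the target. real_q here stands in for
// the value vp9_convert_qindex_to_q() would return.
#include <math.h>

static double correction_factor_sketch(double err_per_mb, double err_divisor,
                                       double pt_low, double pt_high,
                                       double real_q) {
  const double error_term = err_per_mb / err_divisor;
  const double power_term = fmin(real_q * 0.0125 + pt_low, pt_high);
  const double factor = pow(error_term, power_term);
  return factor < 0.05 ? 0.05 : factor > 5.0 ? 5.0 : factor;
}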
- if (fpstats->sr_coded_error > fpstats->coded_error) { - double sr_err_diff = (fpstats->sr_coded_error - fpstats->coded_error) / - (fpstats->count * cpi->common.MBs); - sr_correction = fclamp(pow(sr_err_diff / 32.0, 0.25), 0.75, 1.25); - } else { - sr_correction = 0.75; - } - - // Calculate a corrective factor based on a rolling ratio of bits spent - // vs target bits - if (cpi->rolling_target_bits > 0 && - cpi->active_worst_quality < cpi->worst_quality) { - double rolling_ratio = (double)cpi->rolling_actual_bits / - (double)cpi->rolling_target_bits; - - if (rolling_ratio < 0.95) - cpi->twopass.est_max_qcorrection_factor -= 0.005; - else if (rolling_ratio > 1.05) - cpi->twopass.est_max_qcorrection_factor += 0.005; - - cpi->twopass.est_max_qcorrection_factor = fclamp( - cpi->twopass.est_max_qcorrection_factor, 0.1, 10.0); - } - - // Corrections for higher compression speed settings - // (reduced compression expected) - // FIXME(jimbankoski): Once we settle on vp9 speed features we need to - // change this code. - if (cpi->compressor_speed == 1) - speed_correction = cpi->oxcf.cpu_used <= 5 ? - 1.04 + (/*cpi->oxcf.cpu_used*/0 * 0.04) : - 1.25; - // Try and pick a max Q that will be high enough to encode the // content at the given rate. - for (q = cpi->twopass.maxq_min_limit; q < cpi->twopass.maxq_max_limit; q++) { - int bits_per_mb_at_this_q; - - err_correction_factor = calc_correction_factor(err_per_mb, - ERR_DIVISOR, 0.4, 0.90, q) * - sr_correction * speed_correction * - cpi->twopass.est_max_qcorrection_factor; - - bits_per_mb_at_this_q = vp9_bits_per_mb(INTER_FRAME, q, - err_correction_factor); - + for (q = rc->best_quality; q < rc->worst_quality; ++q) { + const double err_correction_factor = calc_correction_factor(err_per_mb, + ERR_DIVISOR, 0.5, 0.90, q); + const int bits_per_mb_at_this_q = vp9_rc_bits_per_mb(INTER_FRAME, q, + err_correction_factor); if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) break; } // Restriction on active max q for constrained quality mode. - if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY && - q < cpi->cq_target_quality) - q = cpi->cq_target_quality; - - // Adjust maxq_min_limit and maxq_max_limit limits based on - // average q observed in clip for non kf/gf/arf frames - // Give average a chance to settle though. - // PGW TODO.. This code is broken for the extended Q range - if (cpi->ni_frames > ((int)cpi->twopass.total_stats.count >> 8) && - cpi->ni_frames > 25) - adjust_maxq_qrange(cpi); - - return q; -} - -// For cq mode estimate a cq level that matches the observed -// complexity and data rate. -static int estimate_cq(VP9_COMP *cpi, - FIRSTPASS_STATS *fpstats, - int section_target_bandwitdh) { - int q; - int num_mbs = cpi->common.MBs; - int target_norm_bits_per_mb; - - double section_err = (fpstats->coded_error / fpstats->count); - double err_per_mb = section_err / num_mbs; - double err_correction_factor; - double sr_err_diff; - double sr_correction; - double speed_correction = 1.0; - double clip_iiratio; - double clip_iifactor; - - target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20)) - ? 
(512 * section_target_bandwitdh) / num_mbs - : 512 * (section_target_bandwitdh / num_mbs); - - - // Corrections for higher compression speed settings - // (reduced compression expected) - if (cpi->compressor_speed == 1) { - if (cpi->oxcf.cpu_used <= 5) - speed_correction = 1.04 + (/*cpi->oxcf.cpu_used*/ 0 * 0.04); - else - speed_correction = 1.25; - } - - // Look at the drop in prediction quality between the last frame - // and the GF buffer (which contained an older frame). - if (fpstats->sr_coded_error > fpstats->coded_error) { - sr_err_diff = - (fpstats->sr_coded_error - fpstats->coded_error) / - (fpstats->count * cpi->common.MBs); - sr_correction = (sr_err_diff / 32.0); - sr_correction = pow(sr_correction, 0.25); - if (sr_correction < 0.75) - sr_correction = 0.75; - else if (sr_correction > 1.25) - sr_correction = 1.25; - } else { - sr_correction = 0.75; - } - - // II ratio correction factor for clip as a whole - clip_iiratio = cpi->twopass.total_stats.intra_error / - DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats.coded_error); - clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025); - if (clip_iifactor < 0.80) - clip_iifactor = 0.80; - - // Try and pick a Q that can encode the content at the given rate. - for (q = 0; q < MAXQ; q++) { - int bits_per_mb_at_this_q; - - // Error per MB based correction factor - err_correction_factor = - calc_correction_factor(err_per_mb, 100.0, 0.4, 0.90, q) * - sr_correction * speed_correction * clip_iifactor; - - bits_per_mb_at_this_q = - vp9_bits_per_mb(INTER_FRAME, q, err_correction_factor); - - if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) - break; - } - - // Clip value to range "best allowed to (worst allowed - 1)" - q = select_cq_level(q); - if (q >= cpi->worst_quality) - q = cpi->worst_quality - 1; - if (q < cpi->best_quality) - q = cpi->best_quality; + if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) + q = MAX(q, cpi->cq_target_quality); return q; } @@ -1161,132 +903,105 @@ extern void vp9_new_framerate(VP9_COMP *cpi, double framerate); void vp9_init_second_pass(VP9_COMP *cpi) { FIRSTPASS_STATS this_frame; - FIRSTPASS_STATS *start_pos; - - double lower_bounds_min_rate = FRAME_OVERHEAD_BITS * cpi->oxcf.framerate; - double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth * - cpi->oxcf.two_pass_vbrmin_section / 100); + const FIRSTPASS_STATS *start_pos; + struct twopass_rc *const twopass = &cpi->twopass; + const VP9_CONFIG *const oxcf = &cpi->oxcf; - if (two_pass_min_rate < lower_bounds_min_rate) - two_pass_min_rate = lower_bounds_min_rate; + zero_stats(&twopass->total_stats); + zero_stats(&twopass->total_left_stats); - zero_stats(&cpi->twopass.total_stats); - zero_stats(&cpi->twopass.total_left_stats); - - if (!cpi->twopass.stats_in_end) + if (!twopass->stats_in_end) return; - cpi->twopass.total_stats = *cpi->twopass.stats_in_end; - cpi->twopass.total_left_stats = cpi->twopass.total_stats; + twopass->total_stats = *twopass->stats_in_end; + twopass->total_left_stats = twopass->total_stats; - // each frame can have a different duration, as the frame rate in the source - // isn't guaranteed to be constant. The frame rate prior to the first frame - // encoded in the second pass is a guess. However the sum duration is not. - // Its calculated based on the actual durations of all frames from the first - // pass. - vp9_new_framerate(cpi, 10000000.0 * cpi->twopass.total_stats.count / - cpi->twopass.total_stats.duration); + // Each frame can have a different duration, as the frame rate in the source + // isn't guaranteed to be constant. 
The frame rate prior to the first frame + // encoded in the second pass is a guess. However, the sum duration is not. + // It is calculated based on the actual durations of all frames from the + // first pass. + vp9_new_framerate(cpi, 10000000.0 * twopass->total_stats.count / + twopass->total_stats.duration); - cpi->output_framerate = cpi->oxcf.framerate; - cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats.duration * - cpi->oxcf.target_bandwidth / 10000000.0); - cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats.duration * - two_pass_min_rate / 10000000.0); + cpi->output_framerate = oxcf->framerate; + twopass->bits_left = (int64_t)(twopass->total_stats.duration * + oxcf->target_bandwidth / 10000000.0); // Calculate a minimum intra value to be used in determining the IIratio // scores used in the second pass. We have this minimum to make sure // that clips that are static but "low complexity" in the intra domain - // are still boosted appropriately for KF/GF/ARF - cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs; - cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs; + // are still boosted appropriately for KF/GF/ARF. + twopass->kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs; + twopass->gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs; - // This variable monitors how far behind the second ref update is lagging - cpi->twopass.sr_update_lag = 1; + // This variable monitors how far behind the second ref update is lagging. + twopass->sr_update_lag = 1; // Scan the first pass file and calculate an average Intra / Inter error score // ratio for the sequence. { double sum_iiratio = 0.0; - double IIRatio; - - start_pos = cpi->twopass.stats_in; // Note the starting "file" position. + start_pos = twopass->stats_in; - while (input_stats(cpi, &this_frame) != EOF) { - IIRatio = this_frame.intra_error - / DOUBLE_DIVIDE_CHECK(this_frame.coded_error); - IIRatio = (IIRatio < 1.0) ? 1.0 : (IIRatio > 20.0) ? 20.0 : IIRatio; - sum_iiratio += IIRatio; + while (input_stats(twopass, &this_frame) != EOF) { + const double iiratio = this_frame.intra_error / + DOUBLE_DIVIDE_CHECK(this_frame.coded_error); + sum_iiratio += fclamp(iiratio, 1.0, 20.0); } - cpi->twopass.avg_iiratio = sum_iiratio / - DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats.count); + twopass->avg_iiratio = sum_iiratio / + DOUBLE_DIVIDE_CHECK((double)twopass->total_stats.count); - // Reset file position - reset_fpf_position(cpi, start_pos); + reset_fpf_position(twopass, start_pos); } // Scan the first pass file and calculate a modified total error based upon // the bias/power function used to allocate bits. 
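// Sketch of the two whole-clip quantities derived above, assuming frame
// durations are accumulated in 1/10,000,000-second units as the divisors
// suggest. With illustrative numbers, 300 frames spanning 10 seconds at a
// 1,000,000 bps target:
//   framerate = 1e7 * 300 / 1e8            = 30.0 fps
//   bits_left = (int64_t)(1e8 * 1e6 / 1e7) = 10,000,000 bits
// The budget is now simply duration times target bitrate; the old
// subtraction of a two_pass_vbrmin_section floor is gone, that control
// instead feeding the modified_error_min clamp set up just below.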
{ - start_pos = cpi->twopass.stats_in; // Note starting "file" position + double av_error = twopass->total_stats.ssim_weighted_pred_err / + DOUBLE_DIVIDE_CHECK(twopass->total_stats.count); - cpi->twopass.modified_error_total = 0.0; - cpi->twopass.modified_error_used = 0.0; + start_pos = twopass->stats_in; - while (input_stats(cpi, &this_frame) != EOF) { - cpi->twopass.modified_error_total += + twopass->modified_error_total = 0.0; + twopass->modified_error_min = + (av_error * oxcf->two_pass_vbrmin_section) / 100; + twopass->modified_error_max = + (av_error * oxcf->two_pass_vbrmax_section) / 100; + + while (input_stats(twopass, &this_frame) != EOF) { + twopass->modified_error_total += calculate_modified_err(cpi, &this_frame); } - cpi->twopass.modified_error_left = cpi->twopass.modified_error_total; + twopass->modified_error_left = twopass->modified_error_total; - reset_fpf_position(cpi, start_pos); // Reset file position + reset_fpf_position(twopass, start_pos); } } -void vp9_end_second_pass(VP9_COMP *cpi) { -} - -// This function gives and estimate of how badly we believe -// the prediction quality is decaying from frame to frame. -static double get_prediction_decay_rate(VP9_COMP *cpi, - FIRSTPASS_STATS *next_frame) { - double prediction_decay_rate; - double second_ref_decay; - double mb_sr_err_diff; - - // Initial basis is the % mbs inter coded - prediction_decay_rate = next_frame->pcnt_inter; - +// This function gives an estimate of how badly we believe the prediction +// quality is decaying from frame to frame. +static double get_prediction_decay_rate(const VP9_COMMON *cm, + const FIRSTPASS_STATS *next_frame) { // Look at the observed drop in prediction quality between the last frame // and the GF buffer (which contains an older frame). - mb_sr_err_diff = (next_frame->sr_coded_error - next_frame->coded_error) / - cpi->common.MBs; - if (mb_sr_err_diff <= 512.0) { - second_ref_decay = 1.0 - (mb_sr_err_diff / 512.0); - second_ref_decay = pow(second_ref_decay, 0.5); - if (second_ref_decay < 0.85) - second_ref_decay = 0.85; - else if (second_ref_decay > 1.0) - second_ref_decay = 1.0; - } else { - second_ref_decay = 0.85; - } - - if (second_ref_decay < prediction_decay_rate) - prediction_decay_rate = second_ref_decay; + const double mb_sr_err_diff = (next_frame->sr_coded_error - + next_frame->coded_error) / cm->MBs; + const double second_ref_decay = mb_sr_err_diff <= 512.0 + ? fclamp(pow(1.0 - (mb_sr_err_diff / 512.0), 0.5), 0.85, 1.0) + : 0.85; - return prediction_decay_rate; + return MIN(second_ref_decay, next_frame->pcnt_inter); } // Function to test for a condition where a complex transition is followed // by a static section. For example in slide shows where there is a fade // between slides. This is to help with more optimal kf and gf positioning. -static int detect_transition_to_still( - VP9_COMP *cpi, - int frame_interval, - int still_interval, - double loop_decay_rate, - double last_decay_rate) { +static int detect_transition_to_still(VP9_COMP *cpi, int frame_interval, + int still_interval, + double loop_decay_rate, + double last_decay_rate) { int trans_to_still = 0; // Break clause to detect very still sections after motion @@ -1296,25 +1011,21 @@ static int detect_transition_to_still( loop_decay_rate >= 0.999 && last_decay_rate < 0.9) { int j; - FIRSTPASS_STATS *position = cpi->twopass.stats_in; + const FIRSTPASS_STATS *position = cpi->twopass.stats_in; FIRSTPASS_STATS tmp_next_frame; - double zz_inter; - // Look ahead a few frames to see if static condition - // persists... 
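// Standalone sketch of the rewritten get_prediction_decay_rate() above:
// the second-reference decay is a clamped square-root ramp in the per-MB
// error gap between the golden (older) and last-frame predictions, and
// the returned rate is the smaller of that and the inter percentage.
#include <math.h>

static double fclamp_local(double v, double lo, double hi) {
  return v < lo ? lo : (v > hi ? hi : v);
}

static double decay_rate_sketch(double sr_coded_error, double coded_error,
                                double pcnt_inter, int mbs) {
  const double mb_sr_err_diff = (sr_coded_error - coded_error) / mbs;
  const double second_ref_decay = mb_sr_err_diff <= 512.0
      ? fclamp_local(pow(1.0 - mb_sr_err_diff / 512.0, 0.5), 0.85, 1.0)
      : 0.85;
  return second_ref_decay < pcnt_inter ? second_ref_decay : pcnt_inter;
}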
- for (j = 0; j < still_interval; j++) { - if (EOF == input_stats(cpi, &tmp_next_frame)) + // Look ahead a few frames to see if static condition persists... + for (j = 0; j < still_interval; ++j) { + if (EOF == input_stats(&cpi->twopass, &tmp_next_frame)) break; - zz_inter = - (tmp_next_frame.pcnt_inter - tmp_next_frame.pcnt_motion); - if (zz_inter < 0.999) + if (tmp_next_frame.pcnt_inter - tmp_next_frame.pcnt_motion < 0.999) break; } - // Reset file position - reset_fpf_position(cpi, position); - // Only if it does do we signal a transition to still + reset_fpf_position(&cpi->twopass, position); + + // Only if it does do we signal a transition to still. if (j == still_interval) trans_to_still = 1; } @@ -1324,20 +1035,20 @@ static int detect_transition_to_still( // This function detects a flash through the high relative pcnt_second_ref // score in the frame following a flash frame. The offset passed in should -// reflect this -static int detect_flash(VP9_COMP *cpi, int offset) { +// reflect this. +static int detect_flash(const struct twopass_rc *twopass, int offset) { FIRSTPASS_STATS next_frame; int flash_detected = 0; // Read the frame data. // The return is FALSE (no flash detected) if not a valid frame - if (read_frame_stats(cpi, &next_frame, offset) != EOF) { + if (read_frame_stats(twopass, &next_frame, offset) != EOF) { // What we are looking for here is a situation where there is a // brief break in prediction (such as a flash) but subsequent frames // are reasonably well predicted by an earlier (pre flash) frame. // The recovery after a flash is indicated by a high pcnt_second_ref - // comapred to pcnt_inter. + // compared to pcnt_inter. if (next_frame.pcnt_second_ref > next_frame.pcnt_inter && next_frame.pcnt_second_ref >= 0.5) flash_detected = 1; @@ -1346,56 +1057,48 @@ static int detect_flash(VP9_COMP *cpi, int offset) { return flash_detected; } -// Update the motion related elements to the GF arf boost calculation +// Update the motion related elements to the GF arf boost calculation. static void accumulate_frame_motion_stats( FIRSTPASS_STATS *this_frame, double *this_frame_mv_in_out, double *mv_in_out_accumulator, double *abs_mv_in_out_accumulator, double *mv_ratio_accumulator) { - // double this_frame_mv_in_out; - double this_frame_mvr_ratio; - double this_frame_mvc_ratio; double motion_pct; // Accumulate motion stats. motion_pct = this_frame->pcnt_motion; - // Accumulate Motion In/Out of frame stats + // Accumulate Motion In/Out of frame stats. *this_frame_mv_in_out = this_frame->mv_in_out_count * motion_pct; *mv_in_out_accumulator += this_frame->mv_in_out_count * motion_pct; - *abs_mv_in_out_accumulator += - fabs(this_frame->mv_in_out_count * motion_pct); + *abs_mv_in_out_accumulator += fabs(this_frame->mv_in_out_count * motion_pct); // Accumulate a measure of how uniform (or conversely how random) - // the motion field is. (A ratio of absmv / mv) + // the motion field is (a ratio of absmv / mv). if (motion_pct > 0.05) { - this_frame_mvr_ratio = fabs(this_frame->mvr_abs) / + const double this_frame_mvr_ratio = fabs(this_frame->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVr)); - this_frame_mvc_ratio = fabs(this_frame->mvc_abs) / + const double this_frame_mvc_ratio = fabs(this_frame->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVc)); - *mv_ratio_accumulator += - (this_frame_mvr_ratio < this_frame->mvr_abs) + *mv_ratio_accumulator += (this_frame_mvr_ratio < this_frame->mvr_abs) ? 
(this_frame_mvr_ratio * motion_pct) : this_frame->mvr_abs * motion_pct; - *mv_ratio_accumulator += - (this_frame_mvc_ratio < this_frame->mvc_abs) + *mv_ratio_accumulator += (this_frame_mvc_ratio < this_frame->mvc_abs) ? (this_frame_mvc_ratio * motion_pct) : this_frame->mvc_abs * motion_pct; } } // Calculate a baseline boost number for the current frame. -static double calc_frame_boost( - VP9_COMP *cpi, - FIRSTPASS_STATS *this_frame, - double this_frame_mv_in_out) { +static double calc_frame_boost(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame, + double this_frame_mv_in_out) { double frame_boost; - // Underlying boost factor is based on inter intra error ratio + // Underlying boost factor is based on inter intra error ratio. if (this_frame->intra_error > cpi->twopass.gf_intra_err_min) frame_boost = (IIFACTOR * this_frame->intra_error / DOUBLE_DIVIDE_CHECK(this_frame->coded_error)); @@ -1403,28 +1106,23 @@ static double calc_frame_boost( frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min / DOUBLE_DIVIDE_CHECK(this_frame->coded_error)); - // Increase boost for frames where new data coming into frame - // (eg zoom out). Slightly reduce boost if there is a net balance - // of motion out of the frame (zoom in). - // The range for this_frame_mv_in_out is -1.0 to +1.0 + // Increase boost for frames where new data coming into frame (e.g. zoom out). + // Slightly reduce boost if there is a net balance of motion out of the frame + // (zoom in). The range for this_frame_mv_in_out is -1.0 to +1.0. if (this_frame_mv_in_out > 0.0) frame_boost += frame_boost * (this_frame_mv_in_out * 2.0); - // In extreme case boost is halved + // In the extreme case the boost is halved. else frame_boost += frame_boost * (this_frame_mv_in_out / 2.0); - // Clip to maximum - if (frame_boost > GF_RMAX) - frame_boost = GF_RMAX; - - return frame_boost; + return MIN(frame_boost, GF_RMAX); } static int calc_arf_boost(VP9_COMP *cpi, int offset, int f_frames, int b_frames, int *f_boost, int *b_boost) { FIRSTPASS_STATS this_frame; - + struct twopass_rc *const twopass = &cpi->twopass; int i; double boost_score = 0.0; double mv_ratio_accumulator = 0.0; @@ -1435,12 +1133,12 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, int arf_boost; int flash_detected = 0; - // Search forward from the proposed arf/next gf position - for (i = 0; i < f_frames; i++) { - if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF) + // Search forward from the proposed arf/next gf position. + for (i = 0; i < f_frames; ++i) { + if (read_frame_stats(twopass, &this_frame, (i + offset)) == EOF) break; - // Update the motion related elements to the boost calculation + // Update the motion related elements to the boost calculation. accumulate_frame_motion_stats(&this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator, &abs_mv_in_out_accumulator, @@ -1448,12 +1146,12 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, // We want to discount the flash frame itself and the recovery // frame that follows as both will have poor scores. - flash_detected = detect_flash(cpi, (i + offset)) || - detect_flash(cpi, (i + offset + 1)); + flash_detected = detect_flash(twopass, i + offset) || + detect_flash(twopass, i + offset + 1); - // Cumulative effect of prediction quality decay + // Accumulate the effect of prediction quality decay. if (!flash_detected) { - decay_accumulator *= get_prediction_decay_rate(cpi, &this_frame); + decay_accumulator *= get_prediction_decay_rate(&cpi->common, &this_frame); decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR ? 
MIN_DECAY_FACTOR : decay_accumulator; } @@ -1464,7 +1162,7 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, *f_boost = (int)boost_score; - // Reset for backward looking loop + // Reset for backward looking loop. boost_score = 0.0; mv_ratio_accumulator = 0.0; decay_accumulator = 1.0; @@ -1472,12 +1170,12 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, mv_in_out_accumulator = 0.0; abs_mv_in_out_accumulator = 0.0; - // Search backward towards last gf position - for (i = -1; i >= -b_frames; i--) { - if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF) + // Search backward towards last gf position. + for (i = -1; i >= -b_frames; --i) { + if (read_frame_stats(twopass, &this_frame, (i + offset)) == EOF) break; - // Update the motion related elements to the boost calculation + // Update the motion related elements to the boost calculation. accumulate_frame_motion_stats(&this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator, &abs_mv_in_out_accumulator, @@ -1485,12 +1183,12 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, // We want to discount the the flash frame itself and the recovery // frame that follows as both will have poor scores. - flash_detected = detect_flash(cpi, (i + offset)) || - detect_flash(cpi, (i + offset + 1)); + flash_detected = detect_flash(twopass, i + offset) || + detect_flash(twopass, i + offset + 1); - // Cumulative effect of prediction quality decay + // Cumulative effect of prediction quality decay. if (!flash_detected) { - decay_accumulator *= get_prediction_decay_rate(cpi, &this_frame); + decay_accumulator *= get_prediction_decay_rate(&cpi->common, &this_frame); decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR ? MIN_DECAY_FACTOR : decay_accumulator; } @@ -1538,8 +1236,7 @@ static void schedule_frames(VP9_COMP *cpi, const int start, const int end, return; } - // ARF Group: work out the ARF schedule. - // Mark ARF frames as negative. + // ARF Group: Work out the ARF schedule and mark ARF frames as negative. if (end < 0) { // printf("start:%d end:%d\n", -end, -end); // ARF frame is at the end of the range. @@ -1591,6 +1288,8 @@ void define_fixed_arf_period(VP9_COMP *cpi) { cpi->this_frame_weight = cpi->arf_weight[cpi->sequence_number]; assert(cpi->this_frame_weight >= 0); + cpi->twopass.gf_zeromotion_pct = 0; + // Initialize frame coding order variables. cpi->new_frame_coding_order_period = 0; cpi->next_frame_in_order = 0; @@ -1599,16 +1298,16 @@ void define_fixed_arf_period(VP9_COMP *cpi) { vp9_zero(cpi->arf_buffer_idx); vpx_memset(cpi->arf_weight, -1, sizeof(cpi->arf_weight)); - if (cpi->twopass.frames_to_key <= (FIXED_ARF_GROUP_SIZE + 8)) { + if (cpi->rc.frames_to_key <= (FIXED_ARF_GROUP_SIZE + 8)) { // Setup a GF group close to the keyframe. - cpi->source_alt_ref_pending = 0; - cpi->baseline_gf_interval = cpi->twopass.frames_to_key; - schedule_frames(cpi, 0, (cpi->baseline_gf_interval - 1), 2, 0, 0); + cpi->rc.source_alt_ref_pending = 0; + cpi->rc.baseline_gf_interval = cpi->rc.frames_to_key; + schedule_frames(cpi, 0, (cpi->rc.baseline_gf_interval - 1), 2, 0, 0); } else { // Setup a fixed period ARF group. - cpi->source_alt_ref_pending = 1; - cpi->baseline_gf_interval = FIXED_ARF_GROUP_SIZE; - schedule_frames(cpi, 0, -(cpi->baseline_gf_interval - 1), 2, 1, 0); + cpi->rc.source_alt_ref_pending = 1; + cpi->rc.baseline_gf_interval = FIXED_ARF_GROUP_SIZE; + schedule_frames(cpi, 0, -(cpi->rc.baseline_gf_interval - 1), 2, 1, 0); } // Replace level indicator of -1 with correct level. 
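// Condensed sketch of the accumulation both calc_arf_boost() loops above
// share (flash-frame special-casing omitted for brevity): every step
// multiplies a decay accumulator, floored at MIN_DECAY_FACTOR, into the
// running score, so frames far from the proposed arf position contribute
// progressively less.
static double accumulate_boost_sketch(const double *frame_boosts,
                                      const double *decay_rates, int n,
                                      double min_decay) {
  double decay_accumulator = 1.0;
  double boost_score = 0.0;
  int i;
  for (i = 0; i < n; ++i) {
    decay_accumulator *= decay_rates[i];
    if (decay_accumulator < min_decay)
      decay_accumulator = min_decay;
    boost_score += decay_accumulator * frame_boosts[i];
  }
  return boost_score;  // f_boost and b_boost are each truncated from this.
}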
@@ -1647,7 +1346,8 @@ void define_fixed_arf_period(VP9_COMP *cpi) { // Analyse and define a gf/arf group. static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { FIRSTPASS_STATS next_frame = { 0 }; - FIRSTPASS_STATS *start_pos; + const FIRSTPASS_STATS *start_pos; + struct twopass_rc *const twopass = &cpi->twopass; int i; double boost_score = 0.0; double old_boost_score = 0.0; @@ -1659,40 +1359,40 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { double decay_accumulator = 1.0; double zero_motion_accumulator = 1.0; - double loop_decay_rate = 1.00; // Starting decay rate + double loop_decay_rate = 1.00; double last_loop_decay_rate = 1.00; double this_frame_mv_in_out = 0.0; double mv_in_out_accumulator = 0.0; double abs_mv_in_out_accumulator = 0.0; double mv_ratio_accumulator_thresh; - int max_bits = frame_max_bits(cpi); // Max for a single frame + const int max_bits = frame_max_bits(cpi); // Max bits for a single frame. - unsigned int allow_alt_ref = - cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames; + unsigned int allow_alt_ref = cpi->oxcf.play_alternate && + cpi->oxcf.lag_in_frames; int f_boost = 0; int b_boost = 0; int flash_detected; int active_max_gf_interval; + RATE_CONTROL *const rc = &cpi->rc; - cpi->twopass.gf_group_bits = 0; + twopass->gf_group_bits = 0; - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); - start_pos = cpi->twopass.stats_in; + start_pos = twopass->stats_in; // Load stats for the current frame. mod_frame_err = calculate_modified_err(cpi, this_frame); - // Note the error of the frame at the start of the group (this will be - // the GF frame error if we code a normal gf + // Note the error of the frame at the start of the group. This will be + // the GF frame error if we code a normal gf. gf_first_frame_err = mod_frame_err; - // Special treatment if the current frame is a key frame (which is also - // a gf). If it is then its error score (and hence bit allocation) need - // to be subtracted out from the calculation for the GF group - if (cpi->common.frame_type == KEY_FRAME) + // If this is a key frame or the overlay from a previous arf then + // the error score / cost of this frame has already been accounted for. + if (cpi->common.frame_type == KEY_FRAME || rc->source_alt_ref_active) gf_group_err -= gf_first_frame_err; // Motion breakout threshold for loop below depends on image size. @@ -1704,50 +1404,49 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // bits to spare and are better with a smaller interval and smaller boost. // At high Q when there are few bits to spare we are better with a longer // interval to spread the cost of the GF. + // active_max_gf_interval = - 12 + ((int)vp9_convert_qindex_to_q(cpi->active_worst_quality) >> 5); + 12 + ((int)vp9_convert_qindex_to_q(rc->last_q[INTER_FRAME]) >> 5); - if (active_max_gf_interval > cpi->max_gf_interval) - active_max_gf_interval = cpi->max_gf_interval; + if (active_max_gf_interval > rc->max_gf_interval) + active_max_gf_interval = rc->max_gf_interval; i = 0; - while (((i < cpi->twopass.static_scene_max_gf_interval) || - ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) && - (i < cpi->twopass.frames_to_key)) { - i++; // Increment the loop counter + while (i < rc->static_scene_max_gf_interval && i < rc->frames_to_key) { + ++i; - // Accumulate error score of frames in this gf group + // Accumulate error score of frames in this gf group. 
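// Sketch of the interval ceiling computed above: twelve frames plus one
// per 32 steps of real Q, then capped at rc->max_gf_interval. At low Q,
// where spare bits make boosting cheap, the group stays short; at high Q
// a longer group spreads the cost of the golden frame.
//   real Q  16 -> 12 + (16  >> 5) = 12
//   real Q  64 -> 12 + (64  >> 5) = 14
//   real Q 160 -> 12 + (160 >> 5) = 17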
mod_frame_err = calculate_modified_err(cpi, this_frame); gf_group_err += mod_frame_err; - if (EOF == input_stats(cpi, &next_frame)) + if (EOF == input_stats(twopass, &next_frame)) break; // Test for the case where there is a brief flash but the prediction // quality back to an earlier frame is then restored. - flash_detected = detect_flash(cpi, 0); + flash_detected = detect_flash(twopass, 0); - // Update the motion related elements to the boost calculation + // Update the motion related elements to the boost calculation. accumulate_frame_motion_stats(&next_frame, &this_frame_mv_in_out, &mv_in_out_accumulator, &abs_mv_in_out_accumulator, &mv_ratio_accumulator); - // Cumulative effect of prediction quality decay + // Accumulate the effect of prediction quality decay. if (!flash_detected) { last_loop_decay_rate = loop_decay_rate; - loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); + loop_decay_rate = get_prediction_decay_rate(&cpi->common, &next_frame); decay_accumulator = decay_accumulator * loop_decay_rate; // Monitor for static sections. if ((next_frame.pcnt_inter - next_frame.pcnt_motion) < zero_motion_accumulator) { - zero_motion_accumulator = - (next_frame.pcnt_inter - next_frame.pcnt_motion); + zero_motion_accumulator = next_frame.pcnt_inter - + next_frame.pcnt_motion; } - // Break clause to detect very still sections after motion - // (for example a static image after a fade or other transition). + // Break clause to detect very still sections after motion. For example, + // a static image after a fade or other transition. if (detect_transition_to_still(cpi, i, 5, loop_decay_rate, last_loop_decay_rate)) { allow_alt_ref = 0; @@ -1755,20 +1454,17 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { } } - // Calculate a boost number for this frame - boost_score += - (decay_accumulator * + // Calculate a boost number for this frame. + boost_score += (decay_accumulator * calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out)); // Break out conditions. if ( - // Break at cpi->max_gf_interval unless almost totally static + // Break at cpi->max_gf_interval unless almost totally static. (i >= active_max_gf_interval && (zero_motion_accumulator < 0.995)) || ( - // Don't break out with a very short interval + // Don't break out with a very short interval. (i > MIN_GF_INTERVAL) && - // Don't break out very close to a key frame - ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) && ((boost_score > 125.0) || (next_frame.pcnt_inter < 0.75)) && (!flash_detected) && ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) || @@ -1784,26 +1480,23 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { old_boost_score = boost_score; } - cpi->gf_zeromotion_pct = (int)(zero_motion_accumulator * 1000.0); + twopass->gf_zeromotion_pct = (int)(zero_motion_accumulator * 1000.0); - // Don't allow a gf too near the next kf - if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) { - while (i < cpi->twopass.frames_to_key) { - i++; + // Don't allow a gf too near the next kf. + if ((rc->frames_to_key - i) < MIN_GF_INTERVAL) { + while (i < (rc->frames_to_key + !rc->next_key_frame_forced)) { + ++i; - if (EOF == input_stats(cpi, this_frame)) + if (EOF == input_stats(twopass, this_frame)) break; - if (i < cpi->twopass.frames_to_key) { + if (i < rc->frames_to_key) { mod_frame_err = calculate_modified_err(cpi, this_frame); gf_group_err += mod_frame_err; } } } - // Set the interval until the next gf or arf. 
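// Condensed sketch of the visible half of the loop-exit test above: the
// group closes when it reaches the q-derived ceiling on non-static
// content, or once it is past the minimum length and prediction is
// weakening on a frame that is not a flash. The motion-ratio clauses that
// continue past this hunk's context are omitted here.
static int gf_break_sketch(int i, int active_max_gf_interval,
                           int min_gf_interval, double zero_motion_acc,
                           double boost_score, double pcnt_inter,
                           int flash_detected) {
  if (i >= active_max_gf_interval && zero_motion_acc < 0.995)
    return 1;
  return i > min_gf_interval &&
         (boost_score > 125.0 || pcnt_inter < 0.75) &&
         !flash_detected;
}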
- cpi->baseline_gf_interval = i; - #if CONFIG_MULTIPLE_ARF if (cpi->multi_arf_enabled) { // Initialize frame coding order variables. @@ -1816,36 +1509,39 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { } #endif - // Should we use the alternate reference frame + // Set the interval until the next gf. + if (cpi->common.frame_type == KEY_FRAME || rc->source_alt_ref_active) + rc->baseline_gf_interval = i - 1; + else + rc->baseline_gf_interval = i; + + // Should we use the alternate reference frame. if (allow_alt_ref && (i < cpi->oxcf.lag_in_frames) && (i >= MIN_GF_INTERVAL) && - // dont use ARF very near next kf - (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) && - ((next_frame.pcnt_inter > 0.75) || - (next_frame.pcnt_second_ref > 0.5)) && - ((mv_in_out_accumulator / (double)i > -0.2) || - (mv_in_out_accumulator > -2.0)) && - (boost_score > 100)) { - // Alternative boost calculation for alt ref - cpi->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, - &b_boost); - cpi->source_alt_ref_pending = 1; + // For real scene cuts (not forced kfs) don't allow arf very near kf. + (rc->next_key_frame_forced || + (i <= (rc->frames_to_key - MIN_GF_INTERVAL)))) { + // Calculate the boost for alt ref. + rc->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, + &b_boost); + rc->source_alt_ref_pending = 1; #if CONFIG_MULTIPLE_ARF // Set the ARF schedule. if (cpi->multi_arf_enabled) { - schedule_frames(cpi, 0, -(cpi->baseline_gf_interval - 1), 2, 1, 0); + schedule_frames(cpi, 0, -(rc->baseline_gf_interval - 1), 2, 1, 0); } #endif } else { - cpi->gfu_boost = (int)boost_score; - cpi->source_alt_ref_pending = 0; + rc->gfu_boost = (int)boost_score; + rc->source_alt_ref_pending = 0; #if CONFIG_MULTIPLE_ARF // Set the GF schedule. if (cpi->multi_arf_enabled) { - schedule_frames(cpi, 0, cpi->baseline_gf_interval - 1, 2, 0, 0); - assert(cpi->new_frame_coding_order_period == cpi->baseline_gf_interval); + schedule_frames(cpi, 0, rc->baseline_gf_interval - 1, 2, 0, 0); + assert(cpi->new_frame_coding_order_period == + rc->baseline_gf_interval); } #endif } @@ -1888,65 +1584,43 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { #endif #endif - // Now decide how many bits should be allocated to the GF group as a - // proportion of those remaining in the kf group. - // The final key frame group in the clip is treated as a special case - // where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left. - // This is also important for short clips where there may only be one - // key frame. - if (cpi->twopass.frames_to_key >= (int)(cpi->twopass.total_stats.count - - cpi->common.current_video_frame)) { - cpi->twopass.kf_group_bits = - (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0; - } - - // Calculate the bits to be allocated to the group as a whole - if ((cpi->twopass.kf_group_bits > 0) && - (cpi->twopass.kf_group_error_left > 0)) { - cpi->twopass.gf_group_bits = - (int64_t)(cpi->twopass.kf_group_bits * + // Calculate the bits to be allocated to the group as a whole. + if (twopass->kf_group_bits > 0 && twopass->kf_group_error_left > 0) { + twopass->gf_group_bits = (int64_t)(cpi->twopass.kf_group_bits * (gf_group_err / cpi->twopass.kf_group_error_left)); } else { - cpi->twopass.gf_group_bits = 0; + twopass->gf_group_bits = 0; } - cpi->twopass.gf_group_bits = - (cpi->twopass.gf_group_bits < 0) - ? 0 - : (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits) - ? 
cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits; + twopass->gf_group_bits = (twopass->gf_group_bits < 0) ? + 0 : (twopass->gf_group_bits > twopass->kf_group_bits) ? + twopass->kf_group_bits : twopass->gf_group_bits; // Clip cpi->twopass.gf_group_bits based on user supplied data rate - // variability limit (cpi->oxcf.two_pass_vbrmax_section) - if (cpi->twopass.gf_group_bits > - (int64_t)max_bits * cpi->baseline_gf_interval) - cpi->twopass.gf_group_bits = (int64_t)max_bits * cpi->baseline_gf_interval; - - // Reset the file position - reset_fpf_position(cpi, start_pos); + // variability limit, cpi->oxcf.two_pass_vbrmax_section. + if (twopass->gf_group_bits > (int64_t)max_bits * rc->baseline_gf_interval) + twopass->gf_group_bits = (int64_t)max_bits * rc->baseline_gf_interval; - // Update the record of error used so far (only done once per gf group) - cpi->twopass.modified_error_used += gf_group_err; + // Reset the file position. + reset_fpf_position(twopass, start_pos); // Assign bits to the arf or gf. - for (i = 0; - i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME); - ++i) { + for (i = 0; i <= (rc->source_alt_ref_pending && + cpi->common.frame_type != KEY_FRAME); ++i) { int allocation_chunks; - int q = cpi->oxcf.fixed_q < 0 ? cpi->last_q[INTER_FRAME] - : cpi->oxcf.fixed_q; + int q = rc->last_q[INTER_FRAME]; int gf_bits; - int boost = (cpi->gfu_boost * vp9_gfboost_qadjust(q)) / 100; + int boost = (rc->gfu_boost * gfboost_qadjust(q)) / 100; - // Set max and minimum boost and hence minimum allocation - boost = clamp(boost, 125, (cpi->baseline_gf_interval + 1) * 200); + // Set max and minimum boost and hence minimum allocation. + boost = clamp(boost, 125, (rc->baseline_gf_interval + 1) * 200); - if (cpi->source_alt_ref_pending && i == 0) - allocation_chunks = ((cpi->baseline_gf_interval + 1) * 100) + boost; + if (rc->source_alt_ref_pending && i == 0) + allocation_chunks = ((rc->baseline_gf_interval + 1) * 100) + boost; else - allocation_chunks = (cpi->baseline_gf_interval * 100) + (boost - 100); + allocation_chunks = (rc->baseline_gf_interval * 100) + (boost - 100); - // Prevent overflow + // Prevent overflow. if (boost > 1023) { int divisor = boost >> 10; boost /= divisor; @@ -1954,18 +1628,18 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { } // Calculate the number of bits to be spent on the gf or arf based on - // the boost number - gf_bits = (int)((double)boost * (cpi->twopass.gf_group_bits / - (double)allocation_chunks)); + // the boost number. + gf_bits = (int)((double)boost * (twopass->gf_group_bits / + (double)allocation_chunks)); // If the frame that is to be boosted is simpler than the average for // the gf/arf group then use an alternative calculation - // based on the error score of the frame itself - if (mod_frame_err < gf_group_err / (double)cpi->baseline_gf_interval) { - double alt_gf_grp_bits = - (double)cpi->twopass.kf_group_bits * - (mod_frame_err * (double)cpi->baseline_gf_interval) / - DOUBLE_DIVIDE_CHECK(cpi->twopass.kf_group_error_left); + // based on the error score of the frame itself. 
+ if (rc->baseline_gf_interval < 1 || + mod_frame_err < gf_group_err / (double)rc->baseline_gf_interval) { + double alt_gf_grp_bits = (double)twopass->kf_group_bits * + (mod_frame_err * (double)rc->baseline_gf_interval) / + DOUBLE_DIVIDE_CHECK(twopass->kf_group_error_left); int alt_gf_bits = (int)((double)boost * (alt_gf_grp_bits / (double)allocation_chunks)); @@ -1976,68 +1650,68 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // If it is harder than other frames in the group make sure it at // least receives an allocation in keeping with its relative error // score, otherwise it may be worse off than an "un-boosted" frame. - int alt_gf_bits = (int)((double)cpi->twopass.kf_group_bits * + int alt_gf_bits = (int)((double)twopass->kf_group_bits * mod_frame_err / - DOUBLE_DIVIDE_CHECK(cpi->twopass.kf_group_error_left)); + DOUBLE_DIVIDE_CHECK(twopass->kf_group_error_left)); if (alt_gf_bits > gf_bits) gf_bits = alt_gf_bits; } - // Dont allow a negative value for gf_bits + // Don't allow a negative value for gf_bits. if (gf_bits < 0) gf_bits = 0; - // Add in minimum for a frame - gf_bits += cpi->min_frame_bandwidth; - if (i == 0) { - cpi->twopass.gf_bits = gf_bits; + twopass->gf_bits = gf_bits; } - if (i == 1 || (!cpi->source_alt_ref_pending - && (cpi->common.frame_type != KEY_FRAME))) { - // Per frame bit target for this frame - cpi->per_frame_bandwidth = gf_bits; + if (i == 1 || + (!rc->source_alt_ref_pending && + cpi->common.frame_type != KEY_FRAME)) { + // Calculate the per frame bit target for this frame. + vp9_rc_set_frame_target(cpi, gf_bits); } } { - // Adjust KF group bits and error remaining - cpi->twopass.kf_group_error_left -= (int64_t)gf_group_err; - cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits; - - if (cpi->twopass.kf_group_bits < 0) - cpi->twopass.kf_group_bits = 0; - - // Note the error score left in the remaining frames of the group. - // For normal GFs we want to remove the error score for the first frame - // of the group (except in Key frame case where this has already - // happened) - if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME) - cpi->twopass.gf_group_error_left = (int64_t)(gf_group_err + // Adjust KF group bits and error remaining. + twopass->kf_group_error_left -= (int64_t)gf_group_err; + twopass->kf_group_bits -= twopass->gf_group_bits; + + if (twopass->kf_group_bits < 0) + twopass->kf_group_bits = 0; + + // If this is an arf update we want to remove the score for the overlay + // frame at the end which will usually be very cheap to code. + // The overlay frame has already, in effect, been coded so we want to spread + // the remaining bits among the other frames. + // For normal GFs remove the score for the GF itself unless this is + // also a key frame in which case it has already been accounted for. 
+ if (rc->source_alt_ref_pending) { + twopass->gf_group_error_left = (int64_t)(gf_group_err - mod_frame_err); + } else if (cpi->common.frame_type != KEY_FRAME) { + twopass->gf_group_error_left = (int64_t)(gf_group_err - gf_first_frame_err); - else - cpi->twopass.gf_group_error_left = (int64_t)gf_group_err; + } else { + twopass->gf_group_error_left = (int64_t)gf_group_err; + } - cpi->twopass.gf_group_bits -= cpi->twopass.gf_bits - - cpi->min_frame_bandwidth; + twopass->gf_group_bits -= twopass->gf_bits; - if (cpi->twopass.gf_group_bits < 0) - cpi->twopass.gf_group_bits = 0; + if (twopass->gf_group_bits < 0) + twopass->gf_group_bits = 0; // This condition could fail if there are two kfs very close together - // despite (MIN_GF_INTERVAL) and would cause a divide by 0 in the + // despite MIN_GF_INTERVAL and would cause a divide by 0 in the // calculation of alt_extra_bits. - if (cpi->baseline_gf_interval >= 3) { - const int boost = cpi->source_alt_ref_pending ? b_boost : cpi->gfu_boost; + if (rc->baseline_gf_interval >= 3) { + const int boost = rc->source_alt_ref_pending ? b_boost : rc->gfu_boost; if (boost >= 150) { - int alt_extra_bits; - int pct_extra = (boost - 100) / 50; - pct_extra = (pct_extra > 20) ? 20 : pct_extra; - - alt_extra_bits = (int)((cpi->twopass.gf_group_bits * pct_extra) / 100); - cpi->twopass.gf_group_bits -= alt_extra_bits; + const int pct_extra = MIN(20, (boost - 100) / 50); + const int alt_extra_bits = (int)((twopass->gf_group_bits * pct_extra) / + 100); + twopass->gf_group_bits -= alt_extra_bits; } } } @@ -2046,32 +1720,29 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { FIRSTPASS_STATS sectionstats; zero_stats(§ionstats); - reset_fpf_position(cpi, start_pos); + reset_fpf_position(twopass, start_pos); - for (i = 0; i < cpi->baseline_gf_interval; i++) { - input_stats(cpi, &next_frame); + for (i = 0; i < rc->baseline_gf_interval; ++i) { + input_stats(twopass, &next_frame); accumulate_stats(§ionstats, &next_frame); } avg_stats(§ionstats); - cpi->twopass.section_intra_rating = (int) + twopass->section_intra_rating = (int) (sectionstats.intra_error / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error)); - reset_fpf_position(cpi, start_pos); + reset_fpf_position(twopass, start_pos); } } // Allocate bits to a normal frame that is neither a gf an arf or a key frame. static void assign_std_frame_bits(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { int target_frame_size; - double modified_err; double err_fraction; - - // Max for a single frame. - int max_bits = frame_max_bits(cpi); + const int max_bits = frame_max_bits(cpi); // Max for a single frame. // Calculate modified prediction error used in bit allocation. modified_err = calculate_modified_err(cpi, this_frame); @@ -2087,15 +1758,8 @@ static void assign_std_frame_bits(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // Clip target size to 0 - max_bits (or cpi->twopass.gf_group_bits) at // the top end. - if (target_frame_size < 0) { - target_frame_size = 0; - } else { - if (target_frame_size > max_bits) - target_frame_size = max_bits; - - if (target_frame_size > cpi->twopass.gf_group_bits) - target_frame_size = (int)cpi->twopass.gf_group_bits; - } + target_frame_size = clamp(target_frame_size, 0, + MIN(max_bits, (int)cpi->twopass.gf_group_bits)); // Adjust error and bits remaining. 
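Stripped of the encoder plumbing, the bit bookkeeping in this part of define_gf_group() is simple: the GF/ARF group takes a share of the key-frame group's remaining bits proportional to its share of the remaining modified error, the result is clamped, and both the bits and the error are then deducted from the key-frame group totals (the "adjust error and bits remaining" step the hunk resumes with below). A self-contained sketch under those assumptions, with illustrative names and a double for the error total where the encoder keeps an int64_t:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Give a GF group bits in proportion to its share of the remaining
     * modified error, clamp to what the KF group still has, then deduct
     * the allocation from the KF group totals. */
    static int64_t allocate_gf_group_bits(int64_t *kf_group_bits,
                                          double *kf_group_error_left,
                                          double gf_group_err,
                                          int64_t per_frame_max_bits,
                                          int gf_interval) {
      int64_t gf_bits = 0;
      if (*kf_group_bits > 0 && *kf_group_error_left > 0) {
        gf_bits = (int64_t)(*kf_group_bits *
                            (gf_group_err / *kf_group_error_left));
        gf_bits = MAX(0, MIN(gf_bits, *kf_group_bits));
      }
      /* Respect the user's per-frame rate variability cap. */
      gf_bits = MIN(gf_bits, per_frame_max_bits * gf_interval);

      *kf_group_error_left -= gf_group_err;
      *kf_group_bits -= gf_bits;
      if (*kf_group_bits < 0)
        *kf_group_bits = 0;
      return gf_bits;
    }

    int main(void) {
      int64_t kf_bits = 1000000;
      double kf_err_left = 200.0;
      /* A group holding a quarter of the remaining error gets a quarter
       * of the remaining bits (before the variability cap applies). */
      const int64_t gf_bits =
          allocate_gf_group_bits(&kf_bits, &kf_err_left, 50.0, 40000, 10);
      printf("gf group: %lld bits, kf group left: %lld\n",
             (long long)gf_bits, (long long)kf_bits);  /* 250000, 750000 */
      return 0;
    }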
cpi->twopass.gf_group_error_left -= (int64_t)modified_err; @@ -2104,272 +1768,69 @@ static void assign_std_frame_bits(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0; - // Add in the minimum number of bits that is set aside for every frame. - target_frame_size += cpi->min_frame_bandwidth; - // Per frame bit target for this frame. - cpi->per_frame_bandwidth = target_frame_size; -} - -// Make a damped adjustment to the active max q. -static int adjust_active_maxq(int old_maxqi, int new_maxqi) { - int i; - const double old_q = vp9_convert_qindex_to_q(old_maxqi); - const double new_q = vp9_convert_qindex_to_q(new_maxqi); - const double target_q = ((old_q * 7.0) + new_q) / 8.0; - - if (target_q > old_q) { - for (i = old_maxqi; i <= new_maxqi; i++) - if (vp9_convert_qindex_to_q(i) >= target_q) - return i; - } else { - for (i = old_maxqi; i >= new_maxqi; i--) - if (vp9_convert_qindex_to_q(i) <= target_q) - return i; - } - - return new_maxqi; -} - -void vp9_second_pass(VP9_COMP *cpi) { - int tmp_q; - int frames_left = (int)(cpi->twopass.total_stats.count - - cpi->common.current_video_frame); - - FIRSTPASS_STATS this_frame; - FIRSTPASS_STATS this_frame_copy; - - double this_frame_intra_error; - double this_frame_coded_error; - - if (!cpi->twopass.stats_in) - return; - - vp9_clear_system_state(); - - if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) { - cpi->active_worst_quality = cpi->oxcf.cq_level; - } else { - // Special case code for first frame. - if (cpi->common.current_video_frame == 0) { - int section_target_bandwidth = - (int)(cpi->twopass.bits_left / frames_left); - cpi->twopass.est_max_qcorrection_factor = 1.0; - - // Set a cq_level in constrained quality mode. - // Commenting this code out for now since it does not seem to be - // working well. - /* - if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) { - int est_cq = estimate_cq(cpi, &cpi->twopass.total_left_stats, - section_target_bandwidth); - - if (est_cq > cpi->cq_target_quality) - cpi->cq_target_quality = est_cq; - else - cpi->cq_target_quality = cpi->oxcf.cq_level; - } - */ - - // guess at maxq needed in 2nd pass - cpi->twopass.maxq_max_limit = cpi->worst_quality; - cpi->twopass.maxq_min_limit = cpi->best_quality; - - tmp_q = estimate_max_q(cpi, &cpi->twopass.total_left_stats, - section_target_bandwidth); - - cpi->active_worst_quality = tmp_q; - cpi->ni_av_qi = tmp_q; - cpi->avg_q = vp9_convert_qindex_to_q(tmp_q); - - // Limit the maxq value returned subsequently. - // This increases the risk of overspend or underspend if the initial - // estimate for the clip is bad, but helps prevent excessive - // variation in Q, especially near the end of a clip - // where for example a small overspend may cause Q to crash - adjust_maxq_qrange(cpi); - } - - // The last few frames of a clip almost always have to few or too many - // bits and for the sake of over exact rate control we dont want to make - // radical adjustments to the allowed quantizer range just to use up a - // few surplus bits or get beneath the target rate. 
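assign_std_frame_bits() above applies the same proportional rule one level down: an ordinary frame (neither kf, gf nor arf) targets its fraction of the group's remaining modified error applied to the group's remaining bits, clamped into [0, MIN(max_bits, gf_group_bits)]. A compilable sketch of just that calculation, with clamp_int standing in for the clamp() helper the new code calls:

    #include <stdint.h>

    static int clamp_int(int value, int low, int high) {
      return value < low ? low : (value > high ? high : value);
    }

    /* Per-frame share of the GF group budget, proportional to the frame's
     * share of the group's remaining modified error. */
    static int std_frame_target(int64_t gf_group_bits,
                                double gf_group_error_left,
                                double frame_modified_err,
                                int max_frame_bits) {
      const double err_fraction =
          gf_group_error_left > 0.0 ? frame_modified_err / gf_group_error_left
                                    : 0.0;
      const int target = (int)(gf_group_bits * err_fraction);
      const int high = max_frame_bits < (int)gf_group_bits
                           ? max_frame_bits
                           : (int)gf_group_bits;
      return clamp_int(target, 0, high);
    }

For example, a frame holding 2% of the group's remaining error in a 500000-bit group targets 10000 bits, unless the per-frame maximum is lower.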
- else if ((cpi->common.current_video_frame < - (((unsigned int)cpi->twopass.total_stats.count * 255) >> 8)) && - ((cpi->common.current_video_frame + cpi->baseline_gf_interval) < - (unsigned int)cpi->twopass.total_stats.count)) { - int section_target_bandwidth = - (int)(cpi->twopass.bits_left / frames_left); - if (frames_left < 1) - frames_left = 1; - - tmp_q = estimate_max_q( - cpi, - &cpi->twopass.total_left_stats, - section_target_bandwidth); - - // Make a damped adjustment to active max Q - cpi->active_worst_quality = - adjust_active_maxq(cpi->active_worst_quality, tmp_q); - } - } - vp9_zero(this_frame); - if (EOF == input_stats(cpi, &this_frame)) - return; - - this_frame_intra_error = this_frame.intra_error; - this_frame_coded_error = this_frame.coded_error; - - // keyframe and section processing ! - if (cpi->twopass.frames_to_key == 0) { - // Define next KF group and assign bits to it - this_frame_copy = this_frame; - find_next_key_frame(cpi, &this_frame_copy); - } - - // Is this a GF / ARF (Note that a KF is always also a GF) - if (cpi->frames_till_gf_update_due == 0) { - // Define next gf group and assign bits to it - this_frame_copy = this_frame; - - cpi->gf_zeromotion_pct = 0; - -#if CONFIG_MULTIPLE_ARF - if (cpi->multi_arf_enabled) { - define_fixed_arf_period(cpi); - } else { -#endif - define_gf_group(cpi, &this_frame_copy); -#if CONFIG_MULTIPLE_ARF - } -#endif - - if (cpi->gf_zeromotion_pct > 995) { - // As long as max_thresh for encode breakout is small enough, it is ok - // to enable it for no-show frame, i.e. set enable_encode_breakout to 2. - if (!cpi->common.show_frame) - cpi->enable_encode_breakout = 0; - else - cpi->enable_encode_breakout = 2; - } - - // If we are going to code an altref frame at the end of the group - // and the current frame is not a key frame.... - // If the previous group used an arf this frame has already benefited - // from that arf boost and it should not be given extra bits - // If the previous group was NOT coded using arf we may want to apply - // some boost to this GF as well - if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)) { - // Assign a standard frames worth of bits from those allocated - // to the GF group - int bak = cpi->per_frame_bandwidth; - this_frame_copy = this_frame; - assign_std_frame_bits(cpi, &this_frame_copy); - cpi->per_frame_bandwidth = bak; - } - } else { - // Otherwise this is an ordinary frame - // Assign bits from those allocated to the GF group - this_frame_copy = this_frame; - assign_std_frame_bits(cpi, &this_frame_copy); - } - - // Keep a globally available copy of this and the next frame's iiratio. 
- cpi->twopass.this_iiratio = (int)(this_frame_intra_error / - DOUBLE_DIVIDE_CHECK(this_frame_coded_error)); - { - FIRSTPASS_STATS next_frame; - if (lookup_next_frame_stats(cpi, &next_frame) != EOF) { - cpi->twopass.next_iiratio = (int)(next_frame.intra_error / - DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); - } - } - - // Set nominal per second bandwidth for this frame - cpi->target_bandwidth = (int)(cpi->per_frame_bandwidth - * cpi->output_framerate); - if (cpi->target_bandwidth < 0) - cpi->target_bandwidth = 0; - - cpi->twopass.frames_to_key--; - - // Update the total stats remaining structure - subtract_stats(&cpi->twopass.total_left_stats, &this_frame); + vp9_rc_set_frame_target(cpi, target_frame_size); } static int test_candidate_kf(VP9_COMP *cpi, - FIRSTPASS_STATS *last_frame, - FIRSTPASS_STATS *this_frame, - FIRSTPASS_STATS *next_frame) { + const FIRSTPASS_STATS *last_frame, + const FIRSTPASS_STATS *this_frame, + const FIRSTPASS_STATS *next_frame) { int is_viable_kf = 0; - // Does the frame satisfy the primary criteria of a key frame - // If so, then examine how well it predicts subsequent frames + // Does the frame satisfy the primary criteria of a key frame? + // If so, then examine how well it predicts subsequent frames. if ((this_frame->pcnt_second_ref < 0.10) && (next_frame->pcnt_second_ref < 0.10) && ((this_frame->pcnt_inter < 0.05) || - (((this_frame->pcnt_inter - this_frame->pcnt_neutral) < .35) && + (((this_frame->pcnt_inter - this_frame->pcnt_neutral) < 0.35) && ((this_frame->intra_error / DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < 2.5) && ((fabs(last_frame->coded_error - this_frame->coded_error) / - DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > - .40) || + DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > 0.40) || (fabs(last_frame->intra_error - this_frame->intra_error) / - DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > - .40) || + DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > 0.40) || ((next_frame->intra_error / DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > 3.5))))) { int i; - FIRSTPASS_STATS *start_pos; - - FIRSTPASS_STATS local_next_frame; - + const FIRSTPASS_STATS *start_pos = cpi->twopass.stats_in; + FIRSTPASS_STATS local_next_frame = *next_frame; double boost_score = 0.0; double old_boost_score = 0.0; double decay_accumulator = 1.0; - double next_iiratio; - - local_next_frame = *next_frame; - // Note the starting file position so we can reset to it - start_pos = cpi->twopass.stats_in; - - // Examine how well the key frame predicts subsequent frames - for (i = 0; i < 16; i++) { - next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error / - DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error)); + // Examine how well the key frame predicts subsequent frames. + for (i = 0; i < 16; ++i) { + double next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error / + DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error)); if (next_iiratio > RMAX) next_iiratio = RMAX; - // Cumulative effect of decay in prediction quality + // Cumulative effect of decay in prediction quality. if (local_next_frame.pcnt_inter > 0.85) - decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter; + decay_accumulator *= local_next_frame.pcnt_inter; else - decay_accumulator = - decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0); - - // decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter; + decay_accumulator *= (0.85 + local_next_frame.pcnt_inter) / 2.0; - // Keep a running total + // Keep a running total. 
boost_score += (decay_accumulator * next_iiratio); - // Test various breakout clauses + // Test various breakout clauses. if ((local_next_frame.pcnt_inter < 0.05) || (next_iiratio < 1.5) || (((local_next_frame.pcnt_inter - local_next_frame.pcnt_neutral) < 0.20) && (next_iiratio < 3.0)) || ((boost_score - old_boost_score) < 3.0) || - (local_next_frame.intra_error < 200) - ) { + (local_next_frame.intra_error < 200)) { break; } old_boost_score = boost_score; // Get the next frame details - if (EOF == input_stats(cpi, &local_next_frame)) + if (EOF == input_stats(&cpi->twopass, &local_next_frame)) break; } @@ -2379,7 +1840,7 @@ static int test_candidate_kf(VP9_COMP *cpi, is_viable_kf = 1; } else { // Reset the file position - reset_fpf_position(cpi, start_pos); + reset_fpf_position(&cpi->twopass, start_pos); is_viable_kf = 0; } @@ -2387,12 +1848,13 @@ static int test_candidate_kf(VP9_COMP *cpi, return is_viable_kf; } + static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { int i, j; FIRSTPASS_STATS last_frame; FIRSTPASS_STATS first_frame; FIRSTPASS_STATS next_frame; - FIRSTPASS_STATS *start_position; + const FIRSTPASS_STATS *start_position; double decay_accumulator = 1.0; double zero_motion_accumulator = 1.0; @@ -2401,177 +1863,156 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { double kf_mod_err = 0.0; double kf_group_err = 0.0; - double kf_group_intra_err = 0.0; - double kf_group_coded_err = 0.0; double recent_loop_decay[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; + RATE_CONTROL *const rc = &cpi->rc; + struct twopass_rc *const twopass = &cpi->twopass; + vp9_zero(next_frame); - vp9_clear_system_state(); // __asm emms; - start_position = cpi->twopass.stats_in; + vp9_clear_system_state(); + start_position = twopass->stats_in; cpi->common.frame_type = KEY_FRAME; - // is this a forced key frame by interval - cpi->this_key_frame_forced = cpi->next_key_frame_forced; + // Is this a forced key frame by interval. + rc->this_key_frame_forced = rc->next_key_frame_forced; - // Clear the alt ref active flag as this can never be active on a key frame - cpi->source_alt_ref_active = 0; + // Clear the alt ref active flag as this can never be active on a key frame. + rc->source_alt_ref_active = 0; - // Kf is always a gf so clear frames till next gf counter - cpi->frames_till_gf_update_due = 0; + // KF is always a GF so clear frames till next gf counter. + rc->frames_till_gf_update_due = 0; - cpi->twopass.frames_to_key = 1; + rc->frames_to_key = 1; - // Take a copy of the initial frame details + // Take a copy of the initial frame details. first_frame = *this_frame; - cpi->twopass.kf_group_bits = 0; // Total bits available to kf group - cpi->twopass.kf_group_error_left = 0; // Group modified error score. + twopass->kf_group_bits = 0; // Total bits available to kf group + twopass->kf_group_error_left = 0; // Group modified error score. kf_mod_err = calculate_modified_err(cpi, this_frame); - // find the next keyframe + // Find the next keyframe. i = 0; - while (cpi->twopass.stats_in < cpi->twopass.stats_in_end) { - // Accumulate kf group error + while (twopass->stats_in < twopass->stats_in_end) { + // Accumulate kf group error. kf_group_err += calculate_modified_err(cpi, this_frame); - // These figures keep intra and coded error counts for all frames including - // key frames in the group. The effect of the key frame itself can be - // subtracted out using the first_frame data collected above. 
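Everything in test_candidate_kf() above hangs off the intra/coded error ratio (the "iiratio"): a candidate is viable when intra coding the following frames would be nearly as cheap as predicting them, sustained over up to 16 frames, while the decay product punishes candidates whose prediction quality collapses immediately. A compact sketch of that forward scan; the 0.000001 guard mirrors DOUBLE_DIVIDE_CHECK and ratio_cap plays the role of RMAX:

    /* Score how well a candidate key frame predicts the following frames.
     * intra_err/coded_err/pcnt_inter per frame come from first-pass stats. */
    static double score_candidate_kf(const double *intra_err,
                                     const double *coded_err,
                                     const double *pcnt_inter,
                                     int n, double ratio_cap) {
      double decay_accumulator = 1.0;
      double boost_score = 0.0;
      int i;
      for (i = 0; i < n && i < 16; ++i) {
        double iiratio = intra_err[i] / (coded_err[i] + 0.000001);
        if (iiratio > ratio_cap)
          iiratio = ratio_cap;
        /* Prediction decays faster when fewer blocks are inter coded. */
        decay_accumulator *= (pcnt_inter[i] > 0.85)
                                 ? pcnt_inter[i]
                                 : (0.85 + pcnt_inter[i]) / 2.0;
        boost_score += decay_accumulator * iiratio;
      }
      return boost_score;
    }

The breakout clauses above terminate this scan early when the ratio, the inter percentage, or the per-frame score increment drops below its threshold, and the candidate is accepted only when the final score clears the viability test.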
- kf_group_intra_err += this_frame->intra_error; - kf_group_coded_err += this_frame->coded_error; - - // load a the next frame's stats + // Load the next frame's stats. last_frame = *this_frame; - input_stats(cpi, this_frame); + input_stats(twopass, this_frame); // Provided that we are not at the end of the file... - if (cpi->oxcf.auto_key - && lookup_next_frame_stats(cpi, &next_frame) != EOF) { - // Normal scene cut check + if (cpi->oxcf.auto_key && + lookup_next_frame_stats(twopass, &next_frame) != EOF) { + // Check for a scene cut. if (test_candidate_kf(cpi, &last_frame, this_frame, &next_frame)) break; - - // How fast is prediction quality decaying - loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); + // How fast is the prediction quality decaying? + loop_decay_rate = get_prediction_decay_rate(&cpi->common, &next_frame); // We want to know something about the recent past... rather than - // as used elsewhere where we are concened with decay in prediction + // as used elsewhere where we are concerned with decay in prediction // quality since the last GF or KF. recent_loop_decay[i % 8] = loop_decay_rate; decay_accumulator = 1.0; - for (j = 0; j < 8; j++) + for (j = 0; j < 8; ++j) decay_accumulator *= recent_loop_decay[j]; // Special check for transition or high motion followed by a - // to a static scene. + // static scene. if (detect_transition_to_still(cpi, i, cpi->key_frame_frequency - i, loop_decay_rate, decay_accumulator)) break; - // Step on to the next frame - cpi->twopass.frames_to_key++; + // Step on to the next frame. + ++rc->frames_to_key; // If we don't have a real key frame within the next two - // forcekeyframeevery intervals then break out of the loop. - if (cpi->twopass.frames_to_key >= 2 * (int)cpi->key_frame_frequency) + // key_frame_frequency intervals then break out of the loop. + if (rc->frames_to_key >= 2 * (int)cpi->key_frame_frequency) break; } else { - cpi->twopass.frames_to_key++; + ++rc->frames_to_key; } - i++; + ++i; } // If there is a max kf interval set by the user we must obey it. // We already breakout of the loop above at 2x max. - // This code centers the extra kf if the actual natural - // interval is between 1x and 2x - if (cpi->oxcf.auto_key - && cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency) { - FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in; + // This code centers the extra kf if the actual natural interval + // is between 1x and 2x. + if (cpi->oxcf.auto_key && + rc->frames_to_key > (int)cpi->key_frame_frequency) { FIRSTPASS_STATS tmp_frame; - cpi->twopass.frames_to_key /= 2; + rc->frames_to_key /= 2; - // Copy first frame details + // Copy first frame details. tmp_frame = first_frame; - // Reset to the start of the group - reset_fpf_position(cpi, start_position); + // Reset to the start of the group. + reset_fpf_position(twopass, start_position); kf_group_err = 0; - kf_group_intra_err = 0; - kf_group_coded_err = 0; - // Rescan to get the correct error data for the forced kf group - for (i = 0; i < cpi->twopass.frames_to_key; i++) { - // Accumulate kf group errors + // Rescan to get the correct error data for the forced kf group. + for (i = 0; i < rc->frames_to_key; ++i) { + // Accumulate kf group errors. kf_group_err += calculate_modified_err(cpi, &tmp_frame); - kf_group_intra_err += tmp_frame.intra_error; - kf_group_coded_err += tmp_frame.coded_error; - // Load a the next frame's stats - input_stats(cpi, &tmp_frame); + // Load the next frame's stats. 
+ input_stats(twopass, &tmp_frame); } - - // Reset to the start of the group - reset_fpf_position(cpi, current_pos); - - cpi->next_key_frame_forced = 1; + rc->next_key_frame_forced = 1; + } else if (twopass->stats_in == twopass->stats_in_end) { + rc->next_key_frame_forced = 1; } else { - cpi->next_key_frame_forced = 0; + rc->next_key_frame_forced = 0; } - // Special case for the last frame of the file - if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) { - // Accumulate kf group error - kf_group_err += calculate_modified_err(cpi, this_frame); - // These figures keep intra and coded error counts for all frames including - // key frames in the group. The effect of the key frame itself can be - // subtracted out using the first_frame data collected above. - kf_group_intra_err += this_frame->intra_error; - kf_group_coded_err += this_frame->coded_error; + // Special case for the last key frame of the file. + if (twopass->stats_in >= twopass->stats_in_end) { + // Accumulate kf group error. + kf_group_err += calculate_modified_err(cpi, this_frame); } // Calculate the number of bits that should be assigned to the kf group. - if ((cpi->twopass.bits_left > 0) && - (cpi->twopass.modified_error_left > 0.0)) { - // Max for a single normal frame (not key frame) + if (twopass->bits_left > 0 && twopass->modified_error_left > 0.0) { + // Maximum number of bits for a single normal frame (not key frame). int max_bits = frame_max_bits(cpi); - // Maximum bits for the kf group + // Maximum number of bits allocated to the key frame group. int64_t max_grp_bits; // Default allocation based on bits left and relative - // complexity of the section - cpi->twopass.kf_group_bits = (int64_t)(cpi->twopass.bits_left * - (kf_group_err / - cpi->twopass.modified_error_left)); + // complexity of the section. + twopass->kf_group_bits = (int64_t)(twopass->bits_left * + (kf_group_err / twopass->modified_error_left)); // Clip based on maximum per frame rate defined by the user. - max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key; - if (cpi->twopass.kf_group_bits > max_grp_bits) - cpi->twopass.kf_group_bits = max_grp_bits; + max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key; + if (twopass->kf_group_bits > max_grp_bits) + twopass->kf_group_bits = max_grp_bits; } else { - cpi->twopass.kf_group_bits = 0; + twopass->kf_group_bits = 0; } - // Reset the first pass file position - reset_fpf_position(cpi, start_position); + // Reset the first pass file position. + reset_fpf_position(twopass, start_position); // Determine how big to make this keyframe based on how well the subsequent // frames use inter blocks. decay_accumulator = 1.0; boost_score = 0.0; - loop_decay_rate = 1.00; // Starting decay rate // Scan through the kf group collating various stats. - for (i = 0; i < cpi->twopass.frames_to_key; i++) { + for (i = 0; i < rc->frames_to_key; ++i) { double r; - if (EOF == input_stats(cpi, &next_frame)) + if (EOF == input_stats(twopass, &next_frame)) break; // Monitor for static sections. @@ -2582,21 +2023,21 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { } // For the first few frames collect data to decide kf boost. 
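The rescan above deserves a number line: the search loop is allowed to run to twice the user's maximum key frame interval, and when the natural cut lands beyond the maximum (but under 2x), frames_to_key is simply halved so the forced key frame splits the span evenly instead of sitting flush against the limit. In isolation:

    #include <stdio.h>

    /* Centre a forced key frame: the scan loop above guarantees
     * detected <= 2 * max_interval, so halving always lands in range. */
    static int forced_kf_interval(int detected, int max_interval) {
      return detected > max_interval ? detected / 2 : detected;
    }

    int main(void) {
      /* Natural cut at frame 40 with a 30-frame maximum: force a key
       * frame at 20, leaving 20 more frames to the natural cut. */
      printf("%d\n", forced_kf_interval(40, 30));  /* 20 */
      return 0;
    }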
- if (i <= (cpi->max_gf_interval * 2)) { - if (next_frame.intra_error > cpi->twopass.kf_intra_err_min) + if (i <= (rc->max_gf_interval * 2)) { + if (next_frame.intra_error > twopass->kf_intra_err_min) r = (IIKFACTOR2 * next_frame.intra_error / DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); else - r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min / + r = (IIKFACTOR2 * twopass->kf_intra_err_min / DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); if (r > RMAX) r = RMAX; - // How fast is prediction quality decaying - if (!detect_flash(cpi, 0)) { - loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); - decay_accumulator = decay_accumulator * loop_decay_rate; + // How fast is prediction quality decaying. + if (!detect_flash(twopass, 0)) { + loop_decay_rate = get_prediction_decay_rate(&cpi->common, &next_frame); + decay_accumulator *= loop_decay_rate; decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR ? MIN_DECAY_FACTOR : decay_accumulator; } @@ -2609,121 +2050,249 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { FIRSTPASS_STATS sectionstats; zero_stats(§ionstats); - reset_fpf_position(cpi, start_position); + reset_fpf_position(twopass, start_position); - for (i = 0; i < cpi->twopass.frames_to_key; i++) { - input_stats(cpi, &next_frame); + for (i = 0; i < rc->frames_to_key; ++i) { + input_stats(twopass, &next_frame); accumulate_stats(§ionstats, &next_frame); } avg_stats(§ionstats); - cpi->twopass.section_intra_rating = (int) - (sectionstats.intra_error - / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error)); + twopass->section_intra_rating = (int) (sectionstats.intra_error / + DOUBLE_DIVIDE_CHECK(sectionstats.coded_error)); } - // Reset the first pass file position - reset_fpf_position(cpi, start_position); + // Reset the first pass file position. + reset_fpf_position(twopass, start_position); - // Work out how many bits to allocate for the key frame itself + // Work out how many bits to allocate for the key frame itself. if (1) { int kf_boost = (int)boost_score; int allocation_chunks; int alt_kf_bits; - if (kf_boost < (cpi->twopass.frames_to_key * 3)) - kf_boost = (cpi->twopass.frames_to_key * 3); + if (kf_boost < (rc->frames_to_key * 3)) + kf_boost = (rc->frames_to_key * 3); - if (kf_boost < 300) // Min KF boost - kf_boost = 300; + if (kf_boost < MIN_KF_BOOST) + kf_boost = MIN_KF_BOOST; // Make a note of baseline boost and the zero motion // accumulator value for use elsewhere. - cpi->kf_boost = kf_boost; - cpi->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0); - - // We do three calculations for kf size. - // The first is based on the error score for the whole kf group. - // The second (optionaly) on the key frames own error if this is - // smaller than the average for the group. - // The final one insures that the frame receives at least the - // allocation it would have received based on its own error score vs - // the error score remaining - // Special case if the sequence appears almost totaly static - // In this case we want to spend almost all of the bits on the - // key frame. - // cpi->twopass.frames_to_key-1 because key frame itself is taken - // care of by kf_boost. 
+      rc->kf_boost = kf_boost;
+      twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
+
+      // Key frame size depends on:
+      // (1) the error score for the whole key frame group,
+      // (2) the key frame's own error if this is smaller than the
+      //     average for the group (optional),
+      // (3) ensuring that the frame receives at least the allocation it would
+      //     have received based on its own error score vs the error score
+      //     remaining.
+      // Special case:
+      // If the sequence appears almost totally static we want to spend almost
+      // all of the bits on the key frame.
+      //
+      // We use (cpi->rc.frames_to_key - 1) below because the key frame itself is
+      // taken care of by kf_boost.
       if (zero_motion_accumulator >= 0.99) {
-        allocation_chunks =
-            ((cpi->twopass.frames_to_key - 1) * 10) + kf_boost;
+        allocation_chunks = ((rc->frames_to_key - 1) * 10) + kf_boost;
       } else {
-        allocation_chunks =
-            ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost;
+        allocation_chunks = ((rc->frames_to_key - 1) * 100) + kf_boost;
       }
 
-      // Prevent overflow
+      // Prevent overflow.
       if (kf_boost > 1028) {
         int divisor = kf_boost >> 10;
         kf_boost /= divisor;
         allocation_chunks /= divisor;
       }
 
-      cpi->twopass.kf_group_bits =
-          (cpi->twopass.kf_group_bits < 0) ? 0 : cpi->twopass.kf_group_bits;
+      twopass->kf_group_bits = (twopass->kf_group_bits < 0) ? 0
+                                  : twopass->kf_group_bits;
 
-      // Calculate the number of bits to be spent on the key frame
-      cpi->twopass.kf_bits =
-          (int)((double)kf_boost *
-                ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks));
+      // Calculate the number of bits to be spent on the key frame.
+      twopass->kf_bits = (int)((double)kf_boost *
+          ((double)twopass->kf_group_bits / allocation_chunks));
 
       // If the key frame is actually easier than the average for the
-      // kf group (which does sometimes happen... eg a blank intro frame)
-      // Then use an alternate calculation based on the kf error score
+      // kf group (which does sometimes happen, e.g. a blank intro frame)
+      // then use an alternate calculation based on the kf error score
       // which should give a smaller key frame.
-      if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key) {
-        double alt_kf_grp_bits =
-            ((double)cpi->twopass.bits_left *
-             (kf_mod_err * (double)cpi->twopass.frames_to_key) /
-             DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left));
+      if (kf_mod_err < kf_group_err / rc->frames_to_key) {
+        double alt_kf_grp_bits = ((double)twopass->bits_left *
+           (kf_mod_err * (double)rc->frames_to_key) /
+           DOUBLE_DIVIDE_CHECK(twopass->modified_error_left));
 
         alt_kf_bits = (int)((double)kf_boost *
                             (alt_kf_grp_bits / (double)allocation_chunks));
 
-        if (cpi->twopass.kf_bits > alt_kf_bits) {
-          cpi->twopass.kf_bits = alt_kf_bits;
-        }
+        if (twopass->kf_bits > alt_kf_bits)
+          twopass->kf_bits = alt_kf_bits;
       } else {
-        // Else if it is much harder than other frames in the group make sure
-        // it at least receives an allocation in keeping with its relative
-        // error score
+        // Else if it is much harder than other frames in the group make sure
+        // it at least receives an allocation in keeping with its relative
+        // error score.
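The allocation-chunks arithmetic just introduced is easiest to check in isolation before the hunk continues with the alternate calculations below: every other frame in the group contributes 100 chunks (only 10 when the section is nearly static, which funnels almost all bits to the key frame), the key frame contributes kf_boost chunks, and its bit share is its chunks over the total. A runnable sketch including the same overflow guard:

    #include <stdint.h>
    #include <stdio.h>

    /* Split a KF group budget using "allocation chunks": kf_boost chunks
     * for the key frame, 100 per remaining frame (10 when nearly static). */
    static int kf_bits_from_boost(int kf_boost, int frames_to_key,
                                  int64_t kf_group_bits, int nearly_static) {
      const int per_frame_chunks = nearly_static ? 10 : 100;
      int allocation_chunks = (frames_to_key - 1) * per_frame_chunks + kf_boost;

      /* Keep the integer math in range, as the code above does. */
      if (kf_boost > 1028) {
        const int divisor = kf_boost >> 10;
        kf_boost /= divisor;
        allocation_chunks /= divisor;
      }
      return (int)((double)kf_boost *
                   ((double)kf_group_bits / allocation_chunks));
    }

    int main(void) {
      /* A 300 boost over a 30-frame group: the key frame gets
       * 300 / (29 * 100 + 300), about 9.4% of the group budget. */
      printf("%d\n", kf_bits_from_boost(300, 30, 1000000, 0));  /* 93750 */
      return 0;
    }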
+ alt_kf_bits = (int)((double)twopass->bits_left * (kf_mod_err / + DOUBLE_DIVIDE_CHECK(twopass->modified_error_left))); + + if (alt_kf_bits > twopass->kf_bits) { + twopass->kf_bits = alt_kf_bits; } } - - cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits; - // Add in the minimum frame allowance - cpi->twopass.kf_bits += cpi->min_frame_bandwidth; - - // Peer frame bit target for this frame - cpi->per_frame_bandwidth = cpi->twopass.kf_bits; - // Convert to a per second bitrate - cpi->target_bandwidth = (int)(cpi->twopass.kf_bits * - cpi->output_framerate); + twopass->kf_group_bits -= twopass->kf_bits; + // Per frame bit target for this frame. + vp9_rc_set_frame_target(cpi, twopass->kf_bits); } - // Note the total error score of the kf group minus the key frame itself - cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err); + // Note the total error score of the kf group minus the key frame itself. + twopass->kf_group_error_left = (int)(kf_group_err - kf_mod_err); // Adjust the count of total modified error left. // The count of bits left is adjusted elsewhere based on real coded frame // sizes. - cpi->twopass.modified_error_left -= kf_group_err; + twopass->modified_error_left -= kf_group_err; +} + +void vp9_rc_get_first_pass_params(VP9_COMP *cpi) { + VP9_COMMON *const cm = &cpi->common; + if (!cpi->refresh_alt_ref_frame && + (cm->current_video_frame == 0 || + cm->frame_flags & FRAMEFLAGS_KEY)) { + cm->frame_type = KEY_FRAME; + } else { + cm->frame_type = INTER_FRAME; + } + // Do not use periodic key frames. + cpi->rc.frames_to_key = INT_MAX; +} + +void vp9_rc_get_second_pass_params(VP9_COMP *cpi) { + VP9_COMMON *const cm = &cpi->common; + RATE_CONTROL *const rc = &cpi->rc; + struct twopass_rc *const twopass = &cpi->twopass; + const int frames_left = (int)(twopass->total_stats.count - + cm->current_video_frame); + FIRSTPASS_STATS this_frame; + FIRSTPASS_STATS this_frame_copy; + + double this_frame_intra_error; + double this_frame_coded_error; + int target; + + if (!twopass->stats_in) + return; + + if (cpi->refresh_alt_ref_frame) { + cm->frame_type = INTER_FRAME; + vp9_rc_set_frame_target(cpi, twopass->gf_bits); + return; + } + + vp9_clear_system_state(); + + if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) { + twopass->active_worst_quality = cpi->oxcf.cq_level; + } else if (cm->current_video_frame == 0) { + // Special case code for first frame. + const int section_target_bandwidth = (int)(twopass->bits_left / + frames_left); + const int tmp_q = vp9_twopass_worst_quality(cpi, &twopass->total_left_stats, + section_target_bandwidth); + twopass->active_worst_quality = tmp_q; + rc->ni_av_qi = tmp_q; + rc->avg_q = vp9_convert_qindex_to_q(tmp_q); + } + vp9_zero(this_frame); + if (EOF == input_stats(twopass, &this_frame)) + return; + + this_frame_intra_error = this_frame.intra_error; + this_frame_coded_error = this_frame.coded_error; + + // Keyframe and section processing. + if (rc->frames_to_key == 0 || + (cm->frame_flags & FRAMEFLAGS_KEY)) { + // Define next KF group and assign bits to it. + this_frame_copy = this_frame; + find_next_key_frame(cpi, &this_frame_copy); + } else { + cm->frame_type = INTER_FRAME; + } + + // Is this frame a GF / ARF? (Note: a key frame is always also a GF). + if (rc->frames_till_gf_update_due == 0) { + // Define next gf group and assign bits to it. 
+ this_frame_copy = this_frame; + +#if CONFIG_MULTIPLE_ARF + if (cpi->multi_arf_enabled) { + define_fixed_arf_period(cpi); + } else { +#endif + define_gf_group(cpi, &this_frame_copy); +#if CONFIG_MULTIPLE_ARF + } +#endif + + if (twopass->gf_zeromotion_pct > 995) { + // As long as max_thresh for encode breakout is small enough, it is ok + // to enable it for show frame, i.e. set allow_encode_breakout to + // ENCODE_BREAKOUT_LIMITED. + if (!cm->show_frame) + cpi->allow_encode_breakout = ENCODE_BREAKOUT_DISABLED; + else + cpi->allow_encode_breakout = ENCODE_BREAKOUT_LIMITED; + } + + rc->frames_till_gf_update_due = rc->baseline_gf_interval; + cpi->refresh_golden_frame = 1; + } else { + // Otherwise this is an ordinary frame. + // Assign bits from those allocated to the GF group. + this_frame_copy = this_frame; + assign_std_frame_bits(cpi, &this_frame_copy); + } + + // Keep a globally available copy of this and the next frame's iiratio. + twopass->this_iiratio = (int)(this_frame_intra_error / + DOUBLE_DIVIDE_CHECK(this_frame_coded_error)); + { + FIRSTPASS_STATS next_frame; + if (lookup_next_frame_stats(twopass, &next_frame) != EOF) { + twopass->next_iiratio = (int)(next_frame.intra_error / + DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); + } + } + + if (cpi->common.frame_type == KEY_FRAME) + target = vp9_rc_clamp_iframe_target_size(cpi, rc->this_frame_target); + else + target = vp9_rc_clamp_pframe_target_size(cpi, rc->this_frame_target); + vp9_rc_set_frame_target(cpi, target); + + // Update the total stats remaining structure. + subtract_stats(&twopass->total_left_stats, &this_frame); +} + +void vp9_twopass_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) { +#ifdef DISABLE_RC_LONG_TERM_MEM + cpi->twopass.bits_left -= cpi->rc.this_frame_target; +#else + cpi->twopass.bits_left -= 8 * bytes_used; + // Update bits left to the kf and gf groups to account for overshoot or + // undershoot on these frames. 
+  if (cpi->common.frame_type == KEY_FRAME) {
+    cpi->twopass.kf_group_bits += cpi->rc.this_frame_target -
+                                  cpi->rc.projected_frame_size;
+
+    cpi->twopass.kf_group_bits = MAX(cpi->twopass.kf_group_bits, 0);
+  } else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) {
+    cpi->twopass.gf_group_bits += cpi->rc.this_frame_target -
+                                  cpi->rc.projected_frame_size;
+
+    cpi->twopass.gf_group_bits = MAX(cpi->twopass.gf_group_bits, 0);
+  }
+#endif
 }
diff --git a/libvpx/vp9/encoder/vp9_firstpass.h b/libvpx/vp9/encoder/vp9_firstpass.h
index c18d11e..278b22c 100644
--- a/libvpx/vp9/encoder/vp9_firstpass.h
+++ b/libvpx/vp9/encoder/vp9_firstpass.h
@@ -10,14 +10,94 @@
 #ifndef VP9_ENCODER_VP9_FIRSTPASS_H_
 #define VP9_ENCODER_VP9_FIRSTPASS_H_
-#include "vp9/encoder/vp9_onyx_int.h"
-void vp9_init_first_pass(VP9_COMP *cpi);
-void vp9_first_pass(VP9_COMP *cpi);
-void vp9_end_first_pass(VP9_COMP *cpi);
+#ifdef __cplusplus
+extern "C" {
+#endif
-void vp9_init_second_pass(VP9_COMP *cpi);
-void vp9_second_pass(VP9_COMP *cpi);
-void vp9_end_second_pass(VP9_COMP *cpi);
+typedef struct {
+  double frame;
+  double intra_error;
+  double coded_error;
+  double sr_coded_error;
+  double ssim_weighted_pred_err;
+  double pcnt_inter;
+  double pcnt_motion;
+  double pcnt_second_ref;
+  double pcnt_neutral;
+  double MVr;
+  double mvr_abs;
+  double MVc;
+  double mvc_abs;
+  double MVrv;
+  double MVcv;
+  double mv_in_out_count;
+  double new_mv_count;
+  double duration;
+  double count;
+} FIRSTPASS_STATS;
+
+struct twopass_rc {
+  unsigned int section_intra_rating;
+  unsigned int next_iiratio;
+  unsigned int this_iiratio;
+  FIRSTPASS_STATS total_stats;
+  FIRSTPASS_STATS this_frame_stats;
+  const FIRSTPASS_STATS *stats_in;
+  const FIRSTPASS_STATS *stats_in_start;
+  const FIRSTPASS_STATS *stats_in_end;
+  FIRSTPASS_STATS total_left_stats;
+  int first_pass_done;
+  int64_t bits_left;
+  int64_t clip_bits_total;
+  double avg_iiratio;
+  double modified_error_min;
+  double modified_error_max;
+  double modified_error_total;
+  double modified_error_left;
+  double kf_intra_err_min;
+  double gf_intra_err_min;
+  int kf_bits;
+  // Remaining error from uncoded frames in a gf group.
Two pass use only + int64_t gf_group_error_left; + + // Projected total bits available for a key frame group of frames + int64_t kf_group_bits; + + // Error score of frames still to be coded in kf group + int64_t kf_group_error_left; + + // Projected Bits available for a group of frames including 1 GF or ARF + int64_t gf_group_bits; + // Bits for the golden frame or ARF - 2 pass only + int gf_bits; + int alt_extra_bits; + + int sr_update_lag; + + int kf_zeromotion_pct; + int gf_zeromotion_pct; + + int active_worst_quality; +}; + +struct VP9_COMP; + +void vp9_init_first_pass(struct VP9_COMP *cpi); +void vp9_rc_get_first_pass_params(struct VP9_COMP *cpi); +void vp9_first_pass(struct VP9_COMP *cpi); +void vp9_end_first_pass(struct VP9_COMP *cpi); + +void vp9_init_second_pass(struct VP9_COMP *cpi); +void vp9_rc_get_second_pass_params(struct VP9_COMP *cpi); +int vp9_twopass_worst_quality(struct VP9_COMP *cpi, FIRSTPASS_STATS *fpstats, + int section_target_bandwitdh); + +// Post encode update of the rate control parameters for 2-pass +void vp9_twopass_postencode_update(struct VP9_COMP *cpi, + uint64_t bytes_used); +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_ENCODER_VP9_FIRSTPASS_H_ diff --git a/libvpx/vp9/encoder/vp9_lookahead.c b/libvpx/vp9/encoder/vp9_lookahead.c index c28c868..a88d5ec 100644 --- a/libvpx/vp9/encoder/vp9_lookahead.c +++ b/libvpx/vp9/encoder/vp9_lookahead.c @@ -11,9 +11,12 @@ #include <stdlib.h> #include "./vpx_config.h" + #include "vp9/common/vp9_common.h" + +#include "vp9/encoder/vp9_extend.h" #include "vp9/encoder/vp9_lookahead.h" -#include "vp9/common/vp9_extend.h" +#include "vp9/encoder/vp9_onyx_int.h" struct lookahead_ctx { unsigned int max_sz; /* Absolute size of the queue */ @@ -73,7 +76,7 @@ struct lookahead_ctx * vp9_lookahead_init(unsigned int width, for (i = 0; i < depth; i++) if (vp9_alloc_frame_buffer(&ctx->buf[i].img, width, height, subsampling_x, subsampling_y, - VP9BORDERINPIXELS)) + VP9_ENC_BORDER_IN_PIXELS)) goto bail; } return ctx; @@ -85,8 +88,7 @@ struct lookahead_ctx * vp9_lookahead_init(unsigned int width, #define USE_PARTIAL_COPY 0 int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, - int64_t ts_start, int64_t ts_end, unsigned int flags, - unsigned char *active_map) { + int64_t ts_start, int64_t ts_end, unsigned int flags) { struct lookahead_entry *buf; #if USE_PARTIAL_COPY int row, col, active_end; @@ -173,7 +175,6 @@ struct lookahead_entry * vp9_lookahead_peek(struct lookahead_ctx *ctx, int index) { struct lookahead_entry *buf = NULL; - assert(index < (int)ctx->max_sz); if (index < (int)ctx->sz) { index += ctx->read_idx; if (index >= (int)ctx->max_sz) diff --git a/libvpx/vp9/encoder/vp9_lookahead.h b/libvpx/vp9/encoder/vp9_lookahead.h index c773f8f..ff63c0d 100644 --- a/libvpx/vp9/encoder/vp9_lookahead.h +++ b/libvpx/vp9/encoder/vp9_lookahead.h @@ -14,6 +14,10 @@ #include "vpx_scale/yv12config.h" #include "vpx/vpx_integer.h" +#ifdef __cplusplus +extern "C" { +#endif + #define MAX_LAG_BUFFERS 25 struct lookahead_entry { @@ -59,8 +63,7 @@ void vp9_lookahead_destroy(struct lookahead_ctx *ctx); * \param[in] active_map Map that specifies which macroblock is active */ int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, - int64_t ts_start, int64_t ts_end, unsigned int flags, - unsigned char *active_map); + int64_t ts_start, int64_t ts_end, unsigned int flags); /**\brief Get the next source buffer to encode @@ -94,4 +97,8 @@ struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx, */ 
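Before the header resumes below with vp9_lookahead_depth(), the call pattern implied by the signature change is worth spelling out: with active_map gone, a caller pushes only the frame, its timestamps and flags. A hedged usage sketch (error handling elided; the YV12 buffer is assumed allocated and filled elsewhere, and how soon vp9_lookahead_pop() yields frames depends on the configured queue depth):

    #include "vp9/encoder/vp9_lookahead.h"

    /* Sketch of the producer/consumer pattern around the lookahead queue;
     * illustrative, not encoder code. */
    static void queue_one_frame(struct lookahead_ctx *ctx,
                                YV12_BUFFER_CONFIG *src,
                                int64_t ts_start, int64_t ts_end) {
      struct lookahead_entry *entry;

      /* New signature: no active_map argument. Nonzero return = queue full. */
      if (vp9_lookahead_push(ctx, src, ts_start, ts_end, 0))
        return;

      /* Peek ahead (e.g. for ARF decisions) without consuming the frame... */
      entry = vp9_lookahead_peek(ctx, 0);
      (void)entry;

      /* ...and pop the oldest buffered frame for encoding; the second
       * argument (drain) forces output when flushing at end of stream. */
      entry = vp9_lookahead_pop(ctx, 0);
      if (entry != NULL) {
        /* entry->img holds the frame; entry->ts_start / entry->ts_end
         * carry the timestamps pushed above. */
      }
    }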
unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_ENCODER_VP9_LOOKAHEAD_H_ diff --git a/libvpx/vp9/encoder/vp9_mbgraph.c b/libvpx/vp9/encoder/vp9_mbgraph.c index 7b605b2..6520389 100644 --- a/libvpx/vp9/encoder/vp9_mbgraph.c +++ b/libvpx/vp9/encoder/vp9_mbgraph.c @@ -11,7 +11,6 @@ #include <limits.h> #include "vpx_mem/vpx_mem.h" -#include "vp9/encoder/vp9_encodeintra.h" #include "vp9/encoder/vp9_rdopt.h" #include "vp9/encoder/vp9_segmentation.h" #include "vp9/encoder/vp9_mcomp.h" @@ -23,55 +22,49 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, - int_mv *ref_mv, - int_mv *dst_mv, + const MV *ref_mv, + MV *dst_mv, int mb_row, int mb_col) { MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *const xd = &x->e_mbd; vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16]; - unsigned int best_err; const int tmp_col_min = x->mv_col_min; const int tmp_col_max = x->mv_col_max; const int tmp_row_min = x->mv_row_min; const int tmp_row_max = x->mv_row_max; - int_mv ref_full; + MV ref_full; // Further step/diamond searches as necessary int step_param = cpi->sf.reduce_first_step_size + (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2); step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2)); - vp9_clamp_mv_min_max(x, &ref_mv->as_mv); + vp9_set_mv_search_range(x, ref_mv); - ref_full.as_mv.col = ref_mv->as_mv.col >> 3; - ref_full.as_mv.row = ref_mv->as_mv.row >> 3; + ref_full.col = ref_mv->col >> 3; + ref_full.row = ref_mv->row >> 3; /*cpi->sf.search_method == HEX*/ - best_err = vp9_hex_search(x, &ref_full.as_mv, step_param, x->errorperbit, - 0, &v_fn_ptr, - 0, &ref_mv->as_mv, &dst_mv->as_mv); + vp9_hex_search(x, &ref_full, step_param, x->errorperbit, 0, &v_fn_ptr, 0, + ref_mv, dst_mv); // Try sub-pixel MC // if (bestsme > error_thresh && bestsme < INT_MAX) { int distortion; unsigned int sse; - best_err = cpi->find_fractional_mv_step( - x, - &dst_mv->as_mv, &ref_mv->as_mv, - cpi->common.allow_high_precision_mv, - x->errorperbit, &v_fn_ptr, - 0, cpi->sf.subpel_iters_per_step, NULL, NULL, - & distortion, &sse); + cpi->find_fractional_mv_step( + x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit, + &v_fn_ptr, 0, cpi->sf.subpel_iters_per_step, NULL, NULL, &distortion, + &sse); } - vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv); + xd->mi_8x8[0]->mbmi.mode = NEWMV; + xd->mi_8x8[0]->mbmi.mv[0].as_mv = *dst_mv; + vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16); - best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].dst.buf, xd->plane[0].dst.stride, - INT_MAX); /* restore UMV window */ x->mv_col_min = tmp_col_min; @@ -79,11 +72,13 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, x->mv_row_min = tmp_row_min; x->mv_row_max = tmp_row_max; - return best_err; + return vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, + xd->plane[0].dst.buf, xd->plane[0].dst.stride, + INT_MAX); } -static int do_16x16_motion_search(VP9_COMP *cpi, int_mv *ref_mv, int_mv *dst_mv, - int mb_row, int mb_col) { +static int do_16x16_motion_search(VP9_COMP *cpi, const int_mv *ref_mv, + int_mv *dst_mv, int mb_row, int mb_col) { MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *const xd = &x->e_mbd; unsigned int err, tmp_err; @@ -98,7 +93,8 @@ static int do_16x16_motion_search(VP9_COMP *cpi, int_mv *ref_mv, int_mv *dst_mv, // Test last reference frame using the previous best mv as the // starting point (best reference) for the search - tmp_err = do_16x16_motion_iteration(cpi, 
ref_mv, &tmp_mv, mb_row, mb_col); + tmp_err = do_16x16_motion_iteration(cpi, &ref_mv->as_mv, &tmp_mv.as_mv, + mb_row, mb_col); if (tmp_err < err) { err = tmp_err; dst_mv->as_int = tmp_mv.as_int; @@ -111,7 +107,7 @@ static int do_16x16_motion_search(VP9_COMP *cpi, int_mv *ref_mv, int_mv *dst_mv, int_mv zero_ref_mv, tmp_mv; zero_ref_mv.as_int = 0; - tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv, + tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv.as_mv, &tmp_mv.as_mv, mb_row, mb_col); if (tmp_err < err) { dst_mv->as_int = tmp_mv.as_int; @@ -138,7 +134,6 @@ static int do_16x16_zerozero_search(VP9_COMP *cpi, int_mv *dst_mv) { return err; } static int find_best_16x16_intra(VP9_COMP *cpi, - int mb_y_offset, MB_PREDICTION_MODE *pbest_mode) { MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *const xd = &x->e_mbd; @@ -153,7 +148,8 @@ static int find_best_16x16_intra(VP9_COMP *cpi, xd->mi_8x8[0]->mbmi.mode = mode; vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode, x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].dst.buf, xd->plane[0].dst.stride); + xd->plane[0].dst.buf, xd->plane[0].dst.stride, + 0, 0, 0); err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err); @@ -178,10 +174,7 @@ static void update_mbgraph_mb_stats int mb_y_offset, YV12_BUFFER_CONFIG *golden_ref, int_mv *prev_golden_ref_mv, - int gld_y_offset, YV12_BUFFER_CONFIG *alt_ref, - int_mv *prev_alt_ref_mv, - int arf_y_offset, int mb_row, int mb_col ) { @@ -198,7 +191,7 @@ static void update_mbgraph_mb_stats xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride; // do intra 16x16 prediction - intra_error = find_best_16x16_intra(cpi, mb_y_offset, + intra_error = find_best_16x16_intra(cpi, &stats->ref[INTRA_FRAME].m.mode); if (intra_error <= 0) intra_error = 1; @@ -282,8 +275,7 @@ static void update_mbgraph_frame_stats(VP9_COMP *cpi, MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col]; update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, - golden_ref, &gld_left_mv, gld_y_in_offset, - alt_ref, &arf_left_mv, arf_y_in_offset, + golden_ref, &gld_left_mv, alt_ref, mb_row, mb_col); arf_left_mv.as_int = mb_stats->ref[ALTREF_FRAME].m.mv.as_int; gld_left_mv.as_int = mb_stats->ref[GOLDEN_FRAME].m.mv.as_int; @@ -324,8 +316,8 @@ static void separate_arf_mbs(VP9_COMP *cpi) { 1)); // We are not interested in results beyond the alt ref itself. 
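do_16x16_motion_iteration() above compresses the encoder's whole motion-search pipeline into a dozen lines: clamp the search range around the reference MV, run a full-pel hex search, refine to sub-pel, build the inter predictor, and score it with vp9_sad16x16(). The cost-and-compare core is easiest to see in a toy exhaustive full-pel search; 8x8 blocks and a tiny window keep the example small:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int row, col; } ToyMV;

    static unsigned int sad8x8(const uint8_t *a, int a_stride,
                               const uint8_t *b, int b_stride) {
      unsigned int sad = 0;
      int r, c;
      for (r = 0; r < 8; ++r)
        for (c = 0; c < 8; ++c)
          sad += (unsigned int)abs(a[r * a_stride + c] - b[r * b_stride + c]);
      return sad;
    }

    /* Exhaustive full-pel search in [-range, range]^2 around ref_center. */
    static unsigned int full_pel_search(const uint8_t *src, int src_stride,
                                        const uint8_t *ref_center,
                                        int ref_stride, int range,
                                        ToyMV *best_mv) {
      unsigned int best_sad = UINT_MAX;
      int r, c;
      for (r = -range; r <= range; ++r) {
        for (c = -range; c <= range; ++c) {
          const unsigned int sad = sad8x8(
              src, src_stride, ref_center + r * ref_stride + c, ref_stride);
          if (sad < best_sad) {
            best_sad = sad;
            best_mv->row = r;
            best_mv->col = c;
          }
        }
      }
      return best_sad;
    }

    int main(void) {
      uint8_t ref[24 * 24], src[8 * 8];
      ToyMV mv = {0, 0};
      int r, c;
      for (r = 0; r < 24; ++r)
        for (c = 0; c < 24; ++c)
          ref[r * 24 + c] = (uint8_t)(r * 7 + c * 3);
      /* The source block matches the reference shifted by (2, 3). */
      for (r = 0; r < 8; ++r)
        for (c = 0; c < 8; ++c)
          src[r * 8 + c] = ref[(r + 10) * 24 + (c + 11)];
      full_pel_search(src, 8, &ref[8 * 24 + 8], 24, 4, &mv);
      printf("best mv: (%d, %d)\n", mv.row, mv.col);  /* (2, 3) */
      return 0;
    }

The production code swaps the exhaustive scan for the hex pattern, adds sub-pel refinement, and folds the motion-vector rate cost into the comparison.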
- if (n_frames > cpi->frames_till_gf_update_due) - n_frames = cpi->frames_till_gf_update_due; + if (n_frames > cpi->rc.frames_till_gf_update_due) + n_frames = cpi->rc.frames_till_gf_update_due; // defer cost to reference frames for (i = n_frames - 1; i >= 0; i--) { @@ -356,7 +348,7 @@ static void separate_arf_mbs(VP9_COMP *cpi) { for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) { // If any of the blocks in the sequence failed then the MB // goes in segment 0 - if (arf_not_zz[mi_row/2*cm->mb_cols + mi_col/2]) { + if (arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2]) { ncnt[0]++; cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 0; } else { @@ -379,10 +371,10 @@ static void separate_arf_mbs(VP9_COMP *cpi) { cpi->static_mb_pct = 0; cpi->seg0_cnt = ncnt[0]; - vp9_enable_segmentation((VP9_PTR)cpi); + vp9_enable_segmentation(&cm->seg); } else { cpi->static_mb_pct = 0; - vp9_disable_segmentation((VP9_PTR)cpi); + vp9_disable_segmentation(&cm->seg); } // Free localy allocated storage @@ -392,15 +384,13 @@ static void separate_arf_mbs(VP9_COMP *cpi) { void vp9_update_mbgraph_stats(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; int i, n_frames = vp9_lookahead_depth(cpi->lookahead); - YV12_BUFFER_CONFIG *golden_ref = - &cm->yv12_fb[cm->ref_frame_map[cpi->gld_fb_idx]]; + YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME); // we need to look ahead beyond where the ARF transitions into // being a GF - so exit if we don't look ahead beyond that - if (n_frames <= cpi->frames_till_gf_update_due) + if (n_frames <= cpi->rc.frames_till_gf_update_due) return; - if (n_frames > (int)cpi->frames_till_alt_ref_frame) - n_frames = cpi->frames_till_alt_ref_frame; + if (n_frames > MAX_LAG_BUFFERS) n_frames = MAX_LAG_BUFFERS; @@ -426,7 +416,7 @@ void vp9_update_mbgraph_stats(VP9_COMP *cpi) { golden_ref, cpi->Source); } - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); separate_arf_mbs(cpi); } diff --git a/libvpx/vp9/encoder/vp9_mbgraph.h b/libvpx/vp9/encoder/vp9_mbgraph.h index c5bca4d..bc2a704 100644 --- a/libvpx/vp9/encoder/vp9_mbgraph.h +++ b/libvpx/vp9/encoder/vp9_mbgraph.h @@ -11,6 +11,30 @@ #ifndef VP9_ENCODER_VP9_MBGRAPH_H_ #define VP9_ENCODER_VP9_MBGRAPH_H_ -void vp9_update_mbgraph_stats(VP9_COMP *cpi); +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + struct { + int err; + union { + int_mv mv; + MB_PREDICTION_MODE mode; + } m; + } ref[MAX_REF_FRAMES]; +} MBGRAPH_MB_STATS; + +typedef struct { + MBGRAPH_MB_STATS *mb_stats; +} MBGRAPH_FRAME_STATS; + +struct VP9_COMP; + +void vp9_update_mbgraph_stats(struct VP9_COMP *cpi); + +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_ENCODER_VP9_MBGRAPH_H_ diff --git a/libvpx/vp9/encoder/vp9_mcomp.c b/libvpx/vp9/encoder/vp9_mcomp.c index a52f5b1..26f1a02 100644 --- a/libvpx/vp9/encoder/vp9_mcomp.c +++ b/libvpx/vp9/encoder/vp9_mcomp.c @@ -16,7 +16,6 @@ #include "vpx_mem/vpx_mem.h" -#include "vp9/common/vp9_findnearmv.h" #include "vp9/common/vp9_common.h" #include "vp9/encoder/vp9_onyx_int.h" @@ -24,11 +23,16 @@ // #define NEW_DIAMOND_SEARCH -void vp9_clamp_mv_min_max(MACROBLOCK *x, MV *mv) { - const int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0); - const int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0); - const int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL; - const int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL; +void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv) { + int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 
1 : 0); + int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0); + int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL; + int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL; + + col_min = MAX(col_min, (MV_LOW >> 3) + 1); + row_min = MAX(row_min, (MV_LOW >> 3) + 1); + col_max = MIN(col_max, (MV_UPP >> 3) - 1); + row_max = MIN(row_max, (MV_UPP >> 3) - 1); // Get intersection of UMV window and valid MV window to reduce # of checks // in diamond search. @@ -51,9 +55,6 @@ int vp9_init_search_range(VP9_COMP *cpi, int size) { while ((size << sr) < MAX_FULL_PEL_VAL) sr++; - if (sr) - sr--; - sr += cpi->sf.reduce_first_step_size; sr = MIN(sr, (cpi->sf.max_step_search_steps - 2)); return sr; @@ -97,42 +98,23 @@ static int mvsad_err_cost(const MV *mv, const MV *ref, } void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride) { - int len; - int search_site_count = 0; + int len, ss_count = 1; - // Generate offsets for 4 search sites per step. - x->ss[search_site_count].mv.col = 0; - x->ss[search_site_count].mv.row = 0; - x->ss[search_site_count].offset = 0; - search_site_count++; + x->ss[0].mv.col = x->ss[0].mv.row = 0; + x->ss[0].offset = 0; for (len = MAX_FIRST_STEP; len > 0; len /= 2) { - // Compute offsets for search sites. - x->ss[search_site_count].mv.col = 0; - x->ss[search_site_count].mv.row = -len; - x->ss[search_site_count].offset = -len * stride; - search_site_count++; - - // Compute offsets for search sites. - x->ss[search_site_count].mv.col = 0; - x->ss[search_site_count].mv.row = len; - x->ss[search_site_count].offset = len * stride; - search_site_count++; - - // Compute offsets for search sites. - x->ss[search_site_count].mv.col = -len; - x->ss[search_site_count].mv.row = 0; - x->ss[search_site_count].offset = -len; - search_site_count++; - - // Compute offsets for search sites. - x->ss[search_site_count].mv.col = len; - x->ss[search_site_count].mv.row = 0; - x->ss[search_site_count].offset = len; - search_site_count++; + // Generate offsets for 4 search sites per step. 
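vp9_set_mv_search_range() above bounds the full-pel window twice: by MAX_FULL_PEL_VAL around the 1/8-pel predictor, rounding the lower bound inward when the predictor has a fractional remainder, and, new in this change, by the codeable motion-vector range (MV_LOW and MV_UPP are 1/8-pel limits, hence the >> 3). The arithmetic in isolation, with illustrative values for all three constants:

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Illustrative stand-ins for libvpx's limits (1/8-pel units for
     * kMvLow/kMvUpp, full-pel units for the search radius). */
    enum { kMaxFullPelVal = 1023, kMvLow = -(1 << 14), kMvUpp = 1 << 14 };

    typedef struct { int col_min, col_max, row_min, row_max; } SearchRange;

    /* Window of legal full-pel search positions around a 1/8-pel
     * predictor (mv_row/mv_col in 1/8-pel units). */
    static SearchRange mv_search_range(int mv_row, int mv_col) {
      SearchRange r;
      /* Round the lower bound inward when the predictor is fractional so
       * the full displacement stays representable. */
      r.col_min = (mv_col >> 3) - kMaxFullPelVal + (mv_col & 7 ? 1 : 0);
      r.row_min = (mv_row >> 3) - kMaxFullPelVal + (mv_row & 7 ? 1 : 0);
      r.col_max = (mv_col >> 3) + kMaxFullPelVal;
      r.row_max = (mv_row >> 3) + kMaxFullPelVal;

      /* Never search positions whose 1/8-pel encoding would overflow. */
      r.col_min = MAX(r.col_min, (kMvLow >> 3) + 1);
      r.row_min = MAX(r.row_min, (kMvLow >> 3) + 1);
      r.col_max = MIN(r.col_max, (kMvUpp >> 3) - 1);
      r.row_max = MIN(r.row_max, (kMvUpp >> 3) - 1);
      return r;
    }

For a predictor at mv_col = 37 (4 5/8 pel), the column window before the codeable-range clamp is [4 - 1023 + 1, 4 + 1023] = [-1018, 1027] full pels.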
+ const MV ss_mvs[] = {{-len, 0}, {len, 0}, {0, -len}, {0, len}}; + int i; + for (i = 0; i < 4; ++i) { + search_site *const ss = &x->ss[ss_count++]; + ss->mv = ss_mvs[i]; + ss->offset = ss->mv.row * stride + ss->mv.col; + } } - x->ss_count = search_site_count; + x->ss_count = ss_count; x->searches_per_step = 4; } @@ -178,35 +160,35 @@ void vp9_init3smotion_compensation(MACROBLOCK *x, int stride) { error_per_bit + 4096) >> 13 : 0) -#define SP(x) (((x) & 7) << 1) // convert motion vector component to offset - // for svf calc - -#define IFMVCV(r, c, s, e) \ - if (c >= minc && c <= maxc && r >= minr && r <= maxr) \ - s \ - else \ - e; +// convert motion vector component to offset for svf calc +static INLINE int sp(int x) { + return (x & 7) << 1; +} -/* pointer to predictor base of a motionvector */ -#define PRE(r, c) (y + (((r) >> 3) * y_stride + ((c) >> 3) -(offset))) +static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c, + int offset) { + return &buf[(r >> 3) * stride + (c >> 3) - offset]; +} /* returns subpixel variance error function */ #define DIST(r, c) \ - vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, src_stride, &sse) + vfp->svf(pre(y, y_stride, r, c, offset), y_stride, sp(c), sp(r), z, \ + src_stride, &sse) /* checks if (r, c) has better score than previous best */ #define CHECK_BETTER(v, r, c) \ - IFMVCV(r, c, { \ - thismse = (DIST(r, c)); \ - if ((v = MVC(r, c) + thismse) < besterr) { \ - besterr = v; \ - br = r; \ - bc = c; \ - *distortion = thismse; \ - *sse1 = sse; \ - } \ - }, \ - v = INT_MAX;) + if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \ + thismse = (DIST(r, c)); \ + if ((v = MVC(r, c) + thismse) < besterr) { \ + besterr = v; \ + br = r; \ + bc = c; \ + *distortion = thismse; \ + *sse1 = sse; \ + } \ + } else { \ + v = INT_MAX; \ + } #define FIRST_LEVEL_CHECKS \ { \ @@ -273,105 +255,7 @@ void vp9_init3smotion_compensation(MACROBLOCK *x, int stride) { } \ } -int vp9_find_best_sub_pixel_iterative(MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1) { - uint8_t *z = x->plane[0].src.buf; - int src_stride = x->plane[0].src.stride; - MACROBLOCKD *xd = &x->e_mbd; - - unsigned int besterr = INT_MAX; - unsigned int sse; - unsigned int whichdir; - unsigned int halfiters = iters_per_step; - unsigned int quarteriters = iters_per_step; - unsigned int eighthiters = iters_per_step; - int thismse; - - const int y_stride = xd->plane[0].pre[0].stride; - const int offset = bestmv->row * y_stride + bestmv->col; - uint8_t *y = xd->plane[0].pre[0].buf + offset; - - int rr = ref_mv->row; - int rc = ref_mv->col; - int br = bestmv->row * 8; - int bc = bestmv->col * 8; - int hstep = 4; - const int minc = MAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); - const int maxc = MIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); - const int minr = MAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); - const int maxr = MIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); - - int tr = br; - int tc = bc; - - // central mv - bestmv->row <<= 3; - bestmv->col <<= 3; - - // calculate central point error - besterr = vfp->vf(y, y_stride, z, src_stride, sse1); - *distortion = besterr; - besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit); - - // TODO(jbb): Each subsequent iteration checks at least one point in - // common with the last iteration could be 2 if diagonal is selected. 
- while (halfiters--) { - // 1/2 pel - FIRST_LEVEL_CHECKS; - // no reason to check the same one again. - if (tr == br && tc == bc) - break; - tr = br; - tc = bc; - } - - // TODO(yaowu): Each subsequent iteration checks at least one point in common - // with the last iteration could be 2 if diagonal is selected. - - // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only - if (forced_stop != 2) { - hstep >>= 1; - while (quarteriters--) { - FIRST_LEVEL_CHECKS; - // no reason to check the same one again. - if (tr == br && tc == bc) - break; - tr = br; - tc = bc; - } - } - - if (allow_hp && vp9_use_mv_hp(ref_mv) && forced_stop == 0) { - hstep >>= 1; - while (eighthiters--) { - FIRST_LEVEL_CHECKS; - // no reason to check the same one again. - if (tr == br && tc == bc) - break; - tr = br; - tc = bc; - } - } - - bestmv->row = br; - bestmv->col = bc; - - if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) || - (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3))) - return INT_MAX; - - return besterr; -} - -int vp9_find_best_sub_pixel_tree(MACROBLOCK *x, +int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp, int error_per_bit, @@ -381,9 +265,9 @@ int vp9_find_best_sub_pixel_tree(MACROBLOCK *x, int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1) { - uint8_t *z = x->plane[0].src.buf; + const uint8_t *z = x->plane[0].src.buf; const int src_stride = x->plane[0].src.stride; - MACROBLOCKD *xd = &x->e_mbd; + const MACROBLOCKD *xd = &x->e_mbd; unsigned int besterr = INT_MAX; unsigned int sse; unsigned int whichdir; @@ -394,7 +278,7 @@ int vp9_find_best_sub_pixel_tree(MACROBLOCK *x, const int y_stride = xd->plane[0].pre[0].stride; const int offset = bestmv->row * y_stride + bestmv->col; - uint8_t *y = xd->plane[0].pre[0].buf + offset; + const uint8_t *y = xd->plane[0].pre[0].buf + offset; int rr = ref_mv->row; int rc = ref_mv->col; @@ -446,6 +330,10 @@ int vp9_find_best_sub_pixel_tree(MACROBLOCK *x, tr = br; tc = bc; } + // These lines ensure static analysis doesn't warn that + // tr and tc aren't used after the above point.
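[Editor's note, not part of the patch] The (void) tr; (void) tc; casts that follow are the standard C idiom for marking values as deliberately unused: they compile to nothing, but tell analyzers and warnings such as GCC's -Wunused-but-set-variable that the final values of tr and tc are intentionally discarded once br/bc hold the result.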
+ (void) tr; + (void) tc; bestmv->row = br; bestmv->col = bc; @@ -460,113 +348,10 @@ int vp9_find_best_sub_pixel_tree(MACROBLOCK *x, #undef DIST /* returns subpixel variance error function */ #define DIST(r, c) \ - vfp->svaf(PRE(r, c), y_stride, SP(c), SP(r), \ + vfp->svaf(pre(y, y_stride, r, c, offset), y_stride, sp(c), sp(r), \ z, src_stride, &sse, second_pred) -int vp9_find_best_sub_pixel_comp_iterative(MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1, - const uint8_t *second_pred, - int w, int h) { - uint8_t *const z = x->plane[0].src.buf; - const int src_stride = x->plane[0].src.stride; - MACROBLOCKD *const xd = &x->e_mbd; - - unsigned int besterr = INT_MAX; - unsigned int sse; - unsigned int whichdir; - unsigned int halfiters = iters_per_step; - unsigned int quarteriters = iters_per_step; - unsigned int eighthiters = iters_per_step; - int thismse; - - DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64); - const int y_stride = xd->plane[0].pre[0].stride; - const int offset = bestmv->row * y_stride + bestmv->col; - uint8_t *const y = xd->plane[0].pre[0].buf + offset; - - int rr = ref_mv->row; - int rc = ref_mv->col; - int br = bestmv->row * 8; - int bc = bestmv->col * 8; - int hstep = 4; - const int minc = MAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); - const int maxc = MIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); - const int minr = MAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); - const int maxr = MIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); - - int tr = br; - int tc = bc; - - // central mv - bestmv->row *= 8; - bestmv->col *= 8; - - // calculate central point error - // TODO(yunqingwang): central pointer error was already calculated in full- - // pixel search, and can be passed in this function. - comp_avg_pred(comp_pred, second_pred, w, h, y, y_stride); - besterr = vfp->vf(comp_pred, w, z, src_stride, sse1); - *distortion = besterr; - besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit); - - // Each subsequent iteration checks at least one point in - // common with the last iteration could be 2 ( if diag selected) - while (halfiters--) { - // 1/2 pel - FIRST_LEVEL_CHECKS; - // no reason to check the same one again. - if (tr == br && tc == bc) - break; - tr = br; - tc = bc; - } - - // Each subsequent iteration checks at least one point in common with - // the last iteration could be 2 ( if diag selected) 1/4 pel - - // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only - if (forced_stop != 2) { - hstep >>= 1; - while (quarteriters--) { - FIRST_LEVEL_CHECKS; - // no reason to check the same one again. - if (tr == br && tc == bc) - break; - tr = br; - tc = bc; - } - } - - if (allow_hp && vp9_use_mv_hp(ref_mv) && forced_stop == 0) { - hstep >>= 1; - while (eighthiters--) { - FIRST_LEVEL_CHECKS; - // no reason to check the same one again. 
- if (tr == br && tc == bc) - break; - tr = br; - tc = bc; - } - } - bestmv->row = br; - bestmv->col = bc; - - if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) || - (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3))) - return INT_MAX; - - return besterr; -} - -int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x, +int vp9_find_best_sub_pixel_comp_tree(const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp, int error_per_bit, @@ -578,9 +363,9 @@ int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x, unsigned int *sse1, const uint8_t *second_pred, int w, int h) { - uint8_t *z = x->plane[0].src.buf; + const uint8_t *z = x->plane[0].src.buf; const int src_stride = x->plane[0].src.stride; - MACROBLOCKD *xd = &x->e_mbd; + const MACROBLOCKD *xd = &x->e_mbd; unsigned int besterr = INT_MAX; unsigned int sse; unsigned int whichdir; @@ -592,7 +377,7 @@ int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x, DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64); const int y_stride = xd->plane[0].pre[0].stride; const int offset = bestmv->row * y_stride + bestmv->col; - uint8_t *y = xd->plane[0].pre[0].buf + offset; + const uint8_t *y = xd->plane[0].pre[0].buf + offset; int rr = ref_mv->row; int rc = ref_mv->col; @@ -652,6 +437,11 @@ int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x, tr = br; tc = bc; } + // These lines ensure static analysis doesn't warn that + // tr and tc aren't used after the above point. + (void) tr; + (void) tc; + bestmv->row = br; bestmv->col = bc; @@ -665,48 +455,34 @@ int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x, #undef MVC #undef PRE #undef DIST -#undef IFMVCV #undef CHECK_BETTER -#undef SP -#define CHECK_BOUNDS(range) \ - {\ - all_in = 1;\ - all_in &= ((br-range) >= x->mv_row_min);\ - all_in &= ((br+range) <= x->mv_row_max);\ - all_in &= ((bc-range) >= x->mv_col_min);\ - all_in &= ((bc+range) <= x->mv_col_max);\ - } +static INLINE int check_bounds(const MACROBLOCK *x, int row, int col, + int range) { + return ((row - range) >= x->mv_row_min) & + ((row + range) <= x->mv_row_max) & + ((col - range) >= x->mv_col_min) & + ((col + range) <= x->mv_col_max); +} -#define CHECK_POINT \ - {\ - if (this_mv.col < x->mv_col_min) continue;\ - if (this_mv.col > x->mv_col_max) continue;\ - if (this_mv.row < x->mv_row_min) continue;\ - if (this_mv.row > x->mv_row_max) continue;\ - } +static INLINE int is_mv_in(const MACROBLOCK *x, const MV *mv) { + return (mv->col >= x->mv_col_min) && (mv->col <= x->mv_col_max) && + (mv->row >= x->mv_row_min) && (mv->row <= x->mv_row_max); +} #define CHECK_BETTER \ {\ - if (thissad < bestsad)\ - {\ + if (thissad < bestsad) {\ if (use_mvcost) \ - thissad += mvsad_err_cost(&this_mv, &fcenter_mv.as_mv, \ - mvjsadcost, mvsadcost, \ - sad_per_bit);\ - if (thissad < bestsad)\ - {\ + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, \ + mvjsadcost, mvsadcost, sad_per_bit);\ + if (thissad < bestsad) {\ bestsad = thissad;\ best_site = i;\ }\ }\ } -#define get_next_chkpts(list, i, n) \ - list[0] = ((i) == 0 ? (n) - 1 : (i) - 1); \ - list[1] = (i); \ - list[2] = ((i) == (n) - 1 ?
0 : (i) + 1); - #define MAX_PATTERN_SCALES 11 #define MAX_PATTERN_CANDIDATES 8 // max number of candidates per scale #define PATTERN_CANDIDATES_REF 3 // number of refinement candidates @@ -715,7 +491,7 @@ int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x, // Each scale can have a different number of candidates and shape of // candidates as indicated in the num_candidates and candidates arrays // passed into this function -static int vp9_pattern_search(MACROBLOCK *x, +static int vp9_pattern_search(const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit, @@ -727,43 +503,39 @@ static int vp9_pattern_search(MACROBLOCK *x, const int num_candidates[MAX_PATTERN_SCALES], const MV candidates[MAX_PATTERN_SCALES] [MAX_PATTERN_CANDIDATES]) { - const MACROBLOCKD* const xd = &x->e_mbd; + const MACROBLOCKD *const xd = &x->e_mbd; static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = { 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, }; int i, j, s, t; - uint8_t *what = x->plane[0].src.buf; - int what_stride = x->plane[0].src.stride; - int in_what_stride = xd->plane[0].pre[0].stride; + const uint8_t *what = x->plane[0].src.buf; + const int what_stride = x->plane[0].src.stride; + const int in_what_stride = xd->plane[0].pre[0].stride; int br, bc; MV this_mv; int bestsad = INT_MAX; int thissad; - uint8_t *base_offset; - uint8_t *this_offset; + const uint8_t *base_offset; + const uint8_t *this_offset; int k = -1; - int all_in; int best_site = -1; - int_mv fcenter_mv; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; int best_init_s = search_param_to_steps[search_param]; - int *mvjsadcost = x->nmvjointsadcost; + const int *mvjsadcost = x->nmvjointsadcost; int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; - fcenter_mv.as_mv.row = center_mv->row >> 3; - fcenter_mv.as_mv.col = center_mv->col >> 3; - // adjust ref_mv to make sure it is within MV range clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); br = ref_mv->row; bc = ref_mv->col; // Work out the start point for the search - base_offset = (uint8_t *)(xd->plane[0].pre[0].buf); + base_offset = xd->plane[0].pre[0].buf; this_offset = base_offset + (br * in_what_stride) + bc; this_mv.row = br; this_mv.col = bc; bestsad = vfp->sdf(what, what_stride, this_offset, in_what_stride, 0x7fffffff) - + mvsad_err_cost(&this_mv, &fcenter_mv.as_mv, + + mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); // Search all possible scales up to the search param around the center point @@ -774,8 +546,7 @@ static int vp9_pattern_search(MACROBLOCK *x, best_init_s = -1; for (t = 0; t <= s; ++t) { best_site = -1; - CHECK_BOUNDS((1 << t)) - if (all_in) { + if (check_bounds(x, br, bc, 1 << t)) { for (i = 0; i < num_candidates[t]; i++) { this_mv.row = br + candidates[t][i].row; this_mv.col = bc + candidates[t][i].col; @@ -789,7 +560,8 @@ static int vp9_pattern_search(MACROBLOCK *x, for (i = 0; i < num_candidates[t]; i++) { this_mv.row = br + candidates[t][i].row; this_mv.col = bc + candidates[t][i].col; - CHECK_POINT + if (!is_mv_in(x, &this_mv)) + continue; this_offset = base_offset + (this_mv.row * in_what_stride) + this_mv.col; thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, @@ -818,8 +590,7 @@ static int vp9_pattern_search(MACROBLOCK *x, do { // No need to search all 6 points the 1st time if initial search was used if (!do_init_search || s != best_init_s) { - CHECK_BOUNDS((1 << s)) - if (all_in) { + if (check_bounds(x, br, bc, 1 << s)) { for (i = 0; i < num_candidates[s]; i++) {
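/* [Editor's note, not part of the patch] candidates[s][i] holds offsets that
   are already scaled for scale s (compare hex_candidates below, whose widest
   ring uses values like {1024, 0}), so this loop only translates the fixed
   pattern to the current best position (br, bc); the check_bounds() guard on
   this branch has already verified that the whole ring lies inside the
   motion-vector window, which is why no per-point is_mv_in() test is needed
   on this path. */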
this_mv.row = br + candidates[s][i].row; this_mv.col = bc + candidates[s][i].col; @@ -833,7 +604,8 @@ static int vp9_pattern_search(MACROBLOCK *x, for (i = 0; i < num_candidates[s]; i++) { this_mv.row = br + candidates[s][i].row; this_mv.col = bc + candidates[s][i].col; - CHECK_POINT + if (!is_mv_in(x, &this_mv)) + continue; this_offset = base_offset + (this_mv.row * in_what_stride) + this_mv.col; thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, @@ -854,10 +626,11 @@ static int vp9_pattern_search(MACROBLOCK *x, do { int next_chkpts_indices[PATTERN_CANDIDATES_REF]; best_site = -1; - CHECK_BOUNDS((1 << s)) + next_chkpts_indices[0] = (k == 0) ? num_candidates[s] - 1 : k - 1; + next_chkpts_indices[1] = k; + next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 0 : k + 1; - get_next_chkpts(next_chkpts_indices, k, num_candidates[s]); - if (all_in) { + if (check_bounds(x, br, bc, 1 << s)) { for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { this_mv.row = br + candidates[s][next_chkpts_indices[i]].row; this_mv.col = bc + candidates[s][next_chkpts_indices[i]].col; @@ -871,7 +644,8 @@ static int vp9_pattern_search(MACROBLOCK *x, for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { this_mv.row = br + candidates[s][next_chkpts_indices[i]].row; this_mv.col = bc + candidates[s][next_chkpts_indices[i]].col; - CHECK_POINT + if (!is_mv_in(x, &this_mv)) + continue; this_offset = base_offset + (this_mv.row * (in_what_stride)) + this_mv.col; thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, @@ -892,18 +666,15 @@ static int vp9_pattern_search(MACROBLOCK *x, // Check 4 1-away neighbors if do_refine is true. // For most well-designed schemes do_refine will not be necessary. if (do_refine) { - static const MV neighbors[4] = { - {0, -1}, { -1, 0}, {1, 0}, {0, 1}, - }; + static const MV neighbors[4] = { {0, -1}, { -1, 0}, {1, 0}, {0, 1} }; for (j = 0; j < 16; j++) { best_site = -1; - CHECK_BOUNDS(1) - if (all_in) { + if (check_bounds(x, br, bc, 1)) { for (i = 0; i < 4; i++) { this_mv.row = br + neighbors[i].row; this_mv.col = bc + neighbors[i].col; - this_offset = base_offset + (this_mv.row * (in_what_stride)) + - this_mv.col; + this_offset = base_offset + this_mv.row * in_what_stride + + this_mv.col; thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad); CHECK_BETTER @@ -912,14 +683,15 @@ static int vp9_pattern_search(MACROBLOCK *x, for (i = 0; i < 4; i++) { this_mv.row = br + neighbors[i].row; this_mv.col = bc + neighbors[i].col; - CHECK_POINT - this_offset = base_offset + (this_mv.row * (in_what_stride)) + - this_mv.col; + if (!is_mv_in(x, &this_mv)) + continue; + this_offset = base_offset + this_mv.row * in_what_stride + + this_mv.col; thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad); CHECK_BETTER } - } + } if (best_site == -1) { break; @@ -933,22 +705,54 @@ static int vp9_pattern_search(MACROBLOCK *x, best_mv->row = br; best_mv->col = bc; - this_offset = base_offset + (best_mv->row * in_what_stride) + - best_mv->col; this_mv.row = best_mv->row * 8; this_mv.col = best_mv->col * 8; - if (bestsad == INT_MAX) - return INT_MAX; + return bestsad; +} - return vfp->vf(what, what_stride, this_offset, in_what_stride, - (unsigned int *)&bestsad) + - use_mvcost ? 
mv_err_cost(&this_mv, center_mv, - x->nmvjointcost, x->mvcost, x->errorperbit) - : 0; +int vp9_get_mvpred_var(const MACROBLOCK *x, + const MV *best_mv, const MV *center_mv, + const vp9_variance_fn_ptr_t *vfp, + int use_mvcost) { + unsigned int unused; + + const MACROBLOCKD *const xd = &x->e_mbd; + const uint8_t *what = x->plane[0].src.buf; + const int what_stride = x->plane[0].src.stride; + const int in_what_stride = xd->plane[0].pre[0].stride; + const uint8_t *base_offset = xd->plane[0].pre[0].buf; + const uint8_t *this_offset = &base_offset[best_mv->row * in_what_stride + + best_mv->col]; + const MV mv = {best_mv->row * 8, best_mv->col * 8}; + return vfp->vf(what, what_stride, this_offset, in_what_stride, &unused) + + (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, + x->mvcost, x->errorperbit) : 0); } +int vp9_get_mvpred_av_var(const MACROBLOCK *x, + MV *best_mv, + const MV *center_mv, + const uint8_t *second_pred, + const vp9_variance_fn_ptr_t *vfp, + int use_mvcost) { + unsigned int bestsad; + MV this_mv; + const MACROBLOCKD *const xd = &x->e_mbd; + const uint8_t *what = x->plane[0].src.buf; + const int what_stride = x->plane[0].src.stride; + const int in_what_stride = xd->plane[0].pre[0].stride; + const uint8_t *base_offset = xd->plane[0].pre[0].buf; + const uint8_t *this_offset = base_offset + (best_mv->row * in_what_stride) + + best_mv->col; + this_mv.row = best_mv->row * 8; + this_mv.col = best_mv->col * 8; + return vfp->svaf(this_offset, in_what_stride, 0, 0, what, what_stride, + &bestsad, second_pred) + + (use_mvcost ? mv_err_cost(&this_mv, center_mv, x->nmvjointcost, + x->mvcost, x->errorperbit) : 0); +} -int vp9_hex_search(MACROBLOCK *x, +int vp9_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit, @@ -976,14 +780,13 @@ int vp9_hex_search(MACROBLOCK *x, {{-512, -1024}, {512, -1024}, {1024, 0}, {512, 1024}, { -512, 1024}, { -1024, 0}}, }; - return - vp9_pattern_search(x, ref_mv, search_param, sad_per_bit, - do_init_search, 0, vfp, use_mvcost, - center_mv, best_mv, - hex_num_candidates, hex_candidates); + return vp9_pattern_search(x, ref_mv, search_param, sad_per_bit, + do_init_search, 0, vfp, use_mvcost, + center_mv, best_mv, + hex_num_candidates, hex_candidates); } -int vp9_bigdia_search(MACROBLOCK *x, +int vp9_bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit, @@ -1024,7 +827,7 @@ int vp9_bigdia_search(MACROBLOCK *x, bigdia_num_candidates, bigdia_candidates); } -int vp9_square_search(MACROBLOCK *x, +int vp9_square_search(const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit, @@ -1065,26 +868,153 @@ int vp9_square_search(MACROBLOCK *x, square_num_candidates, square_candidates); }; -#undef CHECK_BOUNDS -#undef CHECK_POINT +int vp9_fast_hex_search(const MACROBLOCK *x, + MV *ref_mv, + int search_param, + int sad_per_bit, + int do_init_search, // must be zero for fast_hex + const vp9_variance_fn_ptr_t *vfp, + int use_mvcost, + const MV *center_mv, + MV *best_mv) { + return vp9_hex_search(x, ref_mv, MAX(MAX_MVSEARCH_STEPS - 2, search_param), + sad_per_bit, do_init_search, vfp, use_mvcost, + center_mv, best_mv); +} + +int vp9_fast_dia_search(const MACROBLOCK *x, + MV *ref_mv, + int search_param, + int sad_per_bit, + int do_init_search, + const vp9_variance_fn_ptr_t *vfp, + int use_mvcost, + const MV *center_mv, + MV *best_mv) { + return vp9_bigdia_search(x, ref_mv, MAX(MAX_MVSEARCH_STEPS - 2, search_param), + sad_per_bit, do_init_search, vfp, use_mvcost, + center_mv, best_mv); +} + #undef CHECK_BETTER 
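[Editor's note] After this refactor the full-pixel searches above return their raw SAD-based score and leave the variance-based re-ranking to the caller via the new vp9_get_mvpred_var() helper; vp9_full_pixel_diamond() below adopts exactly this pattern. A minimal calling sketch, using only names that appear in this diff (step_param, sadpb, fn_ptr and the MVs stand for whatever the caller already has):

  // Sketch only, not part of the patch: run a full-pixel diamond search,
  // then convert the SAD-based winner into a variance-based cost.
  MV temp_mv;
  int num00 = 0;
  int cost = vp9_diamond_search_sadx4(x, &mvp_full, &temp_mv, step_param,
                                      sadpb, &num00, fn_ptr,
                                      x->nmvjointcost, x->mvcost, &ref_mv);
  if (cost < INT_MAX)  // INT_MAX still marks a failed search
    cost = vp9_get_mvpred_var(x, &temp_mv, &ref_mv, fn_ptr, 1);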
-int vp9_diamond_search_sad_c(MACROBLOCK *x, - int_mv *ref_mv, int_mv *best_mv, +int vp9_full_range_search_c(const MACROBLOCK *x, MV *ref_mv, MV *best_mv, + int search_param, int sad_per_bit, int *num00, + const vp9_variance_fn_ptr_t *fn_ptr, + int *mvjcost, int *mvcost[2], + const MV *center_mv) { + const MACROBLOCKD *const xd = &x->e_mbd; + const uint8_t *what = x->plane[0].src.buf; + const int what_stride = x->plane[0].src.stride; + const uint8_t *in_what; + const int in_what_stride = xd->plane[0].pre[0].stride; + MV this_mv; + + unsigned int bestsad = INT_MAX; + int ref_row, ref_col; + + unsigned int thissad; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; + + const int *mvjsadcost = x->nmvjointsadcost; + int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; + + int tr, tc; + int best_tr = 0; + int best_tc = 0; + int range = 64; + + int start_col, end_col; + int start_row, end_row; + int i; + + clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); + ref_row = ref_mv->row; + ref_col = ref_mv->col; + *num00 = 11; + best_mv->row = ref_row; + best_mv->col = ref_col; + + // Work out the start point for the search + in_what = xd->plane[0].pre[0].buf + ref_row * in_what_stride + ref_col; + + // Check the starting position + bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + + mvsad_err_cost(best_mv, &fcenter_mv, + mvjsadcost, mvsadcost, sad_per_bit); + + start_row = MAX(-range, x->mv_row_min - ref_row); + start_col = MAX(-range, x->mv_col_min - ref_col); + end_row = MIN(range, x->mv_row_max - ref_row); + end_col = MIN(range, x->mv_col_max - ref_col); + + for (tr = start_row; tr <= end_row; ++tr) { + for (tc = start_col; tc <= end_col; tc += 4) { + if ((tc + 3) <= end_col) { + unsigned int sad_array[4]; + unsigned char const *addr_ref[4]; + for (i = 0; i < 4; ++i) + addr_ref[i] = in_what + tr * in_what_stride + tc + i; + + fn_ptr->sdx4df(what, what_stride, addr_ref, in_what_stride, sad_array); + + for (i = 0; i < 4; ++i) { + if (sad_array[i] < bestsad) { + this_mv.row = ref_row + tr; + this_mv.col = ref_col + tc + i; + thissad = sad_array[i] + + mvsad_err_cost(&this_mv, &fcenter_mv, + mvjsadcost, mvsadcost, sad_per_bit); + if (thissad < bestsad) { + bestsad = thissad; + best_tr = tr; + best_tc = tc + i; + } + } + } + } else { + for (i = 0; i < end_col - tc; ++i) { + const uint8_t *check_here = in_what + tr * in_what_stride + tc + i; + thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, + bestsad); + + if (thissad < bestsad) { + this_mv.row = ref_row + tr; + this_mv.col = ref_col + tc + i; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, + mvjsadcost, mvsadcost, sad_per_bit); + + if (thissad < bestsad) { + bestsad = thissad; + best_tr = tr; + best_tc = tc + i; + } + } + } + } + } + } + best_mv->row += best_tr; + best_mv->col += best_tc; + return bestsad; +} + +int vp9_diamond_search_sad_c(const MACROBLOCK *x, + MV *ref_mv, MV *best_mv, int search_param, int sad_per_bit, int *num00, - vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, - int *mvcost[2], int_mv *center_mv) { + const vp9_variance_fn_ptr_t *fn_ptr, + int *mvjcost, int *mvcost[2], + const MV *center_mv) { int i, j, step; - const MACROBLOCKD* const xd = &x->e_mbd; - uint8_t *what = x->plane[0].src.buf; - int what_stride = x->plane[0].src.stride; - uint8_t *in_what; - int in_what_stride = xd->plane[0].pre[0].stride; - uint8_t *best_address; + const MACROBLOCKD *const xd = &x->e_mbd; + const uint8_t *what = x->plane[0].src.buf; + const int 
what_stride = x->plane[0].src.stride; + const uint8_t *in_what; + const int in_what_stride = xd->plane[0].pre[0].stride; + const uint8_t *best_address; - int tot_steps; - int_mv this_mv; + MV this_mv; int bestsad = INT_MAX; int best_site = 0; @@ -1092,63 +1022,56 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x, int ref_row, ref_col; int this_row_offset, this_col_offset; - search_site *ss; - uint8_t *check_here; + // search_param determines the length of the initial step and hence the number + // of iterations + // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = + // (MAX_FIRST_STEP/4) pel... etc. + const search_site *const ss = &x->ss[search_param * x->searches_per_step]; + const int tot_steps = (x->ss_count / x->searches_per_step) - search_param; + int thissad; - int_mv fcenter_mv; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - int *mvjsadcost = x->nmvjointsadcost; + const int *mvjsadcost = x->nmvjointsadcost; int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - - clamp_mv(&ref_mv->as_mv, - x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - ref_row = ref_mv->as_mv.row; - ref_col = ref_mv->as_mv.col; + clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); + ref_row = ref_mv->row; + ref_col = ref_mv->col; *num00 = 0; - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; + best_mv->row = ref_row; + best_mv->col = ref_col; // Work out the start point for the search - in_what = (uint8_t *)(xd->plane[0].pre[0].buf + - (ref_row * (xd->plane[0].pre[0].stride)) + ref_col); + in_what = xd->plane[0].pre[0].buf + ref_row * in_what_stride + ref_col; best_address = in_what; // Check the starting position bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) - + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv, + + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); - // search_param determines the length of the initial step and hence the number - // of iterations - // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = - // (MAX_FIRST_STEP/4) pel... etc. 
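/* [Editor's note, not part of the patch] The comment block being moved here
   documents the indexing it precedes: after the zero vector at ss[0], x->ss[]
   stores searches_per_step sites per step with the widest step first, so
   starting at &x->ss[search_param * x->searches_per_step] (the scan index i
   begins at 1) skips the widest rings, and tot_steps = ss_count /
   searches_per_step - search_param counts the steps that remain. */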
- ss = &x->ss[search_param * x->searches_per_step]; - tot_steps = (x->ss_count / x->searches_per_step) - search_param; - i = 1; for (step = 0; step < tot_steps; step++) { for (j = 0; j < x->searches_per_step; j++) { // Trap illegal vectors - this_row_offset = best_mv->as_mv.row + ss[i].mv.row; - this_col_offset = best_mv->as_mv.col + ss[i].mv.col; + this_row_offset = best_mv->row + ss[i].mv.row; + this_col_offset = best_mv->col + ss[i].mv.col; if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) && (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) { - check_here = ss[i].offset + best_address; + const uint8_t *const check_here = ss[i].offset + best_address; thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad); if (thissad < bestsad) { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.row = this_row_offset; + this_mv.col = this_col_offset; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (thissad < bestsad) { @@ -1162,14 +1085,14 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x, } if (best_site != last_site) { - best_mv->as_mv.row += ss[best_site].mv.row; - best_mv->as_mv.col += ss[best_site].mv.col; + best_mv->row += ss[best_site].mv.row; + best_mv->col += ss[best_site].mv.col; best_address += ss[best_site].offset; last_site = best_site; #if defined(NEW_DIAMOND_SEARCH) while (1) { - this_row_offset = best_mv->as_mv.row + ss[best_site].mv.row; - this_col_offset = best_mv->as_mv.col + ss[best_site].mv.col; + this_row_offset = best_mv->row + ss[best_site].mv.row; + this_col_offset = best_mv->col + ss[best_site].mv.col; if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) && (this_row_offset > x->mv_row_min) && @@ -1178,14 +1101,14 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x, thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad); if (thissad < bestsad) { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.row = this_row_offset; + this_mv.col = this_col_offset; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (thissad < bestsad) { bestsad = thissad; - best_mv->as_mv.row += ss[best_site].mv.row; - best_mv->as_mv.col += ss[best_site].mv.col; + best_mv->row += ss[best_site].mv.row; + best_mv->col += ss[best_site].mv.col; best_address += ss[best_site].offset; continue; } @@ -1198,35 +1121,25 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x, (*num00)++; } } - - this_mv.as_mv.row = best_mv->as_mv.row * 8; - this_mv.as_mv.col = best_mv->as_mv.col * 8; - - if (bestsad == INT_MAX) - return INT_MAX; - - return fn_ptr->vf(what, what_stride, best_address, in_what_stride, - (unsigned int *)(&thissad)) + - mv_err_cost(&this_mv.as_mv, ¢er_mv->as_mv, - mvjcost, mvcost, x->errorperbit); + return bestsad; } -int vp9_diamond_search_sadx4(MACROBLOCK *x, - int_mv *ref_mv, int_mv *best_mv, int search_param, +int vp9_diamond_search_sadx4(const MACROBLOCK *x, + MV *ref_mv, MV *best_mv, int search_param, int sad_per_bit, int *num00, - vp9_variance_fn_ptr_t *fn_ptr, - int *mvjcost, int *mvcost[2], int_mv *center_mv) { + const vp9_variance_fn_ptr_t *fn_ptr, + int *mvjcost, int *mvcost[2], + const MV *center_mv) { int i, j, step; - const MACROBLOCKD* const xd = &x->e_mbd; + const MACROBLOCKD *const xd = &x->e_mbd; uint8_t *what = 
x->plane[0].src.buf; - int what_stride = x->plane[0].src.stride; - uint8_t *in_what; - int in_what_stride = xd->plane[0].pre[0].stride; - uint8_t *best_address; + const int what_stride = x->plane[0].src.stride; + const uint8_t *in_what; + const int in_what_stride = xd->plane[0].pre[0].stride; + const uint8_t *best_address; - int tot_steps; - int_mv this_mv; + MV this_mv; unsigned int bestsad = INT_MAX; int best_site = 0; @@ -1236,44 +1149,37 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x, int ref_col; int this_row_offset; int this_col_offset; - search_site *ss; - uint8_t *check_here; + // search_param determines the length of the initial step and hence the number + // of iterations. + // 0 = initial step (MAX_FIRST_STEP) pel + // 1 = (MAX_FIRST_STEP/2) pel, + // 2 = (MAX_FIRST_STEP/4) pel... + const search_site *ss = &x->ss[search_param * x->searches_per_step]; + const int tot_steps = (x->ss_count / x->searches_per_step) - search_param; + unsigned int thissad; - int_mv fcenter_mv; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - int *mvjsadcost = x->nmvjointsadcost; + const int *mvjsadcost = x->nmvjointsadcost; int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - - clamp_mv(&ref_mv->as_mv, - x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - ref_row = ref_mv->as_mv.row; - ref_col = ref_mv->as_mv.col; + clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); + ref_row = ref_mv->row; + ref_col = ref_mv->col; *num00 = 0; - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; + best_mv->row = ref_row; + best_mv->col = ref_col; // Work out the start point for the search - in_what = (uint8_t *)(xd->plane[0].pre[0].buf + - (ref_row * (xd->plane[0].pre[0].stride)) + ref_col); + in_what = xd->plane[0].pre[0].buf + ref_row * in_what_stride + ref_col; best_address = in_what; // Check the starting position bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) - + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv, + + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); - // search_param determines the length of the initial step and hence the number - // of iterations. - // 0 = initial step (MAX_FIRST_STEP) pel - // 1 = (MAX_FIRST_STEP/2) pel, - // 2 = (MAX_FIRST_STEP/4) pel... - ss = &x->ss[search_param * x->searches_per_step]; - tot_steps = (x->ss_count / x->searches_per_step) - search_param; - i = 1; for (step = 0; step < tot_steps; step++) { @@ -1281,10 +1187,10 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x, // All_in is true if every one of the points we are checking is within // the bounds of the image.
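/* [Editor's note, not part of the patch] When all_in holds, the loop below
   batches the four candidate sites into one sdx4df() call, the vectorized
   helper that returns four SADs at once (vp9_full_range_search_c above uses
   it the same way); only when a site could fall outside the frame does the
   code take the per-point sdf() path with explicit bounds checks. */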
- all_in &= ((best_mv->as_mv.row + ss[i].mv.row) > x->mv_row_min); - all_in &= ((best_mv->as_mv.row + ss[i + 1].mv.row) < x->mv_row_max); - all_in &= ((best_mv->as_mv.col + ss[i + 2].mv.col) > x->mv_col_min); - all_in &= ((best_mv->as_mv.col + ss[i + 3].mv.col) < x->mv_col_max); + all_in &= ((best_mv->row + ss[i].mv.row) > x->mv_row_min); + all_in &= ((best_mv->row + ss[i + 1].mv.row) < x->mv_row_max); + all_in &= ((best_mv->col + ss[i + 2].mv.col) > x->mv_col_min); + all_in &= ((best_mv->col + ss[i + 3].mv.col) < x->mv_col_max); // If all the pixels are within the bounds we don't check whether the // search point is valid in this loop, otherwise we check each point @@ -1303,9 +1209,9 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x, for (t = 0; t < 4; t++, i++) { if (sad_array[t] < bestsad) { - this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row; - this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col; - sad_array[t] += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.row = best_mv->row + ss[i].mv.row; + this_mv.col = best_mv->col + ss[i].mv.col; + sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (sad_array[t] < bestsad) { @@ -1318,21 +1224,21 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x, } else { for (j = 0; j < x->searches_per_step; j++) { // Trap illegal vectors - this_row_offset = best_mv->as_mv.row + ss[i].mv.row; - this_col_offset = best_mv->as_mv.col + ss[i].mv.col; + this_row_offset = best_mv->row + ss[i].mv.row; + this_col_offset = best_mv->col + ss[i].mv.col; if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) && (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) { - check_here = ss[i].offset + best_address; + const uint8_t *const check_here = ss[i].offset + best_address; thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad); if (thissad < bestsad) { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.row = this_row_offset; + this_mv.col = this_col_offset; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (thissad < bestsad) { @@ -1345,14 +1251,14 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x, } } if (best_site != last_site) { - best_mv->as_mv.row += ss[best_site].mv.row; - best_mv->as_mv.col += ss[best_site].mv.col; + best_mv->row += ss[best_site].mv.row; + best_mv->col += ss[best_site].mv.col; best_address += ss[best_site].offset; last_site = best_site; #if defined(NEW_DIAMOND_SEARCH) while (1) { - this_row_offset = best_mv->as_mv.row + ss[best_site].mv.row; - this_col_offset = best_mv->as_mv.col + ss[best_site].mv.col; + this_row_offset = best_mv->row + ss[best_site].mv.row; + this_col_offset = best_mv->col + ss[best_site].mv.col; if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) && (this_row_offset > x->mv_row_min) && @@ -1361,14 +1267,14 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x, thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad); if (thissad < bestsad) { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.row = this_row_offset; + this_mv.col = this_col_offset; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (thissad < bestsad) { bestsad = thissad; - best_mv->as_mv.row += ss[best_site].mv.row; - best_mv->as_mv.col += 
ss[best_site].mv.col; + best_mv->row += ss[best_site].mv.row; + best_mv->col += ss[best_site].mv.col; best_address += ss[best_site].offset; continue; } @@ -1381,46 +1287,35 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x, (*num00)++; } } - - this_mv.as_mv.row = best_mv->as_mv.row * 8; - this_mv.as_mv.col = best_mv->as_mv.col * 8; - - if (bestsad == INT_MAX) - return INT_MAX; - - return fn_ptr->vf(what, what_stride, best_address, in_what_stride, - (unsigned int *)(&thissad)) + - mv_err_cost(&this_mv.as_mv, ¢er_mv->as_mv, - mvjcost, mvcost, x->errorperbit); + return bestsad; } /* do_refine: If last step (1-away) of n-step search doesn't pick the center point as the best match, we will do a final 1-away diamond refining search */ -int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x, - int_mv *mvp_full, int step_param, - int sadpb, int further_steps, - int do_refine, vp9_variance_fn_ptr_t *fn_ptr, - int_mv *ref_mv, int_mv *dst_mv) { - int_mv temp_mv; - int thissme, n, num00; +int vp9_full_pixel_diamond(const VP9_COMP *cpi, MACROBLOCK *x, + MV *mvp_full, int step_param, + int sadpb, int further_steps, int do_refine, + const vp9_variance_fn_ptr_t *fn_ptr, + const MV *ref_mv, MV *dst_mv) { + MV temp_mv; + int thissme, n, num00 = 0; int bestsme = cpi->diamond_search_sad(x, mvp_full, &temp_mv, - step_param, sadpb, &num00, + step_param, sadpb, &n, fn_ptr, x->nmvjointcost, x->mvcost, ref_mv); - dst_mv->as_int = temp_mv.as_int; - - n = num00; - num00 = 0; + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1); + *dst_mv = temp_mv; - /* If there won't be more n-step search, check to see if refining search is - * needed. */ + // If there won't be more n-step search, check to see if refining search is + // needed. if (n > further_steps) do_refine = 0; while (n < further_steps) { - n++; + ++n; if (num00) { num00--; @@ -1429,187 +1324,126 @@ int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x, step_param + n, sadpb, &num00, fn_ptr, x->nmvjointcost, x->mvcost, ref_mv); + if (thissme < INT_MAX) + thissme = vp9_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1); - /* check to see if refining search is needed. */ - if (num00 > (further_steps - n)) + // check to see if refining search is needed. 
+ if (num00 > further_steps - n) do_refine = 0; if (thissme < bestsme) { bestsme = thissme; - dst_mv->as_int = temp_mv.as_int; + *dst_mv = temp_mv; } } } - /* final 1-away diamond refining search */ - if (do_refine == 1) { - int search_range = 8; - int_mv best_mv; - best_mv.as_int = dst_mv->as_int; + // final 1-away diamond refining search + if (do_refine) { + const int search_range = 8; + MV best_mv = *dst_mv; thissme = cpi->refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr, x->nmvjointcost, x->mvcost, ref_mv); - + if (thissme < INT_MAX) + thissme = vp9_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1); if (thissme < bestsme) { bestsme = thissme; - dst_mv->as_int = best_mv.as_int; + *dst_mv = best_mv; } } return bestsme; } -int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv, +int vp9_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv, int sad_per_bit, int distance, - vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, - int *mvcost[2], - int_mv *center_mv, int n) { - const MACROBLOCKD* const xd = &x->e_mbd; - uint8_t *what = x->plane[0].src.buf; - int what_stride = x->plane[0].src.stride; - uint8_t *in_what; - int in_what_stride = xd->plane[0].pre[0].stride; - int mv_stride = xd->plane[0].pre[0].stride; - uint8_t *bestaddress; - int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0]; - int_mv this_mv; - int bestsad = INT_MAX; + const vp9_variance_fn_ptr_t *fn_ptr, + int *mvjcost, int *mvcost[2], + const MV *center_mv, MV *best_mv) { int r, c; - - uint8_t *check_here; - int thissad; - - int ref_row = ref_mv->as_mv.row; - int ref_col = ref_mv->as_mv.col; - - int row_min = ref_row - distance; - int row_max = ref_row + distance; - int col_min = ref_col - distance; - int col_max = ref_col + distance; - int_mv fcenter_mv; - - int *mvjsadcost = x->nmvjointsadcost; + const MACROBLOCKD *const xd = &x->e_mbd; + const uint8_t *const what = x->plane[0].src.buf; + const int what_stride = x->plane[0].src.stride; + const uint8_t *const in_what = xd->plane[0].pre[0].buf; + const int in_what_stride = xd->plane[0].pre[0].stride; + const int row_min = MAX(ref_mv->row - distance, x->mv_row_min); + const int row_max = MIN(ref_mv->row + distance, x->mv_row_max); + const int col_min = MAX(ref_mv->col - distance, x->mv_col_min); + const int col_max = MIN(ref_mv->col + distance, x->mv_col_max); + const int *mvjsadcost = x->nmvjointsadcost; int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; - - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - - // Work out the mid point for the search - in_what = xd->plane[0].pre[0].buf; - bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col; - - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; - - // Baseline value at the centre - bestsad = fn_ptr->sdf(what, what_stride, bestaddress, - in_what_stride, 0x7fffffff) - + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv, - mvjsadcost, mvsadcost, sad_per_bit); - - // Apply further limits to prevent us looking using vectors that stretch - // beyond the UMV border - col_min = MAX(col_min, x->mv_col_min); - col_max = MIN(col_max, x->mv_col_max); - row_min = MAX(row_min, x->mv_row_min); - row_max = MIN(row_max, x->mv_row_max); - - for (r = row_min; r < row_max; r++) { - this_mv.as_mv.row = r; - check_here = r * mv_stride + in_what + col_min; - - for (c = col_min; c < col_max; c++) { - thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, - bestsad); - - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv.as_mv, 
&fcenter_mv.as_mv, - mvjsadcost, mvsadcost, sad_per_bit); - - if (thissad < bestsad) { - bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; + const uint8_t *best_address = &in_what[ref_mv->row * in_what_stride + + ref_mv->col]; + int best_sad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, + 0x7fffffff) + + mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); + *best_mv = *ref_mv; + + for (r = row_min; r < row_max; ++r) { + for (c = col_min; c < col_max; ++c) { + const MV this_mv = {r, c}; + const uint8_t *check_here = &in_what[r * in_what_stride + c]; + const int sad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, + best_sad) + + mvsad_err_cost(&this_mv, &fcenter_mv, + mvjsadcost, mvsadcost, sad_per_bit); + + if (sad < best_sad) { + best_sad = sad; + *best_mv = this_mv; } - - check_here++; } } - - this_mv.as_mv.row = best_mv->as_mv.row * 8; - this_mv.as_mv.col = best_mv->as_mv.col * 8; - - if (bestsad < INT_MAX) - return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, - (unsigned int *)(&thissad)) + - mv_err_cost(&this_mv.as_mv, ¢er_mv->as_mv, - mvjcost, mvcost, x->errorperbit); - else - return INT_MAX; + return best_sad; } -int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv, +int vp9_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv, int sad_per_bit, int distance, - vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, - int *mvcost[2], int_mv *center_mv, int n) { - const MACROBLOCKD* const xd = &x->e_mbd; - uint8_t *what = x->plane[0].src.buf; - int what_stride = x->plane[0].src.stride; - uint8_t *in_what; - int in_what_stride = xd->plane[0].pre[0].stride; - int mv_stride = xd->plane[0].pre[0].stride; - uint8_t *bestaddress; - int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0]; - int_mv this_mv; + const vp9_variance_fn_ptr_t *fn_ptr, + int *mvjcost, int *mvcost[2], + const MV *center_mv, MV *best_mv) { + const MACROBLOCKD *const xd = &x->e_mbd; + const uint8_t *const what = x->plane[0].src.buf; + const int what_stride = x->plane[0].src.stride; + const uint8_t *const in_what = xd->plane[0].pre[0].buf; + const int in_what_stride = xd->plane[0].pre[0].stride; + MV this_mv; unsigned int bestsad = INT_MAX; int r, c; - - uint8_t *check_here; unsigned int thissad; + int ref_row = ref_mv->row; + int ref_col = ref_mv->col; - int ref_row = ref_mv->as_mv.row; - int ref_col = ref_mv->as_mv.col; - - int row_min = ref_row - distance; - int row_max = ref_row + distance; - int col_min = ref_col - distance; - int col_max = ref_col + distance; - + // Apply further limits to prevent us looking using vectors that stretch + // beyond the UMV border + const int row_min = MAX(ref_row - distance, x->mv_row_min); + const int row_max = MIN(ref_row + distance, x->mv_row_max); + const int col_min = MAX(ref_col - distance, x->mv_col_min); + const int col_max = MIN(ref_col + distance, x->mv_col_max); unsigned int sad_array[3]; - int_mv fcenter_mv; - - int *mvjsadcost = x->nmvjointsadcost; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; + const int *mvjsadcost = x->nmvjointsadcost; int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - // Work out the mid point for the search - in_what = xd->plane[0].pre[0].buf; - bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col; + const uint8_t 
*bestaddress = &in_what[ref_row * in_what_stride + ref_col]; - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; + best_mv->row = ref_row; + best_mv->col = ref_col; // Baseline value at the centre bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) - + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv, + + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); - // Apply further limits to prevent us looking using vectors that stretch - // beyond the UMV border - col_min = MAX(col_min, x->mv_col_min); - col_max = MIN(col_max, x->mv_col_max); - row_min = MAX(row_min, x->mv_row_min); - row_max = MIN(row_max, x->mv_row_max); - for (r = row_min; r < row_max; r++) { - this_mv.as_mv.row = r; - check_here = r * mv_stride + in_what + col_min; + const uint8_t *check_here = &in_what[r * in_what_stride + col_min]; + this_mv.row = r; c = col_min; - while ((c + 2) < col_max) { + while ((c + 2) < col_max && fn_ptr->sdx3f != NULL) { int i; fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array); @@ -1618,18 +1452,16 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv, thissad = sad_array[i]; if (thissad < bestsad) { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.col = c; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (thissad < bestsad) { bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; + best_mv->row = r; + best_mv->col = c; } } - check_here++; c++; } @@ -1640,15 +1472,14 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv, bestsad); if (thissad < bestsad) { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.col = c; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (thissad < bestsad) { bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; + best_mv->row = r; + best_mv->col = c; } } @@ -1656,80 +1487,54 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv, c++; } } - - this_mv.as_mv.row = best_mv->as_mv.row * 8; - this_mv.as_mv.col = best_mv->as_mv.col * 8; - - if (bestsad < INT_MAX) - return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, - (unsigned int *)(&thissad)) + - mv_err_cost(&this_mv.as_mv, ¢er_mv->as_mv, - mvjcost, mvcost, x->errorperbit); - else - return INT_MAX; + return bestsad; } -int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv, +int vp9_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv, int sad_per_bit, int distance, - vp9_variance_fn_ptr_t *fn_ptr, + const vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], - int_mv *center_mv, int n) { - const MACROBLOCKD* const xd = &x->e_mbd; - uint8_t *what = x->plane[0].src.buf; - int what_stride = x->plane[0].src.stride; - uint8_t *in_what; - int in_what_stride = xd->plane[0].pre[0].stride; - int mv_stride = xd->plane[0].pre[0].stride; - uint8_t *bestaddress; - int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0]; - int_mv this_mv; + const MV *center_mv, MV *best_mv) { + const MACROBLOCKD *const xd = &x->e_mbd; + const uint8_t *const what = x->plane[0].src.buf; + const int what_stride = x->plane[0].src.stride; + const uint8_t *const in_what = xd->plane[0].pre[0].buf; + const int in_what_stride = xd->plane[0].pre[0].stride; + MV this_mv; unsigned int bestsad = INT_MAX; int r, c; - - uint8_t *check_here; unsigned int thissad; + int ref_row = 
ref_mv->row; + int ref_col = ref_mv->col; - int ref_row = ref_mv->as_mv.row; - int ref_col = ref_mv->as_mv.col; - - int row_min = ref_row - distance; - int row_max = ref_row + distance; - int col_min = ref_col - distance; - int col_max = ref_col + distance; - + // Apply further limits to prevent us looking using vectors that stretch + // beyond the UMV border + const int row_min = MAX(ref_row - distance, x->mv_row_min); + const int row_max = MIN(ref_row + distance, x->mv_row_max); + const int col_min = MAX(ref_col - distance, x->mv_col_min); + const int col_max = MIN(ref_col + distance, x->mv_col_max); DECLARE_ALIGNED_ARRAY(16, uint32_t, sad_array8, 8); unsigned int sad_array[3]; - int_mv fcenter_mv; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - int *mvjsadcost = x->nmvjointsadcost; + const int *mvjsadcost = x->nmvjointsadcost; int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - // Work out the mid point for the search - in_what = xd->plane[0].pre[0].buf; - bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col; + const uint8_t *bestaddress = &in_what[ref_row * in_what_stride + ref_col]; - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; + best_mv->row = ref_row; + best_mv->col = ref_col; - // Baseline value at the centre + // Baseline value at the center bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) - + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv, + + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); - // Apply further limits to prevent us looking using vectors that stretch - // beyond the UMV border - col_min = MAX(col_min, x->mv_col_min); - col_max = MIN(col_max, x->mv_col_max); - row_min = MAX(row_min, x->mv_row_min); - row_max = MIN(row_max, x->mv_row_max); - for (r = row_min; r < row_max; r++) { - this_mv.as_mv.row = r; - check_here = r * mv_stride + in_what + col_min; + const uint8_t *check_here = &in_what[r * in_what_stride + col_min]; + this_mv.row = r; c = col_min; while ((c + 7) < col_max) { @@ -1741,15 +1546,14 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv, thissad = (unsigned int)sad_array8[i]; if (thissad < bestsad) { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.col = c; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (thissad < bestsad) { bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; + best_mv->row = r; + best_mv->col = c; } } @@ -1767,15 +1571,14 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv, thissad = sad_array[i]; if (thissad < bestsad) { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.col = c; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (thissad < bestsad) { bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; + best_mv->row = r; + best_mv->col = c; } } @@ -1789,15 +1592,14 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv, bestsad); if (thissad < bestsad) { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + this_mv.col = c; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, sad_per_bit); if (thissad < bestsad) { bestsad = thissad; - best_mv->as_mv.row = r; - 
best_mv->as_mv.col = c; - bestaddress = check_here; + best_mv->row = r; + best_mv->col = c; } } @@ -1805,70 +1607,46 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv, c++; } } - - this_mv.as_mv.row = best_mv->as_mv.row * 8; - this_mv.as_mv.col = best_mv->as_mv.col * 8; - - if (bestsad < INT_MAX) - return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, - (unsigned int *)(&thissad)) + - mv_err_cost(&this_mv.as_mv, ¢er_mv->as_mv, - mvjcost, mvcost, x->errorperbit); - else - return INT_MAX; + return bestsad; } -int vp9_refining_search_sad_c(MACROBLOCK *x, - int_mv *ref_mv, int error_per_bit, - int search_range, vp9_variance_fn_ptr_t *fn_ptr, - int *mvjcost, int *mvcost[2], int_mv *center_mv) { - const MACROBLOCKD* const xd = &x->e_mbd; - MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}}; - int i, j; - int this_row_offset, this_col_offset; - int what_stride = x->plane[0].src.stride; - int in_what_stride = xd->plane[0].pre[0].stride; - uint8_t *what = x->plane[0].src.buf; - uint8_t *best_address = xd->plane[0].pre[0].buf + - (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) + - ref_mv->as_mv.col; - uint8_t *check_here; - unsigned int thissad; - int_mv this_mv; - unsigned int bestsad = INT_MAX; - int_mv fcenter_mv; +int vp9_refining_search_sad_c(const MACROBLOCK *x, + MV *ref_mv, int error_per_bit, + int search_range, + const vp9_variance_fn_ptr_t *fn_ptr, + int *mvjcost, int *mvcost[2], + const MV *center_mv) { + const MACROBLOCKD *const xd = &x->e_mbd; + const MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}}; + int i, j; - int *mvjsadcost = x->nmvjointsadcost; + const int what_stride = x->plane[0].src.stride; + const uint8_t *const what = x->plane[0].src.buf; + const int in_what_stride = xd->plane[0].pre[0].stride; + const uint8_t *const in_what = xd->plane[0].pre[0].buf; + const uint8_t *best_address = &in_what[ref_mv->row * in_what_stride + + ref_mv->col]; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; + const int *mvjsadcost = x->nmvjointsadcost; int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - - bestsad = fn_ptr->sdf(what, what_stride, best_address, - in_what_stride, 0x7fffffff) + - mvsad_err_cost(&ref_mv->as_mv, &fcenter_mv.as_mv, - mvjsadcost, mvsadcost, error_per_bit); + unsigned int bestsad = fn_ptr->sdf(what, what_stride, best_address, + in_what_stride, 0x7fffffff) + + mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit); for (i = 0; i < search_range; i++) { int best_site = -1; for (j = 0; j < 4; j++) { - this_row_offset = ref_mv->as_mv.row + neighbors[j].row; - this_col_offset = ref_mv->as_mv.col + neighbors[j].col; - - if ((this_col_offset > x->mv_col_min) && - (this_col_offset < x->mv_col_max) && - (this_row_offset > x->mv_row_min) && - (this_row_offset < x->mv_row_max)) { - check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + - best_address; - thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, - bestsad); - + const MV this_mv = {ref_mv->row + neighbors[j].row, + ref_mv->col + neighbors[j].col}; + if (is_mv_in(x, &this_mv)) { + const uint8_t *check_here = &in_what[this_mv.row * in_what_stride + + this_mv.col]; + unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here, + in_what_stride, bestsad); if (thissad < bestsad) { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv.as_mv, 
&fcenter_mv.as_mv, + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit); if (thissad < bestsad) { @@ -1882,80 +1660,63 @@ int vp9_refining_search_sad_c(MACROBLOCK *x, if (best_site == -1) { break; } else { - ref_mv->as_mv.row += neighbors[best_site].row; - ref_mv->as_mv.col += neighbors[best_site].col; - best_address += (neighbors[best_site].row) * in_what_stride + - neighbors[best_site].col; + ref_mv->row += neighbors[best_site].row; + ref_mv->col += neighbors[best_site].col; } } - - this_mv.as_mv.row = ref_mv->as_mv.row * 8; - this_mv.as_mv.col = ref_mv->as_mv.col * 8; - - if (bestsad < INT_MAX) - return fn_ptr->vf(what, what_stride, best_address, in_what_stride, - (unsigned int *)(&thissad)) + - mv_err_cost(&this_mv.as_mv, ¢er_mv->as_mv, - mvjcost, mvcost, x->errorperbit); - else - return INT_MAX; + return bestsad; } -int vp9_refining_search_sadx4(MACROBLOCK *x, - int_mv *ref_mv, int error_per_bit, - int search_range, vp9_variance_fn_ptr_t *fn_ptr, - int *mvjcost, int *mvcost[2], int_mv *center_mv) { - const MACROBLOCKD* const xd = &x->e_mbd; +int vp9_refining_search_sadx4(const MACROBLOCK *x, + MV *ref_mv, int error_per_bit, + int search_range, + const vp9_variance_fn_ptr_t *fn_ptr, + int *mvjcost, int *mvcost[2], + const MV *center_mv) { + const MACROBLOCKD *const xd = &x->e_mbd; MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}}; int i, j; - int this_row_offset, this_col_offset; - int what_stride = x->plane[0].src.stride; - int in_what_stride = xd->plane[0].pre[0].stride; - uint8_t *what = x->plane[0].src.buf; - uint8_t *best_address = xd->plane[0].pre[0].buf + - (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) + - ref_mv->as_mv.col; - uint8_t *check_here; - unsigned int thissad; - int_mv this_mv; - unsigned int bestsad = INT_MAX; - int_mv fcenter_mv; + const int what_stride = x->plane[0].src.stride; + const int in_what_stride = xd->plane[0].pre[0].stride; + const uint8_t *what = x->plane[0].src.buf; + const uint8_t *best_address = xd->plane[0].pre[0].buf + + (ref_mv->row * xd->plane[0].pre[0].stride) + + ref_mv->col; - int *mvjsadcost = x->nmvjointsadcost; - int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; + const int *mvjsadcost = x->nmvjointsadcost; + int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; - bestsad = fn_ptr->sdf(what, what_stride, best_address, - in_what_stride, 0x7fffffff) + - mvsad_err_cost(&ref_mv->as_mv, &fcenter_mv.as_mv, - mvjsadcost, mvsadcost, error_per_bit); + unsigned int bestsad = fn_ptr->sdf(what, what_stride, best_address, + in_what_stride, 0x7fffffff) + + mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit); for (i = 0; i < search_range; i++) { int best_site = -1; - int all_in = ((ref_mv->as_mv.row - 1) > x->mv_row_min) & - ((ref_mv->as_mv.row + 1) < x->mv_row_max) & - ((ref_mv->as_mv.col - 1) > x->mv_col_min) & - ((ref_mv->as_mv.col + 1) < x->mv_col_max); + int all_in = ((ref_mv->row - 1) > x->mv_row_min) & + ((ref_mv->row + 1) < x->mv_row_max) & + ((ref_mv->col - 1) > x->mv_col_min) & + ((ref_mv->col + 1) < x->mv_col_max); if (all_in) { unsigned int sad_array[4]; - unsigned char const *block_offset[4]; - block_offset[0] = best_address - in_what_stride; - block_offset[1] = best_address - 1; - block_offset[2] = best_address + 1; - block_offset[3] = best_address + in_what_stride; + uint8_t const *block_offset[4] 
= { + best_address - in_what_stride, + best_address - 1, + best_address + 1, + best_address + in_what_stride + }; fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array); for (j = 0; j < 4; j++) { if (sad_array[j] < bestsad) { - this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row; - this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col; - sad_array[j] += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + const MV this_mv = {ref_mv->row + neighbors[j].row, + ref_mv->col + neighbors[j].col}; + sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit); if (sad_array[j] < bestsad) { @@ -1966,22 +1727,17 @@ int vp9_refining_search_sadx4(MACROBLOCK *x, } } else { for (j = 0; j < 4; j++) { - this_row_offset = ref_mv->as_mv.row + neighbors[j].row; - this_col_offset = ref_mv->as_mv.col + neighbors[j].col; + const MV this_mv = {ref_mv->row + neighbors[j].row, + ref_mv->col + neighbors[j].col}; - if ((this_col_offset > x->mv_col_min) && - (this_col_offset < x->mv_col_max) && - (this_row_offset > x->mv_row_min) && - (this_row_offset < x->mv_row_max)) { - check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + - best_address; - thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, - bestsad); + if (is_mv_in(x, &this_mv)) { + const uint8_t *check_here = neighbors[j].row * in_what_stride + + neighbors[j].col + best_address; + unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here, + in_what_stride, bestsad); if (thissad < bestsad) { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit); if (thissad < bestsad) { @@ -1996,85 +1752,64 @@ int vp9_refining_search_sadx4(MACROBLOCK *x, if (best_site == -1) { break; } else { - ref_mv->as_mv.row += neighbors[best_site].row; - ref_mv->as_mv.col += neighbors[best_site].col; + ref_mv->row += neighbors[best_site].row; + ref_mv->col += neighbors[best_site].col; best_address += (neighbors[best_site].row) * in_what_stride + neighbors[best_site].col; } } - this_mv.as_mv.row = ref_mv->as_mv.row * 8; - this_mv.as_mv.col = ref_mv->as_mv.col * 8; - - if (bestsad < INT_MAX) - return fn_ptr->vf(what, what_stride, best_address, in_what_stride, - (unsigned int *)(&thissad)) + - mv_err_cost(&this_mv.as_mv, ¢er_mv->as_mv, - mvjcost, mvcost, x->errorperbit); - else - return INT_MAX; + return bestsad; } -/* This function is called when we do joint motion search in comp_inter_inter - * mode. - */ -int vp9_refining_search_8p_c(MACROBLOCK *x, - int_mv *ref_mv, int error_per_bit, - int search_range, vp9_variance_fn_ptr_t *fn_ptr, - int *mvjcost, int *mvcost[2], int_mv *center_mv, +// This function is called when we do joint motion search in comp_inter_inter +// mode. 
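// [Editor's note] The refining-search hunks above (vp9_refining_search_sad_c,
// vp9_refining_search_sadx4, and vp9_refining_search_8p_c just below) all
// follow one pattern: probe the immediate neighbors of the current best
// full-pel motion vector, and repeat until no neighbor lowers the cost or
// search_range rounds have elapsed. Note also that fcenter_mv is derived
// from center_mv with >> 3 because MVs are stored in 1/8-pel units while the
// SAD search works in full-pel units. A minimal self-contained sketch of the
// loop, with a plain callback standing in for fn_ptr->sdf() and the
// mvsad_err_cost() rate term folded into that callback (all names here are
// hypothetical, not part of the patch):

typedef struct { int row, col; } sketch_mv;
typedef unsigned int (*sketch_cost_fn)(const sketch_mv *mv, void *ctx);

static unsigned int refine_mv_sketch(sketch_mv *mv, int search_range,
                                     sketch_cost_fn cost, void *ctx) {
  static const sketch_mv neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
  unsigned int best = cost(mv, ctx);
  int i, j;
  for (i = 0; i < search_range; ++i) {
    int best_site = -1;
    for (j = 0; j < 4; ++j) {
      // The real code first checks is_mv_in() so the candidate stays
      // inside the UMV border before any SAD is computed.
      const sketch_mv cand = { mv->row + neighbors[j].row,
                               mv->col + neighbors[j].col };
      const unsigned int c = cost(&cand, ctx);
      if (c < best) {
        best = c;
        best_site = j;
      }
    }
    if (best_site == -1)
      break;  // local minimum reached: no neighbor improved the cost
    mv->row += neighbors[best_site].row;
    mv->col += neighbors[best_site].col;
  }
  return best;
}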
+int vp9_refining_search_8p_c(const MACROBLOCK *x, + MV *ref_mv, int error_per_bit, + int search_range, + const vp9_variance_fn_ptr_t *fn_ptr, + int *mvjcost, int *mvcost[2], + const MV *center_mv, const uint8_t *second_pred, int w, int h) { - const MACROBLOCKD* const xd = &x->e_mbd; - MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}, - {-1, -1}, {1, -1}, {-1, 1}, {1, 1}}; + const MACROBLOCKD *const xd = &x->e_mbd; + const MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}, + {-1, -1}, {1, -1}, {-1, 1}, {1, 1}}; int i, j; - int this_row_offset, this_col_offset; - int what_stride = x->plane[0].src.stride; - int in_what_stride = xd->plane[0].pre[0].stride; - uint8_t *what = x->plane[0].src.buf; - uint8_t *best_address = xd->plane[0].pre[0].buf + - (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) + - ref_mv->as_mv.col; - uint8_t *check_here; + const uint8_t *what = x->plane[0].src.buf; + const int what_stride = x->plane[0].src.stride; + const uint8_t *in_what = xd->plane[0].pre[0].buf; + const int in_what_stride = xd->plane[0].pre[0].stride; + const uint8_t *best_address = &in_what[ref_mv->row * in_what_stride + + ref_mv->col]; unsigned int thissad; - int_mv this_mv; - unsigned int bestsad = INT_MAX; - int_mv fcenter_mv; + MV this_mv; + const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - int *mvjsadcost = x->nmvjointsadcost; + const int *mvjsadcost = x->nmvjointsadcost; int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - /* Get compound pred by averaging two pred blocks. */ - bestsad = fn_ptr->sdaf(what, what_stride, best_address, in_what_stride, - second_pred, 0x7fffffff) + - mvsad_err_cost(&ref_mv->as_mv, &fcenter_mv.as_mv, - mvjsadcost, mvsadcost, error_per_bit); + unsigned int bestsad = fn_ptr->sdaf(what, what_stride, + best_address, in_what_stride, + second_pred, 0x7fffffff) + + mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit); - for (i = 0; i < search_range; i++) { + for (i = 0; i < search_range; ++i) { int best_site = -1; for (j = 0; j < 8; j++) { - this_row_offset = ref_mv->as_mv.row + neighbors[j].row; - this_col_offset = ref_mv->as_mv.col + neighbors[j].col; + this_mv.row = ref_mv->row + neighbors[j].row; + this_mv.col = ref_mv->col + neighbors[j].col; - if ((this_col_offset > x->mv_col_min) && - (this_col_offset < x->mv_col_max) && - (this_row_offset > x->mv_row_min) && - (this_row_offset < x->mv_row_max)) { - check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + - best_address; + if (is_mv_in(x, &this_mv)) { + const uint8_t *check_here = &in_what[this_mv.row * in_what_stride + + this_mv.col]; - /* Get compound block and use it to calculate SAD. 
*/ thissad = fn_ptr->sdaf(what, what_stride, check_here, in_what_stride, second_pred, bestsad); - if (thissad < bestsad) { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit); if (thissad < bestsad) { bestsad = thissad; @@ -2087,24 +1822,9 @@ int vp9_refining_search_8p_c(MACROBLOCK *x, if (best_site == -1) { break; } else { - ref_mv->as_mv.row += neighbors[best_site].row; - ref_mv->as_mv.col += neighbors[best_site].col; - best_address += (neighbors[best_site].row) * in_what_stride + - neighbors[best_site].col; + ref_mv->row += neighbors[best_site].row; + ref_mv->col += neighbors[best_site].col; } } - - this_mv.as_mv.row = ref_mv->as_mv.row * 8; - this_mv.as_mv.col = ref_mv->as_mv.col * 8; - - if (bestsad < INT_MAX) { - // FIXME(rbultje, yunqing): add full-pixel averaging variance functions - // so we don't have to use the subpixel with xoff=0,yoff=0 here. - return fn_ptr->svaf(best_address, in_what_stride, 0, 0, what, what_stride, - (unsigned int *)(&thissad), second_pred) + - mv_err_cost(&this_mv.as_mv, ¢er_mv->as_mv, - mvjcost, mvcost, x->errorperbit); - } else { - return INT_MAX; - } + return bestsad; } diff --git a/libvpx/vp9/encoder/vp9_mcomp.h b/libvpx/vp9/encoder/vp9_mcomp.h index bcab679..917de75 100644 --- a/libvpx/vp9/encoder/vp9_mcomp.h +++ b/libvpx/vp9/encoder/vp9_mcomp.h @@ -15,11 +15,16 @@ #include "vp9/encoder/vp9_block.h" #include "vp9/encoder/vp9_variance.h" +#ifdef __cplusplus +extern "C" { +#endif + // The maximum number of steps in a step search given the largest // allowed initial step #define MAX_MVSEARCH_STEPS 11 -// Max full pel mv specified in 1 pel units -#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1) +// Max full pel mv specified in the unit of full pixel +// Enable the use of motion vector in range [-1023, 1023]. 
+#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS - 1)) - 1) // Maximum size of the first step in full pel units #define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) // Allowed motion vector pixel distance outside image border @@ -27,9 +32,21 @@ #define BORDER_MV_PIXELS_B16 (16 + VP9_INTERP_EXTEND) -void vp9_clamp_mv_min_max(MACROBLOCK *x, MV *mv); +void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv); int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost, int *mvcost[2], int weight); + +// Utility to compute variance + MV rate cost for a given MV +int vp9_get_mvpred_var(const MACROBLOCK *x, + const MV *best_mv, const MV *center_mv, + const vp9_variance_fn_ptr_t *vfp, + int use_mvcost); +int vp9_get_mvpred_av_var(const MACROBLOCK *x, + MV *best_mv, + const MV *center_mv, + const uint8_t *second_pred, + const vp9_variance_fn_ptr_t *vfp, + int use_mvcost); void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride); void vp9_init3smotion_compensation(MACROBLOCK *x, int stride); @@ -37,42 +54,31 @@ struct VP9_COMP; int vp9_init_search_range(struct VP9_COMP *cpi, int size); // Runs sequence of diamond searches in smaller steps for RD -int vp9_full_pixel_diamond(struct VP9_COMP *cpi, MACROBLOCK *x, - int_mv *mvp_full, int step_param, +int vp9_full_pixel_diamond(const struct VP9_COMP *cpi, MACROBLOCK *x, + MV *mvp_full, int step_param, int sadpb, int further_steps, int do_refine, - vp9_variance_fn_ptr_t *fn_ptr, - int_mv *ref_mv, int_mv *dst_mv); - -int vp9_hex_search(MACROBLOCK *x, - MV *ref_mv, - int search_param, - int error_per_bit, - int do_init_search, - const vp9_variance_fn_ptr_t *vf, - int use_mvcost, - const MV *center_mv, - MV *best_mv); -int vp9_bigdia_search(MACROBLOCK *x, - MV *ref_mv, - int search_param, - int error_per_bit, - int do_init_search, - const vp9_variance_fn_ptr_t *vf, - int use_mvcost, - const MV *center_mv, - MV *best_mv); -int vp9_square_search(MACROBLOCK *x, - MV *ref_mv, - int search_param, - int error_per_bit, - int do_init_search, - const vp9_variance_fn_ptr_t *vf, - int use_mvcost, - const MV *center_mv, - MV *best_mv); + const vp9_variance_fn_ptr_t *fn_ptr, + const MV *ref_mv, MV *dst_mv); + +typedef int (integer_mv_pattern_search_fn) ( + const MACROBLOCK *x, + MV *ref_mv, + int search_param, + int error_per_bit, + int do_init_search, + const vp9_variance_fn_ptr_t *vf, + int use_mvcost, + const MV *center_mv, + MV *best_mv); + +integer_mv_pattern_search_fn vp9_hex_search; +integer_mv_pattern_search_fn vp9_bigdia_search; +integer_mv_pattern_search_fn vp9_square_search; +integer_mv_pattern_search_fn vp9_fast_hex_search; +integer_mv_pattern_search_fn vp9_fast_dia_search; typedef int (fractional_mv_step_fp) ( - MACROBLOCK *x, + const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp, int error_per_bit, @@ -83,11 +89,11 @@ typedef int (fractional_mv_step_fp) ( int *mvcost[2], int *distortion, unsigned int *sse); -extern fractional_mv_step_fp vp9_find_best_sub_pixel_iterative; + extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree; typedef int (fractional_mv_step_comp_fp) ( - MACROBLOCK *x, + const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp, int error_per_bit, @@ -98,34 +104,40 @@ typedef int (fractional_mv_step_comp_fp) ( int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w, int h); -extern fractional_mv_step_comp_fp vp9_find_best_sub_pixel_comp_iterative; + extern fractional_mv_step_comp_fp vp9_find_best_sub_pixel_comp_tree; -typedef int (*vp9_full_search_fn_t)(MACROBLOCK *x, - int_mv *ref_mv, 
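// [Editor's note] Worked arithmetic for the MAX_FULL_PEL_VAL change above:
// with MAX_MVSEARCH_STEPS == 11, the old expression
// ((1 << MAX_MVSEARCH_STEPS) - 1) evaluated to 2047, while the new
// ((1 << (MAX_MVSEARCH_STEPS - 1)) - 1) evaluates to 1023, matching the
// "[-1023, 1023]" range promised by the new comment. A standalone
// compile-time check (a sketch; the SKETCH_* names are local stand-ins for
// the header's #defines):

#define SKETCH_MAX_MVSEARCH_STEPS 11
#define SKETCH_MAX_FULL_PEL_VAL ((1 << (SKETCH_MAX_MVSEARCH_STEPS - 1)) - 1)

// Fails to compile (negative array size) if the value is not 1023.
typedef char sketch_check_full_pel_val[SKETCH_MAX_FULL_PEL_VAL == 1023 ? 1 : -1];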
int sad_per_bit, - int distance, vp9_variance_fn_ptr_t *fn_ptr, +typedef int (*vp9_full_search_fn_t)(const MACROBLOCK *x, + const MV *ref_mv, int sad_per_bit, + int distance, + const vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], - int_mv *center_mv, int n); + const MV *center_mv, MV *best_mv); -typedef int (*vp9_refining_search_fn_t)(MACROBLOCK *x, - int_mv *ref_mv, int sad_per_bit, +typedef int (*vp9_refining_search_fn_t)(const MACROBLOCK *x, + MV *ref_mv, int sad_per_bit, int distance, - vp9_variance_fn_ptr_t *fn_ptr, + const vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], - int_mv *center_mv); + const MV *center_mv); -typedef int (*vp9_diamond_search_fn_t)(MACROBLOCK *x, - int_mv *ref_mv, int_mv *best_mv, +typedef int (*vp9_diamond_search_fn_t)(const MACROBLOCK *x, + MV *ref_mv, MV *best_mv, int search_param, int sad_per_bit, int *num00, - vp9_variance_fn_ptr_t *fn_ptr, + const vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], - int_mv *center_mv); + const MV *center_mv); -int vp9_refining_search_8p_c(MACROBLOCK *x, - int_mv *ref_mv, int error_per_bit, - int search_range, vp9_variance_fn_ptr_t *fn_ptr, +int vp9_refining_search_8p_c(const MACROBLOCK *x, + MV *ref_mv, int error_per_bit, + int search_range, + const vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], - int_mv *center_mv, const uint8_t *second_pred, + const MV *center_mv, const uint8_t *second_pred, int w, int h); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_ENCODER_VP9_MCOMP_H_ diff --git a/libvpx/vp9/encoder/vp9_modecosts.c b/libvpx/vp9/encoder/vp9_modecosts.c deleted file mode 100644 index 7eb6592..0000000 --- a/libvpx/vp9/encoder/vp9_modecosts.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#include "vp9/common/vp9_blockd.h" -#include "vp9/encoder/vp9_onyx_int.h" -#include "vp9/encoder/vp9_treewriter.h" -#include "vp9/common/vp9_entropymode.h" - - -void vp9_init_mode_costs(VP9_COMP *c) { - VP9_COMMON *const cm = &c->common; - const vp9_tree_index *KT = vp9_intra_mode_tree; - int i, j; - - for (i = 0; i < INTRA_MODES; i++) { - for (j = 0; j < INTRA_MODES; j++) { - vp9_cost_tokens((int *)c->mb.y_mode_costs[i][j], vp9_kf_y_mode_prob[i][j], - KT); - } - } - - // TODO(rbultje) separate tables for superblock costing? - vp9_cost_tokens(c->mb.mbmode_cost, cm->fc.y_mode_prob[1], - vp9_intra_mode_tree); - vp9_cost_tokens(c->mb.intra_uv_mode_cost[1], - cm->fc.uv_mode_prob[INTRA_MODES - 1], vp9_intra_mode_tree); - vp9_cost_tokens(c->mb.intra_uv_mode_cost[0], - vp9_kf_uv_mode_prob[INTRA_MODES - 1], - vp9_intra_mode_tree); - - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i], - cm->fc.switchable_interp_prob[i], - vp9_switchable_interp_tree); -} diff --git a/libvpx/vp9/encoder/vp9_modecosts.h b/libvpx/vp9/encoder/vp9_modecosts.h deleted file mode 100644 index f43033e..0000000 --- a/libvpx/vp9/encoder/vp9_modecosts.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP9_ENCODER_VP9_MODECOSTS_H_ -#define VP9_ENCODER_VP9_MODECOSTS_H_ - -void vp9_init_mode_costs(VP9_COMP *x); - -#endif // VP9_ENCODER_VP9_MODECOSTS_H_ diff --git a/libvpx/vp9/encoder/vp9_onyx_if.c b/libvpx/vp9/encoder/vp9_onyx_if.c index dd4705d..0a5033f 100644 --- a/libvpx/vp9/encoder/vp9_onyx_if.c +++ b/libvpx/vp9/encoder/vp9_onyx_if.c @@ -14,6 +14,8 @@ #include "./vpx_config.h" #include "./vpx_scale_rtcd.h" +#include "vpx/internal/vpx_psnr.h" +#include "vpx_ports/vpx_timer.h" #include "vp9/common/vp9_alloccommon.h" #include "vp9/common/vp9_filter.h" @@ -24,23 +26,21 @@ #include "vp9/common/vp9_reconinter.h" #include "vp9/common/vp9_systemdependent.h" #include "vp9/common/vp9_tile_common.h" + +#include "vp9/encoder/vp9_bitstream.h" +#include "vp9/encoder/vp9_encodemv.h" #include "vp9/encoder/vp9_firstpass.h" #include "vp9/encoder/vp9_mbgraph.h" #include "vp9/encoder/vp9_onyx_int.h" #include "vp9/encoder/vp9_picklpf.h" -#include "vp9/encoder/vp9_psnr.h" #include "vp9/encoder/vp9_ratectrl.h" #include "vp9/encoder/vp9_rdopt.h" #include "vp9/encoder/vp9_segmentation.h" #include "vp9/encoder/vp9_temporal_filter.h" #include "vp9/encoder/vp9_vaq.h" +#include "vp9/encoder/vp9_resize.h" -#include "vpx_ports/vpx_timer.h" - - -extern void print_tree_update_probs(); - -static void set_default_lf_deltas(struct loopfilter *lf); +void vp9_coef_tree_initialize(); #define DEFAULT_INTERP_FILTER SWITCHABLE @@ -59,6 +59,11 @@ static void set_default_lf_deltas(struct loopfilter *lf); #define DISABLE_COMPOUND_SPLIT 0x18 #define LAST_AND_INTRA_SPLIT_ONLY 0x1E +// Max rate target for 1080P and below encodes under normal circumstances +// (1920 * 1080 / (16 * 16)) * MAX_MB_RATE bits per MB +#define MAX_MB_RATE 250 +#define MAXRATE_1080P 2025000 + #if CONFIG_INTERNAL_STATS extern double vp9_calc_ssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int lumamask, @@ -87,41 +92,12 @@ FILE *kf_list; FILE *keyfile; #endif +void vp9_init_quantizer(VP9_COMP *cpi); -#ifdef ENTROPY_STATS -extern int intra_mode_stats[INTRA_MODES] - [INTRA_MODES] - [INTRA_MODES]; -#endif - -#ifdef MODE_STATS -extern void init_tx_count_stats(); -extern void write_tx_count_stats(); -extern void init_switchable_interp_stats(); -extern void write_switchable_interp_stats(); -#endif - -#ifdef SPEEDSTATS -unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0}; -#endif - -#if defined(SECTIONBITS_OUTPUT) -extern unsigned __int64 Sectionbits[500]; -#endif - -extern void vp9_init_quantizer(VP9_COMP *cpi); - -// Tables relating active max Q to active min Q -static int kf_low_motion_minq[QINDEX_RANGE]; -static int kf_high_motion_minq[QINDEX_RANGE]; -static int gf_low_motion_minq[QINDEX_RANGE]; -static int gf_high_motion_minq[QINDEX_RANGE]; -static int inter_minq[QINDEX_RANGE]; -static int afq_low_motion_minq[QINDEX_RANGE]; -static int afq_high_motion_minq[QINDEX_RANGE]; +static const double in_frame_q_adj_ratio[MAX_SEGMENTS] = + {1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; -static INLINE void Scale2Ratio(int mode, int *hr, int *hs) { +static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) { switch (mode) { case NORMAL: *hr = 1; @@ -147,98 +123,9 @@ static INLINE 
void Scale2Ratio(int mode, int *hr, int *hs) { } } -// Functions to compute the active minq lookup table entries based on a -// formulaic approach to facilitate easier adjustment of the Q tables. -// The formulae were derived from computing a 3rd order polynomial best -// fit to the original data (after plotting real maxq vs minq (not q index)) -static int calculate_minq_index(double maxq, - double x3, double x2, double x1, double c) { - int i; - const double minqtarget = MIN(((x3 * maxq + x2) * maxq + x1) * maxq + c, - maxq); - - // Special case handling to deal with the step from q2.0 - // down to lossless mode represented by q 1.0. - if (minqtarget <= 2.0) - return 0; - - for (i = 0; i < QINDEX_RANGE; i++) { - if (minqtarget <= vp9_convert_qindex_to_q(i)) - return i; - } - - return QINDEX_RANGE - 1; -} - -static void init_minq_luts(void) { - int i; - - for (i = 0; i < QINDEX_RANGE; i++) { - const double maxq = vp9_convert_qindex_to_q(i); - - - kf_low_motion_minq[i] = calculate_minq_index(maxq, - 0.000001, - -0.0004, - 0.15, - 0.0); - kf_high_motion_minq[i] = calculate_minq_index(maxq, - 0.000002, - -0.0012, - 0.5, - 0.0); - - gf_low_motion_minq[i] = calculate_minq_index(maxq, - 0.0000015, - -0.0009, - 0.32, - 0.0); - gf_high_motion_minq[i] = calculate_minq_index(maxq, - 0.0000021, - -0.00125, - 0.50, - 0.0); - inter_minq[i] = calculate_minq_index(maxq, - 0.00000271, - -0.00113, - 0.75, - 0.0); - afq_low_motion_minq[i] = calculate_minq_index(maxq, - 0.0000015, - -0.0009, - 0.33, - 0.0); - afq_high_motion_minq[i] = calculate_minq_index(maxq, - 0.0000021, - -0.00125, - 0.55, - 0.0); - } -} - -static int get_active_quality(int q, - int gfu_boost, - int low, - int high, - int *low_motion_minq, - int *high_motion_minq) { - int active_best_quality; - if (gfu_boost > high) { - active_best_quality = low_motion_minq[q]; - } else if (gfu_boost < low) { - active_best_quality = high_motion_minq[q]; - } else { - const int gap = high - low; - const int offset = high - gfu_boost; - const int qdiff = high_motion_minq[q] - low_motion_minq[q]; - const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap; - active_best_quality = low_motion_minq[q] + adjustment; - } - return active_best_quality; -} - -static void set_mvcost(VP9_COMP *cpi) { +static void set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv) { MACROBLOCK *const mb = &cpi->mb; + cpi->common.allow_high_precision_mv = allow_high_precision_mv; if (cpi->common.allow_high_precision_mv) { mb->mvcost = mb->nmvcost_hp; mb->mvsadcost = mb->nmvsadcost_hp; @@ -253,51 +140,35 @@ void vp9_initialize_enc() { if (!init_done) { vp9_initialize_common(); + vp9_coef_tree_initialize(); vp9_tokenize_initialize(); vp9_init_quant_tables(); vp9_init_me_luts(); - init_minq_luts(); + vp9_rc_init_minq_luts(); // init_base_skip_probs(); + vp9_entropy_mv_init(); + vp9_entropy_mode_init(); init_done = 1; } } -static void setup_features(VP9_COMMON *cm) { - struct loopfilter *const lf = &cm->lf; - struct segmentation *const seg = &cm->seg; - - // Set up default state for MB feature flags - seg->enabled = 0; - - seg->update_map = 0; - seg->update_data = 0; - vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs)); - - vp9_clearall_segfeatures(seg); - - lf->mode_ref_delta_enabled = 0; - lf->mode_ref_delta_update = 0; - vp9_zero(lf->ref_deltas); - vp9_zero(lf->mode_deltas); - vp9_zero(lf->last_ref_deltas); - vp9_zero(lf->last_mode_deltas); - - set_default_lf_deltas(lf); -} - static void dealloc_compressor_data(VP9_COMP *cpi) { + VP9_COMMON *const cm = 
&cpi->common; + // Delete segmentation map vpx_free(cpi->segmentation_map); - cpi->segmentation_map = 0; - vpx_free(cpi->common.last_frame_seg_map); - cpi->common.last_frame_seg_map = 0; + cpi->segmentation_map = NULL; + vpx_free(cm->last_frame_seg_map); + cm->last_frame_seg_map = NULL; vpx_free(cpi->coding_context.last_frame_seg_map_copy); - cpi->coding_context.last_frame_seg_map_copy = 0; + cpi->coding_context.last_frame_seg_map_copy = NULL; + vpx_free(cpi->complexity_map); + cpi->complexity_map = 0; vpx_free(cpi->active_map); cpi->active_map = 0; - vp9_free_frame_buffers(&cpi->common); + vp9_free_frame_buffers(cm); vp9_free_frame_buffer(&cpi->last_frame_uf); vp9_free_frame_buffer(&cpi->scaled_source); @@ -323,20 +194,21 @@ static void dealloc_compressor_data(VP9_COMP *cpi) { // Computes a q delta (in "q index" terms) to get from a starting q value // to a target value // target q value -int vp9_compute_qdelta(VP9_COMP *cpi, double qstart, double qtarget) { +int vp9_compute_qdelta(const VP9_COMP *cpi, double qstart, double qtarget) { + const RATE_CONTROL *const rc = &cpi->rc; + int start_index = rc->worst_quality; + int target_index = rc->worst_quality; int i; - int start_index = cpi->worst_quality; - int target_index = cpi->worst_quality; // Convert the average q value to an index. - for (i = cpi->best_quality; i < cpi->worst_quality; i++) { + for (i = rc->best_quality; i < rc->worst_quality; ++i) { start_index = i; if (vp9_convert_qindex_to_q(i) >= qstart) break; } // Convert the q target to an index - for (i = cpi->best_quality; i < cpi->worst_quality; i++) { + for (i = rc->best_quality; i < rc->worst_quality; ++i) { target_index = i; if (vp9_convert_qindex_to_q(i) >= qtarget) break; @@ -345,11 +217,75 @@ int vp9_compute_qdelta(VP9_COMP *cpi, double qstart, double qtarget) { return target_index - start_index; } +// Computes a q delta (in "q index" terms) to get from a starting q value +// to a value that should equate to the given rate ratio. + +static int compute_qdelta_by_rate(VP9_COMP *cpi, int base_q_index, + double rate_target_ratio) { + int i; + int target_index = cpi->rc.worst_quality; + + // Look up the current projected bits per block for the base index + const int base_bits_per_mb = vp9_rc_bits_per_mb(cpi->common.frame_type, + base_q_index, 1.0); + + // Find the target bits per mb based on the base value and given ratio. + const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb); + + // Convert the q target to an index + for (i = cpi->rc.best_quality; i < cpi->rc.worst_quality; ++i) { + target_index = i; + if (vp9_rc_bits_per_mb(cpi->common.frame_type, i, 1.0) <= + target_bits_per_mb ) + break; + } + + return target_index - base_q_index; +} + +// This function sets up a set of segments with delta Q values around +// the baseline frame quantizer. +static void setup_in_frame_q_adj(VP9_COMP *cpi) { + VP9_COMMON *const cm = &cpi->common; + struct segmentation *const seg = &cm->seg; + + // Make SURE use of floating point in this function is safe.
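// [Editor's note] compute_qdelta_by_rate() above leans on
// vp9_rc_bits_per_mb() decreasing (approximately monotonically) as the q
// index rises: it walks q upward from best_quality until the projected bits
// per 16x16 block fall to rate_target_ratio times the bits at base_q_index,
// then returns the index difference. So a ratio above 1.0 (e.g. the 2.0
// used for segment 1 via in_frame_q_adj_ratio) yields a negative delta,
// i.e. a lower q and more bits for that segment. A toy model with a
// made-up rate curve (bits_per_mb_model() is hypothetical, standing in for
// vp9_rc_bits_per_mb()):

static int bits_per_mb_model(int qindex) {
  // crude stand-in: projected rate roughly halves every 40 qindex steps
  return 4000 >> (qindex / 40);
}

static int qdelta_by_rate_sketch(int base_q_index, double ratio,
                                 int best_q, int worst_q) {
  const int target = (int)(ratio * bits_per_mb_model(base_q_index));
  int i, target_index = worst_q;
  for (i = best_q; i < worst_q; ++i) {
    target_index = i;
    if (bits_per_mb_model(i) <= target)
      break;  // first q index meeting the per-MB rate target
  }
  return target_index - base_q_index;  // delta applied via SEG_LVL_ALT_Q
}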
+ vp9_clear_system_state(); + + if (cm->frame_type == KEY_FRAME || + cpi->refresh_alt_ref_frame || + (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { + int segment; + + // Clear down the segment map + vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols); + + // Clear down the complexity map used for rd + vpx_memset(cpi->complexity_map, 0, cm->mi_rows * cm->mi_cols); + + vp9_enable_segmentation(seg); + vp9_clearall_segfeatures(seg); + + // Select delta coding method + seg->abs_delta = SEGMENT_DELTADATA; + + // Segment 0 "Q" feature is disabled so it defaults to the baseline Q + vp9_disable_segfeature(seg, 0, SEG_LVL_ALT_Q); + + // Use some of the segments for in frame Q adjustment + for (segment = 1; segment < 2; segment++) { + const int qindex_delta = compute_qdelta_by_rate(cpi, cm->base_qindex, + in_frame_q_adj_ratio[segment]); + vp9_enable_segfeature(seg, segment, SEG_LVL_ALT_Q); + vp9_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta); + } + } +} static void configure_static_seg_features(VP9_COMP *cpi) { - VP9_COMMON *cm = &cpi->common; - struct segmentation *seg = &cm->seg; + VP9_COMMON *const cm = &cpi->common; + struct segmentation *const seg = &cm->seg; - int high_q = (int)(cpi->avg_q > 48.0); + int high_q = (int)(cpi->rc.avg_q > 48.0); int qi_delta; // Disable and clear down for KF @@ -361,7 +297,7 @@ static void configure_static_seg_features(VP9_COMP *cpi) { cpi->static_mb_pct = 0; // Disable segmentation - vp9_disable_segmentation((VP9_PTR)cpi); + vp9_disable_segmentation(seg); // Clear down the segment features. vp9_clearall_segfeatures(seg); @@ -374,7 +310,7 @@ static void configure_static_seg_features(VP9_COMP *cpi) { cpi->static_mb_pct = 0; // Disable segmentation and individual segment features by default - vp9_disable_segmentation((VP9_PTR)cpi); + vp9_disable_segmentation(seg); vp9_clearall_segfeatures(seg); // Scan frames from current to arf frame. 
@@ -387,7 +323,8 @@ static void configure_static_seg_features(VP9_COMP *cpi) { seg->update_map = 1; seg->update_data = 1; - qi_delta = vp9_compute_qdelta(cpi, cpi->avg_q, (cpi->avg_q * 0.875)); + qi_delta = vp9_compute_qdelta( + cpi, cpi->rc.avg_q, (cpi->rc.avg_q * 0.875)); vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, (qi_delta - 2)); vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2); @@ -401,15 +338,15 @@ static void configure_static_seg_features(VP9_COMP *cpi) { // All other frames if segmentation has been enabled // First normal frame in a valid gf or alt ref group - if (cpi->frames_since_golden == 0) { + if (cpi->rc.frames_since_golden == 0) { // Set up segment features for normal frames in an arf group - if (cpi->source_alt_ref_active) { + if (cpi->rc.source_alt_ref_active) { seg->update_map = 0; seg->update_data = 1; seg->abs_delta = SEGMENT_DELTADATA; - qi_delta = vp9_compute_qdelta(cpi, cpi->avg_q, - (cpi->avg_q * 1.125)); + qi_delta = vp9_compute_qdelta(cpi, cpi->rc.avg_q, + (cpi->rc.avg_q * 1.125)); vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, (qi_delta + 2)); vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q); @@ -426,7 +363,7 @@ static void configure_static_seg_features(VP9_COMP *cpi) { // Disable segmentation and clear down features if alt ref // is not active for this group - vp9_disable_segmentation((VP9_PTR)cpi); + vp9_disable_segmentation(seg); vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols); @@ -435,7 +372,7 @@ static void configure_static_seg_features(VP9_COMP *cpi) { vp9_clearall_segfeatures(seg); } - } else if (cpi->is_src_frame_alt_ref) { + } else if (cpi->rc.is_src_frame_alt_ref) { // Special case where we are coding over the top of a previous // alt ref frame. // Segment coding disabled for compred testing @@ -467,69 +404,6 @@ static void configure_static_seg_features(VP9_COMP *cpi) { } } -#ifdef ENTROPY_STATS -void vp9_update_mode_context_stats(VP9_COMP *cpi) { - VP9_COMMON *cm = &cpi->common; - int i, j; - unsigned int (*inter_mode_counts)[INTER_MODES - 1][2] = - cm->fc.inter_mode_counts; - int64_t (*mv_ref_stats)[INTER_MODES - 1][2] = cpi->mv_ref_stats; - FILE *f; - - // Read the past stats counters - f = fopen("mode_context.bin", "rb"); - if (!f) { - vpx_memset(cpi->mv_ref_stats, 0, sizeof(cpi->mv_ref_stats)); - } else { - fread(cpi->mv_ref_stats, sizeof(cpi->mv_ref_stats), 1, f); - fclose(f); - } - - // Add in the values for this frame - for (i = 0; i < INTER_MODE_CONTEXTS; i++) { - for (j = 0; j < INTER_MODES - 1; j++) { - mv_ref_stats[i][j][0] += (int64_t)inter_mode_counts[i][j][0]; - mv_ref_stats[i][j][1] += (int64_t)inter_mode_counts[i][j][1]; - } - } - - // Write back the accumulated stats - f = fopen("mode_context.bin", "wb"); - fwrite(cpi->mv_ref_stats, sizeof(cpi->mv_ref_stats), 1, f); - fclose(f); -} - -void print_mode_context(VP9_COMP *cpi) { - FILE *f = fopen("vp9_modecont.c", "a"); - int i, j; - - fprintf(f, "#include \"vp9_entropy.h\"\n"); - fprintf( - f, - "const int inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1] ="); - fprintf(f, "{\n"); - for (j = 0; j < INTER_MODE_CONTEXTS; j++) { - fprintf(f, " {/* %d */ ", j); - fprintf(f, " "); - for (i = 0; i < INTER_MODES - 1; i++) { - int this_prob; - int64_t count = cpi->mv_ref_stats[j][i][0] + cpi->mv_ref_stats[j][i][1]; - if (count) - this_prob = ((cpi->mv_ref_stats[j][i][0] * 256) + (count >> 1)) / count; - else - this_prob = 128; - - // context probs - fprintf(f, "%5d, ", this_prob); - } - fprintf(f, " },\n"); - } - - fprintf(f, "};\n"); - fclose(f); -} -#endif // ENTROPY_STATS - // DEBUG: Print out 
the segment id of each MB in the current frame. static void print_seg_map(VP9_COMP *cpi) { VP9_COMMON *cm = &cpi->common; @@ -553,44 +427,30 @@ static void print_seg_map(VP9_COMP *cpi) { static void update_reference_segmentation_map(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; + MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible; + uint8_t *cache_ptr = cm->last_frame_seg_map; int row, col; - MODE_INFO **mi_8x8, **mi_8x8_ptr = cm->mi_grid_visible; - uint8_t *cache_ptr = cm->last_frame_seg_map, *cache; for (row = 0; row < cm->mi_rows; row++) { - mi_8x8 = mi_8x8_ptr; - cache = cache_ptr; + MODE_INFO **mi_8x8 = mi_8x8_ptr; + uint8_t *cache = cache_ptr; for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++) cache[0] = mi_8x8[0]->mbmi.segment_id; mi_8x8_ptr += cm->mode_info_stride; cache_ptr += cm->mi_cols; } } - -static void set_default_lf_deltas(struct loopfilter *lf) { - lf->mode_ref_delta_enabled = 1; - lf->mode_ref_delta_update = 1; - - vp9_zero(lf->ref_deltas); - vp9_zero(lf->mode_deltas); - - // Test of ref frame deltas - lf->ref_deltas[INTRA_FRAME] = 2; - lf->ref_deltas[LAST_FRAME] = 0; - lf->ref_deltas[GOLDEN_FRAME] = -2; - lf->ref_deltas[ALTREF_FRAME] = -2; - - lf->mode_deltas[0] = 0; // Zero - lf->mode_deltas[1] = 0; // New mv +static int is_slowest_mode(int mode) { + return (mode == MODE_SECONDPASS_BEST || mode == MODE_BESTQUALITY); } -static void set_rd_speed_thresholds(VP9_COMP *cpi, int mode) { +static void set_rd_speed_thresholds(VP9_COMP *cpi) { SPEED_FEATURES *sf = &cpi->sf; int i; // Set baseline threshold values for (i = 0; i < MAX_MODES; ++i) - sf->thresh_mult[i] = mode == 0 ? -500 : 0; + sf->thresh_mult[i] = is_slowest_mode(cpi->oxcf.mode) ? -500 : 0; sf->thresh_mult[THR_NEARESTMV] = 0; sf->thresh_mult[THR_NEARESTG] = 0; @@ -666,12 +526,12 @@ static void set_rd_speed_thresholds(VP9_COMP *cpi, int mode) { } } -static void set_rd_speed_thresholds_sub8x8(VP9_COMP *cpi, int mode) { +static void set_rd_speed_thresholds_sub8x8(VP9_COMP *cpi) { SPEED_FEATURES *sf = &cpi->sf; int i; for (i = 0; i < MAX_REFS; ++i) - sf->thresh_mult_sub8x8[i] = mode == 0 ? -500 : 0; + sf->thresh_mult_sub8x8[i] = is_slowest_mode(cpi->oxcf.mode) ? -500 : 0; sf->thresh_mult_sub8x8[THR_LAST] += 2500; sf->thresh_mult_sub8x8[THR_GOLD] += 2500; @@ -701,26 +561,347 @@ static void set_rd_speed_thresholds_sub8x8(VP9_COMP *cpi, int mode) { sf->thresh_mult_sub8x8[THR_COMP_GA] = INT_MAX; } +static void set_good_speed_feature(VP9_COMMON *cm, + SPEED_FEATURES *sf, + int speed) { + int i; + sf->adaptive_rd_thresh = 1; + sf->recode_loop = ((speed < 1) ? ALLOW_RECODE : ALLOW_RECODE_KFMAXBW); + if (speed == 1) { + sf->use_square_partition_only = !frame_is_intra_only(cm); + sf->less_rectangular_check = 1; + sf->tx_size_search_method = frame_is_intra_only(cm) + ? USE_FULL_RD : USE_LARGESTALL; + + if (MIN(cm->width, cm->height) >= 720) + sf->disable_split_mask = cm->show_frame ? + DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; + else + sf->disable_split_mask = DISABLE_COMPOUND_SPLIT; + + sf->use_rd_breakout = 1; + sf->adaptive_motion_search = 1; + sf->adaptive_pred_interp_filter = 1; + sf->auto_mv_step_size = 1; + sf->adaptive_rd_thresh = 2; + sf->recode_loop = ALLOW_RECODE_KFARFGF; + sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; + sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; + sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V; + } + if (speed == 2) { + sf->use_square_partition_only = !frame_is_intra_only(cm); + sf->less_rectangular_check = 1; + sf->tx_size_search_method = frame_is_intra_only(cm) + ? 
USE_FULL_RD : USE_LARGESTALL; + + if (MIN(cm->width, cm->height) >= 720) + sf->disable_split_mask = cm->show_frame ? + DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; + else + sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY; + + sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH | + FLAG_SKIP_INTRA_BESTINTER | + FLAG_SKIP_COMP_BESTINTRA | + FLAG_SKIP_INTRA_LOWVAR; + sf->use_rd_breakout = 1; + sf->adaptive_motion_search = 1; + sf->adaptive_pred_interp_filter = 2; + sf->reference_masking = 1; + sf->auto_mv_step_size = 1; + + sf->disable_filter_search_var_thresh = 50; + sf->comp_inter_joint_search_thresh = BLOCK_SIZES; + + sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX; + sf->use_lastframe_partitioning = LAST_FRAME_PARTITION_LOW_MOTION; + sf->adjust_partitioning_from_last_frame = 1; + sf->last_partitioning_redo_frequency = 3; + + sf->adaptive_rd_thresh = 2; + sf->recode_loop = ALLOW_RECODE_KFARFGF; + sf->use_lp32x32fdct = 1; + sf->mode_skip_start = 11; + sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; + sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V; + sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; + sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V; + } + if (speed == 3) { + sf->use_square_partition_only = 1; + sf->tx_size_search_method = USE_LARGESTALL; + + if (MIN(cm->width, cm->height) >= 720) + sf->disable_split_mask = DISABLE_ALL_SPLIT; + else + sf->disable_split_mask = DISABLE_ALL_INTER_SPLIT; + + sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH | + FLAG_SKIP_INTRA_BESTINTER | + FLAG_SKIP_COMP_BESTINTRA | + FLAG_SKIP_INTRA_LOWVAR; + + sf->use_rd_breakout = 1; + sf->adaptive_motion_search = 1; + sf->adaptive_pred_interp_filter = 2; + sf->reference_masking = 1; + sf->auto_mv_step_size = 1; + + sf->disable_split_var_thresh = 32; + sf->disable_filter_search_var_thresh = 100; + sf->comp_inter_joint_search_thresh = BLOCK_SIZES; + + sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX; + sf->use_lastframe_partitioning = LAST_FRAME_PARTITION_ALL; + sf->adjust_partitioning_from_last_frame = 1; + sf->last_partitioning_redo_frequency = 3; + + sf->use_uv_intra_rd_estimate = 1; + sf->skip_encode_sb = 1; + sf->use_lp32x32fdct = 1; + sf->subpel_iters_per_step = 1; + sf->use_fast_coef_updates = 2; + sf->use_fast_coef_costing = 1; + + sf->adaptive_rd_thresh = 4; + sf->mode_skip_start = 6; + } + if (speed == 4) { + sf->use_square_partition_only = 1; + sf->tx_size_search_method = USE_LARGESTALL; + sf->disable_split_mask = DISABLE_ALL_SPLIT; + + sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH | + FLAG_SKIP_INTRA_BESTINTER | + FLAG_SKIP_COMP_BESTINTRA | + FLAG_SKIP_COMP_REFMISMATCH | + FLAG_SKIP_INTRA_LOWVAR | + FLAG_EARLY_TERMINATE; + + sf->use_rd_breakout = 1; + sf->adaptive_motion_search = 1; + sf->adaptive_pred_interp_filter = 2; + sf->reference_masking = 1; + sf->auto_mv_step_size = 1; + + sf->disable_split_var_thresh = 64; + sf->disable_filter_search_var_thresh = 200; + sf->comp_inter_joint_search_thresh = BLOCK_SIZES; + + sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX; + sf->use_lastframe_partitioning = LAST_FRAME_PARTITION_ALL; + sf->adjust_partitioning_from_last_frame = 1; + sf->last_partitioning_redo_frequency = 3; + + sf->use_uv_intra_rd_estimate = 1; + sf->skip_encode_sb = 1; + sf->use_lp32x32fdct = 1; + sf->subpel_iters_per_step = 1; + sf->use_fast_coef_updates = 2; + sf->use_fast_coef_costing = 1; + + sf->adaptive_rd_thresh = 4; + sf->mode_skip_start = 6; + } + if (speed >= 5) { + sf->comp_inter_joint_search_thresh = BLOCK_SIZES; + 
sf->partition_search_type = FIXED_PARTITION; + sf->tx_size_search_method = frame_is_intra_only(cm) ? + USE_FULL_RD : USE_LARGESTALL; + sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH | + FLAG_SKIP_INTRA_BESTINTER | + FLAG_SKIP_COMP_BESTINTRA | + FLAG_SKIP_COMP_REFMISMATCH | + FLAG_SKIP_INTRA_LOWVAR | + FLAG_EARLY_TERMINATE; + sf->use_rd_breakout = 1; + sf->use_lp32x32fdct = 1; + sf->optimize_coefficients = 0; + sf->auto_mv_step_size = 1; + sf->reference_masking = 1; + + sf->disable_split_mask = DISABLE_ALL_SPLIT; + sf->search_method = HEX; + sf->subpel_iters_per_step = 1; + sf->disable_split_var_thresh = 64; + sf->disable_filter_search_var_thresh = 500; + for (i = 0; i < TX_SIZES; i++) { + sf->intra_y_mode_mask[i] = INTRA_DC_ONLY; + sf->intra_uv_mode_mask[i] = INTRA_DC_ONLY; + } + sf->use_fast_coef_updates = 2; + sf->use_fast_coef_costing = 1; + sf->adaptive_rd_thresh = 4; + sf->mode_skip_start = 6; + } +} + +static void set_rt_speed_feature(VP9_COMMON *cm, + SPEED_FEATURES *sf, + int speed) { + sf->static_segmentation = 0; + sf->adaptive_rd_thresh = 1; + sf->recode_loop = ((speed < 1) ? ALLOW_RECODE : ALLOW_RECODE_KFMAXBW); + sf->encode_breakout_thresh = 1; + sf->use_fast_coef_costing = 1; + + if (speed == 1) { + sf->use_square_partition_only = !frame_is_intra_only(cm); + sf->less_rectangular_check = 1; + sf->tx_size_search_method = + frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL; + + if (MIN(cm->width, cm->height) >= 720) + sf->disable_split_mask = cm->show_frame ? + DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; + else + sf->disable_split_mask = DISABLE_COMPOUND_SPLIT; + + sf->use_rd_breakout = 1; + sf->adaptive_motion_search = 1; + sf->adaptive_pred_interp_filter = 1; + sf->auto_mv_step_size = 1; + sf->adaptive_rd_thresh = 2; + sf->recode_loop = ALLOW_RECODE_KFARFGF; + sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; + sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; + sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V; + sf->encode_breakout_thresh = 8; + } + if (speed >= 2) { + sf->use_square_partition_only = !frame_is_intra_only(cm); + sf->less_rectangular_check = 1; + sf->tx_size_search_method = + frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL; + + if (MIN(cm->width, cm->height) >= 720) + sf->disable_split_mask = cm->show_frame ? 
+ DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; + else + sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY; + + sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH + | FLAG_SKIP_INTRA_BESTINTER | FLAG_SKIP_COMP_BESTINTRA + | FLAG_SKIP_INTRA_LOWVAR; + + sf->use_rd_breakout = 1; + sf->adaptive_motion_search = 1; + sf->adaptive_pred_interp_filter = 2; + sf->auto_mv_step_size = 1; + sf->reference_masking = 1; + + sf->disable_filter_search_var_thresh = 50; + sf->comp_inter_joint_search_thresh = BLOCK_SIZES; + + sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX; + sf->use_lastframe_partitioning = LAST_FRAME_PARTITION_LOW_MOTION; + sf->adjust_partitioning_from_last_frame = 1; + sf->last_partitioning_redo_frequency = 3; + + sf->adaptive_rd_thresh = 2; + sf->recode_loop = ALLOW_RECODE_KFARFGF; + sf->use_lp32x32fdct = 1; + sf->mode_skip_start = 11; + sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; + sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V; + sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; + sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V; + sf->encode_breakout_thresh = 200; + } + if (speed >= 3) { + sf->use_square_partition_only = 1; + sf->tx_size_search_method = USE_LARGESTALL; + + if (MIN(cm->width, cm->height) >= 720) + sf->disable_split_mask = DISABLE_ALL_SPLIT; + else + sf->disable_split_mask = DISABLE_ALL_INTER_SPLIT; + + sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH + | FLAG_SKIP_INTRA_BESTINTER | FLAG_SKIP_COMP_BESTINTRA + | FLAG_SKIP_INTRA_LOWVAR; + + sf->disable_filter_search_var_thresh = 100; + sf->use_lastframe_partitioning = LAST_FRAME_PARTITION_ALL; + sf->use_uv_intra_rd_estimate = 1; + sf->skip_encode_sb = 1; + sf->subpel_iters_per_step = 1; + sf->use_fast_coef_updates = 2; + sf->adaptive_rd_thresh = 4; + sf->mode_skip_start = 6; + sf->encode_breakout_thresh = 400; + } + if (speed >= 4) { + sf->optimize_coefficients = 0; + sf->disable_split_mask = DISABLE_ALL_SPLIT; + sf->use_fast_lpf_pick = 2; + sf->encode_breakout_thresh = 700; + } + if (speed >= 5) { + int i; + sf->mode_search_skip_flags |= FLAG_SKIP_COMP_REFMISMATCH | + FLAG_EARLY_TERMINATE; + sf->use_fast_coef_costing = 0; + sf->adaptive_rd_thresh = 5; + sf->auto_min_max_partition_size = frame_is_intra_only(cm) ? 
+ RELAXED_NEIGHBORING_MIN_MAX : STRICT_NEIGHBORING_MIN_MAX; + sf->adjust_partitioning_from_last_frame = + cm->last_frame_type == KEY_FRAME || (0 == + (cm->current_video_frame + 1) % sf->last_partitioning_redo_frequency); + sf->subpel_force_stop = 1; + for (i = 0; i < TX_SIZES; i++) { + sf->intra_y_mode_mask[i] = INTRA_DC_H_V; + sf->intra_uv_mode_mask[i] = INTRA_DC_ONLY; + } + sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_ONLY; + sf->frame_parameter_update = 0; + sf->encode_breakout_thresh = 1000; + sf->search_method = FAST_HEX; + sf->disable_inter_mode_mask[BLOCK_32X32] = 1 << INTER_OFFSET(ZEROMV); + sf->disable_inter_mode_mask[BLOCK_32X64] = ~(1 << INTER_OFFSET(NEARESTMV)); + sf->disable_inter_mode_mask[BLOCK_64X32] = ~(1 << INTER_OFFSET(NEARESTMV)); + sf->disable_inter_mode_mask[BLOCK_64X64] = ~(1 << INTER_OFFSET(NEARESTMV)); + sf->max_intra_bsize = BLOCK_32X32; + } + if (speed >= 6) { + sf->partition_search_type = VAR_BASED_FIXED_PARTITION; + sf->search_method = HEX; + } + if (speed >= 7) { + sf->partition_search_type = VAR_BASED_FIXED_PARTITION; + sf->use_nonrd_pick_mode = 1; + sf->search_method = FAST_DIAMOND; + } + if (speed >= 8) { + int i; + for (i = 0; i < BLOCK_SIZES; ++i) + sf->disable_inter_mode_mask[i] = 14; // only search NEARESTMV (0) + } +} + void vp9_set_speed_features(VP9_COMP *cpi) { SPEED_FEATURES *sf = &cpi->sf; - int mode = cpi->compressor_speed; + VP9_COMMON *cm = &cpi->common; int speed = cpi->speed; int i; - // Only modes 0 and 1 supported for now in experimental code basae - if (mode > 1) - mode = 1; + // Convert negative speed to positive + if (speed < 0) + speed = -speed; +#if CONFIG_INTERNAL_STATS for (i = 0; i < MAX_MODES; ++i) cpi->mode_chosen_counts[i] = 0; +#endif // best quality defaults - sf->RD = 1; + sf->frame_parameter_update = 1; sf->search_method = NSTEP; - sf->auto_filter = 1; - sf->recode_loop = 1; + sf->recode_loop = ALLOW_RECODE; sf->subpel_search_method = SUBPEL_TREE; sf->subpel_iters_per_step = 2; + sf->subpel_force_stop = 0; sf->optimize_coefficients = !cpi->oxcf.lossless; sf->reduce_first_step_size = 0; sf->auto_mv_step_size = 0; @@ -731,12 +912,12 @@ void vp9_set_speed_features(VP9_COMP *cpi) { sf->tx_size_search_method = USE_FULL_RD; sf->use_lp32x32fdct = 0; sf->adaptive_motion_search = 0; - sf->use_avoid_tested_higherror = 0; + sf->adaptive_pred_interp_filter = 0; sf->reference_masking = 0; - sf->use_one_partition_size_always = 0; + sf->partition_search_type = SEARCH_PARTITION; sf->less_rectangular_check = 0; sf->use_square_partition_only = 0; - sf->auto_min_max_partition_size = 0; + sf->auto_min_max_partition_size = NOT_IN_USE; sf->max_partition_size = BLOCK_64X64; sf->min_partition_size = BLOCK_4X4; sf->adjust_partitioning_from_last_frame = 0; @@ -754,205 +935,35 @@ void vp9_set_speed_features(VP9_COMP *cpi) { sf->use_uv_intra_rd_estimate = 0; sf->use_fast_lpf_pick = 0; sf->use_fast_coef_updates = 0; - sf->using_small_partition_info = 0; + sf->use_fast_coef_costing = 0; sf->mode_skip_start = MAX_MODES; // Mode index at which mode skip mask set - -#if CONFIG_MULTIPLE_ARF - // Switch segmentation off. - sf->static_segmentation = 0; -#else - sf->static_segmentation = 0; -#endif - - sf->variance_adaptive_quantization = 0; - - switch (mode) { - case 0: // This is the best quality mode. 
+ sf->use_nonrd_pick_mode = 0; + sf->encode_breakout_thresh = 0; + for (i = 0; i < BLOCK_SIZES; ++i) + sf->disable_inter_mode_mask[i] = 0; + sf->max_intra_bsize = BLOCK_64X64; + // This setting only takes effect when partition_search_type is set + // to FIXED_PARTITION. + sf->always_this_block_size = BLOCK_16X16; + + switch (cpi->oxcf.mode) { + case MODE_BESTQUALITY: + case MODE_SECONDPASS_BEST: // This is the best quality mode. + cpi->diamond_search_sad = vp9_full_range_search; break; - - case 1: -#if CONFIG_MULTIPLE_ARF - // Switch segmentation off. - sf->static_segmentation = 0; -#else - sf->static_segmentation = 0; -#endif - sf->use_avoid_tested_higherror = 1; - sf->adaptive_rd_thresh = 1; - sf->recode_loop = (speed < 1); - - if (speed == 1) { - sf->use_square_partition_only = !frame_is_intra_only(&cpi->common); - sf->less_rectangular_check = 1; - sf->tx_size_search_method = frame_is_intra_only(&cpi->common) - ? USE_FULL_RD : USE_LARGESTALL; - - if (MIN(cpi->common.width, cpi->common.height) >= 720) - sf->disable_split_mask = cpi->common.show_frame ? - DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; - else - sf->disable_split_mask = DISABLE_COMPOUND_SPLIT; - - sf->use_rd_breakout = 1; - sf->adaptive_motion_search = 1; - sf->auto_mv_step_size = 1; - sf->adaptive_rd_thresh = 2; - sf->recode_loop = 2; - sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; - sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; - sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V; - } - if (speed == 2) { - sf->use_square_partition_only = !frame_is_intra_only(&cpi->common); - sf->less_rectangular_check = 1; - sf->tx_size_search_method = frame_is_intra_only(&cpi->common) - ? USE_FULL_RD : USE_LARGESTALL; - - if (MIN(cpi->common.width, cpi->common.height) >= 720) - sf->disable_split_mask = cpi->common.show_frame ? 
- DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; - else - sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY; - - - sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH | - FLAG_SKIP_INTRA_BESTINTER | - FLAG_SKIP_COMP_BESTINTRA | - FLAG_SKIP_INTRA_LOWVAR; - - sf->use_rd_breakout = 1; - sf->adaptive_motion_search = 1; - sf->auto_mv_step_size = 1; - - sf->disable_filter_search_var_thresh = 16; - sf->comp_inter_joint_search_thresh = BLOCK_SIZES; - - sf->auto_min_max_partition_size = 1; - sf->use_lastframe_partitioning = LAST_FRAME_PARTITION_LOW_MOTION; - sf->adjust_partitioning_from_last_frame = 1; - sf->last_partitioning_redo_frequency = 3; - - sf->adaptive_rd_thresh = 2; - sf->recode_loop = 2; - sf->use_lp32x32fdct = 1; - sf->mode_skip_start = 11; - sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; - sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V; - sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; - sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V; - } - if (speed == 3) { - sf->use_square_partition_only = 1; - sf->tx_size_search_method = USE_LARGESTALL; - - if (MIN(cpi->common.width, cpi->common.height) >= 720) - sf->disable_split_mask = DISABLE_ALL_SPLIT; - else - sf->disable_split_mask = DISABLE_ALL_INTER_SPLIT; - - sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH | - FLAG_SKIP_INTRA_BESTINTER | - FLAG_SKIP_COMP_BESTINTRA | - FLAG_SKIP_INTRA_LOWVAR; - - sf->use_rd_breakout = 1; - sf->adaptive_motion_search = 1; - sf->auto_mv_step_size = 1; - - sf->disable_filter_search_var_thresh = 16; - sf->comp_inter_joint_search_thresh = BLOCK_SIZES; - - sf->auto_min_max_partition_size = 1; - sf->use_lastframe_partitioning = LAST_FRAME_PARTITION_ALL; - sf->adjust_partitioning_from_last_frame = 1; - sf->last_partitioning_redo_frequency = 3; - - sf->use_uv_intra_rd_estimate = 1; - sf->skip_encode_sb = 1; - sf->use_lp32x32fdct = 1; - sf->subpel_iters_per_step = 1; - sf->use_fast_coef_updates = 2; - - sf->adaptive_rd_thresh = 4; - sf->mode_skip_start = 6; - } - if (speed == 4) { - sf->use_square_partition_only = 1; - sf->tx_size_search_method = USE_LARGESTALL; - sf->disable_split_mask = DISABLE_ALL_SPLIT; - - sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH | - FLAG_SKIP_INTRA_BESTINTER | - FLAG_SKIP_COMP_BESTINTRA | - FLAG_SKIP_COMP_REFMISMATCH | - FLAG_SKIP_INTRA_LOWVAR | - FLAG_EARLY_TERMINATE; - - sf->use_rd_breakout = 1; - sf->adaptive_motion_search = 1; - sf->auto_mv_step_size = 1; - - sf->disable_filter_search_var_thresh = 16; - sf->comp_inter_joint_search_thresh = BLOCK_SIZES; - - sf->auto_min_max_partition_size = 1; - sf->use_lastframe_partitioning = LAST_FRAME_PARTITION_ALL; - sf->adjust_partitioning_from_last_frame = 1; - sf->last_partitioning_redo_frequency = 3; - - sf->use_uv_intra_rd_estimate = 1; - sf->skip_encode_sb = 1; - sf->use_lp32x32fdct = 1; - sf->subpel_iters_per_step = 1; - sf->use_fast_coef_updates = 2; - - sf->adaptive_rd_thresh = 4; - sf->mode_skip_start = 6; - - /* sf->intra_y_mode_mask = INTRA_DC_ONLY; - sf->intra_uv_mode_mask = INTRA_DC_ONLY; - sf->search_method = BIGDIA; - sf->disable_split_var_thresh = 64; - sf->disable_filter_search_var_thresh = 64; */ - } - if (speed == 5) { - sf->comp_inter_joint_search_thresh = BLOCK_SIZES; - sf->use_one_partition_size_always = 1; - sf->always_this_block_size = BLOCK_16X16; - sf->tx_size_search_method = frame_is_intra_only(&cpi->common) ? 
- USE_FULL_RD : USE_LARGESTALL; - sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH | - FLAG_SKIP_INTRA_BESTINTER | - FLAG_SKIP_COMP_BESTINTRA | - FLAG_SKIP_COMP_REFMISMATCH | - FLAG_SKIP_INTRA_LOWVAR | - FLAG_EARLY_TERMINATE; - sf->use_rd_breakout = 1; - sf->use_lp32x32fdct = 1; - sf->optimize_coefficients = 0; - sf->auto_mv_step_size = 1; - // sf->reduce_first_step_size = 1; - // sf->reference_masking = 1; - - sf->disable_split_mask = DISABLE_ALL_SPLIT; - sf->search_method = HEX; - sf->subpel_iters_per_step = 1; - sf->disable_split_var_thresh = 64; - sf->disable_filter_search_var_thresh = 96; - for (i = 0; i < TX_SIZES; i++) { - sf->intra_y_mode_mask[i] = INTRA_DC_ONLY; - sf->intra_uv_mode_mask[i] = INTRA_DC_ONLY; - } - sf->use_fast_coef_updates = 2; - sf->adaptive_rd_thresh = 4; - sf->mode_skip_start = 6; - } + case MODE_FIRSTPASS: + case MODE_GOODQUALITY: + case MODE_SECONDPASS: + set_good_speed_feature(cm, sf, speed); + break; + case MODE_REALTIME: + set_rt_speed_feature(cm, sf, speed); break; }; /* switch */ // Set rd thresholds based on mode and speed setting - set_rd_speed_thresholds(cpi, mode); - set_rd_speed_thresholds_sub8x8(cpi, mode); + set_rd_speed_thresholds(cpi); + set_rd_speed_thresholds_sub8x8(cpi); // Slow quant, dct and trellis not worthwhile for first pass // so make sure they are always turned off. @@ -962,7 +973,7 @@ void vp9_set_speed_features(VP9_COMP *cpi) { // No recode for 1 pass. if (cpi->pass == 0) { - sf->recode_loop = 0; + sf->recode_loop = DISALLOW_RECODE; sf->optimize_coefficients = 0; } @@ -971,36 +982,37 @@ void vp9_set_speed_features(VP9_COMP *cpi) { cpi->mb.fwd_txm4x4 = vp9_fwht4x4; } - if (cpi->sf.subpel_search_method == SUBPEL_ITERATIVE) { - cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_iterative; - cpi->find_fractional_mv_step_comp = vp9_find_best_sub_pixel_comp_iterative; - } else if (cpi->sf.subpel_search_method == SUBPEL_TREE) { + if (cpi->sf.subpel_search_method == SUBPEL_TREE) { cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_tree; cpi->find_fractional_mv_step_comp = vp9_find_best_sub_pixel_comp_tree; } cpi->mb.optimize = cpi->sf.optimize_coefficients == 1 && cpi->pass != 1; -#ifdef SPEEDSTATS - frames_at_speed[cpi->speed]++; -#endif + if (cpi->encode_breakout && cpi->oxcf.mode == MODE_REALTIME && + sf->encode_breakout_thresh > cpi->encode_breakout) + cpi->encode_breakout = sf->encode_breakout_thresh; + + if (sf->disable_split_mask == DISABLE_ALL_SPLIT) + sf->adaptive_pred_interp_filter = 0; } static void alloc_raw_frame_buffers(VP9_COMP *cpi) { VP9_COMMON *cm = &cpi->common; + const VP9_CONFIG *oxcf = &cpi->oxcf; - cpi->lookahead = vp9_lookahead_init(cpi->oxcf.width, cpi->oxcf.height, + cpi->lookahead = vp9_lookahead_init(oxcf->width, oxcf->height, cm->subsampling_x, cm->subsampling_y, - cpi->oxcf.lag_in_frames); + oxcf->lag_in_frames); if (!cpi->lookahead) - vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate lag buffers"); if (vp9_realloc_frame_buffer(&cpi->alt_ref_buffer, - cpi->oxcf.width, cpi->oxcf.height, + oxcf->width, oxcf->height, cm->subsampling_x, cm->subsampling_y, - VP9BORDERINPIXELS)) - vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + VP9_ENC_BORDER_IN_PIXELS, NULL, NULL, NULL)) + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate altref buffer"); } @@ -1008,21 +1020,21 @@ void vp9_alloc_compressor_data(VP9_COMP *cpi) { VP9_COMMON *cm = &cpi->common; if (vp9_alloc_frame_buffers(cm, cm->width, 
@@ -1008,21 +1020,21 @@ void vp9_alloc_compressor_data(VP9_COMP *cpi) {
  VP9_COMMON *cm = &cpi->common;

  if (vp9_alloc_frame_buffers(cm, cm->width, cm->height))
-    vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffers");

  if (vp9_alloc_frame_buffer(&cpi->last_frame_uf,
                             cm->width, cm->height,
                             cm->subsampling_x, cm->subsampling_y,
-                             VP9BORDERINPIXELS))
-    vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+                             VP9_ENC_BORDER_IN_PIXELS))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate last frame buffer");

  if (vp9_alloc_frame_buffer(&cpi->scaled_source,
                             cm->width, cm->height,
                             cm->subsampling_x, cm->subsampling_y,
-                             VP9BORDERINPIXELS))
-    vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+                             VP9_ENC_BORDER_IN_PIXELS))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate scaled source buffer");

  vpx_free(cpi->tok);
@@ -1067,15 +1079,15 @@ static void update_frame_size(VP9_COMP *cpi) {
  if (vp9_realloc_frame_buffer(&cpi->last_frame_uf,
                               cm->width, cm->height,
                               cm->subsampling_x, cm->subsampling_y,
-                               VP9BORDERINPIXELS))
-    vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+                               VP9_ENC_BORDER_IN_PIXELS, NULL, NULL, NULL))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to reallocate last frame buffer");

  if (vp9_realloc_frame_buffer(&cpi->scaled_source,
                               cm->width, cm->height,
                               cm->subsampling_x, cm->subsampling_y,
-                               VP9BORDERINPIXELS))
-    vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+                               VP9_ENC_BORDER_IN_PIXELS, NULL, NULL, NULL))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to reallocate scaled source buffer");

  {
@@ -1121,42 +1133,56 @@ int vp9_reverse_trans(int x) {
  return 63;
};
+
void vp9_new_framerate(VP9_COMP *cpi, double framerate) {
-  if (framerate < 0.1)
-    framerate = 30;
+  VP9_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+  VP9_CONFIG *const oxcf = &cpi->oxcf;
+  int vbr_max_bits;

-  cpi->oxcf.framerate = framerate;
+  oxcf->framerate = framerate < 0.1 ? 30 : framerate;
  cpi->output_framerate = cpi->oxcf.framerate;
-  cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth
-                                   / cpi->output_framerate);
-  cpi->av_per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth
-                                      / cpi->output_framerate);
-  cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
-                                   cpi->oxcf.two_pass_vbrmin_section / 100);
-
-
-  cpi->min_frame_bandwidth = MAX(cpi->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
+  rc->av_per_frame_bandwidth = (int)(oxcf->target_bandwidth /
                                     cpi->output_framerate);
+  rc->min_frame_bandwidth = (int)(rc->av_per_frame_bandwidth *
+                                  oxcf->two_pass_vbrmin_section / 100);
+
+
+  rc->min_frame_bandwidth = MAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
+
+  // A maximum bitrate for a frame is defined.
+  // The baseline for this aligns with HW implementations that
+  // can support decode of 1080P content up to a bitrate of MAX_MB_RATE bits
+  // per 16x16 MB (averaged over a frame). However, this limit is extended if
+  // a very high rate is given on the command line or the rate cannot
+  // be achieved because of a user specified max q (e.g. when the user
+  // specifies lossless encode).
+  //
+  vbr_max_bits = (int)(((int64_t)rc->av_per_frame_bandwidth *
+                        oxcf->two_pass_vbrmax_section) / 100);
+  rc->max_frame_bandwidth = MAX(MAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P),
+                                vbr_max_bits);

  // Set Maximum gf/arf interval
-  cpi->max_gf_interval = 16;
+  rc->max_gf_interval = 16;

  // Extended interval for genuinely static scenes
-  cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
+  rc->static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;

  // Special conditions when alt ref frame enabled in lagged compress mode
-  if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
-    if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
-      cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+  if (oxcf->play_alternate && oxcf->lag_in_frames) {
+    if (rc->max_gf_interval > oxcf->lag_in_frames - 1)
+      rc->max_gf_interval = oxcf->lag_in_frames - 1;

-    if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
-      cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+    if (rc->static_scene_max_gf_interval > oxcf->lag_in_frames - 1)
+      rc->static_scene_max_gf_interval = oxcf->lag_in_frames - 1;
  }

-  if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval)
-    cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
+  if (rc->max_gf_interval > rc->static_scene_max_gf_interval)
+    rc->max_gf_interval = rc->static_scene_max_gf_interval;
}

-static int64_t rescale(int val, int64_t num, int denom) {
+static int64_t rescale(int64_t val, int64_t num, int denom) {
  int64_t llnum = num;
  int64_t llden = denom;
  int64_t llval = val;
@@ -1164,6 +1190,124 @@ static int64_t rescale(int val, int64_t num, int denom) {
  return (llval * llnum / llden);
}
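Editor's note: the rescale() signature change above (int val widened to int64_t val) matters because callers pass 64-bit quantities such as oxcf->starting_buffer_level and oxcf->maximum_buffer_size; with an int parameter those were silently truncated before the multiply ever happened. A small illustration under assumed values (helper name and numbers hypothetical):

/* Illustration only: shows the truncation the wider parameter avoids. */
#include <stdint.h>
#include <stdio.h>

static int64_t rescale64(int64_t val, int64_t num, int denom) {
  return val * num / denom;
}

int main(void) {
  const int64_t buffer_level = 3000000000LL;  /* > INT32_MAX, hypothetical */
  /* Old signature: the implicit (int) conversion truncates the argument. */
  printf("truncated argument: %d\n", (int)buffer_level);
  printf("correct result:     %lld\n",
         (long long)rescale64(buffer_level, 800000, 1000));
  return 0;
}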
+// Initialize layer context data from init_config().
+static void init_layer_context(VP9_COMP *const cpi) {
+  const VP9_CONFIG *const oxcf = &cpi->oxcf;
+  int temporal_layer = 0;
+  cpi->svc.spatial_layer_id = 0;
+  cpi->svc.temporal_layer_id = 0;
+  for (temporal_layer = 0; temporal_layer < cpi->svc.number_temporal_layers;
+       ++temporal_layer) {
+    LAYER_CONTEXT *const lc = &cpi->svc.layer_context[temporal_layer];
+    RATE_CONTROL *const lrc = &lc->rc;
+    lrc->avg_frame_qindex[INTER_FRAME] = q_trans[oxcf->worst_allowed_q];
+    lrc->last_q[INTER_FRAME] = q_trans[oxcf->worst_allowed_q];
+    lrc->ni_av_qi = q_trans[oxcf->worst_allowed_q];
+    lrc->total_actual_bits = 0;
+    lrc->total_target_vs_actual = 0;
+    lrc->ni_tot_qi = 0;
+    lrc->tot_q = 0.0;
+    lrc->avg_q = 0.0;
+    lrc->ni_frames = 0;
+    lrc->decimation_count = 0;
+    lrc->decimation_factor = 0;
+    lrc->rate_correction_factor = 1.0;
+    lrc->key_frame_rate_correction_factor = 1.0;
+    lc->target_bandwidth = oxcf->ts_target_bitrate[temporal_layer] *
+        1000;
+    lrc->buffer_level = rescale((int)(oxcf->starting_buffer_level),
+                                lc->target_bandwidth, 1000);
+    lrc->bits_off_target = lrc->buffer_level;
+  }
+}
+
+// Update the layer context from a change_config() call.
+static void update_layer_context_change_config(VP9_COMP *const cpi,
+                                               const int target_bandwidth) {
+  const VP9_CONFIG *const oxcf = &cpi->oxcf;
+  const RATE_CONTROL *const rc = &cpi->rc;
+  int temporal_layer = 0;
+  float bitrate_alloc = 1.0;
+  for (temporal_layer = 0; temporal_layer < cpi->svc.number_temporal_layers;
+       ++temporal_layer) {
+    LAYER_CONTEXT *const lc = &cpi->svc.layer_context[temporal_layer];
+    RATE_CONTROL *const lrc = &lc->rc;
+    lc->target_bandwidth = oxcf->ts_target_bitrate[temporal_layer] * 1000;
+    bitrate_alloc = (float)lc->target_bandwidth / (float)target_bandwidth;
+    // Update buffer-related quantities.
+    lc->starting_buffer_level =
+        (int64_t)(oxcf->starting_buffer_level * bitrate_alloc);
+    lc->optimal_buffer_level =
+        (int64_t)(oxcf->optimal_buffer_level * bitrate_alloc);
+    lc->maximum_buffer_size =
+        (int64_t)(oxcf->maximum_buffer_size * bitrate_alloc);
+    lrc->bits_off_target = MIN(lrc->bits_off_target, lc->maximum_buffer_size);
+    lrc->buffer_level = MIN(lrc->buffer_level, lc->maximum_buffer_size);
+    // Update framerate-related quantities.
+    lc->framerate = oxcf->framerate / oxcf->ts_rate_decimator[temporal_layer];
+    lrc->av_per_frame_bandwidth = (int)(lc->target_bandwidth / lc->framerate);
+    lrc->max_frame_bandwidth = rc->max_frame_bandwidth;
+    // Update qp-related quantities.
+    lrc->worst_quality = rc->worst_quality;
+    lrc->best_quality = rc->best_quality;
+  }
+}
+
+// Prior to encoding the frame, update framerate-related quantities
+// for the current layer.
+static void update_layer_framerate(VP9_COMP *const cpi) {
+  int temporal_layer = cpi->svc.temporal_layer_id;
+  const VP9_CONFIG *const oxcf = &cpi->oxcf;
+  LAYER_CONTEXT *const lc = &cpi->svc.layer_context[temporal_layer];
+  RATE_CONTROL *const lrc = &lc->rc;
+  lc->framerate = oxcf->framerate / oxcf->ts_rate_decimator[temporal_layer];
+  lrc->av_per_frame_bandwidth = (int)(lc->target_bandwidth / lc->framerate);
+  lrc->max_frame_bandwidth = cpi->rc.max_frame_bandwidth;
+  // Update the average layer frame size (non-cumulative per-frame-bw).
+  if (temporal_layer == 0) {
+    lc->avg_frame_size = lrc->av_per_frame_bandwidth;
+  } else {
+    double prev_layer_framerate = oxcf->framerate /
+        oxcf->ts_rate_decimator[temporal_layer - 1];
+    int prev_layer_target_bandwidth =
+        oxcf->ts_target_bitrate[temporal_layer - 1] * 1000;
+    lc->avg_frame_size =
+        (int)((lc->target_bandwidth - prev_layer_target_bandwidth) /
+              (lc->framerate - prev_layer_framerate));
+  }
+}
+
+// Prior to encoding the frame, set the layer context, for the current layer
+// to be encoded, to the cpi struct.
+static void restore_layer_context(VP9_COMP *const cpi) {
+  int temporal_layer = cpi->svc.temporal_layer_id;
+  LAYER_CONTEXT *lc = &cpi->svc.layer_context[temporal_layer];
+  int frame_since_key = cpi->rc.frames_since_key;
+  int frame_to_key = cpi->rc.frames_to_key;
+  cpi->rc = lc->rc;
+  cpi->oxcf.target_bandwidth = lc->target_bandwidth;
+  cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
+  cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
+  cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
+  cpi->output_framerate = lc->framerate;
+  // Reset the frames_since_key and frames_to_key counters to their values
+  // before the layer restore. Keep these defined for the stream (not layer).
+  cpi->rc.frames_since_key = frame_since_key;
+  cpi->rc.frames_to_key = frame_to_key;
+}
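Editor's note: taken together, these layer-context helpers implement a swap-in/swap-out pattern for temporal scalability — the encoder keeps one RATE_CONTROL per layer and aliases the active layer's copy into cpi->rc around each frame. A hedged sketch of the intended per-frame call order (the enclosing driver function is assumed, not shown in this hunk):

/* Sketch only: per-frame sequence for temporal SVC, enclosing
 * function hypothetical. */
static void encode_one_svc_frame(VP9_COMP *cpi) {
  update_layer_framerate(cpi);  /* layer framerate / per-frame bandwidth */
  restore_layer_context(cpi);   /* swap this layer's rc state into cpi */
  /* ... regular frame encode happens here ... */
  save_layer_context(cpi);      /* persist the updated rc back to the layer */
}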
+// Save the layer context after encoding the frame.
+static void save_layer_context(VP9_COMP *const cpi) {
+  int temporal_layer = cpi->svc.temporal_layer_id;
+  LAYER_CONTEXT *lc = &cpi->svc.layer_context[temporal_layer];
+  lc->rc = cpi->rc;
+  lc->target_bandwidth = (int)cpi->oxcf.target_bandwidth;
+  lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
+  lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
+  lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
+  lc->framerate = cpi->output_framerate;
+}
+
static void set_tile_limits(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
@@ -1175,8 +1319,7 @@ static void set_tile_limits(VP9_COMP *cpi) {
  cm->log2_tile_rows = cpi->oxcf.tile_rows;
}

-static void init_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
-  VP9_COMP *cpi = (VP9_COMP *)(ptr);
+static void init_config(struct VP9_COMP *cpi, VP9_CONFIG *oxcf) {
  VP9_COMMON *const cm = &cpi->common;
  int i;
@@ -1190,25 +1333,47 @@ static void init_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
  cm->subsampling_y = 0;
  vp9_alloc_compressor_data(cpi);

+  // Spatial scalability.
+  cpi->svc.number_spatial_layers = oxcf->ss_number_layers;
+  // Temporal scalability.
+  cpi->svc.number_temporal_layers = oxcf->ts_number_layers;
+
+  if (cpi->svc.number_temporal_layers > 1 &&
+      cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+    init_layer_context(cpi);
+  }
+
  // change includes all joint functionality
-  vp9_change_config(ptr, oxcf);
+  vp9_change_config(cpi, oxcf);

  // Initialize active best and worst q and average q values.
-  cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
-  cpi->active_best_quality = cpi->oxcf.best_allowed_q;
-  cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
+  if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+    cpi->rc.avg_frame_qindex[0] = cpi->oxcf.worst_allowed_q;
+    cpi->rc.avg_frame_qindex[1] = cpi->oxcf.worst_allowed_q;
+    cpi->rc.avg_frame_qindex[2] = cpi->oxcf.worst_allowed_q;
+  } else {
+    cpi->rc.avg_frame_qindex[0] = (cpi->oxcf.worst_allowed_q +
+                                   cpi->oxcf.best_allowed_q) / 2;
+    cpi->rc.avg_frame_qindex[1] = (cpi->oxcf.worst_allowed_q +
+                                   cpi->oxcf.best_allowed_q) / 2;
+    cpi->rc.avg_frame_qindex[2] = (cpi->oxcf.worst_allowed_q +
+                                   cpi->oxcf.best_allowed_q) / 2;
+  }
+  cpi->rc.last_q[0] = cpi->oxcf.best_allowed_q;
+  cpi->rc.last_q[1] = cpi->oxcf.best_allowed_q;
+  cpi->rc.last_q[2] = cpi->oxcf.best_allowed_q;

  // Initialise the starting buffer levels
-  cpi->buffer_level = cpi->oxcf.starting_buffer_level;
-  cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
+  cpi->rc.buffer_level = cpi->oxcf.starting_buffer_level;
+  cpi->rc.bits_off_target = cpi->oxcf.starting_buffer_level;

-  cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
-  cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
-  cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
-  cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
+  cpi->rc.rolling_target_bits = cpi->rc.av_per_frame_bandwidth;
+  cpi->rc.rolling_actual_bits = cpi->rc.av_per_frame_bandwidth;
+  cpi->rc.long_rolling_target_bits = cpi->rc.av_per_frame_bandwidth;
+  cpi->rc.long_rolling_actual_bits = cpi->rc.av_per_frame_bandwidth;

-  cpi->total_actual_bits = 0;
-  cpi->total_target_vs_actual = 0;
+  cpi->rc.total_actual_bits = 0;
+  cpi->rc.total_target_vs_actual = 0;

  cpi->static_mb_pct = 0;
@@ -1216,9 +1381,6 @@ static void init_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
  cpi->gld_fb_idx = 1;
  cpi->alt_fb_idx = 2;

-  cpi->current_layer = 0;
-  cpi->use_svc = 0;
-
  set_tile_limits(cpi);

  cpi->fixed_divide[0] = 0;
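Editor's note: the fixed_divide[] table initialised at the end of init_config() stores 0x80000 / i, i.e. reciprocals in 2^19 fixed point, presumably so hot paths can replace an integer division with a multiply and shift (entry 0 stays 0 as a divide-by-zero guard). A sketch of that usage pattern — the helper name is hypothetical and the result is an approximation, since the table holds floor(2^19 / d):

/* Sketch: divide x by d via the precomputed reciprocal table.
 * 0x80000 is 2^19, so the product is shifted back by 19 bits. */
static unsigned int fast_divide(unsigned int x, unsigned int d,
                                const unsigned int *fixed_divide) {
  return (unsigned int)(((uint64_t)x * fixed_divide[d]) >> 19);
}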
@@ -1226,9 +1388,7 @@ static void init_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
    cpi->fixed_divide[i] = 0x80000 / i;
}

-
-void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
-  VP9_COMP *cpi = (VP9_COMP *)(ptr);
+void vp9_change_config(struct VP9_COMP *cpi, VP9_CONFIG *oxcf) {
  VP9_COMMON *const cm = &cpi->common;

  if (!cpi || !oxcf)
@@ -1240,28 +1400,35 @@ void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {

  cpi->oxcf = *oxcf;

-  switch (cpi->oxcf.Mode) {
+  if (cpi->oxcf.cpu_used == -6)
+    cpi->oxcf.play_alternate = 0;
+
+  switch (cpi->oxcf.mode) {
      // Real time and one pass deprecated in test code base
    case MODE_GOODQUALITY:
      cpi->pass = 0;
-      cpi->compressor_speed = 2;
      cpi->oxcf.cpu_used = clamp(cpi->oxcf.cpu_used, -5, 5);
      break;

+    case MODE_BESTQUALITY:
+      cpi->pass = 0;
+      break;
+
    case MODE_FIRSTPASS:
      cpi->pass = 1;
-      cpi->compressor_speed = 1;
      break;

    case MODE_SECONDPASS:
      cpi->pass = 2;
-      cpi->compressor_speed = 1;
      cpi->oxcf.cpu_used = clamp(cpi->oxcf.cpu_used, -5, 5);
      break;

    case MODE_SECONDPASS_BEST:
      cpi->pass = 2;
-      cpi->compressor_speed = 0;
+      break;
+
+    case MODE_REALTIME:
+      cpi->pass = 0;
      break;
  }
@@ -1272,20 +1439,17 @@ void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
  cpi->oxcf.lossless = oxcf->lossless;
  cpi->mb.e_mbd.itxm_add = cpi->oxcf.lossless ? vp9_iwht4x4_add
                                              : vp9_idct4x4_add;
-  cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
+  cpi->rc.baseline_gf_interval = DEFAULT_GF_INTERVAL;

  cpi->ref_frame_flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;

-  // cpi->use_golden_frame_only = 0;
-  // cpi->use_last_frame_only = 0;
  cpi->refresh_golden_frame = 0;
  cpi->refresh_last_frame = 1;
  cm->refresh_frame_context = 1;
  cm->reset_frame_context = 0;

-  setup_features(cm);
-  cpi->common.allow_high_precision_mv = 0;  // Default mv precision
-  set_mvcost(cpi);
+  vp9_reset_segment_features(&cm->seg);
+  set_high_precision_mv(cpi, 0);

  {
    int i;
@@ -1293,9 +1457,7 @@ void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
    for (i = 0; i < MAX_SEGMENTS; i++)
      cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
  }
-
-  // At the moment the first order values may not be > MAXQ
-  cpi->oxcf.fixed_q = MIN(cpi->oxcf.fixed_q, MAXQ);
+  cpi->encode_breakout = cpi->oxcf.encode_breakout;

  // local file playback mode == really big buffer
  if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
@@ -1322,71 +1484,60 @@ void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
  else
    cpi->oxcf.maximum_buffer_size = rescale(cpi->oxcf.maximum_buffer_size,
                                            cpi->oxcf.target_bandwidth, 1000);
+
+  // Under a configuration change, where maximum_buffer_size may change,
+  // keep buffer level clipped to the maximum allowed buffer size.
+  cpi->rc.bits_off_target = MIN(cpi->rc.bits_off_target,
+                                cpi->oxcf.maximum_buffer_size);
+  cpi->rc.buffer_level = MIN(cpi->rc.buffer_level,
+                             cpi->oxcf.maximum_buffer_size);

  // Set up frame rate and related parameters rate control values.
  vp9_new_framerate(cpi, cpi->oxcf.framerate);

  // Set absolute upper and lower quality limits
-  cpi->worst_quality = cpi->oxcf.worst_allowed_q;
-  cpi->best_quality = cpi->oxcf.best_allowed_q;
+  cpi->rc.worst_quality = cpi->oxcf.worst_allowed_q;
+  cpi->rc.best_quality = cpi->oxcf.best_allowed_q;

  // active values should only be modified if out of new range
-  cpi->active_worst_quality = clamp(cpi->active_worst_quality,
-                                    cpi->oxcf.best_allowed_q,
-                                    cpi->oxcf.worst_allowed_q);
-
-  cpi->active_best_quality = clamp(cpi->active_best_quality,
-                                   cpi->oxcf.best_allowed_q,
-                                   cpi->oxcf.worst_allowed_q);
-
-  cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;

  cpi->cq_target_quality = cpi->oxcf.cq_level;

-  cm->mcomp_filter_type = DEFAULT_INTERP_FILTER;
-
-  cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
+  cm->interp_filter = DEFAULT_INTERP_FILTER;

  cm->display_width = cpi->oxcf.width;
  cm->display_height = cpi->oxcf.height;

  // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs)
-  cpi->oxcf.Sharpness = MIN(7, cpi->oxcf.Sharpness);
+  cpi->oxcf.sharpness = MIN(7, cpi->oxcf.sharpness);

-  cpi->common.lf.sharpness_level = cpi->oxcf.Sharpness;
+  cpi->common.lf.sharpness_level = cpi->oxcf.sharpness;

  if (cpi->initial_width) {
    // Increasing the size of the frame beyond the first seen frame, or some
-    // otherwise signalled maximum size, is not supported.
+    // otherwise signaled maximum size, is not supported.
    // TODO(jkoleszar): exit gracefully.
    assert(cm->width <= cpi->initial_width);
    assert(cm->height <= cpi->initial_height);
  }
  update_frame_size(cpi);

-  if (cpi->oxcf.fixed_q >= 0) {
-    cpi->last_q[0] = cpi->oxcf.fixed_q;
-    cpi->last_q[1] = cpi->oxcf.fixed_q;
-    cpi->last_boosted_qindex = cpi->oxcf.fixed_q;
+  if (cpi->svc.number_temporal_layers > 1 &&
+      cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
+    update_layer_context_change_config(cpi, (int)cpi->oxcf.target_bandwidth);
  }

-  cpi->speed = cpi->oxcf.cpu_used;
+  cpi->speed = abs(cpi->oxcf.cpu_used);

-  if (cpi->oxcf.lag_in_frames == 0) {
-    // force to allowlag to 0 if lag_in_frames is 0;
-    cpi->oxcf.allow_lag = 0;
-  } else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
-    // Limit on lag buffers as these are not currently dynamically allocated
+  // Limit on lag buffers as these are not currently dynamically allocated.
+  if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
    cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
-  }

-  // YX Temp
#if CONFIG_MULTIPLE_ARF
  vp9_zero(cpi->alt_ref_source);
#else
  cpi->alt_ref_source = NULL;
#endif
-  cpi->is_src_frame_alt_ref = 0;
+  cpi->rc.is_src_frame_alt_ref = 0;

#if 0
  // Experimental RD Code
@@ -1395,6 +1546,9 @@ void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
#endif

  set_tile_limits(cpi);
+
+  cpi->ext_refresh_frame_flags_pending = 0;
+  cpi->ext_refresh_frame_context_pending = 0;
}

#define M_LOG2_E 0.693147180559945309417
@@ -1442,6 +1596,7 @@ static void alloc_mode_context(VP9_COMMON *cm, int num_4x4_blk,
  int num_pix = num_4x4_blk << 4;
  int i, k;
  ctx->num_4x4_blk = num_4x4_blk;
+
  CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
                  vpx_calloc(num_4x4_blk, sizeof(uint8_t)));
  for (i = 0; i < MAX_MB_PLANE; ++i) {
@@ -1485,7 +1640,6 @@ static void init_pick_mode_context(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;

-
  for (i = 0; i < BLOCK_SIZES; ++i) {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[i];
    const int num_4x4_h = num_4x4_blocks_high_lookup[i];
@@ -1556,30 +1710,19 @@ static void free_pick_mode_context(MACROBLOCK *x) {
  }
}

-VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
+VP9_COMP *vp9_create_compressor(VP9_CONFIG *oxcf) {
  int i, j;
-  volatile union {
-    VP9_COMP *cpi;
-    VP9_PTR ptr;
-  } ctx;
+  VP9_COMP *cpi = vpx_memalign(32, sizeof(VP9_COMP));
+  VP9_COMMON *cm = cpi != NULL ? &cpi->common : NULL;

-  VP9_COMP *cpi;
-  VP9_COMMON *cm;
-
-  cpi = ctx.cpi = vpx_memalign(32, sizeof(VP9_COMP));
-  // Check that the CPI instance is valid
-  if (!cpi)
-    return 0;
-
-  cm = &cpi->common;
+  if (!cm)
+    return NULL;

  vp9_zero(*cpi);

  if (setjmp(cm->error.jmp)) {
-    VP9_PTR ptr = ctx.ptr;
-
-    ctx.cpi->common.error.setjmp = 0;
-    vp9_remove_compressor(&ptr);
+    cm->error.setjmp = 0;
+    vp9_remove_compressor(cpi);
    return 0;
  }
@@ -1588,35 +1731,33 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
  CHECK_MEM_ERROR(cm, cpi->mb.ss, vpx_calloc(sizeof(search_site),
                                             (MAX_MVSEARCH_STEPS * 8) + 1));

-  vp9_create_common(cm);
+  vp9_rtcd();

-  init_config((VP9_PTR)cpi, oxcf);
+  cpi->use_svc = 0;
+  init_config(cpi, oxcf);

  init_pick_mode_context(cpi);

-  cm->current_video_frame = 0;
-  cpi->kf_overspend_bits = 0;
-  cpi->kf_bitrate_adjustment = 0;
-  cpi->frames_till_gf_update_due = 0;
-  cpi->gf_overspend_bits = 0;
-  cpi->non_gf_bitrate_adjustment = 0;
+  cm->current_video_frame = 0;

  // Set reference frame sign bias for ALTREF frame to 1 (for now)
  cm->ref_frame_sign_bias[ALTREF_FRAME] = 1;

-  cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
+  cpi->rc.baseline_gf_interval = DEFAULT_GF_INTERVAL;

  cpi->gold_is_last = 0;
-  cpi->alt_is_last = 0;
-  cpi->gold_is_alt = 0;
-
-  // Spatial scalability
-  cpi->number_spatial_layers = oxcf->ss_number_layers;
+  cpi->alt_is_last = 0;
+  cpi->gold_is_alt = 0;

  // Create the encoder segmentation map and set all entries to 0
  CHECK_MEM_ERROR(cm, cpi->segmentation_map,
                  vpx_calloc(cm->mi_rows * cm->mi_cols, 1));

+  // Create a complexity map used for rd adjustment
+  CHECK_MEM_ERROR(cm, cpi->complexity_map,
+                  vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+
+
  // And a place holder structure is the coding context
  // for use if we want to save and restore it
  CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
@@ -1633,26 +1774,16 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
                              sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
  }

-#ifdef ENTROPY_STATS
-  if (cpi->pass != 1)
-    init_context_counters();
-#endif
-
-#ifdef MODE_STATS
-  init_tx_count_stats();
-  init_switchable_interp_stats();
-#endif
-
  /*Initialize the feed-forward activity masking.*/
  cpi->activity_avg = 90 << 12;
-
-  cpi->frames_since_key = 8;  // Sensible default for first frame.
  cpi->key_frame_frequency = cpi->oxcf.key_freq;
-  cpi->this_key_frame_forced = 0;
-  cpi->next_key_frame_forced = 0;
-  cpi->source_alt_ref_pending = 0;
-  cpi->source_alt_ref_active = 0;
+  cpi->rc.frames_since_key = 8;  // Sensible default for first frame.
+  cpi->rc.this_key_frame_forced = 0;
+  cpi->rc.next_key_frame_forced = 0;
+
+  cpi->rc.source_alt_ref_pending = 0;
+  cpi->rc.source_alt_ref_active = 0;
  cpi->refresh_alt_ref_frame = 0;

#if CONFIG_MULTIPLE_ARF
@@ -1676,16 +1807,20 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
    cpi->bytes = 0;

    if (cpi->b_calculate_psnr) {
-      cpi->total_sq_error = 0.0;
-      cpi->total_sq_error2 = 0.0;
      cpi->total_y = 0.0;
      cpi->total_u = 0.0;
      cpi->total_v = 0.0;
      cpi->total = 0.0;
+      cpi->total_sq_error = 0;
+      cpi->total_samples = 0;
+
      cpi->totalp_y = 0.0;
      cpi->totalp_u = 0.0;
      cpi->totalp_v = 0.0;
      cpi->totalp = 0.0;
+      cpi->totalp_sq_error = 0;
+      cpi->totalp_samples = 0;
+
      cpi->tot_recode_hits = 0;
      cpi->summed_quality = 0;
      cpi->summed_weights = 0;
@@ -1704,20 +1839,17 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {

  cpi->first_time_stamp_ever = INT64_MAX;

-  cpi->frames_till_gf_update_due = 0;
-  cpi->key_frame_count = 1;
+  cpi->rc.frames_till_gf_update_due = 0;

-  cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
-  cpi->ni_tot_qi = 0;
-  cpi->ni_frames = 0;
-  cpi->tot_q = 0.0;
-  cpi->avg_q = vp9_convert_qindex_to_q(cpi->oxcf.worst_allowed_q);
-  cpi->total_byte_count = 0;
+  cpi->rc.ni_av_qi = cpi->oxcf.worst_allowed_q;
+  cpi->rc.ni_tot_qi = 0;
+  cpi->rc.ni_frames = 0;
+  cpi->rc.tot_q = 0.0;
+  cpi->rc.avg_q = vp9_convert_qindex_to_q(cpi->oxcf.worst_allowed_q);

-  cpi->rate_correction_factor = 1.0;
-  cpi->key_frame_rate_correction_factor = 1.0;
-  cpi->gf_rate_correction_factor = 1.0;
-  cpi->twopass.est_max_qcorrection_factor = 1.0;
+  cpi->rc.rate_correction_factor = 1.0;
+  cpi->rc.key_frame_rate_correction_factor = 1.0;
+  cpi->rc.gf_rate_correction_factor = 1.0;

  cal_nmvjointsadcost(cpi->mb.nmvjointsadcost);
  cpi->mb.nmvcost[0] = &cpi->mb.nmvcosts[0][MV_MAX];
@@ -1732,9 +1864,6 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
  cpi->mb.nmvsadcost_hp[1] = &cpi->mb.nmvsadcosts_hp[1][MV_MAX];
  cal_nmvsadcosts_hp(cpi->mb.nmvsadcost_hp);

-  for (i = 0; i < KEY_FRAME_CONTEXT; i++)
-    cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
-
#ifdef OUTPUT_YUV_SRC
  yuv_file = fopen("bd.yuv", "ab");
#endif
@@ -1749,18 +1878,17 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {

  cpi->output_pkt_list = oxcf->output_pkt_list;

-  cpi->enable_encode_breakout = 1;
+  cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;

  if (cpi->pass == 1) {
    vp9_init_first_pass(cpi);
  } else if (cpi->pass == 2) {
-    size_t packet_sz = sizeof(FIRSTPASS_STATS);
-    int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
+    const size_t packet_sz = sizeof(FIRSTPASS_STATS);
+    const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);

    cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
    cpi->twopass.stats_in = cpi->twopass.stats_in_start;
-    cpi->twopass.stats_in_end = (void *)((char *)cpi->twopass.stats_in
-                                         + (packets - 1) * packet_sz);
+    cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
    vp9_init_second_pass(cpi);
  }
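Editor's note: the stats_in_end change just above is a pure simplification. Because stats_in is a typed FIRSTPASS_STATS pointer, array indexing already scales by sizeof(FIRSTPASS_STATS), so the explicit char-pointer arithmetic was redundant. A minimal sketch of the equivalence (the helper is illustrative only):

/* Sketch: for any typed pointer p and index n, these are identical. */
#include <assert.h>
static void pointer_arith_equiv(FIRSTPASS_STATS *p, int n) {
  assert(&p[n] ==
         (FIRSTPASS_STATS *)((char *)p + n * sizeof(*p)));
}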
@@ -1869,9 +1997,6 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
  cpi->diamond_search_sad = vp9_diamond_search_sad;
  cpi->refining_search_sad = vp9_refining_search_sad;

-  // make sure frame 1 is okay
-  cpi->error_bins[0] = cpi->common.MBs;
-
  /* vp9_init_quantizer() is first called here. Add check in
   * vp9_frame_init_quantizer() so that vp9_init_quantizer is only
   * called later when needed. This will avoid unnecessary calls of
@@ -1881,44 +2006,24 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {

  vp9_loop_filter_init(cm);

-  cpi->common.error.setjmp = 0;
+  cm->error.setjmp = 0;

-  vp9_zero(cpi->y_uv_mode_count);
+  vp9_zero(cpi->common.counts.uv_mode);

#ifdef MODE_TEST_HIT_STATS
  vp9_zero(cpi->mode_test_hits);
#endif

-  return (VP9_PTR) cpi;
+  return cpi;
}

-void vp9_remove_compressor(VP9_PTR *ptr) {
-  VP9_COMP *cpi = (VP9_COMP *)(*ptr);
+void vp9_remove_compressor(VP9_COMP *cpi) {
  int i;

  if (!cpi)
    return;

  if (cpi && (cpi->common.current_video_frame > 0)) {
-    if (cpi->pass == 2) {
-      vp9_end_second_pass(cpi);
-    }
-
-#ifdef ENTROPY_STATS
-    if (cpi->pass != 1) {
-      print_context_counters();
-      print_tree_update_probs();
-      print_mode_context(cpi);
-    }
-#endif
-
-#ifdef MODE_STATS
-    if (cpi->pass != 1) {
-      write_tx_count_stats();
-      write_switchable_interp_stats();
-    }
-#endif
-
#if CONFIG_INTERNAL_STATS
    vp9_clear_system_state();
@@ -1934,22 +2039,22 @@ void vp9_remove_compressor(VP9_PTR *ptr) {
                                  / time_encoded;

      if (cpi->b_calculate_psnr) {
-        YV12_BUFFER_CONFIG *lst_yv12 =
-            &cpi->common.yv12_fb[cpi->common.ref_frame_map[cpi->lst_fb_idx]];
-        double samples = 3.0 / 2 * cpi->count *
-                         lst_yv12->y_width * lst_yv12->y_height;
-        double total_psnr = vp9_mse2psnr(samples, 255.0, cpi->total_sq_error);
-        double total_psnr2 = vp9_mse2psnr(samples, 255.0, cpi->total_sq_error2);
-        double total_ssim = 100 * pow(cpi->summed_quality /
-                                      cpi->summed_weights, 8.0);
-        double total_ssimp = 100 * pow(cpi->summedp_quality /
-                                       cpi->summedp_weights, 8.0);
+        const double total_psnr =
+            vpx_sse_to_psnr((double)cpi->total_samples, 255.0,
+                            (double)cpi->total_sq_error);
+        const double totalp_psnr =
+            vpx_sse_to_psnr((double)cpi->totalp_samples, 255.0,
+                            (double)cpi->totalp_sq_error);
+        const double total_ssim = 100 * pow(cpi->summed_quality /
+                                            cpi->summed_weights, 8.0);
+        const double totalp_ssim = 100 * pow(cpi->summedp_quality /
+                                             cpi->summedp_weights, 8.0);

        fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
                "VPXSSIM\tVPSSIMP\t  Time(ms)\n");
        fprintf(f, "%7.2f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%8.0f\n",
                dr, cpi->total / cpi->count, total_psnr,
-                cpi->totalp / cpi->count, total_psnr2, total_ssim, total_ssimp,
+                cpi->totalp / cpi->count, totalp_psnr, total_ssim, totalp_ssim,
                total_encode_time);
      }
@@ -1995,56 +2100,6 @@ void vp9_remove_compressor(VP9_PTR *ptr) {
  }
#endif

-#ifdef ENTROPY_STATS
-  {
-    int i, j, k;
-    FILE *fmode = fopen("vp9_modecontext.c", "w");
-
-    fprintf(fmode, "\n#include \"vp9_entropymode.h\"\n\n");
-    fprintf(fmode, "const unsigned int vp9_kf_default_bmode_counts ");
-    fprintf(fmode, "[INTRA_MODES][INTRA_MODES]"
-                   "[INTRA_MODES] =\n{\n");
-
-    for (i = 0; i < INTRA_MODES; i++) {
-      fprintf(fmode, "    { // Above Mode :  %d\n", i);
-
-      for (j = 0; j < INTRA_MODES; j++) {
-        fprintf(fmode, "        {");
-
-        for (k = 0; k < INTRA_MODES; k++) {
-          if (!intra_mode_stats[i][j][k])
-            fprintf(fmode, " %5d, ", 1);
-          else
-            fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
-        }
-
-        fprintf(fmode, "}, // left_mode %d\n", j);
-      }
-
-      fprintf(fmode, "    },\n");
-    }
-
-    fprintf(fmode, "};\n");
-    fclose(fmode);
-  }
-#endif
-
-
-#if defined(SECTIONBITS_OUTPUT)
-
-  if (0) {
-    int i;
-    FILE *f = fopen("tokenbits.stt", "a");
-
-    for (i = 0; i < 28; i++)
-      fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
-
-    fprintf(f, "\n");
-    fclose(f);
-  }
-
-#endif
#if 0
  {
    printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
@@ -2069,7 +2124,6 @@ void vp9_remove_compressor(VP9_PTR *ptr) {
  vp9_remove_common(&cpi->common);
  vpx_free(cpi);

-  *ptr = 0;
#ifdef OUTPUT_YUV_SRC
  fclose(yuv_file);
@@ -2093,8 +2147,8 @@ void vp9_remove_compressor(VP9_PTR *ptr) {
}

-static uint64_t calc_plane_error(uint8_t *orig, int orig_stride,
-                                 uint8_t *recon, int recon_stride,
+static uint64_t calc_plane_error(const uint8_t *orig, int orig_stride,
+                                 const uint8_t *recon, int recon_stride,
                                 unsigned int cols, unsigned int rows) {
  unsigned int row, col;
  uint64_t total_sse = 0;
@@ -2111,8 +2165,8 @@ static uint64_t calc_plane_error(uint8_t *orig, int orig_stride,
    /* Handle odd-sized width */
    if (col < cols) {
      unsigned int border_row, border_col;
-      uint8_t *border_orig = orig;
-      uint8_t *border_recon = recon;
+      const uint8_t *border_orig = orig;
+      const uint8_t *border_recon = recon;

      for (border_row = 0; border_row < 16; border_row++) {
        for (border_col = col; border_col < cols; border_col++) {
@@ -2143,136 +2197,136 @@ static uint64_t calc_plane_error(uint8_t *orig, int orig_stride,
  return total_sse;
}
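Editor's note: the refactor that follows replaces the old vp9_mse2psnr with vpx_sse_to_psnr; both compute the standard PSNR over N samples with peak value 255, so stating the mapping once makes the hunk easier to audit. A minimal sketch (real implementations also clamp the SSE == 0 case):

/* Sketch of the SSE-to-PSNR mapping: PSNR = 10 * log10(peak^2 * N / SSE). */
#include <math.h>
static double sse_to_psnr(double samples, double peak, double sse) {
  return 10.0 * log10(samples * peak * peak / sse);
}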
+typedef struct {
+  double psnr[4];       // total/y/u/v
+  uint64_t sse[4];      // total/y/u/v
+  uint32_t samples[4];  // total/y/u/v
+} PSNR_STATS;
+
+static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
+                      PSNR_STATS *psnr) {
+  const int widths[3]        = {a->y_width,  a->uv_width,  a->uv_width };
+  const int heights[3]       = {a->y_height, a->uv_height, a->uv_height};
+  const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer,  a->v_buffer };
+  const int a_strides[3]     = {a->y_stride, a->uv_stride, a->uv_stride};
+  const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer,  b->v_buffer };
+  const int b_strides[3]     = {b->y_stride, b->uv_stride, b->uv_stride};
+  int i;
+  uint64_t total_sse = 0;
+  uint32_t total_samples = 0;

-static void generate_psnr_packet(VP9_COMP *cpi) {
-  YV12_BUFFER_CONFIG *orig = cpi->Source;
-  YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
-  struct vpx_codec_cx_pkt pkt;
-  uint64_t sse;
-  int i;
-  unsigned int width = orig->y_crop_width;
-  unsigned int height = orig->y_crop_height;
+  for (i = 0; i < 3; ++i) {
+    const int w = widths[i];
+    const int h = heights[i];
+    const uint32_t samples = w * h;
+    const uint64_t sse = calc_plane_error(a_planes[i], a_strides[i],
+                                          b_planes[i], b_strides[i],
+                                          w, h);
+    psnr->sse[1 + i] = sse;
+    psnr->samples[1 + i] = samples;
+    psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, 255.0, (double)sse);

-  pkt.kind = VPX_CODEC_PSNR_PKT;
-  sse = calc_plane_error(orig->y_buffer, orig->y_stride,
-                         recon->y_buffer, recon->y_stride,
-                         width, height);
-  pkt.data.psnr.sse[0] = sse;
-  pkt.data.psnr.sse[1] = sse;
-  pkt.data.psnr.samples[0] = width * height;
-  pkt.data.psnr.samples[1] = width * height;
-
-  width = orig->uv_crop_width;
-  height = orig->uv_crop_height;
-
-  sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
-                         recon->u_buffer, recon->uv_stride,
-                         width, height);
-  pkt.data.psnr.sse[0] += sse;
-  pkt.data.psnr.sse[2] = sse;
-  pkt.data.psnr.samples[0] += width * height;
-  pkt.data.psnr.samples[2] = width * height;
-
-  sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
-                         recon->v_buffer, recon->uv_stride,
-                         width, height);
-  pkt.data.psnr.sse[0] += sse;
-  pkt.data.psnr.sse[3] = sse;
-  pkt.data.psnr.samples[0] += width * height;
-  pkt.data.psnr.samples[3] = width * height;
-
-  for (i = 0; i < 4; i++)
-    pkt.data.psnr.psnr[i] = vp9_mse2psnr(pkt.data.psnr.samples[i], 255.0,
-                                         (double)pkt.data.psnr.sse[i]);
+    total_sse += sse;
+    total_samples += samples;
+  }

-  vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
+  psnr->sse[0] = total_sse;
+  psnr->samples[0] = total_samples;
+  psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, 255.0,
+                                  (double)total_sse);
}

+static void generate_psnr_packet(VP9_COMP *cpi) {
+  struct vpx_codec_cx_pkt pkt;
+  int i;
+  PSNR_STATS psnr;
+  calc_psnr(cpi->Source, cpi->common.frame_to_show, &psnr);
+  for (i = 0; i < 4; ++i) {
+    pkt.data.psnr.samples[i] = psnr.samples[i];
+    pkt.data.psnr.sse[i] = psnr.sse[i];
+    pkt.data.psnr.psnr[i] = psnr.psnr[i];
+  }
+  pkt.kind = VPX_CODEC_PSNR_PKT;
+  vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
+}

-int vp9_use_as_reference(VP9_PTR ptr, int ref_frame_flags) {
-  VP9_COMP *cpi = (VP9_COMP *)(ptr);
-
+int vp9_use_as_reference(VP9_COMP *cpi, int ref_frame_flags) {
  if (ref_frame_flags > 7)
    return -1;

  cpi->ref_frame_flags = ref_frame_flags;
  return 0;
}

-int vp9_update_reference(VP9_PTR ptr, int ref_frame_flags) {
-  VP9_COMP *cpi = (VP9_COMP *)(ptr);
+int vp9_update_reference(VP9_COMP *cpi, int ref_frame_flags) {
  if (ref_frame_flags > 7)
    return -1;

-  cpi->refresh_golden_frame = 0;
-  cpi->refresh_alt_ref_frame = 0;
-  cpi->refresh_last_frame = 0;
+  cpi->ext_refresh_golden_frame = 0;
+  cpi->ext_refresh_alt_ref_frame = 0;
+  cpi->ext_refresh_last_frame = 0;

  if (ref_frame_flags & VP9_LAST_FLAG)
-    cpi->refresh_last_frame = 1;
+    cpi->ext_refresh_last_frame = 1;

  if (ref_frame_flags & VP9_GOLD_FLAG)
-    cpi->refresh_golden_frame = 1;
+    cpi->ext_refresh_golden_frame = 1;

  if (ref_frame_flags & VP9_ALT_FLAG)
-    cpi->refresh_alt_ref_frame = 1;
+    cpi->ext_refresh_alt_ref_frame = 1;

+  cpi->ext_refresh_frame_flags_pending = 1;
  return 0;
}
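Editor's note: vp9_update_reference() now records the application's request in ext_refresh_* fields plus a pending bit instead of writing the live refresh flags directly, so an externally requested refresh cannot be clobbered while the encoder is choosing its own refreshes. A hedged sketch of how the pending flags would be consumed at frame start — the function name and placement are assumptions, not part of this diff:

/* Sketch only: apply externally requested refresh flags at frame start. */
static void apply_ext_refresh_flags(VP9_COMP *cpi) {
  if (cpi->ext_refresh_frame_flags_pending) {
    cpi->refresh_last_frame = cpi->ext_refresh_last_frame;
    cpi->refresh_golden_frame = cpi->ext_refresh_golden_frame;
    cpi->refresh_alt_ref_frame = cpi->ext_refresh_alt_ref_frame;
    cpi->ext_refresh_frame_flags_pending = 0;
  }
}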
-int vp9_copy_reference_enc(VP9_PTR ptr, VP9_REFFRAME ref_frame_flag,
-                           YV12_BUFFER_CONFIG *sd) {
-  VP9_COMP *cpi = (VP9_COMP *)(ptr);
-  VP9_COMMON *cm = &cpi->common;
-  int ref_fb_idx;
-
+static YV12_BUFFER_CONFIG *get_vp9_ref_frame_buffer(VP9_COMP *cpi,
+                                           VP9_REFFRAME ref_frame_flag) {
+  MV_REFERENCE_FRAME ref_frame = NONE;
  if (ref_frame_flag == VP9_LAST_FLAG)
-    ref_fb_idx = cm->ref_frame_map[cpi->lst_fb_idx];
+    ref_frame = LAST_FRAME;
  else if (ref_frame_flag == VP9_GOLD_FLAG)
-    ref_fb_idx = cm->ref_frame_map[cpi->gld_fb_idx];
+    ref_frame = GOLDEN_FRAME;
  else if (ref_frame_flag == VP9_ALT_FLAG)
-    ref_fb_idx = cm->ref_frame_map[cpi->alt_fb_idx];
-  else
-    return -1;
+    ref_frame = ALTREF_FRAME;

-  vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
+  return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame);
+}

-  return 0;
+int vp9_copy_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag,
+                           YV12_BUFFER_CONFIG *sd) {
+  YV12_BUFFER_CONFIG *cfg = get_vp9_ref_frame_buffer(cpi, ref_frame_flag);
+  if (cfg) {
+    vp8_yv12_copy_frame(cfg, sd);
+    return 0;
+  } else {
+    return -1;
+  }
}

-int vp9_get_reference_enc(VP9_PTR ptr, int index, YV12_BUFFER_CONFIG **fb) {
-  VP9_COMP *cpi = (VP9_COMP *)(ptr);
+int vp9_get_reference_enc(VP9_COMP *cpi, int index, YV12_BUFFER_CONFIG **fb) {
  VP9_COMMON *cm = &cpi->common;

-  if (index < 0 || index >= NUM_REF_FRAMES)
+  if (index < 0 || index >= REF_FRAMES)
    return -1;

-  *fb = &cm->yv12_fb[cm->ref_frame_map[index]];
+  *fb = &cm->frame_bufs[cm->ref_frame_map[index]].buf;
  return 0;
}

-int vp9_set_reference_enc(VP9_PTR ptr, VP9_REFFRAME ref_frame_flag,
+int vp9_set_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag,
                          YV12_BUFFER_CONFIG *sd) {
-  VP9_COMP *cpi = (VP9_COMP *)(ptr);
-  VP9_COMMON *cm = &cpi->common;
-
-  int ref_fb_idx;
-
-  if (ref_frame_flag == VP9_LAST_FLAG)
-    ref_fb_idx = cm->ref_frame_map[cpi->lst_fb_idx];
-  else if (ref_frame_flag == VP9_GOLD_FLAG)
-    ref_fb_idx = cm->ref_frame_map[cpi->gld_fb_idx];
-  else if (ref_frame_flag == VP9_ALT_FLAG)
-    ref_fb_idx = cm->ref_frame_map[cpi->alt_fb_idx];
-  else
+  YV12_BUFFER_CONFIG *cfg = get_vp9_ref_frame_buffer(cpi, ref_frame_flag);
+  if (cfg) {
+    vp8_yv12_copy_frame(sd, cfg);
+    return 0;
+  } else {
    return -1;
-
-  vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
-
-  return 0;
+  }
}

-int vp9_update_entropy(VP9_PTR comp, int update) {
-  ((VP9_COMP *)comp)->common.refresh_frame_context = update;
+
+int vp9_update_entropy(VP9_COMP * cpi, int update) {
+  cpi->ext_refresh_frame_context = update;
+  cpi->ext_refresh_frame_context_pending = 1;
  return 0;
}
@@ -2347,6 +2401,42 @@ void vp9_write_yuv_rec_frame(VP9_COMMON *cm) {
}
#endif

+static void scale_and_extend_frame_nonnormative(YV12_BUFFER_CONFIG *src_fb,
+                                                YV12_BUFFER_CONFIG *dst_fb) {
+  const int in_w = src_fb->y_crop_width;
+  const int in_h = src_fb->y_crop_height;
+  const int out_w = dst_fb->y_crop_width;
+  const int out_h = dst_fb->y_crop_height;
+  const int in_w_uv = src_fb->uv_crop_width;
+  const int in_h_uv = src_fb->uv_crop_height;
+  const int out_w_uv = dst_fb->uv_crop_width;
+  const int out_h_uv = dst_fb->uv_crop_height;
+  int i;
+
+  uint8_t *srcs[4] = {src_fb->y_buffer, src_fb->u_buffer, src_fb->v_buffer,
+                      src_fb->alpha_buffer};
+  int src_strides[4] = {src_fb->y_stride, src_fb->uv_stride, src_fb->uv_stride,
+                        src_fb->alpha_stride};
+
+  uint8_t *dsts[4] = {dst_fb->y_buffer, dst_fb->u_buffer, dst_fb->v_buffer,
+                      dst_fb->alpha_buffer};
+  int dst_strides[4] = {dst_fb->y_stride, dst_fb->uv_stride, dst_fb->uv_stride,
+                        dst_fb->alpha_stride};
+
+  for (i = 0; i < MAX_MB_PLANE; ++i) {
+    if (i == 0 || i == 3) {
+      // Y and alpha planes
+      vp9_resize_plane(srcs[i], in_h, in_w, src_strides[i],
+                       dsts[i], out_h, out_w, dst_strides[i]);
+    } else {
+      // Chroma planes
+      vp9_resize_plane(srcs[i], in_h_uv, in_w_uv, src_strides[i],
+                       dsts[i], out_h_uv, out_w_uv, dst_strides[i]);
+    }
+  }
+  vp8_yv12_extend_frame_borders(dst_fb);
+}
+
static void scale_and_extend_frame(YV12_BUFFER_CONFIG *src_fb,
                                   YV12_BUFFER_CONFIG *dst_fb) {
  const int in_w = src_fb->y_crop_width;
@@ -2368,7 +2458,7 @@ static void scale_and_extend_frame(YV12_BUFFER_CONFIG *src_fb,
  for (y = 0; y < out_h; y += 16) {
    for (x = 0; x < out_w; x += 16) {
      for (i = 0; i < MAX_MB_PLANE; ++i) {
-        const int factor = i == 0 ? 1 : 2;
+        const int factor = (i == 0 || i == 3 ? 1 : 2);
        const int x_q4 = x * (16 / factor) * in_w / out_w;
        const int y_q4 = y * (16 / factor) * in_h / out_h;
        const int src_stride = src_strides[i];
@@ -2388,61 +2478,6 @@ static void scale_and_extend_frame(YV12_BUFFER_CONFIG *src_fb,
  vp8_yv12_extend_frame_borders(dst_fb);
}

-
-static void update_alt_ref_frame_stats(VP9_COMP *cpi) {
-  // this frame refreshes means next frames don't unless specified by user
-  cpi->frames_since_golden = 0;
-
-#if CONFIG_MULTIPLE_ARF
-  if (!cpi->multi_arf_enabled)
-#endif
-    // Clear the alternate reference update pending flag.
-    cpi->source_alt_ref_pending = 0;
-
-  // Set the alternate reference frame active flag
-  cpi->source_alt_ref_active = 1;
-}
-static void update_golden_frame_stats(VP9_COMP *cpi) {
-  // Update the Golden frame usage counts.
-  if (cpi->refresh_golden_frame) {
-    // this frame refreshes means next frames don't unless specified by user
-    cpi->refresh_golden_frame = 0;
-    cpi->frames_since_golden = 0;
-
-    // ******** Fixed Q test code only ************
-    // If we are going to use the ALT reference for the next group of frames
-    // set a flag to say so.
-    if (cpi->oxcf.fixed_q >= 0 &&
-        cpi->oxcf.play_alternate && !cpi->refresh_alt_ref_frame) {
-      cpi->source_alt_ref_pending = 1;
-      cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
-
-      // TODO(ivan): For SVC encoder, GF automatic update is disabled by using
-      // a large GF_interval.
-      if (cpi->use_svc) {
-        cpi->frames_till_gf_update_due = INT_MAX;
-      }
-    }
-
-    if (!cpi->source_alt_ref_pending)
-      cpi->source_alt_ref_active = 0;
-
-    // Decrement count down till next gf
-    if (cpi->frames_till_gf_update_due > 0)
-      cpi->frames_till_gf_update_due--;
-
-  } else if (!cpi->refresh_alt_ref_frame) {
-    // Decrement count down till next gf
-    if (cpi->frames_till_gf_update_due > 0)
-      cpi->frames_till_gf_update_due--;
-
-    if (cpi->frames_till_alt_ref_frame)
-      cpi->frames_till_alt_ref_frame--;
-
-    cpi->frames_since_golden++;
-  }
-}
-
static int find_fp_qindex() {
  int i;
@@ -2458,16 +2493,6 @@ static int find_fp_qindex() {
  return i;
}

-static void Pass1Encode(VP9_COMP *cpi, unsigned long *size, unsigned char *dest,
-                        unsigned int *frame_flags) {
-  (void) size;
-  (void) dest;
-  (void) frame_flags;
-
-  vp9_set_quantizer(cpi, find_fp_qindex());
-  vp9_first_pass(cpi);
-}
-
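Editor's note: in the normative scaler above, x_q4 and y_q4 are source coordinates in 1/16-pel ("q4") fixed point — 16 steps per output pixel for luma, halved to 8 via factor for the subsampled chroma planes. A worked example with assumed frame sizes, given as comments only:

/* Worked example (assumed sizes): scaling in_w = 640 down to out_w = 320.
 * Luma, output column x = 16:
 *   x_q4 = 16 * 16 * 640 / 320 = 512  ->  source position 512 / 16 = 32,
 *   i.e. exactly 2x the output column, as expected for a 2:1 downscale.
 * Chroma (factor = 2), same output column x = 16:
 *   x_q4 = 16 * 8 * 320 / 160 = 256   ->  source chroma position 16,
 *   again 2x the corresponding output chroma column 8. */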
#define WRITE_RECON_BUFFER 0
#if WRITE_RECON_BUFFER
void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) {
@@ -2534,40 +2559,37 @@ static double compute_edge_pixel_proportion(YV12_BUFFER_CONFIG *frame) {

// Function to test for conditions that indicate we should loop
// back and recode a frame.
-static int recode_loop_test(VP9_COMP *cpi,
+static int recode_loop_test(const VP9_COMP *cpi,
                            int high_limit, int low_limit,
                            int q, int maxq, int minq) {
+  const VP9_COMMON *const cm = &cpi->common;
+  const RATE_CONTROL *const rc = &cpi->rc;
  int force_recode = 0;
-  VP9_COMMON *cm = &cpi->common;

-  // Is frame recode allowed at all
-  // Yes if either recode mode 1 is selected or mode two is selected
-  // and the frame is a key frame. golden frame or alt_ref_frame
-  if ((cpi->sf.recode_loop == 1) ||
-      ((cpi->sf.recode_loop == 2) &&
-       ((cm->frame_type == KEY_FRAME) ||
-        cpi->refresh_golden_frame ||
-        cpi->refresh_alt_ref_frame))) {
+  // Special case trap if maximum allowed frame size exceeded.
+  if (rc->projected_frame_size > rc->max_frame_bandwidth) {
+    force_recode = 1;
+
+  // Is frame recode allowed.
+  // Yes if either recode mode 1 is selected or mode 2 is selected
+  // and the frame is a key frame, golden frame or alt_ref_frame
+  } else if ((cpi->sf.recode_loop == ALLOW_RECODE) ||
+             ((cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF) &&
+              (cm->frame_type == KEY_FRAME ||
+               cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
    // General over and under shoot tests
-    if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
-        ((cpi->projected_frame_size < low_limit) && (q > minq))) {
+    if ((rc->projected_frame_size > high_limit && q < maxq) ||
+        (rc->projected_frame_size < low_limit && q > minq)) {
      force_recode = 1;
    } else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
      // Deal with frame undershoot and whether or not we are
      // below the automatically set cq level.
      if (q > cpi->cq_target_quality &&
-          cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3)) {
-        force_recode = 1;
-      } else if (q > cpi->oxcf.cq_level &&
-                 cpi->projected_frame_size < cpi->min_frame_bandwidth &&
-                 cpi->active_best_quality > cpi->oxcf.cq_level) {
-        // Severe undershoot and between auto and user cq level
+          rc->projected_frame_size < ((rc->this_frame_target * 7) >> 3)) {
        force_recode = 1;
-        cpi->active_best_quality = cpi->oxcf.cq_level;
      }
    }
  }
-
  return force_recode;
}

@@ -2577,9 +2599,9 @@ static void update_reference_frames(VP9_COMP * const cpi) {
  // At this point the new frame has been encoded.
  // If any buffer copy / swapping is signaled it should be done here.
  if (cm->frame_type == KEY_FRAME) {
-    ref_cnt_fb(cm->fb_idx_ref_cnt,
+    ref_cnt_fb(cm->frame_bufs,
               &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
-    ref_cnt_fb(cm->fb_idx_ref_cnt,
+    ref_cnt_fb(cm->frame_bufs,
               &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
  }
#if CONFIG_MULTIPLE_ARF
@@ -2600,7 +2622,7 @@ static void update_reference_frames(VP9_COMP * const cpi) {
     */
    int tmp;

-    ref_cnt_fb(cm->fb_idx_ref_cnt,
+    ref_cnt_fb(cm->frame_bufs,
               &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);

    tmp = cpi->alt_fb_idx;
@@ -2614,18 +2636,18 @@ static void update_reference_frames(VP9_COMP * const cpi) {
      arf_idx = cpi->arf_buffer_idx[cpi->sequence_number + 1];
    }
#endif
-    ref_cnt_fb(cm->fb_idx_ref_cnt,
+    ref_cnt_fb(cm->frame_bufs,
               &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
  }

  if (cpi->refresh_golden_frame) {
-    ref_cnt_fb(cm->fb_idx_ref_cnt,
+    ref_cnt_fb(cm->frame_bufs,
               &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
  }
}

if (cpi->refresh_last_frame) {
-    ref_cnt_fb(cm->fb_idx_ref_cnt,
+    ref_cnt_fb(cm->frame_bufs,
               &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
  }
}
@@ -2649,36 +2671,32 @@ static void loopfilter_frame(VP9_COMP *cpi, VP9_COMMON *cm) {
  }

  if (lf->filter_level > 0) {
-    vp9_set_alt_lf_level(cpi, lf->filter_level);
    vp9_loop_filter_frame(cm, xd, lf->filter_level, 0, 0);
  }

-  vp9_extend_frame_inner_borders(cm->frame_to_show,
-                                 cm->subsampling_x, cm->subsampling_y);
+  vp9_extend_frame_inner_borders(cm->frame_to_show);
}
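Editor's note: recode_loop_test() is the gate for the encode_with_recode_loop() driver introduced later in this patch. A condensed sketch of how its result feeds the q adjustment — this is a simplification under stated assumptions, not the real driver, which also narrows q_low/q_high and tracks over- and undershoot separately:

/* Condensed sketch: re-encode while the projected size is outside the
 * limits, bisecting q between the current bounds. */
do {
  vp9_set_quantizer(cpi, q);
  vp9_encode_frame(cpi);
  loop = recode_loop_test(cpi, frame_over_shoot_limit,
                          frame_under_shoot_limit, q, q_high, q_low);
  if (loop)
    q = (q_high + q_low) / 2;  /* real code also updates the bounds */
} while (loop);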
static void scale_references(VP9_COMP *cpi) {
  VP9_COMMON *cm = &cpi->common;
-  int i;
-  int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
-                                      cpi->alt_fb_idx};
+  MV_REFERENCE_FRAME ref_frame;

-  for (i = 0; i < 3; i++) {
-    YV12_BUFFER_CONFIG *ref = &cm->yv12_fb[cm->ref_frame_map[refs[i]]];
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
+    YV12_BUFFER_CONFIG *const ref = &cm->frame_bufs[idx].buf;

    if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
-      int new_fb = get_free_fb(cm);
-
-      vp9_realloc_frame_buffer(&cm->yv12_fb[new_fb],
+      const int new_fb = get_free_fb(cm);
+      vp9_realloc_frame_buffer(&cm->frame_bufs[new_fb].buf,
                               cm->width, cm->height,
                               cm->subsampling_x, cm->subsampling_y,
-                               VP9BORDERINPIXELS);
-      scale_and_extend_frame(ref, &cm->yv12_fb[new_fb]);
-      cpi->scaled_ref_idx[i] = new_fb;
+                               VP9_ENC_BORDER_IN_PIXELS, NULL, NULL, NULL);
+      scale_and_extend_frame(ref, &cm->frame_bufs[new_fb].buf);
+      cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
    } else {
-      cpi->scaled_ref_idx[i] = cm->ref_frame_map[refs[i]];
-      cm->fb_idx_ref_cnt[cm->ref_frame_map[refs[i]]]++;
+      cpi->scaled_ref_idx[ref_frame - 1] = idx;
+      cm->frame_bufs[idx].ref_count++;
    }
  }
}
@@ -2688,7 +2706,7 @@ static void release_scaled_references(VP9_COMP *cpi) {
  int i;

  for (i = 0; i < 3; i++)
-    cm->fb_idx_ref_cnt[cpi->scaled_ref_idx[i]]--;
+    cm->frame_bufs[cpi->scaled_ref_idx[i]].ref_count--;
}

static void full_to_model_count(unsigned int *model_count,
@@ -2697,22 +2715,20 @@ static void full_to_model_count(unsigned int *model_count,
  model_count[ZERO_TOKEN] = full_count[ZERO_TOKEN];
  model_count[ONE_TOKEN] = full_count[ONE_TOKEN];
  model_count[TWO_TOKEN] = full_count[TWO_TOKEN];
-  for (n = THREE_TOKEN; n < DCT_EOB_TOKEN; ++n)
+  for (n = THREE_TOKEN; n < EOB_TOKEN; ++n)
    model_count[TWO_TOKEN] += full_count[n];
-  model_count[DCT_EOB_MODEL_TOKEN] = full_count[DCT_EOB_TOKEN];
+  model_count[EOB_MODEL_TOKEN] = full_count[EOB_TOKEN];
}

-static void full_to_model_counts(
-    vp9_coeff_count_model *model_count, vp9_coeff_count *full_count) {
+static void full_to_model_counts(vp9_coeff_count_model *model_count,
+                                 vp9_coeff_count *full_count) {
  int i, j, k, l;
-  for (i = 0; i < BLOCK_TYPES; ++i)
+
+  for (i = 0; i < PLANE_TYPES; ++i)
    for (j = 0; j < REF_TYPES; ++j)
      for (k = 0; k < COEF_BANDS; ++k)
-        for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
-          if (l >= 3 && k == 0)
-            continue;
+        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
          full_to_model_count(model_count[i][j][k][l], full_count[i][j][k][l]);
-        }
}
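Editor's note: full_to_model_count() folds the full coefficient-token histogram into the smaller "model" distribution — every token from THREE_TOKEN up to (but excluding) EOB_TOKEN is accounted to TWO_TOKEN, and the end-of-block count is copied to its model slot. A worked example with hypothetical counts, given as comments only:

/* Hypothetical full counts: ZERO=10, ONE=7, TWO=5, THREE=3, FOUR=2,
 * remaining higher categories=4 in total, EOB=6.
 * After full_to_model_count():
 *   model[ZERO_TOKEN]      = 10
 *   model[ONE_TOKEN]       = 7
 *   model[TWO_TOKEN]       = 5 + 3 + 2 + 4 = 14   (all tokens >= THREE)
 *   model[EOB_MODEL_TOKEN] = 6
 */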
"a" : "w"); int recon_err; - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); recon_err = vp9_calc_ss_err(cpi->Source, get_frame_new_buffer(cm)); if (cpi->twopass.total_left_stats.coded_error != 0.0) - fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d %10d" - "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f" - "%6d %6d %5d %5d %5d %8.2f %10d %10.3f" - "%10.3f %8d %10d %10d %10d\n", - cpi->common.current_video_frame, cpi->this_frame_target, - cpi->projected_frame_size, 0, - (cpi->projected_frame_size - cpi->this_frame_target), - (int)cpi->total_target_vs_actual, - (int)(cpi->oxcf.starting_buffer_level - cpi->bits_off_target), - (int)cpi->total_actual_bits, cm->base_qindex, + fprintf(f, "%10u %10d %10d %10d %10d %10d " + "%10"PRId64" %10"PRId64" %10d " + "%7.2lf %7.2lf %7.2lf %7.2lf %7.2lf" + "%6d %6d %5d %5d %5d " + "%10"PRId64" %10.3lf" + "%10lf %8u %10d %10d %10d\n", + cpi->common.current_video_frame, cpi->rc.this_frame_target, + cpi->rc.projected_frame_size, + cpi->rc.projected_frame_size / cpi->common.MBs, + (cpi->rc.projected_frame_size - cpi->rc.this_frame_target), + cpi->rc.total_target_vs_actual, + (cpi->oxcf.starting_buffer_level - cpi->rc.bits_off_target), + cpi->rc.total_actual_bits, cm->base_qindex, vp9_convert_qindex_to_q(cm->base_qindex), (double)vp9_dc_quant(cm->base_qindex, 0) / 4.0, - vp9_convert_qindex_to_q(cpi->active_best_quality), - vp9_convert_qindex_to_q(cpi->active_worst_quality), cpi->avg_q, - vp9_convert_qindex_to_q(cpi->ni_av_qi), + cpi->rc.avg_q, + vp9_convert_qindex_to_q(cpi->rc.ni_av_qi), vp9_convert_qindex_to_q(cpi->cq_target_quality), cpi->refresh_last_frame, cpi->refresh_golden_frame, - cpi->refresh_alt_ref_frame, cm->frame_type, cpi->gfu_boost, - cpi->twopass.est_max_qcorrection_factor, (int)cpi->twopass.bits_left, + cpi->refresh_alt_ref_frame, cm->frame_type, cpi->rc.gfu_boost, + cpi->twopass.bits_left, cpi->twopass.total_left_stats.coded_error, - (double)cpi->twopass.bits_left / + cpi->twopass.bits_left / (1 + cpi->twopass.total_left_stats.coded_error), - cpi->tot_recode_hits, recon_err, cpi->kf_boost, cpi->kf_zeromotion_pct); + cpi->tot_recode_hits, recon_err, cpi->rc.kf_boost, + cpi->twopass.kf_zeromotion_pct); fclose(f); @@ -2762,8 +2781,6 @@ static void output_frame_level_debug_stats(VP9_COMP *cpi) { for (i = 0; i < MAX_MODES; ++i) fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]); - for (i = 0; i < MAX_REFS; ++i) - fprintf(fmodes, "%5d ", cpi->sub8x8_mode_chosen_counts[i]); fprintf(fmodes, "\n"); @@ -2772,403 +2789,66 @@ static void output_frame_level_debug_stats(VP9_COMP *cpi) { } #endif -static int pick_q_and_adjust_q_bounds(VP9_COMP *cpi, - int * bottom_index, int * top_index) { - // Set an active best quality and if necessary active worst quality - int q = cpi->active_worst_quality; +static void encode_without_recode_loop(VP9_COMP *cpi, + size_t *size, + uint8_t *dest, + int q) { VP9_COMMON *const cm = &cpi->common; + vp9_clear_system_state(); + vp9_set_quantizer(cpi, q); - if (frame_is_intra_only(cm)) { -#if !CONFIG_MULTIPLE_ARF - // Handle the special case for key frames forced when we have75 reached - // the maximum key frame interval. Here force the Q to a range - // based on the ambient Q to reduce the risk of popping. 
-    if (cpi->this_key_frame_forced) {
-      int delta_qindex;
-      int qindex = cpi->last_boosted_qindex;
-      double last_boosted_q = vp9_convert_qindex_to_q(qindex);
-
-      delta_qindex = vp9_compute_qdelta(cpi, last_boosted_q,
-                                        (last_boosted_q * 0.75));
-
-      cpi->active_best_quality = MAX(qindex + delta_qindex,
-                                     cpi->best_quality);
-    } else {
-      int high = 5000;
-      int low = 400;
-      double q_adj_factor = 1.0;
-      double q_val;
-
-      // Baseline value derived from cpi->active_worst_quality and kf boost
-      cpi->active_best_quality = get_active_quality(q, cpi->kf_boost,
-                                                    low, high,
-                                                    kf_low_motion_minq,
-                                                    kf_high_motion_minq);
-
-      // Allow somewhat lower kf minq with small image formats.
-      if ((cm->width * cm->height) <= (352 * 288)) {
-        q_adj_factor -= 0.25;
-      }
-
-      // Make a further adjustment based on the kf zero motion measure.
-      q_adj_factor += 0.05 - (0.001 * (double)cpi->kf_zeromotion_pct);
-
-      // Convert the adjustment factor to a qindex delta
-      // on active_best_quality.
-      q_val = vp9_convert_qindex_to_q(cpi->active_best_quality);
-      cpi->active_best_quality +=
-          vp9_compute_qdelta(cpi, q_val, (q_val * q_adj_factor));
-    }
-#else
-    double current_q;
-    // Force the KF quantizer to be 30% of the active_worst_quality.
-    current_q = vp9_convert_qindex_to_q(cpi->active_worst_quality);
-    cpi->active_best_quality = cpi->active_worst_quality
-        + vp9_compute_qdelta(cpi, current_q, current_q * 0.3);
-#endif
-  } else if (!cpi->is_src_frame_alt_ref &&
-             (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
-    int high = 2000;
-    int low = 400;
-
-    // Use the lower of cpi->active_worst_quality and recent
-    // average Q as basis for GF/ARF best Q limit unless last frame was
-    // a key frame.
-    if (cpi->frames_since_key > 1 &&
-        cpi->avg_frame_qindex < cpi->active_worst_quality) {
-      q = cpi->avg_frame_qindex;
-    }
-    // For constrained quality dont allow Q less than the cq level
-    if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
-      if (q < cpi->cq_target_quality)
-        q = cpi->cq_target_quality;
-      if (cpi->frames_since_key > 1) {
-        cpi->active_best_quality = get_active_quality(q, cpi->gfu_boost,
-                                                      low, high,
-                                                      afq_low_motion_minq,
-                                                      afq_high_motion_minq);
-      } else {
-        cpi->active_best_quality = get_active_quality(q, cpi->gfu_boost,
-                                                      low, high,
-                                                      gf_low_motion_minq,
-                                                      gf_high_motion_minq);
-      }
-      // Constrained quality use slightly lower active best.
-      cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
-
-    } else if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
-      if (!cpi->refresh_alt_ref_frame) {
-        cpi->active_best_quality = cpi->cq_target_quality;
-      } else {
-        if (cpi->frames_since_key > 1) {
-          cpi->active_best_quality = get_active_quality(q, cpi->gfu_boost,
-                                                        low, high,
-                                                        afq_low_motion_minq,
-                                                        afq_high_motion_minq);
-        } else {
-          cpi->active_best_quality = get_active_quality(q, cpi->gfu_boost,
-                                                        low, high,
-                                                        gf_low_motion_minq,
-                                                        gf_high_motion_minq);
-        }
-      }
-    } else {
-      cpi->active_best_quality = get_active_quality(q, cpi->gfu_boost,
-                                                    low, high,
-                                                    gf_low_motion_minq,
-                                                    gf_high_motion_minq);
-    }
+  // Set up entropy context depending on frame type. The decoder mandates
+  // the use of the default context, index 0, for keyframes and inter
+  // frames where the error_resilient_mode or intra_only flag is set. For
+  // other inter-frames the encoder currently uses only two contexts;
+  // context 1 for ALTREF frames and context 0 for the others.
+  if (cm->frame_type == KEY_FRAME) {
+    vp9_setup_key_frame(cpi);
  } else {
-    if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
-      cpi->active_best_quality = cpi->cq_target_quality;
-    } else {
-      cpi->active_best_quality = inter_minq[q];
-      // 1-pass: for now, use the average Q for the active_best, if its lower
-      // than active_worst.
-      if (cpi->pass == 0 && (cpi->avg_frame_qindex < q))
-        cpi->active_best_quality = inter_minq[cpi->avg_frame_qindex];
-
-      // For the constrained quality mode we don't want
-      // q to fall below the cq level.
-      if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
-          (cpi->active_best_quality < cpi->cq_target_quality)) {
-        // If we are strongly undershooting the target rate in the last
-        // frames then use the user passed in cq value not the auto
-        // cq value.
-        if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth)
-          cpi->active_best_quality = cpi->oxcf.cq_level;
-        else
-          cpi->active_best_quality = cpi->cq_target_quality;
-      }
-    }
-  }
-
-  // Clip the active best and worst quality values to limits
-  if (cpi->active_worst_quality > cpi->worst_quality)
-    cpi->active_worst_quality = cpi->worst_quality;
-
-  if (cpi->active_best_quality < cpi->best_quality)
-    cpi->active_best_quality = cpi->best_quality;
-
-  if (cpi->active_best_quality > cpi->worst_quality)
-    cpi->active_best_quality = cpi->worst_quality;
-
-  if (cpi->active_worst_quality < cpi->active_best_quality)
-    cpi->active_worst_quality = cpi->active_best_quality;
-
-  // Limit Q range for the adaptive loop.
-  if (cm->frame_type == KEY_FRAME && !cpi->this_key_frame_forced) {
-    *top_index =
-        (cpi->active_worst_quality + cpi->active_best_quality * 3) / 4;
-    // If this is the first (key) frame in 1-pass, active best is the user
-    // best-allowed, and leave the top_index to active_worst.
-    if (cpi->pass == 0 && cpi->common.current_video_frame == 0) {
-      cpi->active_best_quality = cpi->oxcf.best_allowed_q;
-      *top_index = cpi->oxcf.worst_allowed_q;
+    if (!cm->intra_only && !cm->error_resilient_mode && !cpi->use_svc) {
+      cpi->common.frame_context_idx = cpi->refresh_alt_ref_frame;
    }
-  } else if (!cpi->is_src_frame_alt_ref &&
-             (cpi->oxcf.end_usage != USAGE_STREAM_FROM_SERVER) &&
-             (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
-    *top_index =
-        (cpi->active_worst_quality + cpi->active_best_quality) / 2;
-  } else {
-    *top_index = cpi->active_worst_quality;
+    vp9_setup_inter_frame(cpi);
  }
-  *bottom_index = cpi->active_best_quality;
-
-  if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
-    q = cpi->active_best_quality;
-    // Special case code to try and match quality with forced key frames
-  } else if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
-    q = cpi->last_boosted_qindex;
-  } else {
-    // Determine initial Q to try.
-    if (cpi->pass == 0) {
-      // 1-pass: for now, use per-frame-bw for target size of frame, scaled
-      // by |x| for key frame.
-      int scale = (cm->frame_type == KEY_FRAME) ? 5 : 1;
-      q = vp9_regulate_q(cpi, scale * cpi->av_per_frame_bandwidth);
-    } else {
-      q = vp9_regulate_q(cpi, cpi->this_frame_target);
-    }
-    if (q > *top_index)
-      q = *top_index;
  }

+  // Variance adaptive and in frame q adjustment experiments are mutually
+  // exclusive.
+ if (cpi->oxcf.aq_mode == VARIANCE_AQ) { + vp9_vaq_frame_setup(cpi); + } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { + setup_in_frame_q_adj(cpi); } + // transform / motion compensation build reconstruction frame + vp9_encode_frame(cpi); - return q; + // Update the skip mb flag probabilities based on the distribution + // seen in the last encoder iteration. + // update_base_skip_probs(cpi); + vp9_clear_system_state(); } -static void encode_frame_to_data_rate(VP9_COMP *cpi, - unsigned long *size, - unsigned char *dest, - unsigned int *frame_flags) { - VP9_COMMON *const cm = &cpi->common; - TX_SIZE t; - int q; - int frame_over_shoot_limit; - int frame_under_shoot_limit; +static void encode_with_recode_loop(VP9_COMP *cpi, + size_t *size, + uint8_t *dest, + int q, + int bottom_index, + int top_index) { + VP9_COMMON *const cm = &cpi->common; + RATE_CONTROL *const rc = &cpi->rc; + int loop_count = 0; int loop = 0; - int loop_count; - - int q_low; - int q_high; - - int top_index; - int bottom_index; - int active_worst_qchanged = 0; - int overshoot_seen = 0; int undershoot_seen = 0; + int q_low = bottom_index, q_high = top_index; + int frame_over_shoot_limit; + int frame_under_shoot_limit; - SPEED_FEATURES *const sf = &cpi->sf; - unsigned int max_mv_def = MIN(cpi->common.width, cpi->common.height); - struct segmentation *const seg = &cm->seg; - - /* Scale the source buffer, if required. */ - if (cm->mi_cols * 8 != cpi->un_scaled_source->y_width || - cm->mi_rows * 8 != cpi->un_scaled_source->y_height) { - scale_and_extend_frame(cpi->un_scaled_source, &cpi->scaled_source); - cpi->Source = &cpi->scaled_source; - } else { - cpi->Source = cpi->un_scaled_source; - } - scale_references(cpi); - - // Clear down mmx registers to allow floating point in what follows. - vp9_clear_system_state(); - - // For an alt ref frame in 2 pass we skip the call to the second - // pass function that sets the target bandwidth so we must set it here. - if (cpi->refresh_alt_ref_frame) { - // Set a per frame bit target for the alt ref frame. - cpi->per_frame_bandwidth = cpi->twopass.gf_bits; - // Set a per second target bitrate. - cpi->target_bandwidth = (int)(cpi->twopass.gf_bits * cpi->output_framerate); - } - - // Clear zbin over-quant value and mode boost values. - cpi->zbin_mode_boost = 0; - - // Enable or disable mode based tweaking of the zbin. - // For 2 pass only used where GF/ARF prediction quality - // is above a threshold. - cpi->zbin_mode_boost = 0; - cpi->zbin_mode_boost_enabled = 0; - - // Current default encoder behavior for the altref sign bias. - cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = cpi->source_alt_ref_active; - - // Check to see if a key frame is signaled. - // For two pass with auto key frame enabled cm->frame_type may already be - // set, but not for one pass. - if ((cm->current_video_frame == 0) || - (cm->frame_flags & FRAMEFLAGS_KEY) || - (cpi->oxcf.auto_key && (cpi->frames_since_key % - cpi->key_frame_frequency == 0))) { - // Set frame type to key frame for the force key frame, if we exceed the - // maximum distance in an automatic keyframe selection or for the first - // frame. - cm->frame_type = KEY_FRAME; - } - - // Set default state for segment based loop filter update flags. - cm->lf.mode_ref_delta_update = 0; - - // Initialize cpi->mv_step_param to default based on max resolution. - cpi->mv_step_param = vp9_init_search_range(cpi, max_mv_def); - // Initialize cpi->max_mv_magnitude and cpi->mv_step_param if appropriate. 
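Several of the branches above scale a real Q value (for example current_q * 0.3 to force the key-frame quantizer toward 30% of active_worst) and convert the result back into a qindex delta with vp9_compute_qdelta(). Its body is not part of this hunk; a sketch of the assumed table walk:

/* Assumed shape of vp9_compute_qdelta(): find the first qindex whose real
 * Q reaches qstart and the first that reaches qtarget, and return the
 * signed difference between the two indices. */
extern double vp9_convert_qindex_to_q(int qindex);  /* provided by libvpx */

static int compute_qdelta_sketch(int best_q, int worst_q,
                                 double qstart, double qtarget) {
  int start_index = worst_q;
  int target_index = worst_q;
  int i;
  for (i = best_q; i < worst_q; ++i) {   /* first index reaching qstart */
    start_index = i;
    if (vp9_convert_qindex_to_q(i) >= qstart) break;
  }
  for (i = best_q; i < worst_q; ++i) {   /* first index reaching qtarget */
    target_index = i;
    if (vp9_convert_qindex_to_q(i) >= qtarget) break;
  }
  return target_index - start_index;     /* signed qindex adjustment */
}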
- if (sf->auto_mv_step_size) { - if (frame_is_intra_only(&cpi->common)) { - // Initialize max_mv_magnitude for use in the first INTER frame - // after a key/intra-only frame. - cpi->max_mv_magnitude = max_mv_def; - } else { - if (cm->show_frame) - // Allow mv_steps to correspond to twice the max mv magnitude found - // in the previous frame, capped by the default max_mv_magnitude based - // on resolution. - cpi->mv_step_param = vp9_init_search_range( - cpi, MIN(max_mv_def, 2 * cpi->max_mv_magnitude)); - cpi->max_mv_magnitude = 0; - } - } - - // Set various flags etc to special state if it is a key frame. - if (frame_is_intra_only(cm)) { - vp9_setup_key_frame(cpi); - // Reset the loop filter deltas and segmentation map. - setup_features(cm); - - // If segmentation is enabled force a map update for key frames. - if (seg->enabled) { - seg->update_map = 1; - seg->update_data = 1; - } - - // The alternate reference frame cannot be active for a key frame. - cpi->source_alt_ref_active = 0; - - cm->error_resilient_mode = (cpi->oxcf.error_resilient_mode != 0); - cm->frame_parallel_decoding_mode = - (cpi->oxcf.frame_parallel_decoding_mode != 0); - if (cm->error_resilient_mode) { - cm->frame_parallel_decoding_mode = 1; - cm->reset_frame_context = 0; - cm->refresh_frame_context = 0; - } else if (cm->intra_only) { - // Only reset the current context. - cm->reset_frame_context = 2; - } - } - - // Configure experimental use of segmentation for enhanced coding of - // static regions if indicated. - // Only allowed in second pass of two pass (as requires lagged coding) - // and if the relevant speed feature flag is set. - if ((cpi->pass == 2) && (cpi->sf.static_segmentation)) { - configure_static_seg_features(cpi); - } - - // Decide how big to make the frame. - vp9_pick_frame_size(cpi); - - vp9_clear_system_state(); - - q = pick_q_and_adjust_q_bounds(cpi, &bottom_index, &top_index); - - q_high = top_index; - q_low = bottom_index; - - vp9_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, - &frame_over_shoot_limit); - -#if CONFIG_MULTIPLE_ARF - // Force the quantizer determined by the coding order pattern. - if (cpi->multi_arf_enabled && (cm->frame_type != KEY_FRAME) && - cpi->oxcf.end_usage != USAGE_CONSTANT_QUALITY) { - double new_q; - double current_q = vp9_convert_qindex_to_q(cpi->active_worst_quality); - int level = cpi->this_frame_weight; - assert(level >= 0); - - // Set quantizer steps at 10% increments. 
- new_q = current_q * (1.0 - (0.2 * (cpi->max_arf_level - level))); - q = cpi->active_worst_quality + vp9_compute_qdelta(cpi, current_q, new_q); - - bottom_index = q; - top_index = q; - q_low = q; - q_high = q; - - printf("frame:%d q:%d\n", cm->current_video_frame, q); - } -#endif - - loop_count = 0; - vp9_zero(cpi->rd_tx_select_threshes); - - if (!frame_is_intra_only(cm)) { - cm->mcomp_filter_type = DEFAULT_INTERP_FILTER; - /* TODO: Decide this more intelligently */ - cm->allow_high_precision_mv = q < HIGH_PRECISION_MV_QTHRESH; - set_mvcost(cpi); - } - -#if CONFIG_VP9_POSTPROC - - if (cpi->oxcf.noise_sensitivity > 0) { - int l = 0; - - switch (cpi->oxcf.noise_sensitivity) { - case 1: - l = 20; - break; - case 2: - l = 40; - break; - case 3: - l = 60; - break; - case 4: - case 5: - l = 100; - break; - case 6: - l = 150; - break; - } - - vp9_denoise(cpi->Source, cpi->Source, l); - } - -#endif - -#ifdef OUTPUT_YUV_SRC - vp9_write_yuv_frame(cpi->Source); -#endif + // Decide frame size bounds + vp9_rc_compute_frame_size_bounds(cpi, rc->this_frame_target, + &frame_under_shoot_limit, + &frame_over_shoot_limit); do { - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); vp9_set_quantizer(cpi, q); @@ -3181,45 +2861,52 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, if (cm->frame_type == KEY_FRAME) { vp9_setup_key_frame(cpi); } else { - if (!cm->intra_only && !cm->error_resilient_mode) { + if (!cm->intra_only && !cm->error_resilient_mode && !cpi->use_svc) { cpi->common.frame_context_idx = cpi->refresh_alt_ref_frame; } vp9_setup_inter_frame(cpi); } } - if (cpi->sf.variance_adaptive_quantization) { - vp9_vaq_frame_setup(cpi); + // Variance adaptive and in frame q adjustment experiments are mutually + // exclusive. + if (cpi->oxcf.aq_mode == VARIANCE_AQ) { + vp9_vaq_frame_setup(cpi); + } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { + setup_in_frame_q_adj(cpi); } // transform / motion compensation build reconstruction frame - vp9_encode_frame(cpi); // Update the skip mb flag probabilities based on the distribution // seen in the last encoder iteration. // update_base_skip_probs(cpi); - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); // Dummy pack of the bitstream using up to date stats to get an // accurate estimate of output frame size to determine if we need // to recode. - vp9_save_coding_context(cpi); - cpi->dummy_packing = 1; - vp9_pack_bitstream(cpi, dest, size); - cpi->projected_frame_size = (*size) << 3; - vp9_restore_coding_context(cpi); + if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) { + vp9_save_coding_context(cpi); + cpi->dummy_packing = 1; + if (!cpi->sf.use_nonrd_pick_mode) + vp9_pack_bitstream(cpi, dest, size); + + rc->projected_frame_size = (int)(*size) << 3; + vp9_restore_coding_context(cpi); - if (frame_over_shoot_limit == 0) - frame_over_shoot_limit = 1; - active_worst_qchanged = 0; + if (frame_over_shoot_limit == 0) + frame_over_shoot_limit = 1; + } if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) { loop = 0; } else { - // Special case handling for forced key frames - if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) { + if ((cm->frame_type == KEY_FRAME) && + rc->this_key_frame_forced && + (rc->projected_frame_size < rc->max_frame_bandwidth)) { int last_q = q; int kf_err = vp9_calc_ss_err(cpi->Source, get_frame_new_buffer(cm)); @@ -3232,9 +2919,9 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, // The key frame is not good enough or we can afford // to make it better without undue risk of popping. 
if ((kf_err > high_err_target && - cpi->projected_frame_size <= frame_over_shoot_limit) || + rc->projected_frame_size <= frame_over_shoot_limit) || (kf_err > low_err_target && - cpi->projected_frame_size <= frame_under_shoot_limit)) { + rc->projected_frame_size <= frame_under_shoot_limit)) { // Lower q_high q_high = q > q_low ? q - 1 : q_low; @@ -3242,7 +2929,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, q = (q * high_err_target) / kf_err; q = MIN(q, (q_high + q_low) >> 1); } else if (kf_err < low_err_target && - cpi->projected_frame_size >= frame_under_shoot_limit) { + rc->projected_frame_size >= frame_under_shoot_limit) { // The key frame is much better than the previous frame // Raise q_low q_low = q < q_high ? q + 1 : q_high; @@ -3258,7 +2945,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, loop = q != last_q; } else if (recode_loop_test( cpi, frame_over_shoot_limit, frame_under_shoot_limit, - q, top_index, bottom_index)) { + q, MAX(q_high, top_index), bottom_index)) { // Is the projected frame size out of range and are we allowed // to attempt to recode. int last_q = q; @@ -3268,28 +2955,30 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, // Update correction factor & compute new Q to try... // Frame is too large - if (cpi->projected_frame_size > cpi->this_frame_target) { + if (rc->projected_frame_size > rc->this_frame_target) { + // Special case if the projected size is > the max allowed. + if (rc->projected_frame_size >= rc->max_frame_bandwidth) + q_high = rc->worst_quality; + // Raise Qlow to at least the current value q_low = q < q_high ? q + 1 : q_high; if (undershoot_seen || loop_count > 1) { - // Update rate_correction_factor unless - // cpi->active_worst_quality has changed. - if (!active_worst_qchanged) - vp9_update_rate_correction_factors(cpi, 1); + vp9_rc_update_rate_correction_factors(cpi, 1); q = (q_high + q_low + 1) / 2; } else { - // Update rate_correction_factor unless - // cpi->active_worst_quality has changed. - if (!active_worst_qchanged) - vp9_update_rate_correction_factors(cpi, 0); + vp9_rc_update_rate_correction_factors(cpi, 0); - q = vp9_regulate_q(cpi, cpi->this_frame_target); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, + bottom_index, MAX(q_high, top_index)); while (q < q_low && retries < 10) { - vp9_update_rate_correction_factors(cpi, 0); - q = vp9_regulate_q(cpi, cpi->this_frame_target); + vp9_rc_update_rate_correction_factors(cpi, 0); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, + bottom_index, MAX(q_high, top_index)); retries++; } } @@ -3300,31 +2989,25 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, q_high = q > q_low ? q - 1 : q_low; if (overshoot_seen || loop_count > 1) { - // Update rate_correction_factor unless - // cpi->active_worst_quality has changed. - if (!active_worst_qchanged) - vp9_update_rate_correction_factors(cpi, 1); - + vp9_rc_update_rate_correction_factors(cpi, 1); q = (q_high + q_low) / 2; } else { - // Update rate_correction_factor unless - // cpi->active_worst_quality has changed. - if (!active_worst_qchanged) - vp9_update_rate_correction_factors(cpi, 0); - - q = vp9_regulate_q(cpi, cpi->this_frame_target); - + vp9_rc_update_rate_correction_factors(cpi, 0); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, + bottom_index, top_index); // Special case reset for qlow for constrained quality. // This should only trigger where there is very substantial // undershoot on a frame and the auto cq level is above // the value passed in by the user.
- if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY && q < q_low) { + if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY && + q < q_low) { q_low = q; } while (q > q_high && retries < 10) { - vp9_update_rate_correction_factors(cpi, 0); - q = vp9_regulate_q(cpi, cpi->this_frame_target); + vp9_rc_update_rate_correction_factors(cpi, 0); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, + bottom_index, top_index); retries++; } } @@ -3341,7 +3024,9 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, } } - if (cpi->is_src_frame_alt_ref) + // Special case for overlay frame. + if (rc->is_src_frame_alt_ref && + rc->projected_frame_size < rc->max_frame_bandwidth) loop = 0; if (loop) { @@ -3352,14 +3037,225 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, #endif } } while (loop); +} + +static void get_ref_frame_flags(VP9_COMP *cpi) { + if (cpi->refresh_last_frame & cpi->refresh_golden_frame) + cpi->gold_is_last = 1; + else if (cpi->refresh_last_frame ^ cpi->refresh_golden_frame) + cpi->gold_is_last = 0; + + if (cpi->refresh_last_frame & cpi->refresh_alt_ref_frame) + cpi->alt_is_last = 1; + else if (cpi->refresh_last_frame ^ cpi->refresh_alt_ref_frame) + cpi->alt_is_last = 0; + + if (cpi->refresh_alt_ref_frame & cpi->refresh_golden_frame) + cpi->gold_is_alt = 1; + else if (cpi->refresh_alt_ref_frame ^ cpi->refresh_golden_frame) + cpi->gold_is_alt = 0; + + cpi->ref_frame_flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG; + + if (cpi->gold_is_last) + cpi->ref_frame_flags &= ~VP9_GOLD_FLAG; + + if (cpi->rc.frames_till_gf_update_due == INT_MAX) + cpi->ref_frame_flags &= ~VP9_GOLD_FLAG; + + if (cpi->alt_is_last) + cpi->ref_frame_flags &= ~VP9_ALT_FLAG; + + if (cpi->gold_is_alt) + cpi->ref_frame_flags &= ~VP9_ALT_FLAG; +} + +static void set_ext_overrides(VP9_COMP *cpi) { + // Override the defaults with the values supplied externally via the + // vp9_update_reference() and vp9_update_entropy() calls. + // Note: The overrides are valid only for the next frame passed + // to the encode_frame_to_data_rate() function. + if (cpi->ext_refresh_frame_context_pending) { + cpi->common.refresh_frame_context = cpi->ext_refresh_frame_context; + cpi->ext_refresh_frame_context_pending = 0; + } + if (cpi->ext_refresh_frame_flags_pending) { + cpi->refresh_last_frame = cpi->ext_refresh_last_frame; + cpi->refresh_golden_frame = cpi->ext_refresh_golden_frame; + cpi->refresh_alt_ref_frame = cpi->ext_refresh_alt_ref_frame; + cpi->ext_refresh_frame_flags_pending = 0; + } +} + +static void encode_frame_to_data_rate(VP9_COMP *cpi, + size_t *size, + uint8_t *dest, + unsigned int *frame_flags) { + VP9_COMMON *const cm = &cpi->common; + TX_SIZE t; + int q; + int top_index; + int bottom_index; + + const SPEED_FEATURES *const sf = &cpi->sf; + const unsigned int max_mv_def = MIN(cm->width, cm->height); + struct segmentation *const seg = &cm->seg; + + set_ext_overrides(cpi); + + /* Scale the source buffer, if required. */ + if (cm->mi_cols * MI_SIZE != cpi->un_scaled_source->y_width || + cm->mi_rows * MI_SIZE != cpi->un_scaled_source->y_height) { + scale_and_extend_frame_nonnormative(cpi->un_scaled_source, + &cpi->scaled_source); + cpi->Source = &cpi->scaled_source; + } else { + cpi->Source = cpi->un_scaled_source; + } + scale_references(cpi); + + vp9_clear_system_state(); + + // Enable or disable mode based tweaking of the zbin. + // For 2 pass only used where GF/ARF prediction quality + // is above a threshold.
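Before the zbin setup that follows, it is worth summarizing the recode loop that closed just above. Stripped of the VP9 specifics it reduces to the shape below; encode_at() and regulate_q() are hypothetical stand-ins for the encode-plus-dummy-pack step and for vp9_rc_regulate_q():

/* Simplified sketch of encode_with_recode_loop(): encode at q, estimate
 * the packed size, then tighten [q_low, q_high] and re-encode until the
 * frame lands inside its under/overshoot window. */
extern int encode_at(int q);  /* hypothetical: returns projected size */
extern int regulate_q(int target, int q_low, int q_high);  /* hypothetical */

static void recode_loop_sketch(int q, int q_low, int q_high, int target,
                               int under_limit, int over_limit) {
  int overshoot_seen = 0, undershoot_seen = 0;
  int loop;
  do {
    const int last_q = q;
    const int projected_size = encode_at(q);
    if (projected_size > over_limit) {          /* too large: raise q */
      q_low = q < q_high ? q + 1 : q_high;      /* raise the floor */
      q = undershoot_seen ? (q_high + q_low + 1) / 2
                          : regulate_q(target, q_low, q_high);
      overshoot_seen = 1;
    } else if (projected_size < under_limit) {  /* too small: lower q */
      q_high = q > q_low ? q - 1 : q_low;       /* lower the ceiling */
      q = overshoot_seen ? (q_high + q_low) / 2
                         : regulate_q(target, q_low, q_high);
      undershoot_seen = 1;
    }
    loop = (q != last_q) && (q_low < q_high);
  } while (loop);
}

Once both an overshoot and an undershoot have been seen, the loop degenerates into a plain bisection of the remaining [q_low, q_high] interval, which bounds the number of re-encodes.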
+ cpi->zbin_mode_boost = 0; + cpi->zbin_mode_boost_enabled = 0; + + // Current default encoder behavior for the altref sign bias. + cm->ref_frame_sign_bias[ALTREF_FRAME] = cpi->rc.source_alt_ref_active; + + // Set default state for segment based loop filter update flags. + cm->lf.mode_ref_delta_update = 0; + + // Initialize cpi->mv_step_param to default based on max resolution. + cpi->mv_step_param = vp9_init_search_range(cpi, max_mv_def); + // Initialize cpi->max_mv_magnitude and cpi->mv_step_param if appropriate. + if (sf->auto_mv_step_size) { + if (frame_is_intra_only(cm)) { + // Initialize max_mv_magnitude for use in the first INTER frame + // after a key/intra-only frame. + cpi->max_mv_magnitude = max_mv_def; + } else { + if (cm->show_frame) + // Allow mv_steps to correspond to twice the max mv magnitude found + // in the previous frame, capped by the default max_mv_magnitude based + // on resolution. + cpi->mv_step_param = vp9_init_search_range(cpi, MIN(max_mv_def, 2 * + cpi->max_mv_magnitude)); + cpi->max_mv_magnitude = 0; + } + } + + // Set various flags etc to special state if it is a key frame. + if (frame_is_intra_only(cm)) { + vp9_setup_key_frame(cpi); + // Reset the loop filter deltas and segmentation map. + vp9_reset_segment_features(&cm->seg); + + // If segmentation is enabled force a map update for key frames. + if (seg->enabled) { + seg->update_map = 1; + seg->update_data = 1; + } + + // The alternate reference frame cannot be active for a key frame. + cpi->rc.source_alt_ref_active = 0; + + cm->error_resilient_mode = (cpi->oxcf.error_resilient_mode != 0); + cm->frame_parallel_decoding_mode = + (cpi->oxcf.frame_parallel_decoding_mode != 0); + + // By default, encoder assumes decoder can use prev_mi. + cm->coding_use_prev_mi = 1; + if (cm->error_resilient_mode) { + cm->coding_use_prev_mi = 0; + cm->frame_parallel_decoding_mode = 1; + cm->reset_frame_context = 0; + cm->refresh_frame_context = 0; + } else if (cm->intra_only) { + // Only reset the current context. + cm->reset_frame_context = 2; + } + } + + // Configure experimental use of segmentation for enhanced coding of + // static regions if indicated. + // Only allowed in second pass of two pass (as requires lagged coding) + // and if the relevant speed feature flag is set. + if (cpi->pass == 2 && cpi->sf.static_segmentation) + configure_static_seg_features(cpi); + + // For 1 pass CBR, check if we are dropping this frame. + // Never drop on key frame. + if (cpi->pass == 0 && + cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && + cm->frame_type != KEY_FRAME) { + if (vp9_rc_drop_frame(cpi)) { + vp9_rc_postencode_update_drop_frame(cpi); + ++cm->current_video_frame; + return; + } + } + + vp9_clear_system_state(); + + vp9_zero(cpi->rd_tx_select_threshes); + +#if CONFIG_VP9_POSTPROC + if (cpi->oxcf.noise_sensitivity > 0) { + int l = 0; + switch (cpi->oxcf.noise_sensitivity) { + case 1: + l = 20; + break; + case 2: + l = 40; + break; + case 3: + l = 60; + break; + case 4: + case 5: + l = 100; + break; + case 6: + l = 150; + break; + } + vp9_denoise(cpi->Source, cpi->Source, l); + } +#endif + +#ifdef OUTPUT_YUV_SRC + vp9_write_yuv_frame(cpi->Source); +#endif + + // Decide q and q bounds. 
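One step back: the 1-pass CBR path above may drop the frame before any q is chosen. vp9_rc_drop_frame()'s body is not in this diff, so the following is only a sketch of a typical leaky-bucket drop rule, with assumed field names:

#include <stdint.h>

/* Hedged sketch of a CBR frame-drop test: if the modelled decoder buffer
 * has drained past a drop threshold, skip encoding the frame entirely and
 * let the buffer refill. All names here are assumptions. */
typedef struct {
  int drop_frames_allowed;  /* user enabled frame dropping */
  int64_t buffer_level;     /* bits currently in the model buffer */
  int64_t drop_mark;        /* level below which frames are dropped */
} rc_sketch;

static int drop_frame_sketch(const rc_sketch *rc) {
  if (!rc->drop_frames_allowed)
    return 0;
  return rc->buffer_level <= rc->drop_mark;
}

With the drop check out of the way, q selection proceeds below.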
+ q = vp9_rc_pick_q_and_bounds(cpi, &bottom_index, &top_index); + + if (!frame_is_intra_only(cm)) { + cm->interp_filter = DEFAULT_INTERP_FILTER; + /* TODO: Decide this more intelligently */ + set_high_precision_mv(cpi, q < HIGH_PRECISION_MV_QTHRESH); + } + + vp9_set_speed_features(cpi); + + if (cpi->sf.recode_loop == DISALLOW_RECODE) { + encode_without_recode_loop(cpi, size, dest, q); + } else { + encode_with_recode_loop(cpi, size, dest, q, bottom_index, top_index); + } // Special case code to reduce pulsing when key frames are forced at a // fixed interval. Note the reconstruction error if it is the frame before // the force key frame - if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) { + if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) { cpi->ambient_err = vp9_calc_ss_err(cpi->Source, get_frame_new_buffer(cm)); } + // If the encoder forced a KEY_FRAME decision if (cm->frame_type == KEY_FRAME) cpi->refresh_last_frame = 1; @@ -3397,185 +3293,34 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, update_reference_frames(cpi); for (t = TX_4X4; t <= TX_32X32; t++) - full_to_model_counts(cpi->common.counts.coef[t], - cpi->coef_counts[t]); - if (!cpi->common.error_resilient_mode && - !cpi->common.frame_parallel_decoding_mode) { - vp9_adapt_coef_probs(&cpi->common); - } - - if (!frame_is_intra_only(&cpi->common)) { - FRAME_COUNTS *counts = &cpi->common.counts; - - vp9_copy(counts->y_mode, cpi->y_mode_count); - vp9_copy(counts->uv_mode, cpi->y_uv_mode_count); - vp9_copy(counts->partition, cpi->partition_count); - vp9_copy(counts->intra_inter, cpi->intra_inter_count); - vp9_copy(counts->comp_inter, cpi->comp_inter_count); - vp9_copy(counts->single_ref, cpi->single_ref_count); - vp9_copy(counts->comp_ref, cpi->comp_ref_count); - counts->mv = cpi->NMVcount; - if (!cpi->common.error_resilient_mode && - !cpi->common.frame_parallel_decoding_mode) { - vp9_adapt_mode_probs(&cpi->common); - vp9_adapt_mv_probs(&cpi->common, cpi->common.allow_high_precision_mv); - } - } + full_to_model_counts(cm->counts.coef[t], cpi->coef_counts[t]); -#ifdef ENTROPY_STATS - vp9_update_mode_context_stats(cpi); -#endif - - /* Move storing frame_type out of the above loop since it is also - * needed in motion search besides loopfilter */ - cm->last_frame_type = cm->frame_type; - - // Update rate control heuristics - cpi->total_byte_count += (*size); - cpi->projected_frame_size = (*size) << 3; - - // Post encode loop adjustment of Q prediction. - if (!active_worst_qchanged) - vp9_update_rate_correction_factors(cpi, (cpi->sf.recode_loop || - cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) ? 2 : 0); - - - cpi->last_q[cm->frame_type] = cm->base_qindex; - - // Keep record of last boosted (KF/KF/ARF) Q value. - // If the current frame is coded at a lower Q then we also update it. - // If all mbs in this group are skipped only update if the Q value is - // better than that already stored. - // This is used to help set quality in forced key frames to reduce popping - if ((cm->base_qindex < cpi->last_boosted_qindex) || - ((cpi->static_mb_pct < 100) && - ((cm->frame_type == KEY_FRAME) || - cpi->refresh_alt_ref_frame || - (cpi->refresh_golden_frame && !cpi->is_src_frame_alt_ref)))) { - cpi->last_boosted_qindex = cm->base_qindex; - } - - if (cm->frame_type == KEY_FRAME) { - vp9_adjust_key_frame_context(cpi); - } - - // Keep a record of ambient average Q. 
- if (cm->frame_type != KEY_FRAME) - cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + - cm->base_qindex) >> 2; + if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) + vp9_adapt_coef_probs(cm); - // Keep a record from which we can calculate the average Q excluding GF - // updates and key frames. - if (cm->frame_type != KEY_FRAME && - !cpi->refresh_golden_frame && - !cpi->refresh_alt_ref_frame) { - cpi->ni_frames++; - cpi->tot_q += vp9_convert_qindex_to_q(q); - cpi->avg_q = cpi->tot_q / (double)cpi->ni_frames; - - // Calculate the average Q for normal inter frames (not key or GFU frames). - cpi->ni_tot_qi += q; - cpi->ni_av_qi = cpi->ni_tot_qi / cpi->ni_frames; - } - - // Update the buffer level variable. - // Non-viewable frames are a special case and are treated as pure overhead. - if (!cm->show_frame) - cpi->bits_off_target -= cpi->projected_frame_size; - else - cpi->bits_off_target += cpi->av_per_frame_bandwidth - - cpi->projected_frame_size; - - // Clip the buffer level at the maximum buffer size - if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) - cpi->bits_off_target = cpi->oxcf.maximum_buffer_size; - - // Rolling monitors of whether we are over or underspending used to help - // regulate min and Max Q in two pass. - if (cm->frame_type != KEY_FRAME) { - cpi->rolling_target_bits = - ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4; - cpi->rolling_actual_bits = - ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4; - cpi->long_rolling_target_bits = - ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32; - cpi->long_rolling_actual_bits = - ((cpi->long_rolling_actual_bits * 31) + - cpi->projected_frame_size + 16) / 32; - } - - // Actual bits spent - cpi->total_actual_bits += cpi->projected_frame_size; - - // Debug stats - cpi->total_target_vs_actual += (cpi->this_frame_target - - cpi->projected_frame_size); - - cpi->buffer_level = cpi->bits_off_target; - -#ifndef DISABLE_RC_LONG_TERM_MEM - // Update bits left to the kf and gf groups to account for overshoot or - // undershoot on these frames - if (cm->frame_type == KEY_FRAME) { - cpi->twopass.kf_group_bits += cpi->this_frame_target - - cpi->projected_frame_size; - - cpi->twopass.kf_group_bits = MAX(cpi->twopass.kf_group_bits, 0); - } else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) { - cpi->twopass.gf_group_bits += cpi->this_frame_target - - cpi->projected_frame_size; - - cpi->twopass.gf_group_bits = MAX(cpi->twopass.gf_group_bits, 0); + if (!frame_is_intra_only(cm)) { + if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) { + vp9_adapt_mode_probs(cm); + vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv); + } } -#endif #if 0 output_frame_level_debug_stats(cpi); #endif if (cpi->refresh_golden_frame == 1) - cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN; + cm->frame_flags |= FRAMEFLAGS_GOLDEN; else - cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN; + cm->frame_flags &= ~FRAMEFLAGS_GOLDEN; if (cpi->refresh_alt_ref_frame == 1) - cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF; + cm->frame_flags |= FRAMEFLAGS_ALTREF; else - cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF; + cm->frame_flags &= ~FRAMEFLAGS_ALTREF; + get_ref_frame_flags(cpi); - if (cpi->refresh_last_frame & cpi->refresh_golden_frame) - cpi->gold_is_last = 1; - else if (cpi->refresh_last_frame ^ cpi->refresh_golden_frame) - cpi->gold_is_last = 0; - - if (cpi->refresh_last_frame & cpi->refresh_alt_ref_frame) - cpi->alt_is_last = 1; - else 
if (cpi->refresh_last_frame ^ cpi->refresh_alt_ref_frame) - cpi->alt_is_last = 0; - - if (cpi->refresh_alt_ref_frame & cpi->refresh_golden_frame) - cpi->gold_is_alt = 1; - else if (cpi->refresh_alt_ref_frame ^ cpi->refresh_golden_frame) - cpi->gold_is_alt = 0; - - cpi->ref_frame_flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG; - - if (cpi->gold_is_last) - cpi->ref_frame_flags &= ~VP9_GOLD_FLAG; - - if (cpi->alt_is_last) - cpi->ref_frame_flags &= ~VP9_ALT_FLAG; - - if (cpi->gold_is_alt) - cpi->ref_frame_flags &= ~VP9_ALT_FLAG; - - if (cpi->oxcf.play_alternate && cpi->refresh_alt_ref_frame - && (cm->frame_type != KEY_FRAME)) - // Update the alternate reference frame stats as appropriate. - update_alt_ref_frame_stats(cpi); - else - // Update the Golden frame stats as appropriate. - update_golden_frame_stats(cpi); + vp9_rc_postencode_update(cpi, *size); if (cm->frame_type == KEY_FRAME) { // Tell the caller that the frame was coded as a key frame @@ -3589,9 +3334,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, cpi->new_frame_coding_order_period = -1; } #endif - - // As this frame is a key frame the next defaults to an inter frame. - cm->frame_type = INTER_FRAME; } else { *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY; @@ -3621,7 +3363,9 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, cm->last_height = cm->height; // reset to normal state now that we are done. - cm->last_show_frame = cm->show_frame; + if (!cm->show_existing_frame) + cm->last_show_frame = cm->show_frame; + if (cm->show_frame) { // current mip will be the prev_mip for the next frame MODE_INFO *temp = cm->prev_mip; @@ -3641,71 +3385,87 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, // Don't increment frame counters if this was an altref buffer // update not a real frame ++cm->current_video_frame; - ++cpi->frames_since_key; } + // restore prev_mi cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1; cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1; } -static void Pass2Encode(VP9_COMP *cpi, unsigned long *size, - unsigned char *dest, unsigned int *frame_flags) { - cpi->enable_encode_breakout = 1; - - if (!cpi->refresh_alt_ref_frame) - vp9_second_pass(cpi); +static void SvcEncode(VP9_COMP *cpi, size_t *size, uint8_t *dest, + unsigned int *frame_flags) { + vp9_rc_get_svc_params(cpi); + encode_frame_to_data_rate(cpi, size, dest, frame_flags); +} +static void Pass0Encode(VP9_COMP *cpi, size_t *size, uint8_t *dest, + unsigned int *frame_flags) { + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + vp9_rc_get_one_pass_cbr_params(cpi); + } else { + vp9_rc_get_one_pass_vbr_params(cpi); + } encode_frame_to_data_rate(cpi, size, dest, frame_flags); - // vp9_print_modes_and_motion_vectors(&cpi->common, "encode.stt"); -#ifdef DISABLE_RC_LONG_TERM_MEM - cpi->twopass.bits_left -= cpi->this_frame_target; -#else - cpi->twopass.bits_left -= 8 * *size; -#endif +} - if (!cpi->refresh_alt_ref_frame) { - double lower_bounds_min_rate = FRAME_OVERHEAD_BITS * cpi->oxcf.framerate; - double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth - * cpi->oxcf.two_pass_vbrmin_section - / 100); +static void Pass1Encode(VP9_COMP *cpi, size_t *size, uint8_t *dest, + unsigned int *frame_flags) { + (void) size; + (void) dest; + (void) frame_flags; - if (two_pass_min_rate < lower_bounds_min_rate) - two_pass_min_rate = lower_bounds_min_rate; + vp9_rc_get_first_pass_params(cpi); + vp9_set_quantizer(cpi, find_fp_qindex()); + vp9_first_pass(cpi); +} - cpi->twopass.bits_left += (int64_t)(two_pass_min_rate - 
/ cpi->oxcf.framerate); - } +static void Pass2Encode(VP9_COMP *cpi, size_t *size, + uint8_t *dest, unsigned int *frame_flags) { + cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED; + + vp9_rc_get_second_pass_params(cpi); + encode_frame_to_data_rate(cpi, size, dest, frame_flags); + + vp9_twopass_postencode_update(cpi, *size); } -static void check_initial_width(VP9_COMP *cpi, YV12_BUFFER_CONFIG *sd) { - VP9_COMMON *cm = &cpi->common; +static void check_initial_width(VP9_COMP *cpi, int subsampling_x, + int subsampling_y) { + VP9_COMMON *const cm = &cpi->common; + if (!cpi->initial_width) { - // TODO(jkoleszar): Support 1/4 subsampling? - cm->subsampling_x = (sd != NULL) && sd->uv_width < sd->y_width; - cm->subsampling_y = (sd != NULL) && sd->uv_height < sd->y_height; + cm->subsampling_x = subsampling_x; + cm->subsampling_y = subsampling_y; alloc_raw_frame_buffers(cpi); - cpi->initial_width = cm->width; cpi->initial_height = cm->height; } } -int vp9_receive_raw_frame(VP9_PTR ptr, unsigned int frame_flags, +int vp9_receive_raw_frame(VP9_COMP *cpi, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time) { - VP9_COMP *cpi = (VP9_COMP *) ptr; - struct vpx_usec_timer timer; - int res = 0; + VP9_COMMON *cm = &cpi->common; + struct vpx_usec_timer timer; + int res = 0; + const int subsampling_x = sd->uv_width < sd->y_width; + const int subsampling_y = sd->uv_height < sd->y_height; - check_initial_width(cpi, sd); + check_initial_width(cpi, subsampling_x, subsampling_y); vpx_usec_timer_start(&timer); - if (vp9_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags, - cpi->active_map_enabled ? cpi->active_map : NULL)) + if (vp9_lookahead_push(cpi->lookahead, + sd, time_stamp, end_time, frame_flags)) res = -1; vpx_usec_timer_mark(&timer); cpi->time_receive_data += vpx_usec_timer_elapsed(&timer); + if (cm->version == 0 && (subsampling_x != 1 || subsampling_y != 1)) { + vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM, + "Non-4:2:0 color space requires profile >= 1"); + res = -1; + } + return res; } @@ -3730,15 +3490,52 @@ int is_next_frame_arf(VP9_COMP *cpi) { } #endif -int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, - unsigned long *size, unsigned char *dest, +void adjust_frame_rate(VP9_COMP *cpi) { + int64_t this_duration; + int step = 0; + + if (cpi->source->ts_start == cpi->first_time_stamp_ever) { + this_duration = cpi->source->ts_end - cpi->source->ts_start; + step = 1; + } else { + int64_t last_duration = cpi->last_end_time_stamp_seen + - cpi->last_time_stamp_seen; + + this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen; + + // do a step update if the duration changes by 10% + if (last_duration) + step = (int)((this_duration - last_duration) * 10 / last_duration); + } + + if (this_duration) { + if (step) { + vp9_new_framerate(cpi, 10000000.0 / this_duration); + } else { + // Average this frame's rate into the last second's average + // frame rate. If we haven't seen 1 second yet, then average + // over the whole interval seen. 
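Numerically, the averaging described above (and implemented immediately below) behaves like this, using the 10 MHz timestamp units and made-up sample values:

#include <stdio.h>

/* Worked illustration of the framerate averaging: avg_duration is folded
 * toward this_duration over at most the last 10,000,000 ticks (1 s) of
 * input. The numbers are examples only. */
int main(void) {
  const double interval = 10000000.0;       /* at least 1 s of input seen */
  const double this_duration = 400000.0;    /* a 40 ms (25 fps) frame */
  double avg_duration = 10000000.0 / 30.0;  /* running rate was 30 fps */

  avg_duration *= (interval - avg_duration + this_duration);
  avg_duration /= interval;

  /* prints ~29.80: the running framerate eases from 30 toward 25 fps */
  printf("new framerate: %.2f\n", 10000000.0 / avg_duration);
  return 0;
}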
+ const double interval = MIN((double)(cpi->source->ts_end + - cpi->first_time_stamp_ever), 10000000.0); + double avg_duration = 10000000.0 / cpi->oxcf.framerate; + avg_duration *= (interval - avg_duration + this_duration); + avg_duration /= interval; + + vp9_new_framerate(cpi, 10000000.0 / avg_duration); + } + } + cpi->last_time_stamp_seen = cpi->source->ts_start; + cpi->last_end_time_stamp_seen = cpi->source->ts_end; +} + +int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, + size_t *size, uint8_t *dest, int64_t *time_stamp, int64_t *time_end, int flush) { - VP9_COMP *cpi = (VP9_COMP *) ptr; VP9_COMMON *cm = &cpi->common; + MACROBLOCKD *xd = &cpi->mb.e_mbd; struct vpx_usec_timer cmptimer; - YV12_BUFFER_CONFIG *force_src_buffer = NULL; - int i; - // FILE *fp_out = fopen("enc_frame_type.txt", "a"); + YV12_BUFFER_CONFIG *force_src_buffer = NULL; + MV_REFERENCE_FRAME ref_frame; if (!cpi) return -1; @@ -3747,11 +3544,17 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, cpi->source = NULL; - cpi->common.allow_high_precision_mv = ALTREF_HIGH_PRECISION_MV; - set_mvcost(cpi); + set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV); + + // Normal defaults + cm->reset_frame_context = 0; + cm->refresh_frame_context = 1; + cpi->refresh_last_frame = 1; + cpi->refresh_golden_frame = 0; + cpi->refresh_alt_ref_frame = 0; // Should we code an alternate reference frame. - if (cpi->oxcf.play_alternate && cpi->source_alt_ref_pending) { + if (cpi->oxcf.play_alternate && cpi->rc.source_alt_ref_pending) { int frames_to_arf; #if CONFIG_MULTIPLE_ARF @@ -3760,12 +3563,12 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, if (cpi->multi_arf_enabled && (cpi->pass == 2)) frames_to_arf = (-cpi->frame_coding_order[cpi->sequence_number]) - - cpi->next_frame_in_order; + - cpi->next_frame_in_order; else #endif - frames_to_arf = cpi->frames_till_gf_update_due; + frames_to_arf = cpi->rc.frames_till_gf_update_due; - assert(frames_to_arf < cpi->twopass.frames_to_key); + assert(frames_to_arf <= cpi->rc.frames_to_key); if ((cpi->source = vp9_lookahead_peek(cpi->lookahead, frames_to_arf))) { #if CONFIG_MULTIPLE_ARF @@ -3777,11 +3580,9 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, if (cpi->oxcf.arnr_max_frames > 0) { // Produce the filtered ARF frame. // TODO(agrange) merge these two functions. - configure_arnr_filter(cpi, cm->current_video_frame + frames_to_arf, - cpi->gfu_boost); + vp9_configure_arnr_filter(cpi, frames_to_arf, cpi->rc.gfu_boost); vp9_temporal_filter_prepare(cpi, frames_to_arf); - vp9_extend_frame_borders(&cpi->alt_ref_buffer, - cm->subsampling_x, cm->subsampling_y); + vp9_extend_frame_borders(&cpi->alt_ref_buffer); force_src_buffer = &cpi->alt_ref_buffer; } @@ -3789,15 +3590,14 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, cpi->refresh_alt_ref_frame = 1; cpi->refresh_golden_frame = 0; cpi->refresh_last_frame = 0; - cpi->is_src_frame_alt_ref = 0; - - // TODO(agrange) This needs to vary depending on where the next ARF is. - cpi->frames_till_alt_ref_frame = frames_to_arf; + cpi->rc.is_src_frame_alt_ref = 0; #if CONFIG_MULTIPLE_ARF if (!cpi->multi_arf_enabled) #endif - cpi->source_alt_ref_pending = 0; // Clear Pending altf Ref flag. + cpi->rc.source_alt_ref_pending = 0; + } else { + cpi->rc.source_alt_ref_pending = 0; } } @@ -3811,19 +3611,19 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, #if CONFIG_MULTIPLE_ARF // Is this frame the ARF overlay. 
- cpi->is_src_frame_alt_ref = 0; + cpi->rc.is_src_frame_alt_ref = 0; for (i = 0; i < cpi->arf_buffered; ++i) { if (cpi->source == cpi->alt_ref_source[i]) { - cpi->is_src_frame_alt_ref = 1; + cpi->rc.is_src_frame_alt_ref = 1; cpi->refresh_golden_frame = 1; break; } } #else - cpi->is_src_frame_alt_ref = cpi->alt_ref_source - && (cpi->source == cpi->alt_ref_source); + cpi->rc.is_src_frame_alt_ref = cpi->alt_ref_source + && (cpi->source == cpi->alt_ref_source); #endif - if (cpi->is_src_frame_alt_ref) { + if (cpi->rc.is_src_frame_alt_ref) { // Current frame is an ARF overlay frame. #if CONFIG_MULTIPLE_ARF cpi->alt_ref_source[i] = NULL; @@ -3847,21 +3647,9 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, *time_end = cpi->source->ts_end; *frame_flags = cpi->source->flags; - // fprintf(fp_out, " Frame:%d", cm->current_video_frame); -#if CONFIG_MULTIPLE_ARF - if (cpi->multi_arf_enabled) { - // fprintf(fp_out, " seq_no:%d this_frame_weight:%d", - // cpi->sequence_number, cpi->this_frame_weight); - } else { - // fprintf(fp_out, "\n"); - } -#else - // fprintf(fp_out, "\n"); -#endif - #if CONFIG_MULTIPLE_ARF if ((cm->frame_type != KEY_FRAME) && (cpi->pass == 2)) - cpi->source_alt_ref_pending = is_next_frame_arf(cpi); + cpi->rc.source_alt_ref_pending = is_next_frame_arf(cpi); #endif } else { *size = 0; @@ -3869,8 +3657,6 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, vp9_end_first_pass(cpi); /* get last stats packet */ cpi->twopass.first_pass_done = 1; } - - // fclose(fp_out); return -1; } @@ -3880,55 +3666,26 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, } // adjust frame rates based on timestamps given - if (!cpi->refresh_alt_ref_frame) { - int64_t this_duration; - int step = 0; - - if (cpi->source->ts_start == cpi->first_time_stamp_ever) { - this_duration = cpi->source->ts_end - cpi->source->ts_start; - step = 1; - } else { - int64_t last_duration = cpi->last_end_time_stamp_seen - - cpi->last_time_stamp_seen; - - this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen; - - // do a step update if the duration changes by 10% - if (last_duration) - step = (int)((this_duration - last_duration) * 10 / last_duration); - } - - if (this_duration) { - if (step) { - vp9_new_framerate(cpi, 10000000.0 / this_duration); - } else { - // Average this frame's rate into the last second's average - // frame rate. If we haven't seen 1 second yet, then average - // over the whole interval seen. - const double interval = MIN((double)(cpi->source->ts_end - - cpi->first_time_stamp_ever), 10000000.0); - double avg_duration = 10000000.0 / cpi->oxcf.framerate; - avg_duration *= (interval - avg_duration + this_duration); - avg_duration /= interval; - - vp9_new_framerate(cpi, 10000000.0 / avg_duration); - } - } + if (cm->show_frame) { + adjust_frame_rate(cpi); + } - cpi->last_time_stamp_seen = cpi->source->ts_start; - cpi->last_end_time_stamp_seen = cpi->source->ts_end; + if (cpi->svc.number_temporal_layers > 1 && + cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + update_layer_framerate(cpi); + restore_layer_context(cpi); } // start with a 0 size frame *size = 0; // Clear down mmx registers - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); /* find a free buffer for the new frame, releasing the reference previously * held. 
*/ - cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + cm->frame_bufs[cm->new_fb_idx].ref_count--; cm->new_fb_idx = get_free_fb(cm); #if CONFIG_MULTIPLE_ARF @@ -3942,65 +3699,63 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, } #endif - /* Get the mapping of L/G/A to the reference buffer pool */ - cm->active_ref_idx[0] = cm->ref_frame_map[cpi->lst_fb_idx]; - cm->active_ref_idx[1] = cm->ref_frame_map[cpi->gld_fb_idx]; - cm->active_ref_idx[2] = cm->ref_frame_map[cpi->alt_fb_idx]; - -#if 0 // CONFIG_MULTIPLE_ARF - if (cpi->multi_arf_enabled) { - fprintf(fp_out, " idx(%d, %d, %d, %d) active(%d, %d, %d)", - cpi->lst_fb_idx, cpi->gld_fb_idx, cpi->alt_fb_idx, cm->new_fb_idx, - cm->active_ref_idx[0], cm->active_ref_idx[1], cm->active_ref_idx[2]); - if (cpi->refresh_alt_ref_frame) - fprintf(fp_out, " type:ARF"); - if (cpi->is_src_frame_alt_ref) - fprintf(fp_out, " type:OVERLAY[%d]", cpi->alt_fb_idx); - fprintf(fp_out, "\n"); - } -#endif - - cm->frame_type = INTER_FRAME; cm->frame_flags = *frame_flags; // Reset the frame pointers to the current frame size vp9_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x, cm->subsampling_y, - VP9BORDERINPIXELS); + VP9_ENC_BORDER_IN_PIXELS, NULL, NULL, NULL); + + for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { + const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)]; + YV12_BUFFER_CONFIG *const buf = &cm->frame_bufs[idx].buf; + RefBuffer *const ref_buf = &cm->frame_refs[ref_frame - 1]; + ref_buf->buf = buf; + ref_buf->idx = idx; + vp9_setup_scale_factors_for_frame(&ref_buf->sf, + buf->y_crop_width, buf->y_crop_height, + cm->width, cm->height); - // Calculate scaling factors for each of the 3 available references - for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) - vp9_setup_scale_factors(cm, i); + if (vp9_is_scaled(&ref_buf->sf)) + vp9_extend_frame_borders(buf); + } - vp9_setup_interp_filters(&cpi->mb.e_mbd, DEFAULT_INTERP_FILTER, cm); + set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME); + xd->interp_kernel = vp9_get_interp_kernel( + DEFAULT_INTERP_FILTER == SWITCHABLE ? EIGHTTAP : DEFAULT_INTERP_FILTER); - if (cpi->sf.variance_adaptive_quantization) { - vp9_vaq_init(); + if (cpi->oxcf.aq_mode == VARIANCE_AQ) { + vp9_vaq_init(); } - if (cpi->pass == 1) { + if (cpi->use_svc) { + SvcEncode(cpi, size, dest, frame_flags); + } else if (cpi->pass == 1) { Pass1Encode(cpi, size, dest, frame_flags); } else if (cpi->pass == 2) { Pass2Encode(cpi, size, dest, frame_flags); } else { - encode_frame_to_data_rate(cpi, size, dest, frame_flags); + // One pass encode + Pass0Encode(cpi, size, dest, frame_flags); } if (cm->refresh_frame_context) cm->frame_contexts[cm->frame_context_idx] = cm->fc; + // Frame was dropped, release scaled references. + if (*size == 0) { + release_scaled_references(cpi); + } + if (*size > 0) { - // if its a dropped frame honor the requests on subsequent frames cpi->droppable = !frame_is_reference(cpi); + } - // return to normal state - cm->reset_frame_context = 0; - cm->refresh_frame_context = 1; - cpi->refresh_alt_ref_frame = 0; - cpi->refresh_golden_frame = 0; - cpi->refresh_last_frame = 1; - cm->frame_type = INTER_FRAME; + // Save layer specific state. 
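Stepping back to the reference scaling set up earlier in this hunk: vp9_setup_scale_factors_for_frame() stores per-axis fixed-point ratios, and vp9_is_scaled() gates the extra border extension. A sketch of the assumed representation (the Q14 shift is an assumption, not something this diff confirms):

/* Assumed fixed-point layout: each axis stores (ref_dim << 14) / cur_dim,
 * so a 1:1 reference holds exactly 1 << 14 and anything else is scaled.
 * Scaled prediction reads beyond the nominal frame edges, which is why
 * the borders are extended above only in the scaled case. */
#define SKETCH_REF_SCALE_SHIFT 14

struct scale_sketch {
  int x_scale_fp;  /* Q14 ratio ref_width / cur_width */
  int y_scale_fp;  /* Q14 ratio ref_height / cur_height */
};

static void setup_scale_sketch(struct scale_sketch *sf,
                               int ref_w, int ref_h, int cur_w, int cur_h) {
  sf->x_scale_fp = (ref_w << SKETCH_REF_SCALE_SHIFT) / cur_w;
  sf->y_scale_fp = (ref_h << SKETCH_REF_SCALE_SHIFT) / cur_h;
}

static int is_scaled_sketch(const struct scale_sketch *sf) {
  const int one = 1 << SKETCH_REF_SCALE_SHIFT;  /* 1.0 in Q14 */
  return sf->x_scale_fp != one || sf->y_scale_fp != one;
}

The layer-state save flagged in the comment above continues below.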
+ if (cpi->svc.number_temporal_layers > 1 && + cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + save_layer_context(cpi); } vpx_usec_timer_mark(&cmptimer); @@ -4012,82 +3767,49 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, #if CONFIG_INTERNAL_STATS if (cpi->pass != 1) { - cpi->bytes += *size; + cpi->bytes += (int)(*size); if (cm->show_frame) { cpi->count++; if (cpi->b_calculate_psnr) { - double ye, ue, ve; - double frame_psnr; - YV12_BUFFER_CONFIG *orig = cpi->Source; - YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; - YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer; - int y_samples = orig->y_height * orig->y_width; - int uv_samples = orig->uv_height * orig->uv_width; - int t_samples = y_samples + 2 * uv_samples; - double sq_error; - - ye = (double)calc_plane_error(orig->y_buffer, orig->y_stride, - recon->y_buffer, recon->y_stride, - orig->y_crop_width, orig->y_crop_height); - - ue = (double)calc_plane_error(orig->u_buffer, orig->uv_stride, - recon->u_buffer, recon->uv_stride, - orig->uv_crop_width, orig->uv_crop_height); - - ve = (double)calc_plane_error(orig->v_buffer, orig->uv_stride, - recon->v_buffer, recon->uv_stride, - orig->uv_crop_width, orig->uv_crop_height); - - sq_error = ye + ue + ve; - - frame_psnr = vp9_mse2psnr(t_samples, 255.0, sq_error); - - cpi->total_y += vp9_mse2psnr(y_samples, 255.0, ye); - cpi->total_u += vp9_mse2psnr(uv_samples, 255.0, ue); - cpi->total_v += vp9_mse2psnr(uv_samples, 255.0, ve); - cpi->total_sq_error += sq_error; - cpi->total += frame_psnr; + YV12_BUFFER_CONFIG *orig = cpi->Source; + YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; + YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer; + PSNR_STATS psnr; + calc_psnr(orig, recon, &psnr); + + cpi->total += psnr.psnr[0]; + cpi->total_y += psnr.psnr[1]; + cpi->total_u += psnr.psnr[2]; + cpi->total_v += psnr.psnr[3]; + cpi->total_sq_error += psnr.sse[0]; + cpi->total_samples += psnr.samples[0]; + { - double frame_psnr2, frame_ssim2 = 0; - double weight = 0; + PSNR_STATS psnr2; + double frame_ssim2 = 0, weight = 0; #if CONFIG_VP9_POSTPROC vp9_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->lf.filter_level * 10 / 6); #endif vp9_clear_system_state(); - ye = (double)calc_plane_error(orig->y_buffer, orig->y_stride, - pp->y_buffer, pp->y_stride, - orig->y_crop_width, orig->y_crop_height); + calc_psnr(orig, pp, &psnr2); - ue = (double)calc_plane_error(orig->u_buffer, orig->uv_stride, - pp->u_buffer, pp->uv_stride, - orig->uv_crop_width, orig->uv_crop_height); + cpi->totalp += psnr2.psnr[0]; + cpi->totalp_y += psnr2.psnr[1]; + cpi->totalp_u += psnr2.psnr[2]; + cpi->totalp_v += psnr2.psnr[3]; + cpi->totalp_sq_error += psnr2.sse[0]; + cpi->totalp_samples += psnr2.samples[0]; - ve = (double)calc_plane_error(orig->v_buffer, orig->uv_stride, - pp->v_buffer, pp->uv_stride, - orig->uv_crop_width, orig->uv_crop_height); - - sq_error = ye + ue + ve; - - frame_psnr2 = vp9_mse2psnr(t_samples, 255.0, sq_error); - - cpi->totalp_y += vp9_mse2psnr(y_samples, 255.0, ye); - cpi->totalp_u += vp9_mse2psnr(uv_samples, 255.0, ue); - cpi->totalp_v += vp9_mse2psnr(uv_samples, 255.0, ve); - cpi->total_sq_error2 += sq_error; - cpi->totalp += frame_psnr2; - - frame_ssim2 = vp9_calc_ssim(cpi->Source, - recon, 1, &weight); + frame_ssim2 = vp9_calc_ssim(orig, recon, 1, &weight); cpi->summed_quality += frame_ssim2 * weight; cpi->summed_weights += weight; - frame_ssim2 = vp9_calc_ssim(cpi->Source, - &cm->post_proc_buffer, 1, &weight); + frame_ssim2 = vp9_calc_ssim(orig, &cm->post_proc_buffer, 1, 
&weight); cpi->summedp_quality += frame_ssim2 * weight; cpi->summedp_weights += weight; @@ -4105,8 +3827,7 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, if (cpi->b_calculate_ssimg) { double y, u, v, frame_all; - frame_all = vp9_calc_ssimg(cpi->Source, cm->frame_to_show, - &y, &u, &v); + frame_all = vp9_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v); cpi->total_ssimg_y += y; cpi->total_ssimg_u += u; cpi->total_ssimg_v += v; @@ -4116,27 +3837,27 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags, } #endif - // fclose(fp_out); return 0; } -int vp9_get_preview_raw_frame(VP9_PTR comp, YV12_BUFFER_CONFIG *dest, +int vp9_get_preview_raw_frame(VP9_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *flags) { - VP9_COMP *cpi = (VP9_COMP *) comp; + VP9_COMMON *cm = &cpi->common; - if (!cpi->common.show_frame) { + if (!cm->show_frame) { return -1; } else { int ret; #if CONFIG_VP9_POSTPROC - ret = vp9_post_proc_frame(&cpi->common, dest, flags); + ret = vp9_post_proc_frame(cm, dest, flags); #else - if (cpi->common.frame_to_show) { - *dest = *cpi->common.frame_to_show; - dest->y_width = cpi->common.width; - dest->y_height = cpi->common.height; - dest->uv_height = cpi->common.height / 2; + if (cm->frame_to_show) { + *dest = *cm->frame_to_show; + dest->y_width = cm->width; + dest->y_height = cm->height; + dest->uv_width = cm->width >> cm->subsampling_x; + dest->uv_height = cm->height >> cm->subsampling_y; ret = 0; } else { ret = -1; @@ -4148,11 +3869,10 @@ int vp9_get_preview_raw_frame(VP9_PTR comp, YV12_BUFFER_CONFIG *dest, } } -int vp9_set_roimap(VP9_PTR comp, unsigned char *map, unsigned int rows, +int vp9_set_roimap(VP9_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[MAX_SEGMENTS], int delta_lf[MAX_SEGMENTS], unsigned int threshold[MAX_SEGMENTS]) { - VP9_COMP *cpi = (VP9_COMP *) comp; signed char feature_data[SEG_LVL_MAX][MAX_SEGMENTS]; struct segmentation *seg = &cpi->common.seg; int i; @@ -4161,15 +3881,15 @@ int vp9_set_roimap(VP9_PTR comp, unsigned char *map, unsigned int rows, return -1; if (!map) { - vp9_disable_segmentation((VP9_PTR)cpi); + vp9_disable_segmentation(seg); return 0; } // Set the segmentation Map - vp9_set_segmentation_map((VP9_PTR)cpi, map); + vp9_set_segmentation_map(cpi, map); // Activate segmentation. 
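Returning to the internal-stats rework above: the calc_psnr()/PSNR_STATS aggregation reduces to the standard peak-signal formula. A restatement of that math, not the libvpx routine itself:

#include <math.h>
#include <stdint.h>

/* PSNR = 10 * log10(peak^2 * N / SSE) for N samples with total squared
 * error SSE; 255 is the 8-bit peak. The 100 dB cap for the zero-error
 * case follows common practice and is an assumption here. */
static double mse_to_psnr_sketch(uint64_t samples, uint64_t sse) {
  const double peak = 255.0;
  if (sse == 0)
    return 100.0;
  return 10.0 * log10((peak * peak * (double)samples) / (double)sse);
}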
- vp9_enable_segmentation((VP9_PTR)cpi); + vp9_enable_segmentation(seg); // Set up the quant, LF and breakout threshold segment data for (i = 0; i < MAX_SEGMENTS; i++) { @@ -4193,15 +3913,13 @@ int vp9_set_roimap(VP9_PTR comp, unsigned char *map, unsigned int rows, // Initialize the feature data structure // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1 - vp9_set_segment_data((VP9_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA); + vp9_set_segment_data(seg, &feature_data[0][0], SEGMENT_DELTADATA); return 0; } -int vp9_set_active_map(VP9_PTR comp, unsigned char *map, +int vp9_set_active_map(VP9_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols) { - VP9_COMP *cpi = (VP9_COMP *) comp; - if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) { if (map) { vpx_memcpy(cpi->active_map, map, rows * cols); @@ -4217,9 +3935,8 @@ int vp9_set_active_map(VP9_PTR comp, unsigned char *map, } } -int vp9_set_internal_size(VP9_PTR comp, +int vp9_set_internal_size(VP9_COMP *cpi, VPX_SCALING horiz_mode, VPX_SCALING vert_mode) { - VP9_COMP *cpi = (VP9_COMP *) comp; VP9_COMMON *cm = &cpi->common; int hr = 0, hs = 0, vr = 0, vs = 0; @@ -4239,22 +3956,21 @@ int vp9_set_internal_size(VP9_PTR comp, return 0; } -int vp9_set_size_literal(VP9_PTR comp, unsigned int width, +int vp9_set_size_literal(VP9_COMP *cpi, unsigned int width, unsigned int height) { - VP9_COMP *cpi = (VP9_COMP *)comp; VP9_COMMON *cm = &cpi->common; - check_initial_width(cpi, NULL); + check_initial_width(cpi, 1, 1); if (width) { cm->width = width; if (cm->width * 5 < cpi->initial_width) { cm->width = cpi->initial_width / 5 + 1; - printf("Warning: Desired width too small, changed to %d \n", cm->width); + printf("Warning: Desired width too small, changed to %d\n", cm->width); } if (cm->width > cpi->initial_width) { cm->width = cpi->initial_width; - printf("Warning: Desired width too large, changed to %d \n", cm->width); + printf("Warning: Desired width too large, changed to %d\n", cm->width); } } @@ -4262,11 +3978,11 @@ int vp9_set_size_literal(VP9_PTR comp, unsigned int width, cm->height = height; if (cm->height * 5 < cpi->initial_height) { cm->height = cpi->initial_height / 5 + 1; - printf("Warning: Desired height too small, changed to %d \n", cm->height); + printf("Warning: Desired height too small, changed to %d\n", cm->height); } if (cm->height > cpi->initial_height) { cm->height = cpi->initial_height; - printf("Warning: Desired height too large, changed to %d \n", cm->height); + printf("Warning: Desired height too large, changed to %d\n", cm->height); } } @@ -4276,36 +3992,36 @@ int vp9_set_size_literal(VP9_PTR comp, unsigned int width, return 0; } -void vp9_set_svc(VP9_PTR comp, int use_svc) { - VP9_COMP *cpi = (VP9_COMP *)comp; +void vp9_set_svc(VP9_COMP *cpi, int use_svc) { cpi->use_svc = use_svc; return; } -int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) { +int vp9_calc_ss_err(const YV12_BUFFER_CONFIG *source, + const YV12_BUFFER_CONFIG *reference) { int i, j; int total = 0; - uint8_t *src = source->y_buffer; - uint8_t *dst = dest->y_buffer; + const uint8_t *src = source->y_buffer; + const uint8_t *ref = reference->y_buffer; // Loop through the Y plane raw and reconstruction data summing // (square differences) for (i = 0; i < source->y_height; i += 16) { for (j = 0; j < source->y_width; j += 16) { unsigned int sse; - total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride, - &sse); + total += vp9_mse16x16(src + j, source->y_stride, + ref + j, reference->y_stride, &sse); } src 
+= 16 * source->y_stride; - dst += 16 * dest->y_stride; + ref += 16 * reference->y_stride; } return total; } -int vp9_get_quantizer(VP9_PTR c) { - return ((VP9_COMP *)c)->common.base_qindex; +int vp9_get_quantizer(VP9_COMP *cpi) { + return cpi->common.base_qindex; } diff --git a/libvpx/vp9/encoder/vp9_onyx_int.h b/libvpx/vp9/encoder/vp9_onyx_int.h index 9e80212..8c6b48c 100644 --- a/libvpx/vp9/encoder/vp9_onyx_int.h +++ b/libvpx/vp9/encoder/vp9_onyx_int.h @@ -8,39 +8,46 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_ENCODER_VP9_ONYX_INT_H_ #define VP9_ENCODER_VP9_ONYX_INT_H_ #include <stdio.h> + #include "./vpx_config.h" -#include "vp9/common/vp9_onyx.h" -#include "vp9/encoder/vp9_treewriter.h" -#include "vp9/encoder/vp9_tokenize.h" -#include "vp9/common/vp9_onyxc_int.h" -#include "vp9/encoder/vp9_variance.h" -#include "vp9/encoder/vp9_encodemb.h" -#include "vp9/encoder/vp9_quantize.h" -#include "vp9/common/vp9_entropy.h" -#include "vp9/common/vp9_entropymode.h" #include "vpx_ports/mem.h" #include "vpx/internal/vpx_codec_internal.h" -#include "vp9/encoder/vp9_mcomp.h" -#include "vp9/common/vp9_findnearmv.h" +#include "vpx/vp8cx.h" + +#include "vp9/common/vp9_ppflags.h" +#include "vp9/common/vp9_entropy.h" +#include "vp9/common/vp9_entropymode.h" +#include "vp9/common/vp9_onyxc_int.h" + +#include "vp9/encoder/vp9_encodemb.h" +#include "vp9/encoder/vp9_firstpass.h" #include "vp9/encoder/vp9_lookahead.h" +#include "vp9/encoder/vp9_mbgraph.h" +#include "vp9/encoder/vp9_mcomp.h" +#include "vp9/encoder/vp9_quantize.h" +#include "vp9/encoder/vp9_ratectrl.h" +#include "vp9/encoder/vp9_tokenize.h" +#include "vp9/encoder/vp9_variance.h" -#define DISABLE_RC_LONG_TERM_MEM 0 +#ifdef __cplusplus +extern "C" { +#endif // #define MODE_TEST_HIT_STATS -// #define SPEEDSTATS 1 #if CONFIG_MULTIPLE_ARF // Set MIN_GF_INTERVAL to 1 for the full decomposition. #define MIN_GF_INTERVAL 2 #else #define MIN_GF_INTERVAL 4 #endif -#define DEFAULT_GF_INTERVAL 7 +#define DEFAULT_GF_INTERVAL 10 +#define DEFAULT_KF_BOOST 2000 +#define DEFAULT_GF_BOOST 2000 #define KEY_FRAME_CONTEXT 5 @@ -70,58 +77,9 @@ typedef struct { // 0 = ZERO_MV, MV signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS]; - int inter_mode_counts[INTER_MODE_CONTEXTS][INTER_MODES - 1][2]; FRAME_CONTEXT fc; } CODING_CONTEXT; -typedef struct { - double frame; - double intra_error; - double coded_error; - double sr_coded_error; - double ssim_weighted_pred_err; - double pcnt_inter; - double pcnt_motion; - double pcnt_second_ref; - double pcnt_neutral; - double MVr; - double mvr_abs; - double MVc; - double mvc_abs; - double MVrv; - double MVcv; - double mv_in_out_count; - double new_mv_count; - double duration; - double count; -} FIRSTPASS_STATS; - -typedef struct { - int frames_so_far; - double frame_intra_error; - double frame_coded_error; - double frame_pcnt_inter; - double frame_pcnt_motion; - double frame_mvr; - double frame_mvr_abs; - double frame_mvc; - double frame_mvc_abs; -} ONEPASS_FRAMESTATS; - -typedef struct { - struct { - int err; - union { - int_mv mv; - MB_PREDICTION_MODE mode; - } m; - } ref[MAX_REF_FRAMES]; -} MBGRAPH_MB_STATS; - -typedef struct { - MBGRAPH_MB_STATS *mb_stats; -} MBGRAPH_FRAME_STATS; - // This enumerator type needs to be kept aligned with the mode order in // const MODE_DEFINITION vp9_mode_order[MAX_MODES] used in the rd code. 
typedef enum { @@ -178,7 +136,9 @@ typedef enum { NSTEP = 1, HEX = 2, BIGDIA = 3, - SQUARE = 4 + SQUARE = 4, + FAST_HEX = 5, + FAST_DIAMOND = 6 } SEARCH_METHODS; typedef enum { @@ -189,6 +149,12 @@ typedef enum { } TX_SIZE_SEARCH_METHOD; typedef enum { + NOT_IN_USE = 0, + RELAXED_NEIGHBORING_MIN_MAX = 1, + STRICT_NEIGHBORING_MIN_MAX = 2 +} AUTO_MIN_MAX_MODE; + +typedef enum { // Values should be powers of 2 so that they can be selected as bits of // an integer flags field @@ -217,8 +183,7 @@ typedef enum { } MODE_SEARCH_SKIP_LOGIC; typedef enum { - SUBPEL_ITERATIVE = 0, - SUBPEL_TREE = 1, + SUBPEL_TREE = 0, // Other methods to come } SUBPEL_SEARCH_METHODS; @@ -234,62 +199,407 @@ typedef enum { LAST_FRAME_PARTITION_ALL = 2 } LAST_FRAME_PARTITION_METHOD; +typedef enum { + // No recode. + DISALLOW_RECODE = 0, + // Allow recode for KF and exceeding maximum frame bandwidth. + ALLOW_RECODE_KFMAXBW = 1, + // Allow recode only for KF/ARF/GF frames. + ALLOW_RECODE_KFARFGF = 2, + // Allow recode for all frames based on bitrate constraints. + ALLOW_RECODE = 3, +} RECODE_LOOP_TYPE; + +typedef enum { + // encode_breakout is disabled. + ENCODE_BREAKOUT_DISABLED = 0, + // encode_breakout is enabled. + ENCODE_BREAKOUT_ENABLED = 1, + // encode_breakout is enabled with small max_thresh limit. + ENCODE_BREAKOUT_LIMITED = 2 +} ENCODE_BREAKOUT_TYPE; + +typedef enum { + // Search partitions using RD/NONRD criterion + SEARCH_PARTITION = 0, + + // Always use a fixed size partition + FIXED_PARTITION = 1, + + // Use a fixed size partition in every 64X64 SB, where the size is + // determined based on source variance + VAR_BASED_FIXED_PARTITION = 2, + + // Use an arbitrary partitioning scheme based on source variance within + // a 64X64 SB + VAR_BASED_PARTITION +} PARTITION_SEARCH_TYPE; + typedef struct { - int RD; + // Frame level coding parameter update + int frame_parameter_update; + + // Motion search method (Diamond, NSTEP, Hex, Big Diamond, Square, etc). SEARCH_METHODS search_method; - int auto_filter; - int recode_loop; + + RECODE_LOOP_TYPE recode_loop; + + // Subpel_search_method can only be subpel_tree which does a subpixel + // logarithmic search that keeps stepping at 1/2 pixel units until + // you stop getting a gain, and then goes on to 1/4 and repeats + // the same process. Along the way it skips many diagonals. SUBPEL_SEARCH_METHODS subpel_search_method; + + // Maximum number of steps in logarithmic subpel search before giving up. int subpel_iters_per_step; + + // Control when to stop subpel search + int subpel_force_stop; + + // Thresh_mult is used to set a threshold for the rd score. A higher value + // means that we will accept the best mode so far more often. This number + // is used in combination with the current block size, and thresh_freq_fact + // to pick a threshold. int thresh_mult[MAX_MODES]; int thresh_mult_sub8x8[MAX_REFS]; + + // This parameter controls the number of steps we'll do in a diamond + // search. int max_step_search_steps; + + // This parameter controls which step in the n-step process we start at. + // It's changed adaptively based on circumstances. int reduce_first_step_size; + + // If this is set to 1, we limit the motion search range to 2 times the + // largest motion vector found in the last frame. int auto_mv_step_size; + + // Trellis (dynamic programming) optimization of quantized values (+1, 0). int optimize_coefficients; + + // Always set to 0. If on it enables 0 cost background transmission + // (except for the initial transmission of the segmentation). 
The feature is + // disabled because the addition of very large block sizes makes the + // backgrounds very cheap to encode, and the segmentation we have + // adds overhead. int static_segmentation; - int variance_adaptive_quantization; + + // If 1 we iterate finding a best reference for 2 ref frames together - via + // a log search that iterates 4 times (check around mv for last for best + // error of combined predictor then check around mv for alt). If 0 we + // just use the best motion vector found for each frame by itself. int comp_inter_joint_search_thresh; + + // This variable is used to cap the maximum number of times we skip testing a + // mode to be evaluated. A high value means we will be faster. int adaptive_rd_thresh; + + // Enables skipping the reconstruction step (idct, recon) in the + // intermediate steps assuming the last frame didn't have too many intra + // blocks and the q is less than a threshold. int skip_encode_sb; int skip_encode_frame; + + // This variable allows us to reuse the last frame's partition choices + // (64x64 v 32x32 etc) for this frame. It can be set to only use the last + // frame as a starting point in low motion scenes or always use it. If set + // we use last_partitioning_redo_frequency to determine how often to redo + // the partitioning from scratch. Adjust_partitioning_from_last_frame + // enables us to adjust up or down one partitioning from the last frame's + // partitioning. LAST_FRAME_PARTITION_METHOD use_lastframe_partitioning; + + // Determine which method we use to determine transform size. We can choose + // between options like full rd, largest for prediction size, largest + // for intra and model coefs for the rest. TX_SIZE_SEARCH_METHOD tx_size_search_method; + + // Low precision 32x32 fdct keeps everything in 16 bits and thus is less + // precise but significantly faster than the non-lp version. int use_lp32x32fdct; - int use_avoid_tested_higherror; - int use_one_partition_size_always; - int less_rectangular_check; - int use_square_partition_only; + + // TODO(JBB): remove this as it's no longer used. + + // After looking at the first set of modes (set by index here), skip + // checking modes for reference frames that don't match the reference frame + // of the best so far. int mode_skip_start; + + // TODO(JBB): Remove this. int reference_masking; + + PARTITION_SEARCH_TYPE partition_search_type; + + // Used if partition_search_type = FIXED_PARTITION BLOCK_SIZE always_this_block_size; - int auto_min_max_partition_size; + + // Skip rectangular partition test when partition type none gives better + // rd than partition type split. + int less_rectangular_check; + + // Disable testing non-square partitions (e.g. 16x32). + int use_square_partition_only; + + // Sets min and max partition sizes for this 64x64 region based on the + // same 64x64 in the last encoded frame, and the left and above neighbor. + AUTO_MIN_MAX_MODE auto_min_max_partition_size; + + // Min and max partition size we enable (block_size) as per auto + // min max, but also used by adjust partitioning, and pick_partitioning. BLOCK_SIZE min_partition_size; BLOCK_SIZE max_partition_size; + + // Whether or not we allow partitions one smaller or one greater than the last + // frame's partitioning. Only used if use_lastframe_partitioning is set. int adjust_partitioning_from_last_frame; + + // How frequently we redo the partitioning from scratch. Only used if + // use_lastframe_partitioning is set. int last_partitioning_redo_frequency; + + // Disables sub-8x8 block sizes in different scenarios: choices are to disable + // it always, to allow it for only Last frame and Intra, to disable it for all + // inter modes or to enable it always. int disable_split_mask; - int using_small_partition_info; + // TODO(jingning): combine the related motion search speed features + // This allows us to use motion search at other sizes as a starting + // point for this motion search and limits the search range around it. int adaptive_motion_search; + // Allows sub-8x8 modes to use the prediction filter that was determined + // best for 8x8 mode. If set to 0 we always re-check all the filters for + // sizes less than 8x8, 1 means we check all filter modes if no 8x8 filter + // was selected, and 2 means we use 8-tap if no 8x8 filter mode was selected. + int adaptive_pred_interp_filter; + // Implements various heuristics to skip searching modes. + // The heuristics selected are based on flags + // defined in the MODE_SEARCH_SKIP_HEURISTICS enum. unsigned int mode_search_skip_flags; + // A source variance threshold below which the split mode is disabled unsigned int disable_split_var_thresh; + // A source variance threshold below which filter search is disabled // Choose a very large value (UINT_MAX) to use 8-tap always unsigned int disable_filter_search_var_thresh; + + // These bit masks allow you to enable or disable intra modes for each + // transform size separately. int intra_y_mode_mask[TX_SIZES]; int intra_uv_mode_mask[TX_SIZES]; + + // This variable enables an early break out of mode testing if the model for + // rd built from the prediction signal indicates a value that's much + // higher than the best rd we've seen so far. int use_rd_breakout; + + // This enables us to use an estimate for intra rd based on dc mode rather + // than choosing an actual uv mode in the stage of encoding before the actual + // final encode. int use_uv_intra_rd_estimate; + + // This feature controls how the loop filter level is determined: + // 0: Try the full image with different values. + // 1: Try a small portion of the image with different values. + // 2: Estimate the level based on quantizer and frame type. int use_fast_lpf_pick; + + // This feature limits the number of coefficient updates we actually do + // by only looking at counts from 1/2 the bands. int use_fast_coef_updates; // 0: 2-loop, 1: 1-loop, 2: 1-loop reduced + + // This flag controls the use of non-RD mode decision. + int use_nonrd_pick_mode; + + // This variable sets the encode_breakout threshold. Currently, it is only + // enabled in real time mode. + int encode_breakout_thresh; + + // A binary mask indicating if NEARESTMV, NEARMV, ZEROMV, NEWMV + // modes are disabled in order from LSB to MSB for each BLOCK_SIZE. + int disable_inter_mode_mask[BLOCK_SIZES]; + + // This feature controls whether we do the expensive context update and + // calculation in the rd coefficient costing loop. + int use_fast_coef_costing; + + // This variable controls the maximum block size where intra blocks can be + // used in inter frames.
+ // TODO(aconverse): Fold this into one of the other many mode skips + BLOCK_SIZE max_intra_bsize; } SPEED_FEATURES; +typedef struct { + RATE_CONTROL rc; + int target_bandwidth; + int64_t starting_buffer_level; + int64_t optimal_buffer_level; + int64_t maximum_buffer_size; + double framerate; + int avg_frame_size; +} LAYER_CONTEXT; + +typedef enum { + NORMAL = 0, + FOURFIVE = 1, + THREEFIVE = 2, + ONETWO = 3 +} VPX_SCALING; + +typedef enum { + VP9_LAST_FLAG = 1, + VP9_GOLD_FLAG = 2, + VP9_ALT_FLAG = 4 +} VP9_REFFRAME; + +typedef enum { + USAGE_LOCAL_FILE_PLAYBACK = 0x0, + USAGE_STREAM_FROM_SERVER = 0x1, + USAGE_CONSTRAINED_QUALITY = 0x2, + USAGE_CONSTANT_QUALITY = 0x3, +} END_USAGE; + + +typedef enum { + MODE_GOODQUALITY = 0x1, + MODE_BESTQUALITY = 0x2, + MODE_FIRSTPASS = 0x3, + MODE_SECONDPASS = 0x4, + MODE_SECONDPASS_BEST = 0x5, + MODE_REALTIME = 0x6, +} MODE; + +typedef enum { + FRAMEFLAGS_KEY = 1, + FRAMEFLAGS_GOLDEN = 2, + FRAMEFLAGS_ALTREF = 4, +} FRAMETYPE_FLAGS; + +typedef enum { + NO_AQ = 0, + VARIANCE_AQ = 1, + COMPLEXITY_AQ = 2, + AQ_MODE_COUNT // This should always be the last member of the enum +} AQ_MODE; + +typedef struct { + int version; // 4 versions of bitstream defined: + // 0 - best quality/slowest decode, + // 3 - lowest quality/fastest decode + int width; // width of data passed to the compressor + int height; // height of data passed to the compressor + double framerate; // set to passed in framerate + int64_t target_bandwidth; // bandwidth to be used in kilobits per second + + int noise_sensitivity; // pre processing blur: recommendation 0 + int sharpness; // sharpening output: recommendation 0: + int cpu_used; + unsigned int rc_max_intra_bitrate_pct; + + // mode -> + // (0)=Realtime/Live Encoding. This mode is optimized for realtime + // encoding (for example, capturing a television signal or feed from + // a live camera). ( speed setting controls how fast ) + // (1)=Good Quality Fast Encoding. The encoder balances quality with the + // amount of time it takes to encode the output. ( speed setting + // controls how fast ) + // (2)=One Pass - Best Quality. The encoder places priority on the + // quality of the output over encoding speed. The output is compressed + // at the highest possible quality. This option takes the longest + // amount of time to encode. ( speed setting ignored ) + // (3)=Two Pass - First Pass. The encoder generates a file of statistics + // for use in the second encoding pass. ( speed setting controls how + // fast ) + // (4)=Two Pass - Second Pass. The encoder uses the statistics that were + // generated in the first encoding pass to create the compressed + // output. ( speed setting controls how fast ) + // (5)=Two Pass - Second Pass Best. The encoder uses the statistics that + // were generated in the first encoding pass to create the compressed + // output using the highest possible quality, and taking a + // longer amount of time to encode.. ( speed setting ignored ) + MODE mode; + + // Key Framing Operations + int auto_key; // autodetect cut scenes and set the keyframes + int key_freq; // maximum distance to key frame. 
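The block comment above walks through the six MODE values one by one. A tiny helper makes the pass-count mapping explicit; this is an illustrative sketch, not code from this change, and the remaining VP9_CONFIG members continue below:

    static int num_passes_for_mode(MODE mode) {
      switch (mode) {
        case MODE_FIRSTPASS:        // writes stats only
        case MODE_SECONDPASS:       // consumes stats from pass 1
        case MODE_SECONDPASS_BEST:  // same, with quality prioritized
          return 2;
        default:                    // realtime / good / best single-pass modes
          return 1;
      }
    }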
+ + int lag_in_frames; // how many frames lag before we start encoding + + // ---------------------------------------------------------------- + // DATARATE CONTROL OPTIONS + + END_USAGE end_usage; // vbr or cbr + + // buffer targeting aggressiveness + int under_shoot_pct; + int over_shoot_pct; + + // buffering parameters + int64_t starting_buffer_level; // in seconds + int64_t optimal_buffer_level; + int64_t maximum_buffer_size; + + // Frame drop threshold. + int drop_frames_water_mark; + + // controlling quality + int fixed_q; + int worst_allowed_q; + int best_allowed_q; + int cq_level; + int lossless; + AQ_MODE aq_mode; // Adaptive Quantization mode + + // two pass datarate control + int two_pass_vbrbias; // two pass datarate control tweaks + int two_pass_vbrmin_section; + int two_pass_vbrmax_section; + // END DATARATE CONTROL OPTIONS + // ---------------------------------------------------------------- + + // Spatial and temporal scalability. + int ss_number_layers; // Number of spatial layers. + int ts_number_layers; // Number of temporal layers. + // Bitrate allocation for spatial layers. + int ss_target_bitrate[VPX_SS_MAX_LAYERS]; + // Bitrate allocation (CBR mode) and framerate factor, for temporal layers. + int ts_target_bitrate[VPX_TS_MAX_LAYERS]; + int ts_rate_decimator[VPX_TS_MAX_LAYERS]; + + // these parameters aren't to be used in final build don't use!!! + int play_alternate; + int alt_freq; + + int encode_breakout; // early breakout : for video conf recommend 800 + + /* Bitfield defining the error resiliency features to enable. + * Can provide decodable frames after losses in previous + * frames and decodable partitions after losses in the same frame. + */ + unsigned int error_resilient_mode; + + /* Bitfield defining the parallel decoding mode where the + * decoding in successive frames may be conducted in parallel + * just by decoding the frame headers. 
+ */ + unsigned int frame_parallel_decoding_mode; + + int arnr_max_frames; + int arnr_strength; + int arnr_type; + + int tile_columns; + int tile_rows; + + struct vpx_fixed_buf two_pass_stats_in; + struct vpx_codec_pkt_list *output_pkt_list; + + vp8e_tuning tuning; +} VP9_CONFIG; + typedef struct VP9_COMP { DECLARE_ALIGNED(16, int16_t, y_quant[QINDEX_RANGE][8]); DECLARE_ALIGNED(16, int16_t, y_quant_shift[QINDEX_RANGE][8]); @@ -311,11 +621,10 @@ typedef struct VP9_COMP { MACROBLOCK mb; VP9_COMMON common; VP9_CONFIG oxcf; - struct rdcost_block_args rdcost_stack; struct lookahead_ctx *lookahead; struct lookahead_entry *source; #if CONFIG_MULTIPLE_ARF - struct lookahead_entry *alt_ref_source[NUM_REF_FRAMES]; + struct lookahead_entry *alt_ref_source[REF_FRAMES]; #else struct lookahead_entry *alt_ref_source; #endif @@ -324,11 +633,7 @@ typedef struct VP9_COMP { YV12_BUFFER_CONFIG *un_scaled_source; YV12_BUFFER_CONFIG scaled_source; - unsigned int frames_till_alt_ref_frame; - int source_alt_ref_pending; - int source_alt_ref_active; - - int is_src_frame_alt_ref; + int key_frame_frequency; int gold_is_last; // gold same as last frame ( short circuit gold searches) int alt_is_last; // Alt same as last ( short circuit altref search) @@ -339,25 +644,26 @@ typedef struct VP9_COMP { int gld_fb_idx; int alt_fb_idx; - int current_layer; - int use_svc; - #if CONFIG_MULTIPLE_ARF - int alt_ref_fb_idx[NUM_REF_FRAMES - 3]; + int alt_ref_fb_idx[REF_FRAMES - 3]; #endif int refresh_last_frame; int refresh_golden_frame; int refresh_alt_ref_frame; + + int ext_refresh_frame_flags_pending; + int ext_refresh_last_frame; + int ext_refresh_golden_frame; + int ext_refresh_alt_ref_frame; + + int ext_refresh_frame_context_pending; + int ext_refresh_frame_context; + YV12_BUFFER_CONFIG last_frame_uf; TOKENEXTRA *tok; unsigned int tok_count[4][1 << 6]; - - unsigned int frames_since_key; - unsigned int key_frame_frequency; - unsigned int this_key_frame_forced; - unsigned int next_key_frame_forced; #if CONFIG_MULTIPLE_ARF // Position within a frame coding order (including any additional ARF frames). unsigned int sequence_number; @@ -368,161 +674,82 @@ typedef struct VP9_COMP { // Ambient reconstruction err target for force key frames int ambient_err; - unsigned int mode_chosen_counts[MAX_MODES]; - unsigned int sub8x8_mode_chosen_counts[MAX_REFS]; - int64_t mode_skip_mask; - int ref_frame_mask; - int set_ref_frame_mask; - int rd_threshes[MAX_SEGMENTS][BLOCK_SIZES][MAX_MODES]; int rd_thresh_freq_fact[BLOCK_SIZES][MAX_MODES]; int rd_thresh_sub8x8[MAX_SEGMENTS][BLOCK_SIZES][MAX_REFS]; int rd_thresh_freq_sub8x8[BLOCK_SIZES][MAX_REFS]; - int64_t rd_comp_pred_diff[NB_PREDICTION_TYPES]; - int64_t rd_prediction_type_threshes[4][NB_PREDICTION_TYPES]; - unsigned int intra_inter_count[INTRA_INTER_CONTEXTS][2]; - unsigned int comp_inter_count[COMP_INTER_CONTEXTS][2]; - unsigned int single_ref_count[REF_CONTEXTS][2][2]; - unsigned int comp_ref_count[REF_CONTEXTS][2]; - + int64_t rd_comp_pred_diff[REFERENCE_MODES]; + int64_t rd_prediction_type_threshes[MAX_REF_FRAMES][REFERENCE_MODES]; int64_t rd_tx_select_diff[TX_MODES]; // FIXME(rbultje) can this overflow? 
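Stepping back to the VP9_CONFIG definition that closes just above: a filled-in instance makes its knobs concrete. A minimal sketch for the second pass of a two-pass encode; every value here is illustrative, and the stats buffer is assumed to hold the packets the first pass emitted:

    #include <string.h>

    #include "vp9/encoder/vp9_onyx_int.h"

    static VP9_CONFIG second_pass_config(int w, int h, void *stats, size_t sz) {
      VP9_CONFIG oxcf;
      memset(&oxcf, 0, sizeof(oxcf));
      oxcf.width = w;
      oxcf.height = h;
      oxcf.framerate = 30.0;
      oxcf.target_bandwidth = 1200;                // kilobits per second
      oxcf.mode = MODE_SECONDPASS;                 // consume first-pass stats
      oxcf.end_usage = USAGE_LOCAL_FILE_PLAYBACK;  // VBR-style targeting
      oxcf.auto_key = 1;                           // detect scene cuts
      oxcf.key_freq = 150;                         // but force a KF by 150 frames
      oxcf.lag_in_frames = 25;                     // allow alt-ref lookahead
      oxcf.two_pass_stats_in.buf = stats;
      oxcf.two_pass_stats_in.sz = sz;
      return oxcf;
    }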
- int rd_tx_select_threshes[4][TX_MODES]; + int rd_tx_select_threshes[MAX_REF_FRAMES][TX_MODES]; int64_t rd_filter_diff[SWITCHABLE_FILTER_CONTEXTS]; - int64_t rd_filter_threshes[4][SWITCHABLE_FILTER_CONTEXTS]; + int64_t rd_filter_threshes[MAX_REF_FRAMES][SWITCHABLE_FILTER_CONTEXTS]; int64_t rd_filter_cache[SWITCHABLE_FILTER_CONTEXTS]; + int64_t mask_filter_rd; int RDMULT; int RDDIV; CODING_CONTEXT coding_context; - // Rate targetting variables - int this_frame_target; - int projected_frame_size; - int last_q[2]; // Separate values for Intra/Inter - int last_boosted_qindex; // Last boosted GF/KF/ARF q - - double rate_correction_factor; - double key_frame_rate_correction_factor; - double gf_rate_correction_factor; - - unsigned int frames_since_golden; - int frames_till_gf_update_due; // Count down till next GF - - int gf_overspend_bits; // cumulative bits overspent because of GF boost - - int non_gf_bitrate_adjustment; // Following GF to recover extra bits spent - - int kf_overspend_bits; // Bits spent on key frames to be recovered on inters - int kf_bitrate_adjustment; // number of bits to recover on each inter frame. - int max_gf_interval; - int baseline_gf_interval; + int zbin_mode_boost; + int zbin_mode_boost_enabled; int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames int active_arnr_strength; // <= cpi->oxcf.arnr_max_strength - int64_t key_frame_count; - int prior_key_frame_distance[KEY_FRAME_CONTEXT]; - int per_frame_bandwidth; // Current section per frame bandwidth target - int av_per_frame_bandwidth; // Average frame size target for clip - int min_frame_bandwidth; // Minimum allocation used for any frame - int inter_frame_target; double output_framerate; int64_t last_time_stamp_seen; int64_t last_end_time_stamp_seen; int64_t first_time_stamp_ever; - int ni_av_qi; - int ni_tot_qi; - int ni_frames; - int avg_frame_qindex; - double tot_q; - double avg_q; - - int zbin_mode_boost; - int zbin_mode_boost_enabled; - - int64_t total_byte_count; - - int buffered_mode; - - int buffer_level; - int bits_off_target; - - int rolling_target_bits; - int rolling_actual_bits; - - int long_rolling_target_bits; - int long_rolling_actual_bits; - - int64_t total_actual_bits; - int total_target_vs_actual; // debug stats - - int worst_quality; - int active_worst_quality; - int best_quality; - int active_best_quality; + RATE_CONTROL rc; int cq_target_quality; - int y_mode_count[4][INTRA_MODES]; - int y_uv_mode_count[INTRA_MODES][INTRA_MODES]; - unsigned int partition_count[PARTITION_CONTEXTS][PARTITION_TYPES]; - - nmv_context_counts NMVcount; + vp9_coeff_count coef_counts[TX_SIZES][PLANE_TYPES]; + vp9_coeff_probs_model frame_coef_probs[TX_SIZES][PLANE_TYPES]; + vp9_coeff_stats frame_branch_ct[TX_SIZES][PLANE_TYPES]; - vp9_coeff_count coef_counts[TX_SIZES][BLOCK_TYPES]; - vp9_coeff_probs_model frame_coef_probs[TX_SIZES][BLOCK_TYPES]; - vp9_coeff_stats frame_branch_ct[TX_SIZES][BLOCK_TYPES]; - - int gfu_boost; - int last_boost; - int kf_boost; - int kf_zeromotion_pct; - int gf_zeromotion_pct; - - int64_t target_bandwidth; struct vpx_codec_pkt_list *output_pkt_list; -#if 0 - // Experimental code for lagged and one pass - ONEPASS_FRAMESTATS one_pass_frame_stats[MAX_LAG_BUFFERS]; - int one_pass_frame_index; -#endif MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS]; int mbgraph_n_frames; // number of frames filled in the above int static_mb_pct; // % forced skip mbs by segmentation int seg0_progress, seg0_idx, seg0_cnt; - int decimation_factor; - int decimation_count; - // for real time encoding int speed; - 
int compressor_speed; - int auto_worst_q; int cpu_used; int pass; - vp9_prob last_skip_false_probs[3][MBSKIP_CONTEXTS]; + vp9_prob last_skip_false_probs[3][SKIP_CONTEXTS]; int last_skip_probs_q[3]; int ref_frame_flags; SPEED_FEATURES sf; - int error_bins[1024]; unsigned int max_mv_magnitude; int mv_step_param; + // Default value is 1. From first pass stats, encode_breakout may be disabled. + ENCODE_BREAKOUT_TYPE allow_encode_breakout; + + // Get threshold from external input. In real time mode, it can be + // overwritten according to encoding speed. + int encode_breakout; + unsigned char *segmentation_map; // segment threashold for encode breakout int segment_encode_breakout[MAX_SEGMENTS]; + unsigned char *complexity_map; + unsigned char *active_map; unsigned int active_map_enabled; @@ -537,63 +764,30 @@ typedef struct VP9_COMP { uint64_t time_pick_lpf; uint64_t time_encode_sb_row; - struct twopass_rc { - unsigned int section_intra_rating; - unsigned int next_iiratio; - unsigned int this_iiratio; - FIRSTPASS_STATS total_stats; - FIRSTPASS_STATS this_frame_stats; - FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start; - FIRSTPASS_STATS total_left_stats; - int first_pass_done; - int64_t bits_left; - int64_t clip_bits_total; - double avg_iiratio; - double modified_error_total; - double modified_error_used; - double modified_error_left; - double kf_intra_err_min; - double gf_intra_err_min; - int frames_to_key; - int maxq_max_limit; - int maxq_min_limit; - int static_scene_max_gf_interval; - int kf_bits; - // Remaining error from uncoded frames in a gf group. Two pass use only - int64_t gf_group_error_left; - - // Projected total bits available for a key frame group of frames - int64_t kf_group_bits; - - // Error score of frames still to be coded in kf group - int64_t kf_group_error_left; - - // Projected Bits available for a group of frames including 1 GF or ARF - int64_t gf_group_bits; - // Bits for the golden frame or ARF - 2 pass only - int gf_bits; - int alt_extra_bits; - - int sr_update_lag; - double est_max_qcorrection_factor; - } twopass; + struct twopass_rc twopass; YV12_BUFFER_CONFIG alt_ref_buffer; YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS]; int fixed_divide[512]; #if CONFIG_INTERNAL_STATS + unsigned int mode_chosen_counts[MAX_MODES]; + int count; double total_y; double total_u; double total_v; double total; - double total_sq_error; + uint64_t total_sq_error; + uint64_t total_samples; + double totalp_y; double totalp_u; double totalp_v; double totalp; - double total_sq_error2; + uint64_t totalp_sq_error; + uint64_t totalp_samples; + int bytes; double summed_quality; double summed_weights; @@ -617,24 +811,29 @@ typedef struct VP9_COMP { int *mb_norm_activity_map; int output_partition; - /* force next frame to intra when kf_auto says so */ + // Force next frame to intra when kf_auto says so. int force_next_frame_intra; int droppable; int dummy_packing; /* flag to indicate if packing is dummy */ - unsigned int switchable_interp_count[SWITCHABLE_FILTER_CONTEXTS] - [SWITCHABLE_FILTERS]; - unsigned int tx_stepdown_count[TX_SIZES]; int initial_width; int initial_height; - int number_spatial_layers; - int enable_encode_breakout; // Default value is 1. From first pass stats, - // encode_breakout may be disabled. + int use_svc; + + struct svc { + int spatial_layer_id; + int temporal_layer_id; + int number_spatial_layers; + int number_temporal_layers; + // Layer context used for rate control in CBR mode, only defined for + // temporal layers for now. 
+ LAYER_CONTEXT layer_context[VPX_TS_MAX_LAYERS]; + } svc; #if CONFIG_MULTIPLE_ARF // ARF tracking variables. @@ -649,17 +848,12 @@ typedef struct VP9_COMP { int max_arf_level; #endif -#ifdef ENTROPY_STATS - int64_t mv_ref_stats[INTER_MODE_CONTEXTS][INTER_MODES - 1][2]; -#endif - - #ifdef MODE_TEST_HIT_STATS // Debug / test stats int64_t mode_test_hits[BLOCK_SIZES]; #endif - /* Y,U,V,(A) */ + // Y,U,V,(A) ENTROPY_CONTEXT *above_context[MAX_MB_PLANE]; ENTROPY_CONTEXT left_context[MAX_MB_PLANE][16]; @@ -667,7 +861,62 @@ typedef struct VP9_COMP { PARTITION_CONTEXT left_seg_context[8]; } VP9_COMP; -static int get_ref_frame_idx(VP9_COMP *cpi, MV_REFERENCE_FRAME ref_frame) { +void vp9_initialize_enc(); + +struct VP9_COMP *vp9_create_compressor(VP9_CONFIG *oxcf); +void vp9_remove_compressor(VP9_COMP *cpi); + +void vp9_change_config(VP9_COMP *cpi, VP9_CONFIG *oxcf); + + // Receive a frame's worth of data. The caller can assume that a copy of + // this frame is made and not just a copy of the pointer. +int vp9_receive_raw_frame(VP9_COMP *cpi, unsigned int frame_flags, + YV12_BUFFER_CONFIG *sd, int64_t time_stamp, + int64_t end_time_stamp); + +int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, + size_t *size, uint8_t *dest, + int64_t *time_stamp, int64_t *time_end, int flush); + +int vp9_get_preview_raw_frame(VP9_COMP *cpi, YV12_BUFFER_CONFIG *dest, + vp9_ppflags_t *flags); + +int vp9_use_as_reference(VP9_COMP *cpi, int ref_frame_flags); + +int vp9_update_reference(VP9_COMP *cpi, int ref_frame_flags); + +int vp9_copy_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag, + YV12_BUFFER_CONFIG *sd); + +int vp9_get_reference_enc(VP9_COMP *cpi, int index, + YV12_BUFFER_CONFIG **fb); + +int vp9_set_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag, + YV12_BUFFER_CONFIG *sd); + +int vp9_update_entropy(VP9_COMP *cpi, int update); + +int vp9_set_roimap(VP9_COMP *cpi, unsigned char *map, + unsigned int rows, unsigned int cols, + int delta_q[MAX_SEGMENTS], + int delta_lf[MAX_SEGMENTS], + unsigned int threshold[MAX_SEGMENTS]); + +int vp9_set_active_map(VP9_COMP *cpi, unsigned char *map, + unsigned int rows, unsigned int cols); + +int vp9_set_internal_size(VP9_COMP *cpi, + VPX_SCALING horiz_mode, VPX_SCALING vert_mode); + +int vp9_set_size_literal(VP9_COMP *cpi, unsigned int width, + unsigned int height); + +void vp9_set_svc(VP9_COMP *cpi, int use_svc); + +int vp9_get_quantizer(struct VP9_COMP *cpi); + +static int get_ref_frame_idx(const VP9_COMP *cpi, + MV_REFERENCE_FRAME ref_frame) { if (ref_frame == LAST_FRAME) { return cpi->lst_fb_idx; } else if (ref_frame == GOLDEN_FRAME) { @@ -677,34 +926,38 @@ static int get_ref_frame_idx(VP9_COMP *cpi, MV_REFERENCE_FRAME ref_frame) { } } -static int get_scale_ref_frame_idx(VP9_COMP *cpi, - MV_REFERENCE_FRAME ref_frame) { - if (ref_frame == LAST_FRAME) { - return 0; - } else if (ref_frame == GOLDEN_FRAME) { - return 1; - } else { - return 2; - } +static YV12_BUFFER_CONFIG *get_ref_frame_buffer(VP9_COMP *cpi, + MV_REFERENCE_FRAME ref_frame) { + VP9_COMMON *const cm = &cpi->common; + return &cm->frame_bufs[cm->ref_frame_map[get_ref_frame_idx(cpi, + ref_frame)]].buf; } void vp9_encode_frame(VP9_COMP *cpi); -void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest, - unsigned long *size); - -void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x); - void vp9_set_speed_features(VP9_COMP *cpi); -int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest); +int vp9_calc_ss_err(const YV12_BUFFER_CONFIG *source, + const YV12_BUFFER_CONFIG
*reference); void vp9_alloc_compressor_data(VP9_COMP *cpi); -int vp9_compute_qdelta(VP9_COMP *cpi, double qstart, double qtarget); +int vp9_compute_qdelta(const VP9_COMP *cpi, double qstart, double qtarget); static int get_token_alloc(int mb_rows, int mb_cols) { return mb_rows * mb_cols * (48 * 16 + 4); } +static void set_ref_ptrs(VP9_COMMON *cm, MACROBLOCKD *xd, + MV_REFERENCE_FRAME ref0, MV_REFERENCE_FRAME ref1) { + xd->block_refs[0] = &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME + : 0]; + xd->block_refs[1] = &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME + : 0]; +} + +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_ENCODER_VP9_ONYX_INT_H_ diff --git a/libvpx/vp9/encoder/vp9_picklpf.c b/libvpx/vp9/encoder/vp9_picklpf.c index 476ecaa..b5f4901 100644 --- a/libvpx/vp9/encoder/vp9_picklpf.c +++ b/libvpx/vp9/encoder/vp9_picklpf.c @@ -14,158 +14,86 @@ #include "vp9/encoder/vp9_onyx_int.h" #include "vp9/encoder/vp9_picklpf.h" #include "vp9/encoder/vp9_quantize.h" +#include "vp9/common/vp9_quant_common.h" #include "vpx_mem/vpx_mem.h" #include "vpx_scale/vpx_scale.h" #include "vp9/common/vp9_alloccommon.h" #include "vp9/common/vp9_loopfilter.h" #include "./vpx_scale_rtcd.h" -void vp9_yv12_copy_partial_frame_c(YV12_BUFFER_CONFIG *src_ybc, - YV12_BUFFER_CONFIG *dst_ybc, int fraction) { - const int height = src_ybc->y_height; - const int stride = src_ybc->y_stride; - const int offset = stride * ((height >> 5) * 16 - 8); - const int lines_to_copy = MAX(height >> (fraction + 4), 1) << 4; - - assert(src_ybc->y_stride == dst_ybc->y_stride); - vpx_memcpy(dst_ybc->y_buffer + offset, src_ybc->y_buffer + offset, - stride * (lines_to_copy + 16)); +static int get_max_filter_level(VP9_COMP *cpi) { + return cpi->twopass.section_intra_rating > 8 ? MAX_LOOP_FILTER * 3 / 4 + : MAX_LOOP_FILTER; } -static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, - YV12_BUFFER_CONFIG *dest, int Fraction) { - int i, j; - int Total = 0; - int srcoffset, dstoffset; - uint8_t *src = source->y_buffer; - uint8_t *dst = dest->y_buffer; - - int linestocopy = (source->y_height >> (Fraction + 4)); - - if (linestocopy < 1) - linestocopy = 1; - - linestocopy <<= 4; +static int try_filter_frame(const YV12_BUFFER_CONFIG *sd, VP9_COMP *const cpi, + MACROBLOCKD *const xd, VP9_COMMON *const cm, + int filt_level, int partial_frame) { + int filt_err; - srcoffset = source->y_stride * (dest->y_height >> 5) * 16; - dstoffset = dest->y_stride * (dest->y_height >> 5) * 16; - - src += srcoffset; - dst += dstoffset; - - // Loop through the raw Y plane and reconstruction data summing the square - // differences. 
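Taken together, the prototypes added to vp9_onyx_int.h above spell out the encoder's lifecycle. A hedged outline of one plausible calling sequence; the output-buffer size and the return-value conventions are assumptions carried over from the long-standing vp8/vp9 pattern, not something this patch states:

    #include <stddef.h>
    #include <stdint.h>

    #include "vp9/encoder/vp9_onyx_int.h"

    static void encode_clip(VP9_CONFIG *oxcf, YV12_BUFFER_CONFIG **frames,
                            int n_frames, int64_t duration) {
      VP9_COMP *cpi = vp9_create_compressor(oxcf);
      static uint8_t buf[1 << 20];  // illustrative worst-case packet buffer
      int i;

      for (i = 0; i < n_frames; ++i) {
        unsigned int flags = 0;
        size_t size = 0;
        int64_t pts = i * duration, t0, t1;

        // The encoder copies the frame, so the source buffer is reusable.
        vp9_receive_raw_frame(cpi, flags, frames[i], pts, pts + duration);

        // A zero return is assumed to mean "packet produced"; with
        // lag_in_frames set, early calls can legitimately yield nothing.
        if (vp9_get_compressed_data(cpi, &flags, &size, buf,
                                    &t0, &t1, 0) == 0 && size > 0) {
          // ...hand |size| bytes of |buf| to the container writer...
        }
      }
      vp9_remove_compressor(cpi);
    }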
- for (i = 0; i < linestocopy; i += 16) { - for (j = 0; j < source->y_width; j += 16) { - unsigned int sse; - Total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride, - &sse); - } - - src += 16 * source->y_stride; - dst += 16 * dest->y_stride; - } - - return Total; -} - -// Enforce a minimum filter level based upon baseline Q -static int get_min_filter_level(VP9_COMP *cpi, int base_qindex) { - int min_filter_level; - min_filter_level = 0; - - return min_filter_level; -} - -// Enforce a maximum filter level based upon baseline Q -static int get_max_filter_level(VP9_COMP *cpi, int base_qindex) { - int max_filter_level = MAX_LOOP_FILTER; - (void)base_qindex; - - if (cpi->twopass.section_intra_rating > 8) - max_filter_level = MAX_LOOP_FILTER * 3 / 4; - - return max_filter_level; -} + vp9_loop_filter_frame(cm, xd, filt_level, 1, partial_frame); + filt_err = vp9_calc_ss_err(sd, cm->frame_to_show); + // Re-instate the unfiltered frame + vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show); -// Stub function for now Alt LF not used -void vp9_set_alt_lf_level(VP9_COMP *cpi, int filt_val) { + return filt_err; } -void vp9_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, int partial) { +static void search_filter_level(const YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, + int partial_frame) { + MACROBLOCKD *const xd = &cpi->mb.e_mbd; VP9_COMMON *const cm = &cpi->common; struct loopfilter *const lf = &cm->lf; - - int best_err = 0; - int filt_err = 0; - const int min_filter_level = get_min_filter_level(cpi, cm->base_qindex); - const int max_filter_level = get_max_filter_level(cpi, cm->base_qindex); - - int filter_step; - int filt_high = 0; - // Start search at previous frame filter level - int filt_mid = lf->filter_level; - int filt_low = 0; + const int min_filter_level = 0; + const int max_filter_level = get_max_filter_level(cpi); + int best_err; int filt_best; int filt_direction = 0; - - int Bias = 0; // Bias against raising loop filter in favor of lowering it. - - // Make a copy of the unfiltered / processed recon buffer - vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf); - - lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 - : cpi->oxcf.Sharpness; - // Start the search at the previous frame filter level unless it is now out of // range. - filt_mid = clamp(lf->filter_level, min_filter_level, max_filter_level); + int filt_mid = clamp(lf->filter_level, min_filter_level, max_filter_level); + int filter_step = filt_mid < 16 ? 4 : filt_mid / 4; + // Sum squared error at each filter level + int ss_err[MAX_LOOP_FILTER + 1]; - // Define the initial step size - filter_step = filt_mid < 16 ? 
4 : filt_mid / 4; + // Set each entry to -1 + vpx_memset(ss_err, 0xFF, sizeof(ss_err)); - // Get baseline error score - vp9_set_alt_lf_level(cpi, filt_mid); - vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, filt_mid, 1, partial); + // Make a copy of the unfiltered / processed recon buffer + vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf); - best_err = vp9_calc_ss_err(sd, cm->frame_to_show); + best_err = try_filter_frame(sd, cpi, xd, cm, filt_mid, partial_frame); filt_best = filt_mid; - - // Re-instate the unfiltered frame - vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show); + ss_err[filt_mid] = best_err; while (filter_step > 0) { - Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; + const int filt_high = MIN(filt_mid + filter_step, max_filter_level); + const int filt_low = MAX(filt_mid - filter_step, min_filter_level); + int filt_err; + + // Bias against raising loop filter in favor of lowering it. + int bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; if (cpi->twopass.section_intra_rating < 20) - Bias = Bias * cpi->twopass.section_intra_rating / 20; + bias = bias * cpi->twopass.section_intra_rating / 20; // yx, bias less for large block size - if (cpi->common.tx_mode != ONLY_4X4) - Bias >>= 1; - - filt_high = ((filt_mid + filter_step) > max_filter_level) - ? max_filter_level - : (filt_mid + filter_step); - filt_low = ((filt_mid - filter_step) < min_filter_level) - ? min_filter_level - : (filt_mid - filter_step); + if (cm->tx_mode != ONLY_4X4) + bias >>= 1; - if ((filt_direction <= 0) && (filt_low != filt_mid)) { + if (filt_direction <= 0 && filt_low != filt_mid) { // Get Low filter error score - vp9_set_alt_lf_level(cpi, filt_low); - vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, filt_low, 1, partial); - - filt_err = vp9_calc_ss_err(sd, cm->frame_to_show); - - // Re-instate the unfiltered frame - vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show); - + if (ss_err[filt_low] < 0) { + filt_err = try_filter_frame(sd, cpi, xd, cm, filt_low, partial_frame); + ss_err[filt_low] = filt_err; + } else { + filt_err = ss_err[filt_low]; + } // If value is close to the best so far then bias towards a lower loop // filter value. - if ((filt_err - Bias) < best_err) { + if ((filt_err - bias) < best_err) { // Was it actually better than the previous best? if (filt_err < best_err) best_err = filt_err; @@ -175,17 +103,15 @@ void vp9_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, int partial) { } // Now look at filt_high - if ((filt_direction >= 0) && (filt_high != filt_mid)) { - vp9_set_alt_lf_level(cpi, filt_high); - vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, filt_high, 1, partial); - - filt_err = vp9_calc_ss_err(sd, cm->frame_to_show); - - // Re-instate the unfiltered frame - vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show); - + if (filt_direction >= 0 && filt_high != filt_mid) { + if (ss_err[filt_high] < 0) { + filt_err = try_filter_frame(sd, cpi, xd, cm, filt_high, partial_frame); + ss_err[filt_high] = filt_err; + } else { + filt_err = ss_err[filt_high]; + } // Was it better than the previous best? 
- if (filt_err < (best_err - Bias)) { + if (filt_err < (best_err - bias)) { best_err = filt_err; filt_best = filt_high; } @@ -203,3 +129,27 @@ void vp9_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, int partial) { lf->filter_level = filt_best; } + +void vp9_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, + int method) { + VP9_COMMON *const cm = &cpi->common; + struct loopfilter *const lf = &cm->lf; + + lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 + : cpi->oxcf.sharpness; + + if (method == 2) { + const int min_filter_level = 0; + const int max_filter_level = get_max_filter_level(cpi); + const int q = vp9_ac_quant(cm->base_qindex, 0); + // These values were determined by linear fitting the result of the + // searched level + // filt_guess = q * 0.316206 + 3.87252 + int filt_guess = (q * 20723 + 1015158 + (1 << 17)) >> 18; + if (cm->frame_type == KEY_FRAME) + filt_guess -= 4; + lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level); + } else { + search_filter_level(sd, cpi, method == 1); + } +} diff --git a/libvpx/vp9/encoder/vp9_picklpf.h b/libvpx/vp9/encoder/vp9_picklpf.h index 9de4cf8..203ef87 100644 --- a/libvpx/vp9/encoder/vp9_picklpf.h +++ b/libvpx/vp9/encoder/vp9_picklpf.h @@ -12,11 +12,17 @@ #ifndef VP9_ENCODER_VP9_PICKLPF_H_ #define VP9_ENCODER_VP9_PICKLPF_H_ +#ifdef __cplusplus +extern "C" { +#endif + struct yv12_buffer_config; struct VP9_COMP; -void vp9_set_alt_lf_level(struct VP9_COMP *cpi, int filt_val); +void vp9_pick_filter_level(const struct yv12_buffer_config *sd, + struct VP9_COMP *cpi, int method); +#ifdef __cplusplus +} // extern "C" +#endif -void vp9_pick_filter_level(struct yv12_buffer_config *sd, - struct VP9_COMP *cpi, int partial); #endif // VP9_ENCODER_VP9_PICKLPF_H_ diff --git a/libvpx/vp9/encoder/vp9_pickmode.c b/libvpx/vp9/encoder/vp9_pickmode.c new file mode 100644 index 0000000..c10b4f3 --- /dev/null +++ b/libvpx/vp9/encoder/vp9_pickmode.c @@ -0,0 +1,365 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
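The rewrite above replaces repeated filter-and-measure passes with a memoized step search: ss_err[] caches the sum-squared error per level so no level is filtered twice. Its control flow, condensed into a standalone sketch; err_fn stands in for try_filter_frame() and is assumed to return non-negative errors, the bias toward lower levels is dropped for brevity, and the 0..63 level range (MAX_LOOP_FILTER) is an assumption carried over from the codec:

    #include <string.h>

    static int search_level(int prev_level, int max_level,
                            long long (*err_fn)(int level, void *ctx),
                            void *ctx) {
      long long ss_err[64];  // one slot per filter level, 0..63
      int filt_mid = prev_level < 0 ? 0
                   : prev_level > max_level ? max_level : prev_level;
      int filter_step = filt_mid < 16 ? 4 : filt_mid / 4;
      int filt_best = filt_mid;
      int filt_direction = 0;
      long long best_err;

      memset(ss_err, 0xFF, sizeof(ss_err));  // all-ones bytes == -1 == unseen
      best_err = ss_err[filt_mid] = err_fn(filt_mid, ctx);

      while (filter_step > 0) {
        const int lo = filt_mid - filter_step < 0 ? 0 : filt_mid - filter_step;
        const int hi = filt_mid + filter_step > max_level
                           ? max_level : filt_mid + filter_step;
        if (filt_direction <= 0 && lo != filt_mid) {
          if (ss_err[lo] < 0)              // filter each level at most once
            ss_err[lo] = err_fn(lo, ctx);
          if (ss_err[lo] < best_err) {
            best_err = ss_err[lo];
            filt_best = lo;
          }
        }
        if (filt_direction >= 0 && hi != filt_mid) {
          if (ss_err[hi] < 0)
            ss_err[hi] = err_fn(hi, ctx);
          if (ss_err[hi] < best_err) {
            best_err = ss_err[hi];
            filt_best = hi;
          }
        }
        if (filt_best == filt_mid) {
          filter_step /= 2;                // converged locally: tighten step
          filt_direction = 0;
        } else {
          filt_direction = filt_best < filt_mid ? -1 : 1;
          filt_mid = filt_best;            // recenter on the new best
        }
      }
      return filt_best;
    }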
+ */ + +#include <assert.h> +#include <limits.h> +#include <math.h> +#include <stdio.h> + +#include "./vp9_rtcd.h" + +#include "vpx_mem/vpx_mem.h" + +#include "vp9/common/vp9_common.h" +#include "vp9/common/vp9_mvref_common.h" +#include "vp9/common/vp9_reconinter.h" +#include "vp9/common/vp9_reconintra.h" + +#include "vp9/encoder/vp9_onyx_int.h" +#include "vp9/encoder/vp9_ratectrl.h" +#include "vp9/encoder/vp9_rdopt.h" + +static int full_pixel_motion_search(VP9_COMP *cpi, MACROBLOCK *x, + const TileInfo *const tile, + BLOCK_SIZE bsize, int mi_row, int mi_col, + int_mv *tmp_mv, int *rate_mv) { + MACROBLOCKD *xd = &x->e_mbd; + MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; + struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}}; + int bestsme = INT_MAX; + int step_param; + int sadpb = x->sadperbit16; + MV mvp_full; + int ref = mbmi->ref_frame[0]; + const MV ref_mv = mbmi->ref_mvs[ref][0].as_mv; + int i; + + int tmp_col_min = x->mv_col_min; + int tmp_col_max = x->mv_col_max; + int tmp_row_min = x->mv_row_min; + int tmp_row_max = x->mv_row_max; + + int buf_offset; + int stride = xd->plane[0].pre[0].stride; + + const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi, + ref); + if (scaled_ref_frame) { + int i; + // Swap out the reference frame for a version that's been scaled to + // match the resolution of the current frame, allowing the existing + // motion search code to be used without additional modifications. + for (i = 0; i < MAX_MB_PLANE; i++) + backup_yv12[i] = xd->plane[i].pre[0]; + + vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL); + } + + vp9_set_mv_search_range(x, &ref_mv); + + // TODO(jingning) exploiting adaptive motion search control in non-RD + // mode decision too. + step_param = 6; + + for (i = LAST_FRAME; i <= LAST_FRAME && cpi->common.show_frame; ++i) { + if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) { + tmp_mv->as_int = INVALID_MV; + + if (scaled_ref_frame) { + int i; + for (i = 0; i < MAX_MB_PLANE; i++) + xd->plane[i].pre[0] = backup_yv12[i]; + } + return INT_MAX; + } + } + + mvp_full = mbmi->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv; + + mvp_full.col >>= 3; + mvp_full.row >>= 3; + + if (cpi->sf.search_method == FAST_DIAMOND) { + // NOTE: this returns SAD + vp9_fast_dia_search(x, &mvp_full, step_param, sadpb, 0, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + } else if (cpi->sf.search_method == FAST_HEX) { + // NOTE: this returns SAD + vp9_fast_hex_search(x, &mvp_full, step_param, sadpb, 0, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + } else if (cpi->sf.search_method == HEX) { + // NOTE: this returns SAD + vp9_hex_search(x, &mvp_full, step_param, sadpb, 1, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + } else if (cpi->sf.search_method == SQUARE) { + // NOTE: this returns SAD + vp9_square_search(x, &mvp_full, step_param, sadpb, 1, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + } else if (cpi->sf.search_method == BIGDIA) { + // NOTE: this returns SAD + vp9_bigdia_search(x, &mvp_full, step_param, sadpb, 1, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + } else { + int further_steps = (cpi->sf.max_step_search_steps - 1) - step_param; + // NOTE: this returns variance + vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param, + sadpb, further_steps, 1, + &cpi->fn_ptr[bsize], + &ref_mv, &tmp_mv->as_mv); + } + x->mv_col_min = tmp_col_min; + x->mv_col_max = tmp_col_max; + x->mv_row_min = tmp_row_min; + x->mv_row_max = tmp_row_max; + + if (scaled_ref_frame) { + int i; + for (i = 0; i < MAX_MB_PLANE; i++) 
+ xd->plane[i].pre[0] = backup_yv12[i]; + } + + // TODO(jingning) This step can be merged into full pixel search step in the + // re-designed log-diamond search + buf_offset = tmp_mv->as_mv.row * stride + tmp_mv->as_mv.col; + + // Find sad for current vector. + bestsme = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf, x->plane[0].src.stride, + xd->plane[0].pre[0].buf + buf_offset, + stride, 0x7fffffff); + + // scale to 1/8 pixel resolution + tmp_mv->as_mv.row = tmp_mv->as_mv.row * 8; + tmp_mv->as_mv.col = tmp_mv->as_mv.col * 8; + + // calculate the bit cost on motion vector + *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, + x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); + return bestsme; +} + +static void sub_pixel_motion_search(VP9_COMP *cpi, MACROBLOCK *x, + const TileInfo *const tile, + BLOCK_SIZE bsize, int mi_row, int mi_col, + MV *tmp_mv) { + MACROBLOCKD *xd = &x->e_mbd; + MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; + struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}}; + int ref = mbmi->ref_frame[0]; + MV ref_mv = mbmi->ref_mvs[ref][0].as_mv; + int dis; + + const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi, + ref); + if (scaled_ref_frame) { + int i; + // Swap out the reference frame for a version that's been scaled to + // match the resolution of the current frame, allowing the existing + // motion search code to be used without additional modifications. + for (i = 0; i < MAX_MB_PLANE; i++) + backup_yv12[i] = xd->plane[i].pre[0]; + + vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL); + } + + tmp_mv->col >>= 3; + tmp_mv->row >>= 3; + + cpi->find_fractional_mv_step(x, tmp_mv, &ref_mv, + cpi->common.allow_high_precision_mv, + x->errorperbit, + &cpi->fn_ptr[bsize], + cpi->sf.subpel_force_stop, + cpi->sf.subpel_iters_per_step, + x->nmvjointcost, x->mvcost, + &dis, &x->pred_sse[ref]); + + if (scaled_ref_frame) { + int i; + for (i = 0; i < MAX_MB_PLANE; i++) + xd->plane[i].pre[0] = backup_yv12[i]; + } +} + +static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, + MACROBLOCK *x, MACROBLOCKD *xd, + int *out_rate_sum, int64_t *out_dist_sum) { + // Note our transform coeffs are 8 times an orthogonal transform. + // Hence quantizer step is also 8 times. To get effective quantizer + // we need to divide by 8 before sending to modeling function. + unsigned int sse; + int rate; + int64_t dist; + + + struct macroblock_plane *const p = &x->plane[0]; + struct macroblockd_plane *const pd = &xd->plane[0]; + const BLOCK_SIZE bs = get_plane_block_size(bsize, pd); + + (void) cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, + pd->dst.buf, pd->dst.stride, &sse); + + vp9_model_rd_from_var_lapndz(sse, 1 << num_pels_log2_lookup[bs], + pd->dequant[1] >> 3, &rate, &dist); + + *out_rate_sum = rate; + *out_dist_sum = dist << 4; +} + +// TODO(jingning) placeholder for inter-frame non-RD mode decision. +// this needs various further optimizations. to be continued.. 
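One detail worth pulling out of full_pixel_motion_search() and sub_pixel_motion_search() above: motion vectors are carried in 1/8-pel units, truncated to full-pel (>> 3) for the integer search, then scaled back (* 8) before the fractional refinement tightens them. A minimal sketch of that unit juggling; the two-field struct merely stands in for the codec's MV type:

    #include <stdint.h>

    typedef struct { int16_t row, col; } mv_units;

    static mv_units eighth_to_fullpel(mv_units v) {
      // Arithmetic shift truncates toward negative infinity, matching the
      // mvp_full.row >>= 3 / col >>= 3 lines above.
      mv_units out = { (int16_t)(v.row >> 3), (int16_t)(v.col >> 3) };
      return out;
    }

    static mv_units fullpel_to_eighth(mv_units v) {
      // Inverse scaling applied to the search result before sub-pel search.
      mv_units out = { (int16_t)(v.row * 8), (int16_t)(v.col * 8) };
      return out;
    }

    // Example: a 1/8-pel predictor of (-13, 21) lands on full-pel (-2, 2);
    // the integer-search winner is then promoted back to 1/8-pel units and
    // vp9_mv_bit_cost() prices it against the reference MV.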
+int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, + const TileInfo *const tile, + int mi_row, int mi_col, + int *returnrate, + int64_t *returndistortion, + BLOCK_SIZE bsize) { + MACROBLOCKD *xd = &x->e_mbd; + MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; + struct macroblock_plane *const p = &x->plane[0]; + struct macroblockd_plane *const pd = &xd->plane[0]; + const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]); + MB_PREDICTION_MODE this_mode, best_mode = ZEROMV; + MV_REFERENCE_FRAME ref_frame, best_ref_frame = LAST_FRAME; + int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES]; + struct buf_2d yv12_mb[4][MAX_MB_PLANE]; + static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG, + VP9_ALT_FLAG }; + int64_t best_rd = INT64_MAX; + int64_t this_rd = INT64_MAX; + + const int64_t inter_mode_thresh = 300; + const int64_t intra_mode_cost = 50; + + int rate = INT_MAX; + int64_t dist = INT64_MAX; + + x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH; + + x->skip = 0; + if (cpi->active_map_enabled && x->active_ptr[0] == 0) + x->skip = 1; + + // initialize mode decisions + *returnrate = INT_MAX; + *returndistortion = INT64_MAX; + vpx_memset(mbmi, 0, sizeof(MB_MODE_INFO)); + mbmi->sb_type = bsize; + mbmi->ref_frame[0] = NONE; + mbmi->ref_frame[1] = NONE; + mbmi->tx_size = MIN(max_txsize_lookup[bsize], + tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); + mbmi->interp_filter = cpi->common.interp_filter == SWITCHABLE ? + EIGHTTAP : cpi->common.interp_filter; + mbmi->skip = 0; + mbmi->segment_id = 0; + + for (ref_frame = LAST_FRAME; ref_frame <= LAST_FRAME ; ++ref_frame) { + x->pred_mv_sad[ref_frame] = INT_MAX; + if (cpi->ref_frame_flags & flag_list[ref_frame]) { + vp9_setup_buffer_inter(cpi, x, tile, + ref_frame, block_size, mi_row, mi_col, + frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb); + } + frame_mv[NEWMV][ref_frame].as_int = INVALID_MV; + frame_mv[ZEROMV][ref_frame].as_int = 0; + } + + for (ref_frame = LAST_FRAME; ref_frame <= LAST_FRAME ; ++ref_frame) { + if (!(cpi->ref_frame_flags & flag_list[ref_frame])) + continue; + + // Select prediction reference frames. 
+ xd->plane[0].pre[0] = yv12_mb[ref_frame][0]; + + clamp_mv2(&frame_mv[NEARESTMV][ref_frame].as_mv, xd); + clamp_mv2(&frame_mv[NEARMV][ref_frame].as_mv, xd); + + mbmi->ref_frame[0] = ref_frame; + + for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) { + int rate_mv = 0; + + if (cpi->sf.disable_inter_mode_mask[bsize] & + (1 << INTER_OFFSET(this_mode))) + continue; + + if (this_mode == NEWMV) { + if (this_rd < (int64_t)(1 << num_pels_log2_lookup[bsize])) + continue; + + x->mode_sad[ref_frame][INTER_OFFSET(NEWMV)] = + full_pixel_motion_search(cpi, x, tile, bsize, mi_row, mi_col, + &frame_mv[NEWMV][ref_frame], &rate_mv); + + if (frame_mv[NEWMV][ref_frame].as_int == INVALID_MV) + continue; + + sub_pixel_motion_search(cpi, x, tile, bsize, mi_row, mi_col, + &frame_mv[NEWMV][ref_frame].as_mv); + } + + if (this_mode != NEARESTMV) + if (frame_mv[this_mode][ref_frame].as_int == + frame_mv[NEARESTMV][ref_frame].as_int) + continue; + + mbmi->mode = this_mode; + mbmi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int; + vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize); + + model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist); + rate += rate_mv; + rate += x->inter_mode_cost[mbmi->mode_context[ref_frame]] + [INTER_OFFSET(this_mode)]; + this_rd = RDCOST(x->rdmult, x->rddiv, rate, dist); + + if (this_rd < best_rd) { + best_rd = this_rd; + best_mode = this_mode; + best_ref_frame = ref_frame; + } + } + } + + mbmi->mode = best_mode; + mbmi->ref_frame[0] = best_ref_frame; + mbmi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int; + xd->mi_8x8[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int; + + // Perform intra prediction search, if the best SAD is above a certain + // threshold. + if (best_rd > inter_mode_thresh) { + for (this_mode = DC_PRED; this_mode <= DC_PRED; ++this_mode) { + vp9_predict_intra_block(xd, 0, b_width_log2(bsize), + mbmi->tx_size, this_mode, + &p->src.buf[0], p->src.stride, + &pd->dst.buf[0], pd->dst.stride, 0, 0, 0); + + model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist); + rate += x->mbmode_cost[this_mode]; + this_rd = RDCOST(x->rdmult, x->rddiv, rate, dist); + + if (this_rd + intra_mode_cost < best_rd) { + best_rd = this_rd; + mbmi->mode = this_mode; + mbmi->ref_frame[0] = INTRA_FRAME; + mbmi->uv_mode = this_mode; + mbmi->mv[0].as_int = INVALID_MV; + } + } + } + + return INT64_MAX; +} diff --git a/libvpx/vp9/encoder/vp9_pickmode.h b/libvpx/vp9/encoder/vp9_pickmode.h new file mode 100644 index 0000000..05ff187 --- /dev/null +++ b/libvpx/vp9/encoder/vp9_pickmode.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
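Each candidate in the loop above is ranked with RDCOST(x->rdmult, x->rddiv, rate, dist); the exact fixed-point scaling is defined in vp9_rdopt.h, but generically the comparison is the Lagrangian cost J = D + lambda * R. A plain-arithmetic sketch of the same selection, with lambda passed as a rational so everything stays in integers:

    #include <stdint.h>

    typedef struct { int rate; int64_t dist; } candidate;

    // Returns the index of the cheapest candidate, or -1 if n == 0.
    static int pick_best(const candidate *c, int n,
                         int64_t lambda_num, int64_t lambda_den) {
      int64_t best_rd = INT64_MAX;
      int best = -1, i;
      for (i = 0; i < n; ++i) {
        const int64_t rd = c[i].dist + (lambda_num * c[i].rate) / lambda_den;
        if (rd < best_rd) {
          best_rd = rd;
          best = i;
        }
      }
      return best;
    }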
+ */ + +#ifndef VP9_ENCODER_VP9_PICKMODE_H_ +#define VP9_ENCODER_VP9_PICKMODE_H_ + +#include "vp9/encoder/vp9_onyx_int.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, + const struct TileInfo *const tile, + int mi_row, int mi_col, + int *returnrate, + int64_t *returndistortion, + BLOCK_SIZE bsize); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_ENCODER_VP9_PICKMODE_H_ diff --git a/libvpx/vp9/encoder/vp9_psnr.c b/libvpx/vp9/encoder/vp9_psnr.c deleted file mode 100644 index 58294e1..0000000 --- a/libvpx/vp9/encoder/vp9_psnr.c +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include <math.h> - -#include "vpx_scale/yv12config.h" - -#define MAX_PSNR 100 - -double vp9_mse2psnr(double samples, double peak, double mse) { - double psnr; - - if (mse > 0.0) - psnr = 10.0 * log10(peak * peak * samples / mse); - else - psnr = MAX_PSNR; // Limit to prevent / 0 - - if (psnr > MAX_PSNR) - psnr = MAX_PSNR; - - return psnr; -} diff --git a/libvpx/vp9/encoder/vp9_psnr.h b/libvpx/vp9/encoder/vp9_psnr.h deleted file mode 100644 index 15dd836..0000000 --- a/libvpx/vp9/encoder/vp9_psnr.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
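For reference, the vp9_mse2psnr() removed above folds the per-sample division into the log: PSNR = 10 * log10(peak^2 * N / SSE), capped at 100 dB so the lossless case stays finite. For a 640x480 luma plane (N = 307200) with SSE = 7680000 (a per-pixel MSE of 25) and peak = 255, that is 10 * log10(65025 / 25), about 34.15 dB. A standalone equivalent for quick checks:

    #include <math.h>

    static double mse2psnr(double samples, double peak, double sse) {
      // Note the third argument is the *summed* squared error; multiplying
      // by the sample count folds the SSE -> MSE division into the ratio.
      double psnr =
          sse > 0.0 ? 10.0 * log10(peak * peak * samples / sse) : 100.0;
      return psnr > 100.0 ? 100.0 : psnr;  // cap, as the deleted code did
    }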
- */ - - -#ifndef VP9_ENCODER_VP9_PSNR_H_ -#define VP9_ENCODER_VP9_PSNR_H_ - -double vp9_mse2psnr(double samples, double peak, double mse); - -#endif // VP9_ENCODER_VP9_PSNR_H_ diff --git a/libvpx/vp9/encoder/vp9_quantize.c b/libvpx/vp9/encoder/vp9_quantize.c index d24be96..4ab8995 100644 --- a/libvpx/vp9/encoder/vp9_quantize.c +++ b/libvpx/vp9/encoder/vp9_quantize.c @@ -9,18 +9,15 @@ */ #include <math.h> + #include "vpx_mem/vpx_mem.h" -#include "vp9/encoder/vp9_onyx_int.h" -#include "vp9/encoder/vp9_rdopt.h" -#include "vp9/encoder/vp9_quantize.h" #include "vp9/common/vp9_quant_common.h" - #include "vp9/common/vp9_seg_common.h" -#ifdef ENC_DEBUG -extern int enc_debug; -#endif +#include "vp9/encoder/vp9_onyx_int.h" +#include "vp9/encoder/vp9_quantize.h" +#include "vp9/encoder/vp9_rdopt.h" void vp9_quantize_b_c(const int16_t *coeff_ptr, intptr_t count, int skip_block, @@ -30,7 +27,7 @@ void vp9_quantize_b_c(const int16_t *coeff_ptr, intptr_t count, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan) { - int i, non_zero_count = count, eob = -1; + int i, non_zero_count = (int)count, eob = -1; const int zbins[2] = { zbin_ptr[0] + zbin_oq_value, zbin_ptr[1] + zbin_oq_value }; const int nzbins[2] = { zbins[0] * -1, @@ -41,7 +38,7 @@ void vp9_quantize_b_c(const int16_t *coeff_ptr, intptr_t count, if (!skip_block) { // Pre-scan pass - for (i = count - 1; i >= 0; i--) { + for (i = (int)count - 1; i >= 0; i--) { const int rc = scan[i]; const int coeff = coeff_ptr[rc]; @@ -83,99 +80,64 @@ void vp9_quantize_b_32x32_c(const int16_t *coeff_ptr, intptr_t n_coeffs, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan) { - int i, rc, eob; - int zbins[2], nzbins[2]; - int x, y, z, sz; + const int zbins[2] = { ROUND_POWER_OF_TWO(zbin_ptr[0] + zbin_oq_value, 1), + ROUND_POWER_OF_TWO(zbin_ptr[1] + zbin_oq_value, 1) }; + const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1}; + int idx = 0; int idx_arr[1024]; + int i, eob = -1; - vpx_memset(qcoeff_ptr, 0, n_coeffs*sizeof(int16_t)); - vpx_memset(dqcoeff_ptr, 0, n_coeffs*sizeof(int16_t)); - - eob = -1; - - // Base ZBIN - zbins[0] = ROUND_POWER_OF_TWO(zbin_ptr[0] + zbin_oq_value, 1); - zbins[1] = ROUND_POWER_OF_TWO(zbin_ptr[1] + zbin_oq_value, 1); - nzbins[0] = zbins[0] * -1; - nzbins[1] = zbins[1] * -1; + vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(int16_t)); + vpx_memset(dqcoeff_ptr, 0, n_coeffs * sizeof(int16_t)); if (!skip_block) { // Pre-scan pass for (i = 0; i < n_coeffs; i++) { - rc = scan[i]; - z = coeff_ptr[rc]; + const int rc = scan[i]; + const int coeff = coeff_ptr[rc]; // If the coefficient is out of the base ZBIN range, keep it for // quantization. - if (z >= zbins[rc != 0] || z <= nzbins[rc != 0]) + if (coeff >= zbins[rc != 0] || coeff <= nzbins[rc != 0]) idx_arr[idx++] = i; } // Quantization pass: only process the coefficients selected in // pre-scan pass. Note: idx can be zero. 
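The rewritten 32x32 quantizer above reads most easily one coefficient at a time. The same arithmetic as a standalone helper; the per-band round/quant/quant_shift/dequant values come from the tables built in vp9_init_quantizer() further down, and the final division by 2 compensates for the 32x32 transform's extra scaling:

    #include <stdint.h>

    static int16_t quantize_coeff_32x32(int coeff, int round, int quant,
                                        int quant_shift, int dequant,
                                        int16_t *dqcoeff) {
      const int sign = coeff >> 31;           // 0 if positive, -1 if negative
      int abs_coeff = (coeff ^ sign) - sign;  // |coeff| without a branch
      int tmp;

      abs_coeff += (round + 1) >> 1;          // ROUND_POWER_OF_TWO(round, 1)
      if (abs_coeff > INT16_MAX)              // upper clamp, as in the original
        abs_coeff = INT16_MAX;

      // Two-stage multiply: a 16-bit reciprocal approximation refined by
      // quant_shift, mirroring what invert_quant() prepares below.
      tmp = ((((abs_coeff * quant) >> 16) + abs_coeff) * quant_shift) >> 15;

      *dqcoeff = (int16_t)(((tmp ^ sign) - sign) * dequant / 2);
      return (int16_t)((tmp ^ sign) - sign);  // the quantized level
    }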
for (i = 0; i < idx; i++) { - rc = scan[idx_arr[i]]; - - z = coeff_ptr[rc]; - sz = (z >> 31); // sign of z - x = (z ^ sz) - sz; // x = abs(z) - - x += ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); - x = clamp(x, INT16_MIN, INT16_MAX); - y = ((((x * quant_ptr[rc != 0]) >> 16) + x) * - quant_shift_ptr[rc != 0]) >> 15; // quantize (x) - - x = (y ^ sz) - sz; // get the sign back - qcoeff_ptr[rc] = x; // write to destination - dqcoeff_ptr[rc] = x * dequant_ptr[rc != 0] / 2; // dequantized value - - if (y) - eob = idx_arr[i]; // last nonzero coeffs + const int rc = scan[idx_arr[i]]; + const int coeff = coeff_ptr[rc]; + const int coeff_sign = (coeff >> 31); + int tmp; + int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; + abs_coeff += ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); + abs_coeff = clamp(abs_coeff, INT16_MIN, INT16_MAX); + tmp = ((((abs_coeff * quant_ptr[rc != 0]) >> 16) + abs_coeff) * + quant_shift_ptr[rc != 0]) >> 15; + + qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; + dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2; + + if (tmp) + eob = idx_arr[i]; } } *eob_ptr = eob + 1; } -struct plane_block_idx { - int plane; - int block; -}; - -// TODO(jkoleszar): returning a struct so it can be used in a const context, -// expect to refactor this further later. -static INLINE struct plane_block_idx plane_block_idx(int y_blocks, - int b_idx) { - const int v_offset = y_blocks * 5 / 4; - struct plane_block_idx res; - - if (b_idx < y_blocks) { - res.plane = 0; - res.block = b_idx; - } else if (b_idx < v_offset) { - res.plane = 1; - res.block = b_idx - y_blocks; - } else { - assert(b_idx < y_blocks * 3 / 2); - res.plane = 2; - res.block = b_idx - v_offset; - } - return res; -} - -void vp9_regular_quantize_b_4x4(MACROBLOCK *x, int y_blocks, int b_idx, +void vp9_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block, const int16_t *scan, const int16_t *iscan) { MACROBLOCKD *const xd = &x->e_mbd; - const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx); - struct macroblock_plane* p = &x->plane[pb_idx.plane]; - struct macroblockd_plane* pd = &xd->plane[pb_idx.plane]; + struct macroblock_plane *p = &x->plane[plane]; + struct macroblockd_plane *pd = &xd->plane[plane]; - vp9_quantize_b(BLOCK_OFFSET(p->coeff, pb_idx.block), + vp9_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, - BLOCK_OFFSET(pd->qcoeff, pb_idx.block), - BLOCK_OFFSET(pd->dqcoeff, pb_idx.block), - pd->dequant, p->zbin_extra, &pd->eobs[pb_idx.block], scan, iscan); + BLOCK_OFFSET(p->qcoeff, block), + BLOCK_OFFSET(pd->dqcoeff, block), + pd->dequant, p->zbin_extra, &p->eobs[block], scan, iscan); } static void invert_quant(int16_t *quant, int16_t *shift, int d) { @@ -190,44 +152,40 @@ static void invert_quant(int16_t *quant, int16_t *shift, int d) { } void vp9_init_quantizer(VP9_COMP *cpi) { - int i, q; VP9_COMMON *const cm = &cpi->common; + int i, q, quant; for (q = 0; q < QINDEX_RANGE; q++) { const int qzbin_factor = q == 0 ? 64 : (vp9_dc_quant(q, 0) < 148 ? 84 : 80); const int qrounding_factor = q == 0 ? 64 : 48; - // y for (i = 0; i < 2; ++i) { - const int quant = i == 0 ? vp9_dc_quant(q, cm->y_dc_delta_q) - : vp9_ac_quant(q, 0); + // y + quant = i == 0 ? 
vp9_dc_quant(q, cm->y_dc_delta_q) + : vp9_ac_quant(q, 0); invert_quant(&cpi->y_quant[q][i], &cpi->y_quant_shift[q][i], quant); cpi->y_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7); cpi->y_round[q][i] = (qrounding_factor * quant) >> 7; cm->y_dequant[q][i] = quant; - } - // uv - for (i = 0; i < 2; ++i) { - const int quant = i == 0 ? vp9_dc_quant(q, cm->uv_dc_delta_q) - : vp9_ac_quant(q, cm->uv_ac_delta_q); + // uv + quant = i == 0 ? vp9_dc_quant(q, cm->uv_dc_delta_q) + : vp9_ac_quant(q, cm->uv_ac_delta_q); invert_quant(&cpi->uv_quant[q][i], &cpi->uv_quant_shift[q][i], quant); cpi->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7); cpi->uv_round[q][i] = (qrounding_factor * quant) >> 7; cm->uv_dequant[q][i] = quant; - } #if CONFIG_ALPHA - // alpha - for (i = 0; i < 2; ++i) { - const int quant = i == 0 ? vp9_dc_quant(q, cm->a_dc_delta_q) - : vp9_ac_quant(q, cm->a_ac_delta_q); + // alpha + quant = i == 0 ? vp9_dc_quant(q, cm->a_dc_delta_q) + : vp9_ac_quant(q, cm->a_ac_delta_q); invert_quant(&cpi->a_quant[q][i], &cpi->a_quant_shift[q][i], quant); cpi->a_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7); cpi->a_round[q][i] = (qrounding_factor * quant) >> 7; cm->a_dequant[q][i] = quant; - } #endif + } for (i = 2; i < 8; i++) { cpi->y_quant[q][i] = cpi->y_quant[q][1]; @@ -253,39 +211,31 @@ void vp9_init_quantizer(VP9_COMP *cpi) { } } -void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) { - int i; - VP9_COMMON *const cm = &cpi->common; +void vp9_init_plane_quantizers(VP9_COMP *cpi, MACROBLOCK *x) { + const VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *xd = &x->e_mbd; - int zbin_extra; - int segment_id = xd->mi_8x8[0]->mbmi.segment_id; - const int qindex = vp9_get_qindex(&cpi->common.seg, segment_id, - cpi->common.base_qindex); - - int rdmult = vp9_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q); + const int segment_id = xd->mi_8x8[0]->mbmi.segment_id; + const int qindex = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex); + const int rdmult = vp9_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q); + const int zbin = cpi->zbin_mode_boost + x->act_zbin_adj; + int i; // Y - zbin_extra = (cpi->common.y_dequant[qindex][1] * - (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7; - x->plane[0].quant = cpi->y_quant[qindex]; x->plane[0].quant_shift = cpi->y_quant_shift[qindex]; x->plane[0].zbin = cpi->y_zbin[qindex]; x->plane[0].round = cpi->y_round[qindex]; - x->plane[0].zbin_extra = (int16_t)zbin_extra; - x->e_mbd.plane[0].dequant = cpi->common.y_dequant[qindex]; + x->plane[0].zbin_extra = (int16_t)((cm->y_dequant[qindex][1] * zbin) >> 7); + xd->plane[0].dequant = cm->y_dequant[qindex]; // UV - zbin_extra = (cpi->common.uv_dequant[qindex][1] * - (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7; - for (i = 1; i < 3; i++) { x->plane[i].quant = cpi->uv_quant[qindex]; x->plane[i].quant_shift = cpi->uv_quant_shift[qindex]; x->plane[i].zbin = cpi->uv_zbin[qindex]; x->plane[i].round = cpi->uv_round[qindex]; - x->plane[i].zbin_extra = (int16_t)zbin_extra; - x->e_mbd.plane[i].dequant = cpi->common.uv_dequant[qindex]; + x->plane[i].zbin_extra = (int16_t)((cm->uv_dequant[qindex][1] * zbin) >> 7); + xd->plane[i].dequant = cm->uv_dequant[qindex]; } #if CONFIG_ALPHA @@ -293,19 +243,15 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) { x->plane[3].quant_shift = cpi->a_quant_shift[qindex]; x->plane[3].zbin = cpi->a_zbin[qindex]; x->plane[3].round = cpi->a_round[qindex]; - x->plane[3].zbin_extra = (int16_t)zbin_extra; - x->e_mbd.plane[3].dequant = cpi->common.a_dequant[qindex]; + 
x->plane[3].zbin_extra = (int16_t)((cm->a_dequant[qindex][1] * zbin) >> 7); + xd->plane[3].dequant = cm->a_dequant[qindex]; #endif - x->skip_block = vp9_segfeature_active(&cpi->common.seg, segment_id, - SEG_LVL_SKIP); - - /* save this macroblock QIndex for vp9_update_zbin_extra() */ + x->skip_block = vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP); x->q_index = qindex; - /* R/D setup */ - cpi->mb.errorperbit = rdmult >> 6; - cpi->mb.errorperbit += (cpi->mb.errorperbit == 0); + x->errorperbit = rdmult >> 6; + x->errorperbit += (x->errorperbit == 0); vp9_initialize_me_consts(cpi, x->q_index); } @@ -323,26 +269,17 @@ void vp9_update_zbin_extra(VP9_COMP *cpi, MACROBLOCK *x) { } void vp9_frame_init_quantizer(VP9_COMP *cpi) { - // Clear Zbin mode boost for default case cpi->zbin_mode_boost = 0; - - // MB level quantizer setup - vp9_mb_init_quantizer(cpi, &cpi->mb); + vp9_init_plane_quantizers(cpi, &cpi->mb); } void vp9_set_quantizer(struct VP9_COMP *cpi, int q) { - VP9_COMMON *cm = &cpi->common; + VP9_COMMON *const cm = &cpi->common; + // quantizer has to be reinitialized with vp9_init_quantizer() if any + // delta_q changes. cm->base_qindex = q; - - // if any of the delta_q values are changing update flag will - // have to be set. cm->y_dc_delta_q = 0; cm->uv_dc_delta_q = 0; cm->uv_ac_delta_q = 0; - - // quantizer has to be reinitialized if any delta_q changes. - // As there are not any here for now this is inactive code. - // if(update) - // vp9_init_quantizer(cpi); } diff --git a/libvpx/vp9/encoder/vp9_quantize.h b/libvpx/vp9/encoder/vp9_quantize.h index c078e1d..f356b12 100644 --- a/libvpx/vp9/encoder/vp9_quantize.h +++ b/libvpx/vp9/encoder/vp9_quantize.h @@ -13,7 +13,11 @@ #include "vp9/encoder/vp9_block.h" -void vp9_regular_quantize_b_4x4(MACROBLOCK *x, int y_blocks, int b_idx, +#ifdef __cplusplus +extern "C" { +#endif + +void vp9_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block, const int16_t *scan, const int16_t *iscan); struct VP9_COMP; @@ -24,8 +28,12 @@ void vp9_frame_init_quantizer(struct VP9_COMP *cpi); void vp9_update_zbin_extra(struct VP9_COMP *cpi, MACROBLOCK *x); -void vp9_mb_init_quantizer(struct VP9_COMP *cpi, MACROBLOCK *x); +void vp9_init_plane_quantizers(struct VP9_COMP *cpi, MACROBLOCK *x); void vp9_init_quantizer(struct VP9_COMP *cpi); +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_ENCODER_VP9_QUANTIZE_H_ diff --git a/libvpx/vp9/encoder/vp9_ratectrl.c b/libvpx/vp9/encoder/vp9_ratectrl.c index 0aa3a68..8430e4b 100644 --- a/libvpx/vp9/encoder/vp9_ratectrl.c +++ b/libvpx/vp9/encoder/vp9_ratectrl.c @@ -8,23 +8,26 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include <limits.h> #include <assert.h> +#include <limits.h> #include <math.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "vpx_mem/vpx_mem.h" #include "vp9/common/vp9_alloccommon.h" #include "vp9/common/vp9_common.h" -#include "vp9/encoder/vp9_ratectrl.h" #include "vp9/common/vp9_entropymode.h" -#include "vpx_mem/vpx_mem.h" -#include "vp9/common/vp9_systemdependent.h" -#include "vp9/encoder/vp9_encodemv.h" #include "vp9/common/vp9_quant_common.h" #include "vp9/common/vp9_seg_common.h" +#include "vp9/common/vp9_systemdependent.h" + +#include "vp9/encoder/vp9_encodemv.h" +#include "vp9/encoder/vp9_ratectrl.h" + +#define LIMIT_QRANGE_FOR_ALTREF_AND_KEY 1 #define MIN_BPB_FACTOR 0.005 #define MAX_BPB_FACTOR 50 @@ -32,8 +35,87 @@ // Bits Per MB at different Q (Multiplied by 512) #define BPER_MB_NORMBITS 9 -static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] = - { 1, 2, 3, 4, 5 }; +// Tables relating active max Q to active min Q +static int kf_low_motion_minq[QINDEX_RANGE]; +static int kf_high_motion_minq[QINDEX_RANGE]; +static int gf_low_motion_minq[QINDEX_RANGE]; +static int gf_high_motion_minq[QINDEX_RANGE]; +static int inter_minq[QINDEX_RANGE]; +static int afq_low_motion_minq[QINDEX_RANGE]; +static int afq_high_motion_minq[QINDEX_RANGE]; +static int gf_high = 2000; +static int gf_low = 400; +static int kf_high = 5000; +static int kf_low = 400; + +// Functions to compute the active minq lookup table entries based on a +// formulaic approach to facilitate easier adjustment of the Q tables. +// The formulae were derived from computing a 3rd order polynomial best +// fit to the original data (after plotting real maxq vs minq (not q index)) +static int calculate_minq_index(double maxq, + double x3, double x2, double x1, double c) { + int i; + const double minqtarget = MIN(((x3 * maxq + x2) * maxq + x1) * maxq + c, + maxq); + + // Special case handling to deal with the step from q2.0 + // down to lossless mode represented by q 1.0. + if (minqtarget <= 2.0) + return 0; + + for (i = 0; i < QINDEX_RANGE; i++) { + if (minqtarget <= vp9_convert_qindex_to_q(i)) + return i; + } + + return QINDEX_RANGE - 1; +} + +void vp9_rc_init_minq_luts(void) { + int i; + + for (i = 0; i < QINDEX_RANGE; i++) { + const double maxq = vp9_convert_qindex_to_q(i); + + + kf_low_motion_minq[i] = calculate_minq_index(maxq, + 0.000001, + -0.0004, + 0.15, + 0.0); + kf_high_motion_minq[i] = calculate_minq_index(maxq, + 0.000002, + -0.0012, + 0.50, + 0.0); + + gf_low_motion_minq[i] = calculate_minq_index(maxq, + 0.0000015, + -0.0009, + 0.32, + 0.0); + gf_high_motion_minq[i] = calculate_minq_index(maxq, + 0.0000021, + -0.00125, + 0.50, + 0.0); + afq_low_motion_minq[i] = calculate_minq_index(maxq, + 0.0000015, + -0.0009, + 0.33, + 0.0); + afq_high_motion_minq[i] = calculate_minq_index(maxq, + 0.0000021, + -0.00125, + 0.55, + 0.0); + inter_minq[i] = calculate_minq_index(maxq, + 0.00000271, + -0.00113, + 0.75, + 0.0); + } +} // These functions use formulaic calculations to make playing with the // quantizer tables easier. 
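/* A worked instance of the cubic fit used by vp9_rc_init_minq_luts() above,
 * evaluated with the inter_minq coefficients. This is an illustrative
 * standalone check, not libvpx code. */
#include <stdio.h>
int main(void) {
  const double maxq = 56.0;  /* a real Q value, not a qindex */
  const double minqtarget =
      ((0.00000271 * maxq - 0.00113) * maxq + 0.75) * maxq + 0.0;
  /* Prints ~38.93: an inter frame allowed a max Q of 56 gets a min Q
   * target near 39, which calculate_minq_index() then maps to the
   * smallest qindex whose real Q reaches that target. */
  printf("minqtarget = %f\n", minqtarget);
  return 0;
}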
If necessary they can be replaced by lookup @@ -43,22 +125,8 @@ double vp9_convert_qindex_to_q(int qindex) { return vp9_ac_quant(qindex, 0) / 4.0; } -int vp9_gfboost_qadjust(int qindex) { - const double q = vp9_convert_qindex_to_q(qindex); - return (int)((0.00000828 * q * q * q) + - (-0.0055 * q * q) + - (1.32 * q) + 79.3); -} - -static int kfboost_qadjust(int qindex) { - const double q = vp9_convert_qindex_to_q(qindex); - return (int)((0.00000973 * q * q * q) + - (-0.00613 * q * q) + - (1.316 * q) + 121.2); -} - -int vp9_bits_per_mb(FRAME_TYPE frame_type, int qindex, - double correction_factor) { +int vp9_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex, + double correction_factor) { const double q = vp9_convert_qindex_to_q(qindex); int enumerator = frame_type == KEY_FRAME ? 3300000 : 2250000; @@ -117,8 +185,6 @@ void vp9_setup_key_frame(VP9_COMP *cpi) { vp9_setup_past_independence(cm); - // interval before next GF - cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; /* All buffers are implicitly updated on key frames. */ cpi->refresh_golden_frame = 1; cpi->refresh_alt_ref_frame = 1; @@ -129,13 +195,13 @@ void vp9_setup_inter_frame(VP9_COMP *cpi) { if (cm->error_resilient_mode || cm->intra_only) vp9_setup_past_independence(cm); - assert(cm->frame_context_idx < NUM_FRAME_CONTEXTS); + assert(cm->frame_context_idx < FRAME_CONTEXTS); cm->fc = cm->frame_contexts[cm->frame_context_idx]; } static int estimate_bits_at_q(int frame_kind, int q, int mbs, double correction_factor) { - const int bpm = (int)(vp9_bits_per_mb(frame_kind, q, correction_factor)); + const int bpm = (int)(vp9_rc_bits_per_mb(frame_kind, q, correction_factor)); // Attempt to retain reasonable accuracy without overflow. The cutoff is // chosen such that the maximum product of Bpm and MBs fits 31 bits. The @@ -144,118 +210,152 @@ static int estimate_bits_at_q(int frame_kind, int q, int mbs, : (bpm * mbs) >> BPER_MB_NORMBITS; } - -static void calc_iframe_target_size(VP9_COMP *cpi) { - // boost defaults to half second - int target; - - // Clear down mmx registers to allow floating point in what follows - vp9_clear_system_state(); // __asm emms; - - // New Two pass RC - target = cpi->per_frame_bandwidth; - - if (cpi->oxcf.rc_max_intra_bitrate_pct) { - int max_rate = cpi->per_frame_bandwidth - * cpi->oxcf.rc_max_intra_bitrate_pct / 100; - - if (target > max_rate) - target = max_rate; +int vp9_rc_clamp_pframe_target_size(const VP9_COMP *const cpi, int target) { + const RATE_CONTROL *rc = &cpi->rc; + const int min_frame_target = MAX(rc->min_frame_bandwidth, + rc->av_per_frame_bandwidth >> 5); + if (target < min_frame_target) + target = min_frame_target; + if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) { + // If there is an active ARF at this location use the minimum + // bits on this frame even if it is a constructed arf. + // The active maximum quantizer insures that an appropriate + // number of bits will be spent if needed for constructed ARFs. + target = min_frame_target; } + // Clip the frame target to the maximum allowed value. 
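/* A minimal sketch of the i-frame clamp in vp9_rc_clamp_iframe_target_size()
 * above, with made-up numbers; clamp_iframe_target() is illustrative, not a
 * libvpx function. */
static int clamp_iframe_target(int target, int av_per_frame_bw,
                               int max_intra_pct, int max_frame_bw) {
  if (max_intra_pct) {
    const int max_rate = av_per_frame_bw * max_intra_pct / 100;
    if (target > max_rate)
      target = max_rate;
  }
  return target > max_frame_bw ? max_frame_bw : target;
}
/* e.g. clamp_iframe_target(100000, 24000, 300, 80000) == 72000: the intra
 * cap (24000 * 300 / 100) binds before the per-frame maximum does. */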
+ if (target > rc->max_frame_bandwidth) + target = rc->max_frame_bandwidth; + return target; +} - cpi->this_frame_target = target; +int vp9_rc_clamp_iframe_target_size(const VP9_COMP *const cpi, int target) { + const RATE_CONTROL *rc = &cpi->rc; + const VP9_CONFIG *oxcf = &cpi->oxcf; + if (oxcf->rc_max_intra_bitrate_pct) { + const int max_rate = rc->av_per_frame_bandwidth * + oxcf->rc_max_intra_bitrate_pct / 100; + target = MIN(target, max_rate); + } + if (target > rc->max_frame_bandwidth) + target = rc->max_frame_bandwidth; + return target; } -// Do the best we can to define the parameters for the next GF based -// on what information we have available. -// -// In this experimental code only two pass is supported -// so we just use the interval determined in the two pass code. -static void calc_gf_params(VP9_COMP *cpi) { - // Set the gf interval - cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; +// Update the buffer level for higher layers, given the encoded current layer. +static void update_layer_buffer_level(VP9_COMP *const cpi, + int encoded_frame_size) { + int temporal_layer = 0; + int current_temporal_layer = cpi->svc.temporal_layer_id; + for (temporal_layer = current_temporal_layer + 1; + temporal_layer < cpi->svc.number_temporal_layers; ++temporal_layer) { + LAYER_CONTEXT *lc = &cpi->svc.layer_context[temporal_layer]; + RATE_CONTROL *lrc = &lc->rc; + int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate - + encoded_frame_size); + lrc->bits_off_target += bits_off_for_this_layer; + + // Clip buffer level to maximum buffer size for the layer. + lrc->bits_off_target = MIN(lrc->bits_off_target, lc->maximum_buffer_size); + lrc->buffer_level = lrc->bits_off_target; + } } +// Update the buffer level: leaky bucket model. +static void update_buffer_level(VP9_COMP *cpi, int encoded_frame_size) { + const VP9_COMMON *const cm = &cpi->common; + const VP9_CONFIG *oxcf = &cpi->oxcf; + RATE_CONTROL *const rc = &cpi->rc; -static void calc_pframe_target_size(VP9_COMP *cpi) { - const int min_frame_target = MAX(cpi->min_frame_bandwidth, - cpi->av_per_frame_bandwidth >> 5); - if (cpi->refresh_alt_ref_frame) { - // Special alt reference frame case - // Per frame bit target for the alt ref frame - cpi->per_frame_bandwidth = cpi->twopass.gf_bits; - cpi->this_frame_target = cpi->per_frame_bandwidth; + // Non-viewable frames are a special case and are treated as pure overhead. + if (!cm->show_frame) { + rc->bits_off_target -= encoded_frame_size; } else { - // Normal frames (gf,and inter) - cpi->this_frame_target = cpi->per_frame_bandwidth; + rc->bits_off_target += rc->av_per_frame_bandwidth - encoded_frame_size; } - // Check that the total sum of adjustments is not above the maximum allowed. - // That is, having allowed for the KF and GF penalties, we have not pushed - // the current inter-frame target too low. If the adjustment we apply here is - // not capable of recovering all the extra bits we have spent in the KF or GF, - // then the remainder will have to be recovered over a longer time span via - // other buffer / rate control mechanisms. - if (cpi->this_frame_target < min_frame_target) - cpi->this_frame_target = min_frame_target; - - if (!cpi->refresh_alt_ref_frame) - // Note the baseline target data rate for this inter frame. - cpi->inter_frame_target = cpi->this_frame_target; + // Clip the buffer level to the maximum specified buffer size. 
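/* A small simulation of the leaky-bucket update in update_buffer_level()
 * above; the frame sizes and buffer parameters are hypothetical. */
#include <stdint.h>
static int64_t sim_buffer_level(void) {
  const int64_t maximum_buffer_size = 6000000;  /* bits */
  const int av_per_frame_bandwidth = 100000;    /* bits per frame */
  const int encoded_size[3] = { 140000, 90000, 70000 };
  int64_t bits_off_target = 0;
  int i;
  for (i = 0; i < 3; i++) {
    /* Credit the per-frame allowance, debit what was actually spent. */
    bits_off_target += av_per_frame_bandwidth - encoded_size[i];
    if (bits_off_target > maximum_buffer_size)
      bits_off_target = maximum_buffer_size;
  }
  return bits_off_target;  /* -40000 after frame 0, -30000, then 0 */
}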
+ rc->bits_off_target = MIN(rc->bits_off_target, oxcf->maximum_buffer_size); + rc->buffer_level = rc->bits_off_target; - // Adjust target frame size for Golden Frames: - if (cpi->frames_till_gf_update_due == 0) { - const int q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] - : cpi->oxcf.fixed_q; - - cpi->refresh_golden_frame = 1; + if (cpi->use_svc && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + update_layer_buffer_level(cpi, encoded_frame_size); + } +} - calc_gf_params(cpi); +int vp9_rc_drop_frame(VP9_COMP *cpi) { + const VP9_CONFIG *oxcf = &cpi->oxcf; + RATE_CONTROL *const rc = &cpi->rc; - // If we are using alternate ref instead of gf then do not apply the boost - // It will instead be applied to the altref update - // Jims modified boost - if (!cpi->source_alt_ref_active) { - if (cpi->oxcf.fixed_q < 0) { - // The spend on the GF is defined in the two pass code - // for two pass encodes - cpi->this_frame_target = cpi->per_frame_bandwidth; + if (!oxcf->drop_frames_water_mark) { + return 0; + } else { + if (rc->buffer_level < 0) { + // Always drop if buffer is below 0. + return 1; + } else { + // If buffer is below drop_mark, for now just drop every other frame + // (starting with the next frame) until it increases back over drop_mark. + int drop_mark = (int)(oxcf->drop_frames_water_mark * + oxcf->optimal_buffer_level / 100); + if ((rc->buffer_level > drop_mark) && + (rc->decimation_factor > 0)) { + --rc->decimation_factor; + } else if (rc->buffer_level <= drop_mark && + rc->decimation_factor == 0) { + rc->decimation_factor = 1; + } + if (rc->decimation_factor > 0) { + if (rc->decimation_count > 0) { + --rc->decimation_count; + return 1; + } else { + rc->decimation_count = rc->decimation_factor; + return 0; + } } else { - cpi->this_frame_target = - (estimate_bits_at_q(1, q, cpi->common.MBs, 1.0) - * cpi->last_boost) / 100; + rc->decimation_count = 0; + return 0; } - } else { - // If there is an active ARF at this location use the minimum - // bits on this frame even if it is a constructed arf. - // The active maximum quantizer insures that an appropriate - // number of bits will be spent if needed for constructed ARFs. 
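/* A sketch of the decimation pattern in vp9_rc_drop_frame() above: with the
 * buffer pinned at or below drop_mark, the factor/count pair settles into
 * dropping every other frame. Illustrative only. */
#include <stdint.h>
static int sim_drop(int *factor, int *count, int64_t buffer_level,
                    int64_t drop_mark) {
  if (buffer_level < 0)
    return 1;                                  /* always drop below zero */
  if (buffer_level > drop_mark && *factor > 0)
    --*factor;                                 /* recovered: back off */
  else if (buffer_level <= drop_mark && *factor == 0)
    *factor = 1;                               /* start decimating */
  if (*factor > 0) {
    if (*count > 0) {
      --*count;
      return 1;                                /* drop this frame */
    }
    *count = *factor;
    return 0;                                  /* encode this frame */
  }
  *count = 0;
  return 0;
}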
- cpi->this_frame_target = 0; } } } +static double get_rate_correction_factor(const VP9_COMP *cpi) { + if (cpi->common.frame_type == KEY_FRAME) { + return cpi->rc.key_frame_rate_correction_factor; + } else { + if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) && + !(cpi->use_svc && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) + return cpi->rc.gf_rate_correction_factor; + else + return cpi->rc.rate_correction_factor; + } +} -void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) { +static void set_rate_correction_factor(VP9_COMP *cpi, double factor) { + if (cpi->common.frame_type == KEY_FRAME) { + cpi->rc.key_frame_rate_correction_factor = factor; + } else { + if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) && + !(cpi->use_svc && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) + cpi->rc.gf_rate_correction_factor = factor; + else + cpi->rc.rate_correction_factor = factor; + } +} + +void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) { const int q = cpi->common.base_qindex; int correction_factor = 100; - double rate_correction_factor; + double rate_correction_factor = get_rate_correction_factor(cpi); double adjustment_limit; int projected_size_based_on_q = 0; // Clear down mmx registers to allow floating point in what follows - vp9_clear_system_state(); // __asm emms; - - if (cpi->common.frame_type == KEY_FRAME) { - rate_correction_factor = cpi->key_frame_rate_correction_factor; - } else { - if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) - rate_correction_factor = cpi->gf_rate_correction_factor; - else - rate_correction_factor = cpi->rate_correction_factor; - } + vp9_clear_system_state(); // Work out how big we would have expected the frame to be at this Q given // the current correction factor. @@ -263,11 +363,10 @@ void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) { projected_size_based_on_q = estimate_bits_at_q(cpi->common.frame_type, q, cpi->common.MBs, rate_correction_factor); - // Work out a size correction factor. if (projected_size_based_on_q > 0) - correction_factor = - (100 * cpi->projected_frame_size) / projected_size_based_on_q; + correction_factor = (100 * cpi->rc.projected_frame_size) / + projected_size_based_on_q; // More heavily damped adjustment used if we have been oscillating either side // of target. 
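/* One damped step of the projected-vs-expected correction used by
 * vp9_rc_update_rate_correction_factors(), as a standalone sketch.
 * adjustment_limit is assumed at 0.25 here, standing in for the
 * damp_var-selected value; clamping is left to the caller. */
static double update_rcf(double rate_correction_factor,
                         int projected_size, int expected_size) {
  const double adjustment_limit = 0.25;  /* assumed damping value */
  double cf = expected_size > 0
      ? (100.0 * projected_size) / expected_size : 100.0;
  if (cf > 102.0)        /* frames larger than predicted: raise the factor */
    cf = 100.0 + (cf - 100.0) * adjustment_limit;
  else if (cf < 99.0)    /* frames smaller than predicted: lower the factor */
    cf = 100.0 - (100.0 - cf) * adjustment_limit;
  else
    return rate_correction_factor;             /* inside the dead zone */
  /* The caller clamps the result to [MIN_BPB_FACTOR, MAX_BPB_FACTOR]. */
  return rate_correction_factor * cf / 100.0;
}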
@@ -284,7 +383,6 @@ void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) { break; } - // if ( (correction_factor > 102) && (Q < cpi->active_worst_quality) ) if (correction_factor > 102) { // We are not already at the worst allowable quality correction_factor = @@ -307,51 +405,31 @@ void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) { rate_correction_factor = MIN_BPB_FACTOR; } - if (cpi->common.frame_type == KEY_FRAME) { - cpi->key_frame_rate_correction_factor = rate_correction_factor; - } else { - if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) - cpi->gf_rate_correction_factor = rate_correction_factor; - else - cpi->rate_correction_factor = rate_correction_factor; - } + set_rate_correction_factor(cpi, rate_correction_factor); } -int vp9_regulate_q(VP9_COMP *cpi, int target_bits_per_frame) { - int q = cpi->active_worst_quality; - - int i; +int vp9_rc_regulate_q(const VP9_COMP *cpi, int target_bits_per_frame, + int active_best_quality, int active_worst_quality) { + const VP9_COMMON *const cm = &cpi->common; + int q = active_worst_quality; int last_error = INT_MAX; - int target_bits_per_mb; - int bits_per_mb_at_this_q; - double correction_factor; - - // Select the appropriate correction factor based upon type of frame. - if (cpi->common.frame_type == KEY_FRAME) { - correction_factor = cpi->key_frame_rate_correction_factor; - } else { - if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) - correction_factor = cpi->gf_rate_correction_factor; - else - correction_factor = cpi->rate_correction_factor; - } + int i, target_bits_per_mb; + const double correction_factor = get_rate_correction_factor(cpi); // Calculate required scaling factor based on target frame size and size of // frame produced using previous Q. 
if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS)) - target_bits_per_mb = - (target_bits_per_frame / cpi->common.MBs) - << BPER_MB_NORMBITS; // Case where we would overflow int + // Case where we would overflow int + target_bits_per_mb = (target_bits_per_frame / cm->MBs) << BPER_MB_NORMBITS; else - target_bits_per_mb = - (target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs; + target_bits_per_mb = (target_bits_per_frame << BPER_MB_NORMBITS) / cm->MBs; - i = cpi->active_best_quality; + i = active_best_quality; do { - bits_per_mb_at_this_q = (int)vp9_bits_per_mb(cpi->common.frame_type, i, - correction_factor); + const int bits_per_mb_at_this_q = (int)vp9_rc_bits_per_mb(cm->frame_type, i, + correction_factor); if (bits_per_mb_at_this_q <= target_bits_per_mb) { if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error) @@ -363,91 +441,638 @@ int vp9_regulate_q(VP9_COMP *cpi, int target_bits_per_frame) { } else { last_error = bits_per_mb_at_this_q - target_bits_per_mb; } - } while (++i <= cpi->active_worst_quality); + } while (++i <= active_worst_quality); return q; } +static int get_active_quality(int q, int gfu_boost, int low, int high, + int *low_motion_minq, int *high_motion_minq) { + if (gfu_boost > high) { + return low_motion_minq[q]; + } else if (gfu_boost < low) { + return high_motion_minq[q]; + } else { + const int gap = high - low; + const int offset = high - gfu_boost; + const int qdiff = high_motion_minq[q] - low_motion_minq[q]; + const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap; + return low_motion_minq[q] + adjustment; + } +} -static int estimate_keyframe_frequency(VP9_COMP *cpi) { - int i; +static int calc_active_worst_quality_one_pass_vbr(const VP9_COMP *cpi) { + int active_worst_quality; + if (cpi->common.frame_type == KEY_FRAME) { + if (cpi->common.current_video_frame == 0) { + active_worst_quality = cpi->rc.worst_quality; + } else { + // Choose active worst quality twice as large as the last q. + active_worst_quality = cpi->rc.last_q[KEY_FRAME] * 2; + } + } else if (!cpi->rc.is_src_frame_alt_ref && + (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { + if (cpi->common.current_video_frame == 1) { + active_worst_quality = cpi->rc.last_q[KEY_FRAME] * 5 / 4; + } else { + // Choose active worst quality twice as large as the last q. + active_worst_quality = cpi->rc.last_q[INTER_FRAME]; + } + } else { + if (cpi->common.current_video_frame == 1) { + active_worst_quality = cpi->rc.last_q[KEY_FRAME] * 2; + } else { + // Choose active worst quality twice as large as the last q. + active_worst_quality = cpi->rc.last_q[INTER_FRAME] * 2; + } + } + if (active_worst_quality > cpi->rc.worst_quality) + active_worst_quality = cpi->rc.worst_quality; + return active_worst_quality; +} - // Average key frame frequency - int av_key_frame_frequency = 0; +// Adjust active_worst_quality level based on buffer level. +static int calc_active_worst_quality_one_pass_cbr(const VP9_COMP *cpi) { + // Adjust active_worst_quality: If buffer is above the optimal/target level, + // bring active_worst_quality down depending on fullness of buffer. + // If buffer is below the optimal level, let the active_worst_quality go from + // ambient Q (at buffer = optimal level) to worst_quality level + // (at buffer = critical level). + const VP9_CONFIG *oxcf = &cpi->oxcf; + const RATE_CONTROL *rc = &cpi->rc; + // Buffer level below which we push active_worst to worst_quality. 
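/* The interpolation in get_active_quality() above, worked with made-up
 * table entries: a boost of 1200 between thresholds 400 and 2000 lands
 * halfway between the two table values. */
static int active_quality_example(void) {
  const int gfu_boost = 1200, low = 400, high = 2000;
  const int low_motion_minq = 40, high_motion_minq = 80;
  const int gap = high - low;                            /* 1600 */
  const int offset = high - gfu_boost;                   /* 800 */
  const int qdiff = high_motion_minq - low_motion_minq;  /* 40 */
  const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;  /* 20 */
  return low_motion_minq + adjustment;                   /* 60 */
}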
+ int64_t critical_level = oxcf->optimal_buffer_level >> 2;
+ int64_t buff_lvl_step = 0;
+ int adjustment = 0;
+ int active_worst_quality;
+ if (cpi->common.frame_type == KEY_FRAME)
+ return rc->worst_quality;
+ if (cpi->common.current_video_frame > 1)
+ active_worst_quality = MIN(rc->worst_quality,
+ rc->avg_frame_qindex[INTER_FRAME] * 5 / 4);
+ else
+ active_worst_quality = MIN(rc->worst_quality,
+ rc->avg_frame_qindex[KEY_FRAME] * 3 / 2);
+ if (rc->buffer_level > oxcf->optimal_buffer_level) {
+ // Adjust down.
+ // Maximum limit for down adjustment, ~30%.
+ int max_adjustment_down = active_worst_quality / 3;
+ if (max_adjustment_down) {
+ buff_lvl_step = ((oxcf->maximum_buffer_size -
+ oxcf->optimal_buffer_level) / max_adjustment_down);
+ if (buff_lvl_step)
+ adjustment = (int)((rc->buffer_level - oxcf->optimal_buffer_level) /
+ buff_lvl_step);
+ active_worst_quality -= adjustment;
+ }
+ } else if (rc->buffer_level > critical_level) {
+ // Adjust up from ambient Q.
+ if (critical_level) {
+ buff_lvl_step = (oxcf->optimal_buffer_level - critical_level);
+ if (buff_lvl_step) {
+ adjustment =
+ (int)((rc->worst_quality - rc->avg_frame_qindex[INTER_FRAME]) *
+ (oxcf->optimal_buffer_level - rc->buffer_level) /
+ buff_lvl_step);
+ }
+ active_worst_quality = rc->avg_frame_qindex[INTER_FRAME] + adjustment;
+ }
+ } else {
+ // Set to worst_quality if buffer is below critical level.
+ active_worst_quality = rc->worst_quality;
+ }
+ return active_worst_quality;
+}
-
- /* First key frame at start of sequence is a special case. We have no
- * frequency data.
- */
- if (cpi->key_frame_count == 1) {
- /* Assume a default of 1 kf every 2 seconds, or the max kf interval,
- * whichever is smaller.
- */
- int key_freq = cpi->oxcf.key_freq > 0 ? cpi->oxcf.key_freq : 1;
- av_key_frame_frequency = (int)cpi->output_framerate * 2;
+static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi,
+ int *bottom_index,
+ int *top_index) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const RATE_CONTROL *const rc = &cpi->rc;
+ int active_best_quality;
+ int active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
+ int q;
+
+ if (frame_is_intra_only(cm)) {
+ active_best_quality = rc->best_quality;
+ // Handle the special case for key frames forced when we have reached
+ // the maximum key frame interval. Here force the Q to a range
+ // based on the ambient Q to reduce the risk of popping.
+ if (rc->this_key_frame_forced) {
+ int qindex = rc->last_boosted_qindex;
+ double last_boosted_q = vp9_convert_qindex_to_q(qindex);
+ int delta_qindex = vp9_compute_qdelta(cpi, last_boosted_q,
+ (last_boosted_q * 0.75));
+ active_best_quality = MAX(qindex + delta_qindex, rc->best_quality);
+ } else if (cm->current_video_frame > 0) {
+ // not first frame of one pass and kf_boost is set
+ double q_adj_factor = 1.0;
+ double q_val;
+
+ active_best_quality = get_active_quality(rc->avg_frame_qindex[KEY_FRAME],
+ rc->kf_boost,
+ kf_low, kf_high,
+ kf_low_motion_minq,
+ kf_high_motion_minq);
+
+ // Allow somewhat lower kf minq with small image formats.
+ if ((cm->width * cm->height) <= (352 * 288)) {
+ q_adj_factor -= 0.25;
+ }
- if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
- av_key_frame_frequency = cpi->oxcf.key_freq;
+ // Convert the adjustment factor to a qindex delta
+ // on active_best_quality.
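/* Hypothetical numbers for the down-adjustment in
 * calc_active_worst_quality_one_pass_cbr() above: optimal buffer level
 * 4000000 bits, maximum 6000000, ambient INTER qindex 60. */
#include <stdint.h>
static int adjust_active_worst(int64_t buffer_level) {
  const int64_t optimal = 4000000, maximum = 6000000;
  int active_worst = 75;                 /* 5/4 of the ambient qindex 60 */
  const int max_adjustment_down = active_worst / 3;            /* 25 */
  if (buffer_level > optimal && max_adjustment_down) {
    const int64_t step = (maximum - optimal) / max_adjustment_down;
    if (step)  /* step == 80000 here */
      active_worst -= (int)((buffer_level - optimal) / step);
  }
  return active_worst;  /* adjust_active_worst(4800000) == 65 */
}
/* A fuller buffer buys a lower (better) worst-case quantizer. */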
+ q_val = vp9_convert_qindex_to_q(active_best_quality); + active_best_quality += vp9_compute_qdelta(cpi, q_val, q_val * + q_adj_factor); + } + } else if (!rc->is_src_frame_alt_ref && + !cpi->use_svc && + (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { + // Use the lower of active_worst_quality and recent + // average Q as basis for GF/ARF best Q limit unless last frame was + // a key frame. + if (rc->frames_since_key > 1 && + rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) { + q = rc->avg_frame_qindex[INTER_FRAME]; + } else { + q = active_worst_quality; + } + active_best_quality = get_active_quality( + q, rc->gfu_boost, gf_low, gf_high, + gf_low_motion_minq, gf_high_motion_minq); + } else { + // Use the lower of active_worst_quality and recent/average Q. + if (cm->current_video_frame > 1) { + if (rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) + active_best_quality = inter_minq[rc->avg_frame_qindex[INTER_FRAME]]; + else + active_best_quality = inter_minq[active_worst_quality]; + } else { + if (rc->avg_frame_qindex[KEY_FRAME] < active_worst_quality) + active_best_quality = inter_minq[rc->avg_frame_qindex[KEY_FRAME]]; + else + active_best_quality = inter_minq[active_worst_quality]; + } + } + + // Clip the active best and worst quality values to limits + active_best_quality = clamp(active_best_quality, + rc->best_quality, rc->worst_quality); + active_worst_quality = clamp(active_worst_quality, + active_best_quality, rc->worst_quality); - cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1] - = av_key_frame_frequency; + *top_index = active_worst_quality; + *bottom_index = active_best_quality; + +#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY + // Limit Q range for the adaptive loop. + if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced) { + if (!(cm->current_video_frame == 0)) + *top_index = (active_worst_quality + active_best_quality * 3) / 4; + } +#endif + // Special case code to try and match quality with forced key frames + if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) { + q = rc->last_boosted_qindex; } else { - unsigned int total_weight = 0; - int last_kf_interval = - (cpi->frames_since_key > 0) ? 
cpi->frames_since_key : 1;
-
- /* reset keyframe context and calculate weighted average of last
- * KEY_FRAME_CONTEXT keyframes
- */
- for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
- if (i < KEY_FRAME_CONTEXT - 1)
- cpi->prior_key_frame_distance[i]
- = cpi->prior_key_frame_distance[i + 1];
+ q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
+ active_best_quality, active_worst_quality);
+ if (q > *top_index) {
+ // Special case when we are targeting the max allowed rate
+ if (cpi->rc.this_frame_target >= cpi->rc.max_frame_bandwidth)
+ *top_index = q;
 else
- cpi->prior_key_frame_distance[i] = last_kf_interval;
+ q = *top_index;
+ }
+ }
+ assert(*top_index <= rc->worst_quality &&
+ *top_index >= rc->best_quality);
+ assert(*bottom_index <= rc->worst_quality &&
+ *bottom_index >= rc->best_quality);
+ assert(q <= rc->worst_quality && q >= rc->best_quality);
+ return q;
+}
+
+static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi,
+ int *bottom_index,
+ int *top_index) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const RATE_CONTROL *const rc = &cpi->rc;
+ const VP9_CONFIG *const oxcf = &cpi->oxcf;
+ int active_best_quality;
+ int active_worst_quality = calc_active_worst_quality_one_pass_vbr(cpi);
+ int q;
+
+ if (frame_is_intra_only(cm)) {
+ active_best_quality = rc->best_quality;
+#if !CONFIG_MULTIPLE_ARF
+ // Handle the special case for key frames forced when we have reached
+ // the maximum key frame interval. Here force the Q to a range
+ // based on the ambient Q to reduce the risk of popping.
+ if (rc->this_key_frame_forced) {
+ int qindex = rc->last_boosted_qindex;
+ double last_boosted_q = vp9_convert_qindex_to_q(qindex);
+ int delta_qindex = vp9_compute_qdelta(cpi, last_boosted_q,
+ (last_boosted_q * 0.75));
+ active_best_quality = MAX(qindex + delta_qindex, rc->best_quality);
+ } else if (cm->current_video_frame > 0) {
+ // not first frame of one pass and kf_boost is set
+ double q_adj_factor = 1.0;
+ double q_val;
+
+ active_best_quality = get_active_quality(rc->avg_frame_qindex[KEY_FRAME],
+ rc->kf_boost,
+ kf_low, kf_high,
+ kf_low_motion_minq,
+ kf_high_motion_minq);
+
+ // Allow somewhat lower kf minq with small image formats.
+ if ((cm->width * cm->height) <= (352 * 288)) {
+ q_adj_factor -= 0.25;
+ }
+
+ // Convert the adjustment factor to a qindex delta
+ // on active_best_quality.
+ q_val = vp9_convert_qindex_to_q(active_best_quality);
+ active_best_quality += vp9_compute_qdelta(cpi, q_val, q_val *
+ q_adj_factor);
+ }
+#else
+ double current_q;
+ // Force the KF quantizer to be 30% of the active_worst_quality.
+ current_q = vp9_convert_qindex_to_q(active_worst_quality);
+ active_best_quality = active_worst_quality
+ + vp9_compute_qdelta(cpi, current_q, current_q * 0.3);
+#endif
+ } else if (!rc->is_src_frame_alt_ref &&
+ (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+ // Use the lower of active_worst_quality and recent
+ // average Q as basis for GF/ARF best Q limit unless last frame was
+ // a key frame.
+ if (rc->frames_since_key > 1 &&
+ rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) {
+ q = rc->avg_frame_qindex[INTER_FRAME];
+ } else {
+ q = rc->avg_frame_qindex[KEY_FRAME];
+ }
+ // For constrained quality don't allow Q less than the cq level
+ if (oxcf->end_usage == USAGE_CONSTRAINED_QUALITY) {
+ if (q < cpi->cq_target_quality)
+ q = cpi->cq_target_quality;
+ if (rc->frames_since_key > 1) {
+ active_best_quality = get_active_quality(q, rc->gfu_boost,
+ gf_low, gf_high,
+ afq_low_motion_minq,
+ afq_high_motion_minq);
+ } else {
+ active_best_quality = get_active_quality(q, rc->gfu_boost,
+ gf_low, gf_high,
+ gf_low_motion_minq,
+ gf_high_motion_minq);
+ }
+ // Constrained quality uses a slightly lower active best.
+ active_best_quality = active_best_quality * 15 / 16;
- av_key_frame_frequency += prior_key_frame_weight[i]
- * cpi->prior_key_frame_distance[i];
- total_weight += prior_key_frame_weight[i];
+ } else if (oxcf->end_usage == USAGE_CONSTANT_QUALITY) {
+ if (!cpi->refresh_alt_ref_frame) {
+ active_best_quality = cpi->cq_target_quality;
+ } else {
+ if (rc->frames_since_key > 1) {
+ active_best_quality = get_active_quality(
+ q, rc->gfu_boost, gf_low, gf_high,
+ afq_low_motion_minq, afq_high_motion_minq);
+ } else {
+ active_best_quality = get_active_quality(
+ q, rc->gfu_boost, gf_low, gf_high,
+ gf_low_motion_minq, gf_high_motion_minq);
+ }
+ }
+ } else {
+ active_best_quality = get_active_quality(
+ q, rc->gfu_boost, gf_low, gf_high,
+ gf_low_motion_minq, gf_high_motion_minq);
+ }
+ } else {
+ if (oxcf->end_usage == USAGE_CONSTANT_QUALITY) {
+ active_best_quality = cpi->cq_target_quality;
+ } else {
+ // Use the lower of active_worst_quality and recent/average Q.
+ if (cm->current_video_frame > 1)
+ active_best_quality = inter_minq[rc->avg_frame_qindex[INTER_FRAME]];
+ else
+ active_best_quality = inter_minq[rc->avg_frame_qindex[KEY_FRAME]];
+ // For the constrained quality mode we don't want
+ // q to fall below the cq level.
+ if ((oxcf->end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (active_best_quality < cpi->cq_target_quality)) {
+ // If we are strongly undershooting the target rate in the last
+ // frames then use the user passed in cq value not the auto
+ // cq value.
+ if (rc->rolling_actual_bits < rc->min_frame_bandwidth)
+ active_best_quality = oxcf->cq_level;
+ else
+ active_best_quality = cpi->cq_target_quality;
+ }
 }
+ }
- av_key_frame_frequency /= total_weight;
+ // Clip the active best and worst quality values to limits
+ active_best_quality = clamp(active_best_quality,
+ rc->best_quality, rc->worst_quality);
+ active_worst_quality = clamp(active_worst_quality,
+ active_best_quality, rc->worst_quality);
+
+ *top_index = active_worst_quality;
+ *bottom_index = active_best_quality;
+
+#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
+ // Limit Q range for the adaptive loop.
+ if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced) {
+ if (!(cm->current_video_frame == 0))
+ *top_index = (active_worst_quality + active_best_quality * 3) / 4;
+ } else if (!rc->is_src_frame_alt_ref &&
+ (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+ *top_index = (active_worst_quality + active_best_quality) / 2;
+ }
+#endif
+ if (oxcf->end_usage == USAGE_CONSTANT_QUALITY) {
+ q = active_best_quality;
+ // Special case code to try and match quality with forced key frames
+ } else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) {
+ q = rc->last_boosted_qindex;
+ } else {
+ q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
+ active_best_quality, active_worst_quality);
+ if (q > *top_index) {
+ // Special case when we are targeting the max allowed rate
+ if (cpi->rc.this_frame_target >= cpi->rc.max_frame_bandwidth)
+ *top_index = q;
+ else
+ q = *top_index;
+ }
+ }
+#if CONFIG_MULTIPLE_ARF
+ // Force the quantizer determined by the coding order pattern.
+ if (cpi->multi_arf_enabled && (cm->frame_type != KEY_FRAME) &&
+ cpi->oxcf.end_usage != USAGE_CONSTANT_QUALITY) {
+ double new_q;
+ double current_q = vp9_convert_qindex_to_q(active_worst_quality);
+ int level = cpi->this_frame_weight;
+ assert(level >= 0);
+ new_q = current_q * (1.0 - (0.2 * (cpi->max_arf_level - level)));
+ q = active_worst_quality +
+ vp9_compute_qdelta(cpi, current_q, new_q);
+
+ *bottom_index = q;
+ *top_index = q;
+ printf("frame:%d q:%d\n", cm->current_video_frame, q);
+ }
+#endif
+ assert(*top_index <= rc->worst_quality &&
+ *top_index >= rc->best_quality);
+ assert(*bottom_index <= rc->worst_quality &&
+ *bottom_index >= rc->best_quality);
+ assert(q <= rc->worst_quality && q >= rc->best_quality);
+ return q;
 }
+static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi,
+ int *bottom_index,
+ int *top_index) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const RATE_CONTROL *const rc = &cpi->rc;
+ const VP9_CONFIG *const oxcf = &cpi->oxcf;
+ int active_best_quality;
+ int active_worst_quality = cpi->twopass.active_worst_quality;
+ int q;
+
+ if (frame_is_intra_only(cm)) {
+#if !CONFIG_MULTIPLE_ARF
+ // Handle the special case for key frames forced when we have reached
+ // the maximum key frame interval. Here force the Q to a range
+ // based on the ambient Q to reduce the risk of popping.
+ if (rc->this_key_frame_forced) {
+ int qindex = rc->last_boosted_qindex;
+ double last_boosted_q = vp9_convert_qindex_to_q(qindex);
+ int delta_qindex = vp9_compute_qdelta(cpi, last_boosted_q,
+ (last_boosted_q * 0.75));
+ active_best_quality = MAX(qindex + delta_qindex, rc->best_quality);
+ } else {
+ // Not forced keyframe.
+ double q_adj_factor = 1.0;
+ double q_val;
+ // Baseline value derived from cpi->active_worst_quality and kf boost.
+ active_best_quality = get_active_quality(active_worst_quality,
+ rc->kf_boost,
+ kf_low, kf_high,
+ kf_low_motion_minq,
+ kf_high_motion_minq);
+
+ // Allow somewhat lower kf minq with small image formats.
+ if ((cm->width * cm->height) <= (352 * 288)) {
+ q_adj_factor -= 0.25;
+ }
-
-void vp9_adjust_key_frame_context(VP9_COMP *cpi) {
- // Clear down mmx registers to allow floating point in what follows
- vp9_clear_system_state();
+ // Make a further adjustment based on the kf zero motion measure.
+ q_adj_factor += 0.05 - (0.001 * (double)cpi->twopass.kf_zeromotion_pct);
+
+ // Convert the adjustment factor to a qindex delta
+ // on active_best_quality.
+ q_val = vp9_convert_qindex_to_q(active_best_quality);
+ active_best_quality += vp9_compute_qdelta(cpi, q_val, q_val *
+ q_adj_factor);
+ }
+#else
+ double current_q;
+ // Force the KF quantizer to be 30% of the active_worst_quality.
+ current_q = vp9_convert_qindex_to_q(active_worst_quality);
+ active_best_quality = active_worst_quality
+ + vp9_compute_qdelta(cpi, current_q, current_q * 0.3);
+#endif
+ } else if (!rc->is_src_frame_alt_ref &&
+ (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+ // Use the lower of active_worst_quality and recent
+ // average Q as basis for GF/ARF best Q limit unless last frame was
+ // a key frame.
+ if (rc->frames_since_key > 1 &&
+ rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) {
+ q = rc->avg_frame_qindex[INTER_FRAME];
+ } else {
+ q = active_worst_quality;
+ }
+ // For constrained quality don't allow Q less than the cq level
+ if (oxcf->end_usage == USAGE_CONSTRAINED_QUALITY) {
+ if (q < cpi->cq_target_quality)
+ q = cpi->cq_target_quality;
+ if (rc->frames_since_key > 1) {
+ active_best_quality = get_active_quality(q, rc->gfu_boost,
+ gf_low, gf_high,
+ afq_low_motion_minq,
+ afq_high_motion_minq);
+ } else {
+ active_best_quality = get_active_quality(q, rc->gfu_boost,
+ gf_low, gf_high,
+ gf_low_motion_minq,
+ gf_high_motion_minq);
+ }
+ // Constrained quality uses a slightly lower active best.
+ active_best_quality = active_best_quality * 15 / 16;
+
+ } else if (oxcf->end_usage == USAGE_CONSTANT_QUALITY) {
+ if (!cpi->refresh_alt_ref_frame) {
+ active_best_quality = cpi->cq_target_quality;
+ } else {
+ if (rc->frames_since_key > 1) {
+ active_best_quality = get_active_quality(
+ q, rc->gfu_boost, gf_low, gf_high,
+ afq_low_motion_minq, afq_high_motion_minq);
+ } else {
+ active_best_quality = get_active_quality(
+ q, rc->gfu_boost, gf_low, gf_high,
+ gf_low_motion_minq, gf_high_motion_minq);
+ }
+ }
+ } else {
+ active_best_quality = get_active_quality(
+ q, rc->gfu_boost, gf_low, gf_high,
+ gf_low_motion_minq, gf_high_motion_minq);
+ }
+ } else {
+ if (oxcf->end_usage == USAGE_CONSTANT_QUALITY) {
+ active_best_quality = cpi->cq_target_quality;
+ } else {
+ active_best_quality = inter_minq[active_worst_quality];
+
+ // For the constrained quality mode we don't want
+ // q to fall below the cq level.
+ if ((oxcf->end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (active_best_quality < cpi->cq_target_quality)) {
+ // If we are strongly undershooting the target rate in the last
+ // frames then use the user passed in cq value not the auto
+ // cq value.
+ if (rc->rolling_actual_bits < rc->min_frame_bandwidth)
+ active_best_quality = oxcf->cq_level;
+ else
+ active_best_quality = cpi->cq_target_quality;
+ }
+ }
+ }
+
+ // Clip the active best and worst quality values to limits.
+ active_best_quality = clamp(active_best_quality,
+ rc->best_quality, rc->worst_quality);
+ active_worst_quality = clamp(active_worst_quality,
+ active_best_quality, rc->worst_quality);
+
+ *top_index = active_worst_quality;
+ *bottom_index = active_best_quality;
+
+#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
+ // Limit Q range for the adaptive loop.
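/* The adaptive-loop clamp above (repeated for the two-pass path below) is a
 * weighted mean of the two bounds: key frames weight the best quality 3:1,
 * GF/ARF frames 1:1. Illustrative helper, not libvpx code. */
static int narrowed_top_index(int worst, int best, int is_key_frame) {
  return is_key_frame ? (worst + best * 3) / 4 : (worst + best) / 2;
}
/* narrowed_top_index(63, 15, 1) == 27; narrowed_top_index(63, 15, 0) == 39. */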
+ if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced) { + *top_index = (active_worst_quality + active_best_quality * 3) / 4; + } else if (!rc->is_src_frame_alt_ref && + (oxcf->end_usage != USAGE_STREAM_FROM_SERVER) && + (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { + *top_index = (active_worst_quality + active_best_quality) / 2; + } +#endif - cpi->frames_since_key = 0; - cpi->key_frame_count++; + if (oxcf->end_usage == USAGE_CONSTANT_QUALITY) { + q = active_best_quality; + // Special case code to try and match quality with forced key frames. + } else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) { + q = rc->last_boosted_qindex; + } else { + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, + active_best_quality, active_worst_quality); + if (q > *top_index) { + // Special case when we are targeting the max allowed rate. + if (cpi->rc.this_frame_target >= cpi->rc.max_frame_bandwidth) + *top_index = q; + else + q = *top_index; + } + } +#if CONFIG_MULTIPLE_ARF + // Force the quantizer determined by the coding order pattern. + if (cpi->multi_arf_enabled && (cm->frame_type != KEY_FRAME) && + cpi->oxcf.end_usage != USAGE_CONSTANT_QUALITY) { + double new_q; + double current_q = vp9_convert_qindex_to_q(active_worst_quality); + int level = cpi->this_frame_weight; + assert(level >= 0); + new_q = current_q * (1.0 - (0.2 * (cpi->max_arf_level - level))); + q = active_worst_quality + + vp9_compute_qdelta(cpi, current_q, new_q); + + *bottom_index = q; + *top_index = q; + printf("frame:%d q:%d\n", cm->current_video_frame, q); + } +#endif + assert(*top_index <= rc->worst_quality && + *top_index >= rc->best_quality); + assert(*bottom_index <= rc->worst_quality && + *bottom_index >= rc->best_quality); + assert(q <= rc->worst_quality && q >= rc->best_quality); + return q; } +int vp9_rc_pick_q_and_bounds(const VP9_COMP *cpi, + int *bottom_index, + int *top_index) { + int q; + if (cpi->pass == 0) { + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) + q = rc_pick_q_and_bounds_one_pass_cbr(cpi, bottom_index, top_index); + else + q = rc_pick_q_and_bounds_one_pass_vbr(cpi, bottom_index, top_index); + } else { + q = rc_pick_q_and_bounds_two_pass(cpi, bottom_index, top_index); + } + + // JBB : This is realtime mode. In real time mode the first frame + // should be larger. Q of 0 is disabled because we force tx size to be + // 16x16... + if (cpi->sf.use_nonrd_pick_mode) { + if (cpi->common.current_video_frame == 0) + q /= 3; + if (q == 0) + q++; + if (q < *bottom_index) + *bottom_index = q; + else if (q > *top_index) + *top_index = q; + } + return q; +} -void vp9_compute_frame_size_bounds(VP9_COMP *cpi, int *frame_under_shoot_limit, - int *frame_over_shoot_limit) { +void vp9_rc_compute_frame_size_bounds(const VP9_COMP *cpi, + int this_frame_target, + int *frame_under_shoot_limit, + int *frame_over_shoot_limit) { // Set-up bounds on acceptable frame size: - if (cpi->oxcf.fixed_q >= 0) { - // Fixed Q scenario: frame size never outranges target (there is no target!) 
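/* A sketch of the inter-frame bounds below for the constrained-quality
 * case: overshoot 11/8 of the target, undershoot 2/8, with the undershoot
 * widened by 200 bits and floored at zero, and the overshoot capped at the
 * per-frame maximum. Illustrative only. */
static void inter_bounds_cq(int target, int max_frame_bandwidth,
                            int *under, int *over) {
  *over = target * 11 / 8;
  *under = target * 2 / 8 - 200;
  if (*under < 0)
    *under = 0;
  if (*over > max_frame_bandwidth)
    *over = max_frame_bandwidth;
}
/* inter_bounds_cq(100000, 200000, &u, &o) gives u == 24800, o == 137500. */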
+ if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
 *frame_under_shoot_limit = 0;
 *frame_over_shoot_limit = INT_MAX;
 } else {
 if (cpi->common.frame_type == KEY_FRAME) {
- *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
- *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
+ *frame_over_shoot_limit = this_frame_target * 9 / 8;
+ *frame_under_shoot_limit = this_frame_target * 7 / 8;
 } else {
 if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) {
- *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
- *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
+ *frame_over_shoot_limit = this_frame_target * 9 / 8;
+ *frame_under_shoot_limit = this_frame_target * 7 / 8;
 } else {
 // Strong overshoot limit for constrained quality
 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
- *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
- *frame_under_shoot_limit = cpi->this_frame_target * 2 / 8;
+ *frame_over_shoot_limit = this_frame_target * 11 / 8;
+ *frame_under_shoot_limit = this_frame_target * 2 / 8;
 } else {
- *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
- *frame_under_shoot_limit = cpi->this_frame_target * 5 / 8;
+ *frame_over_shoot_limit = this_frame_target * 11 / 8;
+ *frame_under_shoot_limit = this_frame_target * 5 / 8;
 }
 }
 }
@@ -459,18 +1084,316 @@ void vp9_compute_frame_size_bounds(VP9_COMP *cpi, int *frame_under_shoot_limit,
 *frame_under_shoot_limit -= 200;
 if (*frame_under_shoot_limit < 0)
 *frame_under_shoot_limit = 0;
+
+ // Clip to maximum allowed rate for a frame.
+ if (*frame_over_shoot_limit > cpi->rc.max_frame_bandwidth) {
+ *frame_over_shoot_limit = cpi->rc.max_frame_bandwidth;
+ }
 }
 }
+void vp9_rc_set_frame_target(VP9_COMP *cpi, int target) {
+ const VP9_COMMON *const cm = &cpi->common;
+ RATE_CONTROL *const rc = &cpi->rc;
-// return of 0 means drop frame
-int vp9_pick_frame_size(VP9_COMP *cpi) {
- VP9_COMMON *cm = &cpi->common;
+ rc->this_frame_target = target;
+ // Target rate per SB64 (including partial SB64s).
+ rc->sb64_target_rate = ((int64_t)rc->this_frame_target * 64 * 64) /
+ (cm->width * cm->height);
+}
+
+static void update_alt_ref_frame_stats(VP9_COMP *cpi) {
+ // this frame refreshes means next frames don't unless specified by user
+ cpi->rc.frames_since_golden = 0;
+
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled)
+#endif
+ // Clear the alternate reference update pending flag.
+ cpi->rc.source_alt_ref_pending = 0;
+
+ // Set the alternate reference frame active flag
+ cpi->rc.source_alt_ref_active = 1;
+}
+
+static void update_golden_frame_stats(VP9_COMP *cpi) {
+ RATE_CONTROL *const rc = &cpi->rc;
+
+ // Update the Golden frame usage counts.
+ if (cpi->refresh_golden_frame) {
+ // this frame refreshes means next frames don't unless specified by user
+ rc->frames_since_golden = 0;
+
+ if (!rc->source_alt_ref_pending)
+ rc->source_alt_ref_active = 0;
+
+ // Decrement count down till next gf
+ if (rc->frames_till_gf_update_due > 0)
+ rc->frames_till_gf_update_due--;
+
+ } else if (!cpi->refresh_alt_ref_frame) {
+ // Decrement count down till next gf
+ if (rc->frames_till_gf_update_due > 0)
+ rc->frames_till_gf_update_due--;
+
+ rc->frames_since_golden++;
+ }
+}
+
+void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) {
+ VP9_COMMON *const cm = &cpi->common;
+ RATE_CONTROL *const rc = &cpi->rc;
+
+ cm->last_frame_type = cm->frame_type;
+ // Update rate control heuristics
+ rc->projected_frame_size = (int)(bytes_used << 3);
+
+ // Post encode loop adjustment of Q prediction.
+ vp9_rc_update_rate_correction_factors(
+ cpi, (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF ||
+ cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) ? 2 : 0);
+
+ // Keep a record of last Q and ambient average Q.
+ if (cm->frame_type == KEY_FRAME) {
+ rc->last_q[KEY_FRAME] = cm->base_qindex;
+ rc->avg_frame_qindex[KEY_FRAME] = ROUND_POWER_OF_TWO(
+ 3 * rc->avg_frame_qindex[KEY_FRAME] + cm->base_qindex, 2);
+ } else if (!rc->is_src_frame_alt_ref &&
+ (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) &&
+ !(cpi->use_svc && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
+ rc->last_q[2] = cm->base_qindex;
+ rc->avg_frame_qindex[2] = ROUND_POWER_OF_TWO(
+ 3 * rc->avg_frame_qindex[2] + cm->base_qindex, 2);
+ } else {
+ rc->last_q[INTER_FRAME] = cm->base_qindex;
+ rc->avg_frame_qindex[INTER_FRAME] = ROUND_POWER_OF_TWO(
+ 3 * rc->avg_frame_qindex[INTER_FRAME] + cm->base_qindex, 2);
+ rc->ni_frames++;
+ rc->tot_q += vp9_convert_qindex_to_q(cm->base_qindex);
+ rc->avg_q = rc->tot_q / (double)rc->ni_frames;
+
+ // Calculate the average Q for normal inter frames (not key or GFU frames).
+ rc->ni_tot_qi += cm->base_qindex;
+ rc->ni_av_qi = rc->ni_tot_qi / rc->ni_frames;
+ }
+
+ // Keep record of last boosted (KF/GF/ARF) Q value.
+ // If the current frame is coded at a lower Q then we also update it.
+ // If all mbs in this group are skipped only update if the Q value is
+ // better than that already stored.
+ // This is used to help set quality in forced key frames to reduce popping
+ if ((cm->base_qindex < rc->last_boosted_qindex) ||
+ ((cpi->static_mb_pct < 100) &&
+ ((cm->frame_type == KEY_FRAME) || cpi->refresh_alt_ref_frame ||
+ (cpi->refresh_golden_frame && !rc->is_src_frame_alt_ref)))) {
+ rc->last_boosted_qindex = cm->base_qindex;
+ }
+
+ update_buffer_level(cpi, rc->projected_frame_size);
+
+ // Rolling monitors of whether we are over or underspending used to help
+ // regulate min and max Q in two pass.
+ if (cm->frame_type != KEY_FRAME) {
+ rc->rolling_target_bits = ROUND_POWER_OF_TWO(
+ rc->rolling_target_bits * 3 + rc->this_frame_target, 2);
+ rc->rolling_actual_bits = ROUND_POWER_OF_TWO(
+ rc->rolling_actual_bits * 3 + rc->projected_frame_size, 2);
+ rc->long_rolling_target_bits = ROUND_POWER_OF_TWO(
+ rc->long_rolling_target_bits * 31 + rc->this_frame_target, 5);
+ rc->long_rolling_actual_bits = ROUND_POWER_OF_TWO(
+ rc->long_rolling_actual_bits * 31 + rc->projected_frame_size, 5);
+ }
+
+ // Actual bits spent
+ rc->total_actual_bits += rc->projected_frame_size;
+
+ // Debug stats
+ rc->total_target_vs_actual += (rc->this_frame_target -
+ rc->projected_frame_size);
+
+ if (cpi->oxcf.play_alternate && cpi->refresh_alt_ref_frame &&
+ (cm->frame_type != KEY_FRAME))
+ // Update the alternate reference frame stats as appropriate.
+ update_alt_ref_frame_stats(cpi);
+ else
+ // Update the Golden frame stats as appropriate.
+ update_golden_frame_stats(cpi);
 if (cm->frame_type == KEY_FRAME)
- calc_iframe_target_size(cpi);
+ rc->frames_since_key = 0;
+ if (cm->show_frame) {
+ rc->frames_since_key++;
+ rc->frames_to_key--;
+ }
+}
+
+void vp9_rc_postencode_update_drop_frame(VP9_COMP *cpi) {
+ // Update buffer level with zero size, update frame counters, and return.
+ update_buffer_level(cpi, 0);
+ cpi->common.last_frame_type = cpi->common.frame_type;
+ cpi->rc.frames_since_key++;
+ cpi->rc.frames_to_key--;
+}
+
+static int test_for_kf_one_pass(VP9_COMP *cpi) {
+ // Placeholder function for auto key frame
+ return 0;
+}
+// Use this macro to turn on/off use of alt-refs in one-pass mode.
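/* The ambient-Q trackers in vp9_rc_postencode_update() above are 3/4-weight
 * moving averages built on the ROUND_POWER_OF_TWO() macro from the common
 * code; a standalone check: */
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
static int avg_qindex_update(int avg, int base_qindex) {
  return ROUND_POWER_OF_TWO(3 * avg + base_qindex, 2);
}
/* avg_qindex_update(60, 80) == (180 + 80 + 2) >> 2 == 65: each frame moves
 * the average a quarter of the way toward the new base qindex. */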
+#define USE_ALTREF_FOR_ONE_PASS 1 + +static int calc_pframe_target_size_one_pass_vbr(const VP9_COMP *const cpi) { + static const int af_ratio = 10; + const RATE_CONTROL *rc = &cpi->rc; + int target; +#if USE_ALTREF_FOR_ONE_PASS + target = (!rc->is_src_frame_alt_ref && + (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) ? + (rc->av_per_frame_bandwidth * cpi->rc.baseline_gf_interval * af_ratio) / + (cpi->rc.baseline_gf_interval + af_ratio - 1) : + (rc->av_per_frame_bandwidth * cpi->rc.baseline_gf_interval) / + (cpi->rc.baseline_gf_interval + af_ratio - 1); +#else + target = rc->av_per_frame_bandwidth; +#endif + return vp9_rc_clamp_pframe_target_size(cpi, target); +} + +static int calc_iframe_target_size_one_pass_vbr(const VP9_COMP *const cpi) { + static const int kf_ratio = 25; + const RATE_CONTROL *rc = &cpi->rc; + int target = rc->av_per_frame_bandwidth * kf_ratio; + return vp9_rc_clamp_iframe_target_size(cpi, target); +} + +void vp9_rc_get_one_pass_vbr_params(VP9_COMP *cpi) { + VP9_COMMON *const cm = &cpi->common; + RATE_CONTROL *const rc = &cpi->rc; + int target; + if (!cpi->refresh_alt_ref_frame && + (cm->current_video_frame == 0 || + cm->frame_flags & FRAMEFLAGS_KEY || + rc->frames_to_key == 0 || + (cpi->oxcf.auto_key && test_for_kf_one_pass(cpi)))) { + cm->frame_type = KEY_FRAME; + rc->this_key_frame_forced = cm->current_video_frame != 0 && + rc->frames_to_key == 0; + rc->frames_to_key = cpi->key_frame_frequency; + rc->kf_boost = DEFAULT_KF_BOOST; + rc->source_alt_ref_active = 0; + } else { + cm->frame_type = INTER_FRAME; + } + if (rc->frames_till_gf_update_due == 0) { + rc->baseline_gf_interval = DEFAULT_GF_INTERVAL; + rc->frames_till_gf_update_due = rc->baseline_gf_interval; + // NOTE: frames_till_gf_update_due must be <= frames_to_key. + if (rc->frames_till_gf_update_due > rc->frames_to_key) + rc->frames_till_gf_update_due = rc->frames_to_key; + cpi->refresh_golden_frame = 1; + rc->source_alt_ref_pending = USE_ALTREF_FOR_ONE_PASS; + rc->gfu_boost = DEFAULT_GF_BOOST; + } + if (cm->frame_type == KEY_FRAME) + target = calc_iframe_target_size_one_pass_vbr(cpi); else - calc_pframe_target_size(cpi); + target = calc_pframe_target_size_one_pass_vbr(cpi); + vp9_rc_set_frame_target(cpi, target); +} + +static int calc_pframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { + const VP9_CONFIG *oxcf = &cpi->oxcf; + const RATE_CONTROL *rc = &cpi->rc; + const int64_t diff = oxcf->optimal_buffer_level - rc->buffer_level; + const int64_t one_pct_bits = 1 + oxcf->optimal_buffer_level / 100; + int min_frame_target = MAX(rc->av_per_frame_bandwidth >> 4, + FRAME_OVERHEAD_BITS); + int target = rc->av_per_frame_bandwidth; + if (cpi->svc.number_temporal_layers > 1 && + cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + // Note that for layers, av_per_frame_bandwidth is the cumulative + // per-frame-bandwidth. For the target size of this frame, use the + // layer average frame size (i.e., non-cumulative per-frame-bw). + int current_temporal_layer = cpi->svc.temporal_layer_id; + const LAYER_CONTEXT *lc = &cpi->svc.layer_context[current_temporal_layer]; + target = lc->avg_frame_size; + min_frame_target = MAX(lc->avg_frame_size >> 4, FRAME_OVERHEAD_BITS); + } + if (diff > 0) { + // Lower the target bandwidth for this frame. + const int pct_low = (int)MIN(diff / one_pct_bits, oxcf->under_shoot_pct); + target -= (target * pct_low) / 200; + } else if (diff < 0) { + // Increase the target bandwidth for this frame. 
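/* Worked numbers for the CBR buffer adjustment above: optimal buffer level
 * 5000000 bits, current level 3000000, under/over-shoot pct 50. Values are
 * hypothetical; the helper mirrors the logic shown. */
#include <stdint.h>
static int cbr_pframe_target(int target, int64_t buffer_level,
                             int64_t optimal, int under_pct, int over_pct) {
  const int64_t diff = optimal - buffer_level;
  const int64_t one_pct_bits = 1 + optimal / 100;
  if (diff > 0) {
    int pct_low = (int)(diff / one_pct_bits);       /* 39 here */
    if (pct_low > under_pct)
      pct_low = under_pct;
    target -= (target * pct_low) / 200;             /* halved effect */
  } else if (diff < 0) {
    int pct_high = (int)(-diff / one_pct_bits);
    if (pct_high > over_pct)
      pct_high = over_pct;
    target += (target * pct_high) / 200;
  }
  return target;
}
/* cbr_pframe_target(100000, 3000000, 5000000, 50, 50) == 80500: a drained
 * buffer pulls the per-frame target down by about 20%. */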
+ const int pct_high = (int)MIN(-diff / one_pct_bits, oxcf->over_shoot_pct); + target += (target * pct_high) / 200; + } + return MAX(min_frame_target, target); +} - return 1; +static int calc_iframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { + const RATE_CONTROL *rc = &cpi->rc; + int target; + + if (cpi->common.current_video_frame == 0) { + target = ((cpi->oxcf.starting_buffer_level / 2) > INT_MAX) + ? INT_MAX : (int)(cpi->oxcf.starting_buffer_level / 2); + } else { + const int initial_boost = 32; + int kf_boost = MAX(initial_boost, (int)(2 * cpi->output_framerate - 16)); + if (rc->frames_since_key < cpi->output_framerate / 2) { + kf_boost = (int)(kf_boost * rc->frames_since_key / + (cpi->output_framerate / 2)); + } + target = ((16 + kf_boost) * rc->av_per_frame_bandwidth) >> 4; + } + return vp9_rc_clamp_iframe_target_size(cpi, target); +} + +void vp9_rc_get_svc_params(VP9_COMP *cpi) { + VP9_COMMON *const cm = &cpi->common; + int target = cpi->rc.av_per_frame_bandwidth; + if ((cm->current_video_frame == 0) || + (cm->frame_flags & FRAMEFLAGS_KEY) || + (cpi->oxcf.auto_key && (cpi->rc.frames_since_key % + cpi->key_frame_frequency == 0))) { + cm->frame_type = KEY_FRAME; + cpi->rc.source_alt_ref_active = 0; + if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + target = calc_iframe_target_size_one_pass_cbr(cpi); + } + } else { + cm->frame_type = INTER_FRAME; + if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + target = calc_pframe_target_size_one_pass_cbr(cpi); + } + } + vp9_rc_set_frame_target(cpi, target); + cpi->rc.frames_till_gf_update_due = INT_MAX; + cpi->rc.baseline_gf_interval = INT_MAX; +} + +void vp9_rc_get_one_pass_cbr_params(VP9_COMP *cpi) { + VP9_COMMON *const cm = &cpi->common; + RATE_CONTROL *const rc = &cpi->rc; + int target; + if ((cm->current_video_frame == 0 || + cm->frame_flags & FRAMEFLAGS_KEY || + rc->frames_to_key == 0 || + (cpi->oxcf.auto_key && test_for_kf_one_pass(cpi)))) { + cm->frame_type = KEY_FRAME; + rc->this_key_frame_forced = cm->current_video_frame != 0 && + rc->frames_to_key == 0; + rc->frames_to_key = cpi->key_frame_frequency; + rc->kf_boost = DEFAULT_KF_BOOST; + rc->source_alt_ref_active = 0; + target = calc_iframe_target_size_one_pass_cbr(cpi); + } else { + cm->frame_type = INTER_FRAME; + target = calc_pframe_target_size_one_pass_cbr(cpi); + } + vp9_rc_set_frame_target(cpi, target); + // Don't use gf_update by default in CBR mode. 
+ rc->frames_till_gf_update_due = INT_MAX;
+ rc->baseline_gf_interval = INT_MAX;
 }
diff --git a/libvpx/vp9/encoder/vp9_ratectrl.h b/libvpx/vp9/encoder/vp9_ratectrl.h
index ddda713..a6f2c31 100644
--- a/libvpx/vp9/encoder/vp9_ratectrl.h
+++ b/libvpx/vp9/encoder/vp9_ratectrl.h
@@ -12,28 +12,158 @@
 #ifndef VP9_ENCODER_VP9_RATECTRL_H_
 #define VP9_ENCODER_VP9_RATECTRL_H_
-#include "vp9/encoder/vp9_onyx_int.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
 #define FRAME_OVERHEAD_BITS 200
-void vp9_save_coding_context(VP9_COMP *cpi);
-void vp9_restore_coding_context(VP9_COMP *cpi);
+typedef struct {
+ // Rate targeting variables
+ int this_frame_target;
+ int projected_frame_size;
+ int sb64_target_rate;
+ int last_q[3]; // Separate values for Intra/Inter/ARF-GF
+ int last_boosted_qindex; // Last boosted GF/KF/ARF q
-void vp9_setup_key_frame(VP9_COMP *cpi);
-void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var);
-int vp9_regulate_q(VP9_COMP *cpi, int target_bits_per_frame);
-void vp9_adjust_key_frame_context(VP9_COMP *cpi);
-void vp9_compute_frame_size_bounds(VP9_COMP *cpi,
- int *frame_under_shoot_limit,
- int *frame_over_shoot_limit);
+ int gfu_boost;
+ int last_boost;
+ int kf_boost;
-// return of 0 means drop frame
-int vp9_pick_frame_size(VP9_COMP *cpi);
+ double rate_correction_factor;
+ double key_frame_rate_correction_factor;
+ double gf_rate_correction_factor;
+
+ int frames_since_golden;
+ int frames_till_gf_update_due;
+ int max_gf_interval;
+ int static_scene_max_gf_interval;
+ int baseline_gf_interval;
+ int frames_to_key;
+ int frames_since_key;
+ int this_key_frame_forced;
+ int next_key_frame_forced;
+ int source_alt_ref_pending;
+ int source_alt_ref_active;
+ int is_src_frame_alt_ref;
+
+ int av_per_frame_bandwidth; // Average frame size target for clip
+ int min_frame_bandwidth; // Minimum allocation used for any frame
+ int max_frame_bandwidth; // Maximum burst rate allowed for a frame.
+
+ int ni_av_qi;
+ int ni_tot_qi;
+ int ni_frames;
+ int avg_frame_qindex[3]; // 0 - KEY, 1 - INTER, 2 - ARF/GF
+ double tot_q;
+ double avg_q;
+
+ int64_t buffer_level;
+ int64_t bits_off_target;
+
+ int decimation_factor;
+ int decimation_count;
+
+ int rolling_target_bits;
+ int rolling_actual_bits;
+
+ int long_rolling_target_bits;
+ int long_rolling_actual_bits;
+
+ int64_t total_actual_bits;
+ int total_target_vs_actual; // debug stats
+
+ int worst_quality;
+ int best_quality;
+ // int active_best_quality;
+} RATE_CONTROL;
+
+struct VP9_COMP;
+
+void vp9_save_coding_context(struct VP9_COMP *cpi);
+void vp9_restore_coding_context(struct VP9_COMP *cpi);
+
+void vp9_setup_key_frame(struct VP9_COMP *cpi);
+void vp9_setup_inter_frame(struct VP9_COMP *cpi);
 double vp9_convert_qindex_to_q(int qindex);
-int vp9_gfboost_qadjust(int qindex);
-int vp9_bits_per_mb(FRAME_TYPE frame_type, int qindex,
- double correction_factor);
-void vp9_setup_inter_frame(VP9_COMP *cpi);
+
+// initialize luts for minq
+void vp9_rc_init_minq_luts(void);
+
+// Generally at the high level, the following flow is expected
+// to be enforced for rate control:
+// First call per frame, one of:
+// vp9_rc_get_one_pass_vbr_params()
+// vp9_rc_get_one_pass_cbr_params()
+// vp9_rc_get_svc_params()
+// vp9_rc_get_first_pass_params()
+// vp9_rc_get_second_pass_params()
+// depending on the usage to set the rate control encode parameters desired.
+//
+// Then, call encode_frame_to_data_rate() to perform the
+// actual encode.
This function will in turn call encode_frame() +// one or more times, followed by one of: +// vp9_rc_postencode_update() +// vp9_rc_postencode_update_drop_frame() +// +// The majority of rate control parameters are only expected +// to be set in the vp9_rc_get_..._params() functions and +// updated during the vp9_rc_postencode_update...() functions. +// The only exceptions are vp9_rc_drop_frame() and +// vp9_rc_update_rate_correction_factors() functions. + +// Functions to set parameters for encoding before the actual +// encode_frame_to_data_rate() function. +void vp9_rc_get_one_pass_vbr_params(struct VP9_COMP *cpi); +void vp9_rc_get_one_pass_cbr_params(struct VP9_COMP *cpi); +void vp9_rc_get_svc_params(struct VP9_COMP *cpi); + +// Post encode update of the rate control parameters based +// on bytes used +void vp9_rc_postencode_update(struct VP9_COMP *cpi, + uint64_t bytes_used); +// Post encode update of the rate control parameters for dropped frames +void vp9_rc_postencode_update_drop_frame(struct VP9_COMP *cpi); + +// Updates rate correction factors +// Changes only the rate correction factors in the rate control structure. +void vp9_rc_update_rate_correction_factors(struct VP9_COMP *cpi, int damp_var); + +// Decide if we should drop this frame: For 1-pass CBR. +// Changes only the decimation count in the rate control structure +int vp9_rc_drop_frame(struct VP9_COMP *cpi); + +// Computes frame size bounds. +void vp9_rc_compute_frame_size_bounds(const struct VP9_COMP *cpi, + int this_frame_target, + int *frame_under_shoot_limit, + int *frame_over_shoot_limit); + +// Picks q and q bounds given the target for bits +int vp9_rc_pick_q_and_bounds(const struct VP9_COMP *cpi, + int *bottom_index, + int *top_index); + +// Estimates q to achieve a target bits per frame +int vp9_rc_regulate_q(const struct VP9_COMP *cpi, int target_bits_per_frame, + int active_best_quality, int active_worst_quality); + +// Estimates bits per mb for a given qindex and correction factor. +int vp9_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex, + double correction_factor); + +// Clamping utilities for bitrate targets for iframes and pframes. +int vp9_rc_clamp_iframe_target_size(const struct VP9_COMP *const cpi, + int target); +int vp9_rc_clamp_pframe_target_size(const struct VP9_COMP *const cpi, + int target); +// Utility to set frame_target into the RATE_CONTROL structure +// This function is called only from the vp9_rc_get_..._params() functions. +void vp9_rc_set_frame_target(struct VP9_COMP *cpi, int target); + +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_ENCODER_VP9_RATECTRL_H_ diff --git a/libvpx/vp9/encoder/vp9_rdopt.c b/libvpx/vp9/encoder/vp9_rdopt.c index 78cb06b..7db3e4f 100644 --- a/libvpx/vp9/encoder/vp9_rdopt.c +++ b/libvpx/vp9/encoder/vp9_rdopt.c @@ -8,39 +8,38 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include <stdio.h> -#include <math.h> -#include <limits.h> #include <assert.h> +#include <limits.h> +#include <math.h> +#include <stdio.h> -#include "vp9/common/vp9_pragmas.h" -#include "vp9/encoder/vp9_tokenize.h" -#include "vp9/encoder/vp9_treewriter.h" -#include "vp9/encoder/vp9_onyx_int.h" -#include "vp9/encoder/vp9_modecosts.h" -#include "vp9/encoder/vp9_encodeintra.h" +#include "./vp9_rtcd.h" + +#include "vpx_mem/vpx_mem.h" + +#include "vp9/common/vp9_common.h" +#include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_entropymode.h" +#include "vp9/common/vp9_idct.h" +#include "vp9/common/vp9_mvref_common.h" +#include "vp9/common/vp9_pragmas.h" +#include "vp9/common/vp9_pred_common.h" +#include "vp9/common/vp9_quant_common.h" #include "vp9/common/vp9_reconinter.h" #include "vp9/common/vp9_reconintra.h" -#include "vp9/common/vp9_findnearmv.h" -#include "vp9/common/vp9_quant_common.h" +#include "vp9/common/vp9_seg_common.h" +#include "vp9/common/vp9_systemdependent.h" + +#include "vp9/encoder/vp9_cost.h" #include "vp9/encoder/vp9_encodemb.h" -#include "vp9/encoder/vp9_quantize.h" -#include "vp9/encoder/vp9_variance.h" +#include "vp9/encoder/vp9_encodemv.h" #include "vp9/encoder/vp9_mcomp.h" -#include "vp9/encoder/vp9_rdopt.h" +#include "vp9/encoder/vp9_onyx_int.h" +#include "vp9/encoder/vp9_quantize.h" #include "vp9/encoder/vp9_ratectrl.h" -#include "vpx_mem/vpx_mem.h" -#include "vp9/common/vp9_systemdependent.h" -#include "vp9/encoder/vp9_encodemv.h" -#include "vp9/common/vp9_seg_common.h" -#include "vp9/common/vp9_pred_common.h" -#include "vp9/common/vp9_entropy.h" -#include "./vp9_rtcd.h" -#include "vp9/common/vp9_mvref_common.h" -#include "vp9/common/vp9_common.h" - -#define INVALID_MV 0x80008000 +#include "vp9/encoder/vp9_rdopt.h" +#include "vp9/encoder/vp9_tokenize.h" +#include "vp9/encoder/vp9_variance.h" /* Factor to weigh the rate for switchable interp filters */ #define SWITCHABLE_INTERP_RATE_FACTOR 1 @@ -51,53 +50,79 @@ #define MIN_EARLY_TERM_INDEX 3 +typedef struct { + MB_PREDICTION_MODE mode; + MV_REFERENCE_FRAME ref_frame[2]; +} MODE_DEFINITION; + +typedef struct { + MV_REFERENCE_FRAME ref_frame[2]; +} REF_DEFINITION; + +struct rdcost_block_args { + MACROBLOCK *x; + ENTROPY_CONTEXT t_above[16]; + ENTROPY_CONTEXT t_left[16]; + int rate; + int64_t dist; + int64_t sse; + int this_rate; + int64_t this_dist; + int64_t this_sse; + int64_t this_rd; + int64_t best_rd; + int skip; + int use_fast_coef_costing; + const scan_order *so; +}; + const MODE_DEFINITION vp9_mode_order[MAX_MODES] = { - {NEARESTMV, LAST_FRAME, NONE}, - {NEARESTMV, ALTREF_FRAME, NONE}, - {NEARESTMV, GOLDEN_FRAME, NONE}, - - {DC_PRED, INTRA_FRAME, NONE}, - - {NEWMV, LAST_FRAME, NONE}, - {NEWMV, ALTREF_FRAME, NONE}, - {NEWMV, GOLDEN_FRAME, NONE}, - - {NEARMV, LAST_FRAME, NONE}, - {NEARMV, ALTREF_FRAME, NONE}, - {NEARESTMV, LAST_FRAME, ALTREF_FRAME}, - {NEARESTMV, GOLDEN_FRAME, ALTREF_FRAME}, - - {TM_PRED, INTRA_FRAME, NONE}, - - {NEARMV, LAST_FRAME, ALTREF_FRAME}, - {NEWMV, LAST_FRAME, ALTREF_FRAME}, - {NEARMV, GOLDEN_FRAME, NONE}, - {NEARMV, GOLDEN_FRAME, ALTREF_FRAME}, - {NEWMV, GOLDEN_FRAME, ALTREF_FRAME}, - - {ZEROMV, LAST_FRAME, NONE}, - {ZEROMV, GOLDEN_FRAME, NONE}, - {ZEROMV, ALTREF_FRAME, NONE}, - {ZEROMV, LAST_FRAME, ALTREF_FRAME}, - {ZEROMV, GOLDEN_FRAME, ALTREF_FRAME}, - - {H_PRED, INTRA_FRAME, NONE}, - {V_PRED, INTRA_FRAME, NONE}, - {D135_PRED, INTRA_FRAME, NONE}, - {D207_PRED, INTRA_FRAME, NONE}, - {D153_PRED, INTRA_FRAME, NONE}, - {D63_PRED, INTRA_FRAME, NONE}, - {D117_PRED, INTRA_FRAME, 
NONE}, - {D45_PRED, INTRA_FRAME, NONE}, + {NEARESTMV, {LAST_FRAME, NONE}}, + {NEARESTMV, {ALTREF_FRAME, NONE}}, + {NEARESTMV, {GOLDEN_FRAME, NONE}}, + + {DC_PRED, {INTRA_FRAME, NONE}}, + + {NEWMV, {LAST_FRAME, NONE}}, + {NEWMV, {ALTREF_FRAME, NONE}}, + {NEWMV, {GOLDEN_FRAME, NONE}}, + + {NEARMV, {LAST_FRAME, NONE}}, + {NEARMV, {ALTREF_FRAME, NONE}}, + {NEARESTMV, {LAST_FRAME, ALTREF_FRAME}}, + {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}}, + + {TM_PRED, {INTRA_FRAME, NONE}}, + + {NEARMV, {LAST_FRAME, ALTREF_FRAME}}, + {NEWMV, {LAST_FRAME, ALTREF_FRAME}}, + {NEARMV, {GOLDEN_FRAME, NONE}}, + {NEARMV, {GOLDEN_FRAME, ALTREF_FRAME}}, + {NEWMV, {GOLDEN_FRAME, ALTREF_FRAME}}, + + {ZEROMV, {LAST_FRAME, NONE}}, + {ZEROMV, {GOLDEN_FRAME, NONE}}, + {ZEROMV, {ALTREF_FRAME, NONE}}, + {ZEROMV, {LAST_FRAME, ALTREF_FRAME}}, + {ZEROMV, {GOLDEN_FRAME, ALTREF_FRAME}}, + + {H_PRED, {INTRA_FRAME, NONE}}, + {V_PRED, {INTRA_FRAME, NONE}}, + {D135_PRED, {INTRA_FRAME, NONE}}, + {D207_PRED, {INTRA_FRAME, NONE}}, + {D153_PRED, {INTRA_FRAME, NONE}}, + {D63_PRED, {INTRA_FRAME, NONE}}, + {D117_PRED, {INTRA_FRAME, NONE}}, + {D45_PRED, {INTRA_FRAME, NONE}}, }; const REF_DEFINITION vp9_ref_order[MAX_REFS] = { - {LAST_FRAME, NONE}, - {GOLDEN_FRAME, NONE}, - {ALTREF_FRAME, NONE}, - {LAST_FRAME, ALTREF_FRAME}, - {GOLDEN_FRAME, ALTREF_FRAME}, - {INTRA_FRAME, NONE}, + {{LAST_FRAME, NONE}}, + {{GOLDEN_FRAME, NONE}}, + {{ALTREF_FRAME, NONE}}, + {{LAST_FRAME, ALTREF_FRAME}}, + {{GOLDEN_FRAME, ALTREF_FRAME}}, + {{INTRA_FRAME, NONE}}, }; // The baseline rd thresholds for breaking out of the rd loop for @@ -107,31 +132,58 @@ const REF_DEFINITION vp9_ref_order[MAX_REFS] = { static int rd_thresh_block_size_factor[BLOCK_SIZES] = {2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32}; -#define RD_THRESH_MAX_FACT 64 -#define RD_THRESH_INC 1 -#define RD_THRESH_POW 1.25 -#define RD_MULT_EPB_RATIO 64 +static int raster_block_offset(BLOCK_SIZE plane_bsize, + int raster_block, int stride) { + const int bw = b_width_log2(plane_bsize); + const int y = 4 * (raster_block >> bw); + const int x = 4 * (raster_block & ((1 << bw) - 1)); + return y * stride + x; +} +static int16_t* raster_block_offset_int16(BLOCK_SIZE plane_bsize, + int raster_block, int16_t *base) { + const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; + return base + raster_block_offset(plane_bsize, raster_block, stride); +} + +static void fill_mode_costs(VP9_COMP *cpi) { + MACROBLOCK *const x = &cpi->mb; + const FRAME_CONTEXT *const fc = &cpi->common.fc; + int i, j; -#define MV_COST_WEIGHT 108 -#define MV_COST_WEIGHT_SUB 120 + for (i = 0; i < INTRA_MODES; i++) + for (j = 0; j < INTRA_MODES; j++) + vp9_cost_tokens((int *)x->y_mode_costs[i][j], vp9_kf_y_mode_prob[i][j], + vp9_intra_mode_tree); + + // TODO(rbultje) separate tables for superblock costing? 
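[Editor's note: fill_mode_costs(), whose body continues below, leans entirely on vp9_cost_tokens(), which turns tree probabilities into bit costs. As a rough sketch of the underlying quantity, assuming costs are expressed in 1/256-bit units as the vp9_prob_cost[] table in vp9_cost.c suggests (an assumption, and the real code is table-driven, not log2()-based):]

#include <math.h>

static int prob_to_cost_sketch(int prob /* 1..255, chance of the 0-branch */) {
  return (int)(-256.0 * log2(prob / 256.0) + 0.5);
}
/* prob_to_cost_sketch(128) == 256 (exactly one bit);
   prob_to_cost_sketch(2) == 1792, matching the scale of vp9_prob_cost[]
   up to rounding. */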
+ vp9_cost_tokens(x->mbmode_cost, fc->y_mode_prob[1], vp9_intra_mode_tree); + vp9_cost_tokens(x->intra_uv_mode_cost[KEY_FRAME], + vp9_kf_uv_mode_prob[TM_PRED], vp9_intra_mode_tree); + vp9_cost_tokens(x->intra_uv_mode_cost[INTER_FRAME], + fc->uv_mode_prob[TM_PRED], vp9_intra_mode_tree); + + for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) + vp9_cost_tokens((int *)x->switchable_interp_costs[i], + fc->switchable_interp_prob[i], vp9_switchable_interp_tree); +} static void fill_token_costs(vp9_coeff_cost *c, - vp9_coeff_probs_model (*p)[BLOCK_TYPES]) { + vp9_coeff_probs_model (*p)[PLANE_TYPES]) { int i, j, k, l; TX_SIZE t; - for (t = TX_4X4; t <= TX_32X32; t++) - for (i = 0; i < BLOCK_TYPES; i++) - for (j = 0; j < REF_TYPES; j++) - for (k = 0; k < COEF_BANDS; k++) - for (l = 0; l < PREV_COEF_CONTEXTS; l++) { + for (t = TX_4X4; t <= TX_32X32; ++t) + for (i = 0; i < PLANE_TYPES; ++i) + for (j = 0; j < REF_TYPES; ++j) + for (k = 0; k < COEF_BANDS; ++k) + for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { vp9_prob probs[ENTROPY_NODES]; vp9_model_to_full_probs(p[t][i][j][k][l], probs); vp9_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp9_coef_tree); vp9_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs, vp9_coef_tree); - assert(c[t][i][j][k][0][l][DCT_EOB_TOKEN] == - c[t][i][j][k][1][l][DCT_EOB_TOKEN]); + assert(c[t][i][j][k][0][l][EOB_TOKEN] == + c[t][i][j][k][1][l][EOB_TOKEN]); } } @@ -155,13 +207,13 @@ void vp9_init_me_luts() { // This is to make it easier to resolve the impact of experimental changes // to the quantizer tables. for (i = 0; i < QINDEX_RANGE; i++) { - sad_per_bit16lut[i] = - (int)((0.0418 * vp9_convert_qindex_to_q(i)) + 2.4107); - sad_per_bit4lut[i] = (int)(0.063 * vp9_convert_qindex_to_q(i) + 2.742); + const double q = vp9_convert_qindex_to_q(i); + sad_per_bit16lut[i] = (int)(0.0418 * q + 2.4107); + sad_per_bit4lut[i] = (int)(0.063 * q + 2.742); } } -int vp9_compute_rd_mult(VP9_COMP *cpi, int qindex) { +int vp9_compute_rd_mult(const VP9_COMP *cpi, int qindex) { const int q = vp9_dc_quant(qindex, 0); // TODO(debargha): Adjust the function below int rdmult = 88 * q * q / 25; @@ -175,12 +227,9 @@ int vp9_compute_rd_mult(VP9_COMP *cpi, int qindex) { } static int compute_rd_thresh_factor(int qindex) { - int q; // TODO(debargha): Adjust the function below - q = (int)(pow(vp9_dc_quant(qindex, 0) / 4.0, RD_THRESH_POW) * 5.12); - if (q < 8) - q = 8; - return q; + const int q = (int)(pow(vp9_dc_quant(qindex, 0) / 4.0, RD_THRESH_POW) * 5.12); + return MAX(q, 8); } void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex) { @@ -189,117 +238,88 @@ void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex) { } static void set_block_thresholds(VP9_COMP *cpi) { + const VP9_COMMON *const cm = &cpi->common; + const SPEED_FEATURES *const sf = &cpi->sf; int i, bsize, segment_id; - VP9_COMMON *cm = &cpi->common; for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) { - int q; - int segment_qindex = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex); - segment_qindex = clamp(segment_qindex + cm->y_dc_delta_q, 0, MAXQ); - q = compute_rd_thresh_factor(segment_qindex); + const int qindex = clamp(vp9_get_qindex(&cm->seg, segment_id, + cm->base_qindex) + cm->y_dc_delta_q, + 0, MAXQ); + const int q = compute_rd_thresh_factor(qindex); for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) { - // Threshold here seem unecessarily harsh but fine given actual - // range of values used for cpi->sf.thresh_mult[] - int thresh_max = INT_MAX / (q * rd_thresh_block_size_factor[bsize]); - - for (i = 0; i < MAX_MODES; 
++i) { - if (cpi->sf.thresh_mult[i] < thresh_max) { - cpi->rd_threshes[segment_id][bsize][i] = - cpi->sf.thresh_mult[i] * q * - rd_thresh_block_size_factor[bsize] / 4; - } else { - cpi->rd_threshes[segment_id][bsize][i] = INT_MAX; - } - } + // Threshold here seems unnecessarily harsh but fine given actual + // range of values used for cpi->sf.thresh_mult[]. + const int t = q * rd_thresh_block_size_factor[bsize]; + const int thresh_max = INT_MAX / t; + + for (i = 0; i < MAX_MODES; ++i) + cpi->rd_threshes[segment_id][bsize][i] = + sf->thresh_mult[i] < thresh_max ? sf->thresh_mult[i] * t / 4 + : INT_MAX; for (i = 0; i < MAX_REFS; ++i) { - if (cpi->sf.thresh_mult_sub8x8[i] < thresh_max) { - cpi->rd_thresh_sub8x8[segment_id][bsize][i] = - cpi->sf.thresh_mult_sub8x8[i] * q * - rd_thresh_block_size_factor[bsize] / 4; - } else { - cpi->rd_thresh_sub8x8[segment_id][bsize][i] = INT_MAX; - } + cpi->rd_thresh_sub8x8[segment_id][bsize][i] = + sf->thresh_mult_sub8x8[i] < thresh_max + ? sf->thresh_mult_sub8x8[i] * t / 4 + : INT_MAX; } } } } void vp9_initialize_rd_consts(VP9_COMP *cpi) { - VP9_COMMON *cm = &cpi->common; - int qindex, i; - - vp9_clear_system_state(); // __asm emms; + VP9_COMMON *const cm = &cpi->common; + MACROBLOCK *const x = &cpi->mb; + int i; - // Further tests required to see if optimum is different - // for key frames, golden frames and arf frames. - // if (cpi->common.refresh_golden_frame || - // cpi->common.refresh_alt_ref_frame) - qindex = clamp(cm->base_qindex + cm->y_dc_delta_q, 0, MAXQ); + vp9_clear_system_state(); cpi->RDDIV = RDDIV_BITS; // in bits (to multiply D by 128) - cpi->RDMULT = vp9_compute_rd_mult(cpi, qindex); - - cpi->mb.errorperbit = cpi->RDMULT / RD_MULT_EPB_RATIO; - cpi->mb.errorperbit += (cpi->mb.errorperbit == 0); + cpi->RDMULT = vp9_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q); - vp9_set_speed_features(cpi); + x->errorperbit = cpi->RDMULT / RD_MULT_EPB_RATIO; + x->errorperbit += (x->errorperbit == 0); - cpi->mb.select_txfm_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL && - cm->frame_type != KEY_FRAME) ? - 0 : 1; + x->select_txfm_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL && + cm->frame_type != KEY_FRAME) ? 0 : 1; set_block_thresholds(cpi); - fill_token_costs(cpi->mb.token_costs, cm->fc.coef_probs); - - for (i = 0; i < PARTITION_CONTEXTS; i++) - vp9_cost_tokens(cpi->mb.partition_cost[i], get_partition_probs(cm, i), - vp9_partition_tree); + if (!cpi->sf.use_nonrd_pick_mode || cm->frame_type == KEY_FRAME) { + fill_token_costs(x->token_costs, cm->fc.coef_probs); - /*rough estimate for costing*/ - vp9_init_mode_costs(cpi); + for (i = 0; i < PARTITION_CONTEXTS; i++) + vp9_cost_tokens(x->partition_cost[i], get_partition_probs(cm, i), + vp9_partition_tree); + } - if (!frame_is_intra_only(cm)) { - vp9_build_nmv_cost_table( - cpi->mb.nmvjointcost, - cm->allow_high_precision_mv ? cpi->mb.nmvcost_hp : cpi->mb.nmvcost, - &cm->fc.nmvc, - cm->allow_high_precision_mv, 1, 1); + if (!cpi->sf.use_nonrd_pick_mode || (cm->current_video_frame & 0x07) == 1 || + cm->frame_type == KEY_FRAME) { + fill_mode_costs(cpi); - for (i = 0; i < INTER_MODE_CONTEXTS; i++) { - MB_PREDICTION_MODE m; + if (!frame_is_intra_only(cm)) { + vp9_build_nmv_cost_table(x->nmvjointcost, + cm->allow_high_precision_mv ? 
x->nmvcost_hp + : x->nmvcost, + &cm->fc.nmvc, cm->allow_high_precision_mv); - for (m = NEARESTMV; m < MB_MODE_COUNT; m++) - cpi->mb.inter_mode_cost[i][INTER_OFFSET(m)] = - cost_token(vp9_inter_mode_tree, - cm->fc.inter_mode_probs[i], - &vp9_inter_mode_encodings[INTER_OFFSET(m)]); + for (i = 0; i < INTER_MODE_CONTEXTS; ++i) + vp9_cost_tokens((int *)x->inter_mode_cost[i], + cm->fc.inter_mode_probs[i], vp9_inter_mode_tree); } } } -static INLINE void linear_interpolate2(double x, int ntab, int inv_step, - const double *tab1, const double *tab2, - double *v1, double *v2) { - double y = x * inv_step; - int d = (int) y; - if (d >= ntab - 1) { - *v1 = tab1[ntab - 1]; - *v2 = tab2[ntab - 1]; - } else { - double a = y - d; - *v1 = tab1[d] * (1 - a) + tab1[d + 1] * a; - *v2 = tab2[d] * (1 - a) + tab2[d + 1] * a; - } -} +static const int MAX_XSQ_Q10 = 245727; -static void model_rd_norm(double x, double *R, double *D) { - static const int inv_tab_step = 8; - static const int tab_size = 120; +static void model_rd_norm(int xsq_q10, int *r_q10, int *d_q10) { // NOTE: The tables below must be of the same size - // + + // The functions described below are sampled at the four most significant + // bits of x^2 + 8 / 256 + // Normalized rate // This table models the rate for a Laplacian source // source with given variance when quantized with a uniform quantizer @@ -307,22 +327,20 @@ static void model_rd_norm(double x, double *R, double *D) { // Rn(x) = H(sqrt(r)) + sqrt(r)*[1 + H(r)/(1 - r)], // where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance), // and H(x) is the binary entropy function. - static const double rate_tab[] = { - 64.00, 4.944, 3.949, 3.372, 2.966, 2.655, 2.403, 2.194, - 2.014, 1.858, 1.720, 1.596, 1.485, 1.384, 1.291, 1.206, - 1.127, 1.054, 0.986, 0.923, 0.863, 0.808, 0.756, 0.708, - 0.662, 0.619, 0.579, 0.541, 0.506, 0.473, 0.442, 0.412, - 0.385, 0.359, 0.335, 0.313, 0.291, 0.272, 0.253, 0.236, - 0.220, 0.204, 0.190, 0.177, 0.165, 0.153, 0.142, 0.132, - 0.123, 0.114, 0.106, 0.099, 0.091, 0.085, 0.079, 0.073, - 0.068, 0.063, 0.058, 0.054, 0.050, 0.047, 0.043, 0.040, - 0.037, 0.034, 0.032, 0.029, 0.027, 0.025, 0.023, 0.022, - 0.020, 0.019, 0.017, 0.016, 0.015, 0.014, 0.013, 0.012, - 0.011, 0.010, 0.009, 0.008, 0.008, 0.007, 0.007, 0.006, - 0.006, 0.005, 0.005, 0.005, 0.004, 0.004, 0.004, 0.003, - 0.003, 0.003, 0.003, 0.002, 0.002, 0.002, 0.002, 0.002, - 0.002, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, - 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.000, + static const int rate_tab_q10[] = { + 65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651, + 4553, 4389, 4255, 4142, 4044, 3958, 3881, 3811, + 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186, + 3133, 3037, 2952, 2877, 2809, 2747, 2690, 2638, + 2589, 2501, 2423, 2353, 2290, 2232, 2179, 2130, + 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651, + 1608, 1530, 1460, 1398, 1342, 1290, 1243, 1199, + 1159, 1086, 1021, 963, 911, 864, 821, 781, + 745, 680, 623, 574, 530, 490, 455, 424, + 395, 345, 304, 269, 239, 213, 190, 171, + 154, 126, 104, 87, 73, 61, 52, 44, + 38, 28, 21, 16, 12, 10, 8, 6, + 5, 3, 2, 1, 1, 1, 0, 0, }; // Normalized distortion // This table models the normalized distortion for a Laplacian source @@ -331,54 +349,74 @@ static void model_rd_norm(double x, double *R, double *D) { // Dn(x) = 1 - 1/sqrt(2) * x / sinh(x/sqrt(2)) // where x = qpstep / sqrt(variance) // Note the actual distortion is Dn * variance. 
- static const double dist_tab[] = { - 0.000, 0.001, 0.005, 0.012, 0.021, 0.032, 0.045, 0.061, - 0.079, 0.098, 0.119, 0.142, 0.166, 0.190, 0.216, 0.242, - 0.269, 0.296, 0.324, 0.351, 0.378, 0.405, 0.432, 0.458, - 0.484, 0.509, 0.534, 0.557, 0.580, 0.603, 0.624, 0.645, - 0.664, 0.683, 0.702, 0.719, 0.735, 0.751, 0.766, 0.780, - 0.794, 0.807, 0.819, 0.830, 0.841, 0.851, 0.861, 0.870, - 0.878, 0.886, 0.894, 0.901, 0.907, 0.913, 0.919, 0.925, - 0.930, 0.935, 0.939, 0.943, 0.947, 0.951, 0.954, 0.957, - 0.960, 0.963, 0.966, 0.968, 0.971, 0.973, 0.975, 0.976, - 0.978, 0.980, 0.981, 0.982, 0.984, 0.985, 0.986, 0.987, - 0.988, 0.989, 0.990, 0.990, 0.991, 0.992, 0.992, 0.993, - 0.993, 0.994, 0.994, 0.995, 0.995, 0.996, 0.996, 0.996, - 0.996, 0.997, 0.997, 0.997, 0.997, 0.998, 0.998, 0.998, - 0.998, 0.998, 0.998, 0.999, 0.999, 0.999, 0.999, 0.999, - 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 1.000, + static const int dist_tab_q10[] = { + 0, 0, 1, 1, 1, 2, 2, 2, + 3, 3, 4, 5, 5, 6, 7, 7, + 8, 9, 11, 12, 13, 15, 16, 17, + 18, 21, 24, 26, 29, 31, 34, 36, + 39, 44, 49, 54, 59, 64, 69, 73, + 78, 88, 97, 106, 115, 124, 133, 142, + 151, 167, 184, 200, 215, 231, 245, 260, + 274, 301, 327, 351, 375, 397, 418, 439, + 458, 495, 528, 559, 587, 613, 637, 659, + 680, 717, 749, 777, 801, 823, 842, 859, + 874, 899, 919, 936, 949, 960, 969, 977, + 983, 994, 1001, 1006, 1010, 1013, 1015, 1017, + 1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024, + }; + static const int xsq_iq_q10[] = { + 0, 4, 8, 12, 16, 20, 24, 28, + 32, 40, 48, 56, 64, 72, 80, 88, + 96, 112, 128, 144, 160, 176, 192, 208, + 224, 256, 288, 320, 352, 384, 416, 448, + 480, 544, 608, 672, 736, 800, 864, 928, + 992, 1120, 1248, 1376, 1504, 1632, 1760, 1888, + 2016, 2272, 2528, 2784, 3040, 3296, 3552, 3808, + 4064, 4576, 5088, 5600, 6112, 6624, 7136, 7648, + 8160, 9184, 10208, 11232, 12256, 13280, 14304, 15328, + 16352, 18400, 20448, 22496, 24544, 26592, 28640, 30688, + 32736, 36832, 40928, 45024, 49120, 53216, 57312, 61408, + 65504, 73696, 81888, 90080, 98272, 106464, 114656, 122848, + 131040, 147424, 163808, 180192, 196576, 212960, 229344, 245728, }; /* - assert(sizeof(rate_tab) == tab_size * sizeof(rate_tab[0]); - assert(sizeof(dist_tab) == tab_size * sizeof(dist_tab[0]); - assert(sizeof(rate_tab) == sizeof(dist_tab)); + static const int tab_size = sizeof(rate_tab_q10) / sizeof(rate_tab_q10[0]); + assert(sizeof(dist_tab_q10) / sizeof(dist_tab_q10[0]) == tab_size); + assert(sizeof(xsq_iq_q10) / sizeof(xsq_iq_q10[0]) == tab_size); + assert(MAX_XSQ_Q10 + 1 == xsq_iq_q10[tab_size - 1]); */ - assert(x >= 0.0); - linear_interpolate2(x, tab_size, inv_tab_step, - rate_tab, dist_tab, R, D); + int tmp = (xsq_q10 >> 2) + 8; + int k = get_msb(tmp) - 3; + int xq = (k << 3) + ((tmp >> k) & 0x7); + const int one_q10 = 1 << 10; + const int a_q10 = ((xsq_q10 - xsq_iq_q10[xq]) << 10) >> (2 + k); + const int b_q10 = one_q10 - a_q10; + *r_q10 = (rate_tab_q10[xq] * b_q10 + rate_tab_q10[xq + 1] * a_q10) >> 10; + *d_q10 = (dist_tab_q10[xq] * b_q10 + dist_tab_q10[xq + 1] * a_q10) >> 10; } -static void model_rd_from_var_lapndz(int var, int n, int qstep, - int *rate, int64_t *dist) { +void vp9_model_rd_from_var_lapndz(unsigned int var, unsigned int n, + unsigned int qstep, int *rate, + int64_t *dist) { // This function models the rate and distortion for a Laplacian // source with given variance when quantized with a uniform quantizer // with given stepsize. 
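[Editor's note: the new model_rd_norm() above locates its table bucket with pure bit arithmetic. The sketch below isolates that index computation; get_msb_sketch() is a portable stand-in for the get_msb() helper the real code uses. The Hang and Chen citation continues in the comment below.]

#include <assert.h>

static int get_msb_sketch(unsigned int n) {
  int msb = 0;
  assert(n != 0);
  while (n >>= 1) ++msb;
  return msb;
}

/* Bucket selection: 8 buckets per octave, chosen from the MSB of
   (xsq_q10 >> 2) + 8 and the 3 bits just below it. */
static int xsq_to_index_sketch(int xsq_q10) {
  const int tmp = (xsq_q10 >> 2) + 8;
  const int k = get_msb_sketch(tmp) - 3;  /* >= 0, since tmp >= 8 */
  return (k << 3) + ((tmp >> k) & 0x7);
}
/* Example: xsq_q10 = 1000 gives tmp = 258, k = 5, index = 40, and
   xsq_iq_q10[40] = 992 is the sample just below 1000; the remaining
   8/128 of the interval is recovered by the linear interpolation above. */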
The closed form expressions are in: // Hang and Chen, "Source Model for transform video coder and its // application - Part I: Fundamental Theory", IEEE Trans. Circ. // Sys. for Video Tech., April 1997. - vp9_clear_system_state(); - if (var == 0 || n == 0) { + if (var == 0) { *rate = 0; *dist = 0; } else { - double D, R; - double s2 = (double) var / n; - double x = qstep / sqrt(s2); - model_rd_norm(x, &R, &D); - *rate = (int)((n << 8) * R + 0.5); - *dist = (int)(var * D + 0.5); + int d_q10, r_q10; + const uint64_t xsq_q10_64 = + ((((uint64_t)qstep * qstep * n) << 10) + (var >> 1)) / var; + const int xsq_q10 = xsq_q10_64 > MAX_XSQ_Q10 ? + MAX_XSQ_Q10 : (int)xsq_q10_64; + model_rd_norm(xsq_q10, &r_q10, &d_q10); + *rate = (n * r_q10 + 2) >> 2; + *dist = (var * (int64_t)d_q10 + 512) >> 10; } - vp9_clear_system_state(); } static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, @@ -387,26 +425,48 @@ static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, // Note our transform coeffs are 8 times an orthogonal transform. // Hence quantizer step is also 8 times. To get effective quantizer // we need to divide by 8 before sending to modeling function. - int i, rate_sum = 0, dist_sum = 0; + int i; + int64_t rate_sum = 0; + int64_t dist_sum = 0; + const int ref = xd->mi_8x8[0]->mbmi.ref_frame[0]; + unsigned int sse; for (i = 0; i < MAX_MB_PLANE; ++i) { struct macroblock_plane *const p = &x->plane[i]; struct macroblockd_plane *const pd = &xd->plane[i]; const BLOCK_SIZE bs = get_plane_block_size(bsize, pd); - unsigned int sse; - int rate; - int64_t dist; + (void) cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, &sse); - // sse works better than var, since there is no dc prediction used - model_rd_from_var_lapndz(sse, 1 << num_pels_log2_lookup[bs], - pd->dequant[1] >> 3, &rate, &dist); - rate_sum += rate; - dist_sum += (int)dist; + if (i == 0) + x->pred_sse[ref] = sse; + + // Fast approximate the modelling function. 
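[Editor's note: immediately after this comment, the hunk adds a fast path that bypasses the Laplacian table model at high speed settings. A hypothetical standalone version of that shortcut, with the 120 and 280 constants taken from the added lines below:]

#include <stdint.h>

static void fast_rd_model_sketch(int64_t sse, int quantizer,
                                 int64_t *rate, int64_t *dist) {
  /* Rate falls off linearly in the quantizer and vanishes above 120;
     distortion grows linearly with it. */
  *rate = quantizer < 120 ? (sse * (280 - quantizer)) >> 8 : 0;
  *dist = (sse * quantizer) >> 8;
}
/* e.g. sse = 1000, quantizer = 40: rate = 937, dist = 156. */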
+ if (cpi->speed > 4) { + int64_t rate; + int64_t dist; + int64_t square_error = sse; + int quantizer = (pd->dequant[1] >> 3); + + if (quantizer < 120) + rate = (square_error * (280 - quantizer)) >> 8; + else + rate = 0; + dist = (square_error * quantizer) >> 8; + rate_sum += rate; + dist_sum += dist; + } else { + int rate; + int64_t dist; + vp9_model_rd_from_var_lapndz(sse, 1 << num_pels_log2_lookup[bs], + pd->dequant[1] >> 3, &rate, &dist); + rate_sum += rate; + dist_sum += dist; + } } - *out_rate_sum = rate_sum; + *out_rate_sum = (int)rate_sum; *out_dist_sum = dist_sum << 4; } @@ -417,10 +477,10 @@ static void model_rd_for_sb_y_tx(VP9_COMP *cpi, BLOCK_SIZE bsize, int *out_skip) { int j, k; BLOCK_SIZE bs; - struct macroblock_plane *const p = &x->plane[0]; - struct macroblockd_plane *const pd = &xd->plane[0]; - const int width = 4 << num_4x4_blocks_wide_lookup[bsize]; - const int height = 4 << num_4x4_blocks_high_lookup[bsize]; + const struct macroblock_plane *const p = &x->plane[0]; + const struct macroblockd_plane *const pd = &xd->plane[0]; + const int width = 4 * num_4x4_blocks_wide_lookup[bsize]; + const int height = 4 * num_4x4_blocks_high_lookup[bsize]; int rate_sum = 0; int64_t dist_sum = 0; const int t = 4 << tx_size; @@ -447,7 +507,8 @@ static void model_rd_for_sb_y_tx(VP9_COMP *cpi, BLOCK_SIZE bsize, &pd->dst.buf[j * pd->dst.stride + k], pd->dst.stride, &sse); // sse works better than var, since there is no dc prediction used - model_rd_from_var_lapndz(sse, t * t, pd->dequant[1] >> 3, &rate, &dist); + vp9_model_rd_from_var_lapndz(sse, t * t, pd->dequant[1] >> 3, + &rate, &dist); rate_sum += rate; dist_sum += dist; *out_skip &= (rate < 1024); @@ -458,15 +519,15 @@ static void model_rd_for_sb_y_tx(VP9_COMP *cpi, BLOCK_SIZE bsize, *out_dist_sum = dist_sum << 4; } -int64_t vp9_block_error_c(int16_t *coeff, int16_t *dqcoeff, +int64_t vp9_block_error_c(const int16_t *coeff, const int16_t *dqcoeff, intptr_t block_size, int64_t *ssz) { int i; int64_t error = 0, sqcoeff = 0; for (i = 0; i < block_size; i++) { - int this_diff = coeff[i] - dqcoeff[i]; - error += (unsigned)this_diff * this_diff; - sqcoeff += (unsigned) coeff[i] * coeff[i]; + const int diff = coeff[i] - dqcoeff[i]; + error += diff * diff; + sqcoeff += coeff[i] * coeff[i]; } *ssz = sqcoeff; @@ -484,40 +545,38 @@ static const int16_t band_counts[TX_SIZES][8] = { { 1, 2, 3, 4, 11, 256 - 21, 0 }, { 1, 2, 3, 4, 11, 1024 - 21, 0 }, }; - static INLINE int cost_coeffs(MACROBLOCK *x, int plane, int block, ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L, TX_SIZE tx_size, - const int16_t *scan, const int16_t *nb) { + const int16_t *scan, const int16_t *nb, + int use_fast_coef_costing) { MACROBLOCKD *const xd = &x->e_mbd; MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; - struct macroblockd_plane *pd = &xd->plane[plane]; + const struct macroblock_plane *p = &x->plane[plane]; + const struct macroblockd_plane *pd = &xd->plane[plane]; const PLANE_TYPE type = pd->plane_type; const int16_t *band_count = &band_counts[tx_size][1]; - const int eob = pd->eobs[block]; - const int16_t *const qcoeff_ptr = BLOCK_OFFSET(pd->qcoeff, block); - const int ref = mbmi->ref_frame[0] != INTRA_FRAME; - unsigned int (*token_costs)[2][PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS] = - x->token_costs[tx_size][type][ref]; - const ENTROPY_CONTEXT above_ec = !!*A, left_ec = !!*L; + const int eob = p->eobs[block]; + const int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); + unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] = + 
x->token_costs[tx_size][type][is_inter_block(mbmi)]; uint8_t *p_tok = x->token_cache; - int pt = combine_entropy_contexts(above_ec, left_ec); + int pt = combine_entropy_contexts(*A, *L); int c, cost; - // Check for consistency of tx_size with mode info - assert(type == PLANE_TYPE_Y_WITH_DC ? mbmi->tx_size == tx_size - : get_uv_tx_size(mbmi) == tx_size); + assert(type == PLANE_TYPE_Y ? mbmi->tx_size == tx_size + : get_uv_tx_size(mbmi) == tx_size); if (eob == 0) { // single eob token - cost = token_costs[0][0][pt][DCT_EOB_TOKEN]; + cost = token_costs[0][0][pt][EOB_TOKEN]; c = 0; } else { int band_left = *band_count++; // dc token - int v = qcoeff_ptr[0]; + int v = qcoeff[0]; int prev_t = vp9_dct_value_tokens_ptr[v].token; cost = (*token_costs)[0][pt][prev_t] + vp9_dct_value_cost_ptr[v]; p_tok[0] = vp9_pt_energy_class[prev_t]; @@ -528,11 +587,15 @@ static INLINE int cost_coeffs(MACROBLOCK *x, const int rc = scan[c]; int t; - v = qcoeff_ptr[rc]; + v = qcoeff[rc]; t = vp9_dct_value_tokens_ptr[v].token; - pt = get_coef_context(nb, p_tok, c); - cost += (*token_costs)[!prev_t][pt][t] + vp9_dct_value_cost_ptr[v]; - p_tok[rc] = vp9_pt_energy_class[t]; + if (use_fast_coef_costing) { + cost += (*token_costs)[!prev_t][!prev_t][t] + vp9_dct_value_cost_ptr[v]; + } else { + pt = get_coef_context(nb, p_tok, c); + cost += (*token_costs)[!prev_t][pt][t] + vp9_dct_value_cost_ptr[v]; + p_tok[rc] = vp9_pt_energy_class[t]; + } prev_t = t; if (!--band_left) { band_left = *band_count++; @@ -542,8 +605,12 @@ static INLINE int cost_coeffs(MACROBLOCK *x, // eob token if (band_left) { - pt = get_coef_context(nb, p_tok, c); - cost += (*token_costs)[0][pt][DCT_EOB_TOKEN]; + if (use_fast_coef_costing) { + cost += (*token_costs)[0][!prev_t][EOB_TOKEN]; + } else { + pt = get_coef_context(nb, p_tok, c); + cost += (*token_costs)[0][pt][EOB_TOKEN]; + } } } @@ -552,24 +619,22 @@ static INLINE int cost_coeffs(MACROBLOCK *x, return cost; } - -static void dist_block(int plane, int block, TX_SIZE tx_size, void *arg) { +static void dist_block(int plane, int block, TX_SIZE tx_size, + struct rdcost_block_args* args) { const int ss_txfrm_size = tx_size << 1; - struct rdcost_block_args* args = arg; MACROBLOCK* const x = args->x; MACROBLOCKD* const xd = &x->e_mbd; - struct macroblock_plane *const p = &x->plane[plane]; - struct macroblockd_plane *const pd = &xd->plane[plane]; + const struct macroblock_plane *const p = &x->plane[plane]; + const struct macroblockd_plane *const pd = &xd->plane[plane]; int64_t this_sse; - int shift = args->tx_size == TX_32X32 ? 0 : 2; + int shift = tx_size == TX_32X32 ? 0 : 2; int16_t *const coeff = BLOCK_OFFSET(p->coeff, block); int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); args->dist = vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >> shift; args->sse = this_sse >> shift; - if (x->skip_encode && - xd->mi_8x8[0]->mbmi.ref_frame[0] == INTRA_FRAME) { + if (x->skip_encode && !is_inter_block(&xd->mi_8x8[0]->mbmi)) { // TODO(jingning): tune the model to better capture the distortion. 
int64_t p = (pd->dequant[1] * pd->dequant[1] * (1 << ss_txfrm_size)) >> (shift + 2); @@ -579,32 +644,31 @@ static void dist_block(int plane, int block, TX_SIZE tx_size, void *arg) { } static void rate_block(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct rdcost_block_args* args = arg; - + TX_SIZE tx_size, struct rdcost_block_args* args) { int x_idx, y_idx; - txfrm_block_to_raster_xy(plane_bsize, args->tx_size, block, &x_idx, &y_idx); + txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x_idx, &y_idx); args->rate = cost_coeffs(args->x, plane, block, args->t_above + x_idx, - args->t_left + y_idx, args->tx_size, - args->scan, args->nb); + args->t_left + y_idx, tx_size, + args->so->scan, args->so->neighbors, + args->use_fast_coef_costing); } -static void block_yrd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { +static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, + TX_SIZE tx_size, void *arg) { struct rdcost_block_args *args = arg; MACROBLOCK *const x = args->x; MACROBLOCKD *const xd = &x->e_mbd; - struct encode_b_args encode_args = {x, NULL}; + MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; int64_t rd1, rd2, rd; if (args->skip) return; - if (!is_inter_block(&xd->mi_8x8[0]->mbmi)) - vp9_encode_block_intra(plane, block, plane_bsize, tx_size, &encode_args); + if (!is_inter_block(mbmi)) + vp9_encode_block_intra(x, plane, block, plane_bsize, tx_size, &mbmi->skip); else - vp9_xform_quant(plane, block, plane_bsize, tx_size, &encode_args); + vp9_xform_quant(x, plane, block, plane_bsize, tx_size); dist_block(plane, block, tx_size, args); rate_block(plane, block, plane_bsize, tx_size, args); @@ -613,8 +677,9 @@ static void block_yrd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, // TODO(jingning): temporarily enabled only for luma component rd = MIN(rd1, rd2); - if (!xd->lossless && plane == 0) - x->zcoeff_blk[tx_size][block] = rd1 > rd2 || !xd->plane[plane].eobs[block]; + if (plane == 0) + x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] || + (rd1 > rd2 && !xd->lossless); args->this_rate += args->rate; args->this_dist += args->dist; @@ -627,10 +692,16 @@ static void block_yrd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, } } -void vp9_get_entropy_contexts(TX_SIZE tx_size, - ENTROPY_CONTEXT t_above[16], ENTROPY_CONTEXT t_left[16], - const ENTROPY_CONTEXT *above, const ENTROPY_CONTEXT *left, - int num_4x4_w, int num_4x4_h) { +void vp9_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size, + const struct macroblockd_plane *pd, + ENTROPY_CONTEXT t_above[16], + ENTROPY_CONTEXT t_left[16]) { + const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); + const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; + const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; + const ENTROPY_CONTEXT *const above = pd->above_context; + const ENTROPY_CONTEXT *const left = pd->left_context; + int i; switch (tx_size) { case TX_4X4: @@ -656,57 +727,42 @@ void vp9_get_entropy_contexts(TX_SIZE tx_size, t_left[i] = !!*(const uint64_t *)&left[i]; break; default: - assert(!"Invalid transform size."); + assert(0 && "Invalid transform size."); } } -static void init_rdcost_stack(MACROBLOCK *x, TX_SIZE tx_size, - const int num_4x4_w, const int num_4x4_h, - const int64_t ref_rdcost, - struct rdcost_block_args *arg) { - vpx_memset(arg, 0, sizeof(struct rdcost_block_args)); - arg->x = x; - arg->tx_size = tx_size; - arg->bw = num_4x4_w; - arg->bh = num_4x4_h; - arg->best_rd = ref_rdcost; -} - 
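[Editor's note: with init_rdcost_stack() removed, txfm_rd_in_plane() (next hunk) packs its state into a stack-allocated rdcost_block_args and hands it to vp9_foreach_transformed_block_in_plane(), which invokes block_rd_txfm() once per transform block. A minimal sketch of that callback shape, using plain ints in place of the vp9 enums:]

typedef void (*visit_fn)(int plane, int block, int plane_bsize,
                         int tx_size, void *arg);

static void foreach_block_sketch(int plane, int num_blocks, int plane_bsize,
                                 int tx_size, visit_fn visit, void *arg) {
  int block;
  /* The real iterator also derives num_blocks and per-block offsets from
     the block and transform sizes; that detail is elided here. */
  for (block = 0; block < num_blocks; ++block)
    visit(plane, block, plane_bsize, tx_size, arg);
}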
static void txfm_rd_in_plane(MACROBLOCK *x, - struct rdcost_block_args *rd_stack, int *rate, int64_t *distortion, int *skippable, int64_t *sse, int64_t ref_best_rd, int plane, - BLOCK_SIZE bsize, TX_SIZE tx_size) { + BLOCK_SIZE bsize, TX_SIZE tx_size, + int use_fast_coef_casting) { MACROBLOCKD *const xd = &x->e_mbd; - struct macroblockd_plane *const pd = &xd->plane[plane]; - const BLOCK_SIZE bs = get_plane_block_size(bsize, pd); - const int num_4x4_w = num_4x4_blocks_wide_lookup[bs]; - const int num_4x4_h = num_4x4_blocks_high_lookup[bs]; + const struct macroblockd_plane *const pd = &xd->plane[plane]; + struct rdcost_block_args args = { 0 }; + args.x = x; + args.best_rd = ref_best_rd; + args.use_fast_coef_costing = use_fast_coef_casting; - init_rdcost_stack(x, tx_size, num_4x4_w, num_4x4_h, - ref_best_rd, rd_stack); if (plane == 0) xd->mi_8x8[0]->mbmi.tx_size = tx_size; - vp9_get_entropy_contexts(tx_size, rd_stack->t_above, rd_stack->t_left, - pd->above_context, pd->left_context, - num_4x4_w, num_4x4_h); + vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left); - get_scan(xd, tx_size, pd->plane_type, 0, &rd_stack->scan, &rd_stack->nb); + args.so = get_scan(xd, tx_size, pd->plane_type, 0); - foreach_transformed_block_in_plane(xd, bsize, plane, - block_yrd_txfm, rd_stack); - if (rd_stack->skip) { + vp9_foreach_transformed_block_in_plane(xd, bsize, plane, + block_rd_txfm, &args); + if (args.skip) { *rate = INT_MAX; *distortion = INT64_MAX; *sse = INT64_MAX; *skippable = 0; } else { - *distortion = rd_stack->this_dist; - *rate = rd_stack->this_rate; - *sse = rd_stack->this_sse; - *skippable = vp9_is_skippable_in_plane(xd, bsize, plane); + *distortion = args.this_dist; + *rate = args.this_rate; + *sse = args.this_sse; + *skippable = vp9_is_skippable_in_plane(x, bsize, plane); } } @@ -723,9 +779,9 @@ static void choose_largest_txfm_size(VP9_COMP *cpi, MACROBLOCK *x, mbmi->tx_size = MIN(max_tx_size, largest_tx_size); - txfm_rd_in_plane(x, &cpi->rdcost_stack, rate, distortion, skip, + txfm_rd_in_plane(x, rate, distortion, skip, &sse[mbmi->tx_size], ref_best_rd, 0, bs, - mbmi->tx_size); + mbmi->tx_size, cpi->sf.use_fast_coef_costing); cpi->tx_stepdown_count[0]++; } @@ -739,63 +795,49 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; - vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd); - int64_t rd[TX_SIZES][2]; + vp9_prob skip_prob = vp9_get_skip_prob(cm, xd); + int64_t rd[TX_SIZES][2] = {{INT64_MAX, INT64_MAX}, + {INT64_MAX, INT64_MAX}, + {INT64_MAX, INT64_MAX}, + {INT64_MAX, INT64_MAX}}; int n, m; int s0, s1; + const TX_SIZE max_mode_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode]; + int64_t best_rd = INT64_MAX; + TX_SIZE best_tx = TX_4X4; const vp9_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc.tx_probs); - - for (n = TX_4X4; n <= max_tx_size; n++) { - r[n][1] = r[n][0]; - if (r[n][0] == INT_MAX) - continue; - for (m = 0; m <= n - (n == max_tx_size); m++) { - if (m == n) - r[n][1] += vp9_cost_zero(tx_probs[m]); - else - r[n][1] += vp9_cost_one(tx_probs[m]); - } - } - assert(skip_prob > 0); s0 = vp9_cost_bit(skip_prob, 0); s1 = vp9_cost_bit(skip_prob, 1); for (n = TX_4X4; n <= max_tx_size; n++) { + r[n][1] = r[n][0]; + if (r[n][0] < INT_MAX) { + for (m = 0; m <= n - (n == max_tx_size); m++) { + if (m == n) + r[n][1] += vp9_cost_zero(tx_probs[m]); + else + r[n][1] += vp9_cost_one(tx_probs[m]); + } + } if (d[n] == INT64_MAX) { 
rd[n][0] = rd[n][1] = INT64_MAX; - continue; - } - if (s[n]) { + } else if (s[n]) { rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, d[n]); } else { rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]); rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]); } - } - if (max_tx_size == TX_32X32 && - (cm->tx_mode == ALLOW_32X32 || - (cm->tx_mode == TX_MODE_SELECT && - rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] && - rd[TX_32X32][1] < rd[TX_4X4][1]))) { - mbmi->tx_size = TX_32X32; - } else if (max_tx_size >= TX_16X16 && - (cm->tx_mode == ALLOW_16X16 || - cm->tx_mode == ALLOW_32X32 || - (cm->tx_mode == TX_MODE_SELECT && - rd[TX_16X16][1] < rd[TX_8X8][1] && - rd[TX_16X16][1] < rd[TX_4X4][1]))) { - mbmi->tx_size = TX_16X16; - } else if (cm->tx_mode == ALLOW_8X8 || - cm->tx_mode == ALLOW_16X16 || - cm->tx_mode == ALLOW_32X32 || - (cm->tx_mode == TX_MODE_SELECT && rd[TX_8X8][1] < rd[TX_4X4][1])) { - mbmi->tx_size = TX_8X8; - } else { - mbmi->tx_size = TX_4X4; + if (rd[n][1] < best_rd) { + best_tx = n; + best_rd = rd[n][1]; + } } + mbmi->tx_size = cm->tx_mode == TX_MODE_SELECT ? + best_tx : MIN(max_tx_size, max_mode_tx_size); + *distortion = d[mbmi->tx_size]; *rate = r[mbmi->tx_size][cm->tx_mode == TX_MODE_SELECT]; @@ -805,33 +847,27 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, tx_cache[ALLOW_8X8] = rd[TX_8X8][0]; tx_cache[ALLOW_16X16] = rd[MIN(max_tx_size, TX_16X16)][0]; tx_cache[ALLOW_32X32] = rd[MIN(max_tx_size, TX_32X32)][0]; - if (max_tx_size == TX_32X32 && - rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] && - rd[TX_32X32][1] < rd[TX_4X4][1]) - tx_cache[TX_MODE_SELECT] = rd[TX_32X32][1]; - else if (max_tx_size >= TX_16X16 && - rd[TX_16X16][1] < rd[TX_8X8][1] && rd[TX_16X16][1] < rd[TX_4X4][1]) - tx_cache[TX_MODE_SELECT] = rd[TX_16X16][1]; - else - tx_cache[TX_MODE_SELECT] = rd[TX_4X4][1] < rd[TX_8X8][1] ? 
- rd[TX_4X4][1] : rd[TX_8X8][1]; - if (max_tx_size == TX_32X32 && - rd[TX_32X32][1] < rd[TX_16X16][1] && - rd[TX_32X32][1] < rd[TX_8X8][1] && - rd[TX_32X32][1] < rd[TX_4X4][1]) { + if (max_tx_size == TX_32X32 && best_tx == TX_32X32) { + tx_cache[TX_MODE_SELECT] = rd[TX_32X32][1]; cpi->tx_stepdown_count[0]++; - } else if (max_tx_size >= TX_16X16 && - rd[TX_16X16][1] < rd[TX_8X8][1] && - rd[TX_16X16][1] < rd[TX_4X4][1]) { + } else if (max_tx_size >= TX_16X16 && best_tx == TX_16X16) { + tx_cache[TX_MODE_SELECT] = rd[TX_16X16][1]; cpi->tx_stepdown_count[max_tx_size - TX_16X16]++; } else if (rd[TX_8X8][1] < rd[TX_4X4][1]) { + tx_cache[TX_MODE_SELECT] = rd[TX_8X8][1]; cpi->tx_stepdown_count[max_tx_size - TX_8X8]++; } else { + tx_cache[TX_MODE_SELECT] = rd[TX_4X4][1]; cpi->tx_stepdown_count[max_tx_size - TX_4X4]++; } } +static int64_t scaled_rd_cost(int rdmult, int rddiv, + int rate, int64_t dist, double scale) { + return (int64_t) (RDCOST(rdmult, rddiv, rate, dist) * scale); +} + static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x, int (*r)[2], int *rate, int64_t *d, int64_t *distortion, @@ -842,19 +878,25 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x, VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; - vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd); - int64_t rd[TX_SIZES][2]; + vp9_prob skip_prob = vp9_get_skip_prob(cm, xd); + int64_t rd[TX_SIZES][2] = {{INT64_MAX, INT64_MAX}, + {INT64_MAX, INT64_MAX}, + {INT64_MAX, INT64_MAX}, + {INT64_MAX, INT64_MAX}}; int n, m; int s0, s1; double scale_rd[TX_SIZES] = {1.73, 1.44, 1.20, 1.00}; - // double scale_r[TX_SIZES] = {2.82, 2.00, 1.41, 1.00}; + const TX_SIZE max_mode_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode]; + int64_t best_rd = INT64_MAX; + TX_SIZE best_tx = TX_4X4; const vp9_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc.tx_probs); - - // for (n = TX_4X4; n <= max_txfm_size; n++) - // r[n][0] = (r[n][0] * scale_r[n]); + assert(skip_prob > 0); + s0 = vp9_cost_bit(skip_prob, 0); + s1 = vp9_cost_bit(skip_prob, 1); for (n = TX_4X4; n <= max_tx_size; n++) { + double scale = scale_rd[n]; r[n][1] = r[n][0]; for (m = 0; m <= n - (n == max_tx_size); m++) { if (m == n) @@ -862,62 +904,33 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x, else r[n][1] += vp9_cost_one(tx_probs[m]); } - } - - assert(skip_prob > 0); - s0 = vp9_cost_bit(skip_prob, 0); - s1 = vp9_cost_bit(skip_prob, 1); - - for (n = TX_4X4; n <= max_tx_size; n++) { if (s[n]) { - rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, d[n]); + rd[n][0] = rd[n][1] = scaled_rd_cost(x->rdmult, x->rddiv, s1, d[n], + scale); } else { - rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]); - rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]); + rd[n][0] = scaled_rd_cost(x->rdmult, x->rddiv, r[n][0] + s0, d[n], + scale); + rd[n][1] = scaled_rd_cost(x->rdmult, x->rddiv, r[n][1] + s0, d[n], + scale); + } + if (rd[n][1] < best_rd) { + best_rd = rd[n][1]; + best_tx = n; } - } - for (n = TX_4X4; n <= max_tx_size; n++) { - rd[n][0] = (int64_t)(scale_rd[n] * rd[n][0]); - rd[n][1] = (int64_t)(scale_rd[n] * rd[n][1]); } - if (max_tx_size == TX_32X32 && - (cm->tx_mode == ALLOW_32X32 || - (cm->tx_mode == TX_MODE_SELECT && - rd[TX_32X32][1] <= rd[TX_16X16][1] && - rd[TX_32X32][1] <= rd[TX_8X8][1] && - rd[TX_32X32][1] <= rd[TX_4X4][1]))) { - mbmi->tx_size = TX_32X32; - } else if (max_tx_size >= TX_16X16 && - (cm->tx_mode == ALLOW_16X16 || - 
cm->tx_mode == ALLOW_32X32 || - (cm->tx_mode == TX_MODE_SELECT && - rd[TX_16X16][1] <= rd[TX_8X8][1] && - rd[TX_16X16][1] <= rd[TX_4X4][1]))) { - mbmi->tx_size = TX_16X16; - } else if (cm->tx_mode == ALLOW_8X8 || - cm->tx_mode == ALLOW_16X16 || - cm->tx_mode == ALLOW_32X32 || - (cm->tx_mode == TX_MODE_SELECT && - rd[TX_8X8][1] <= rd[TX_4X4][1])) { - mbmi->tx_size = TX_8X8; - } else { - mbmi->tx_size = TX_4X4; - } + mbmi->tx_size = cm->tx_mode == TX_MODE_SELECT ? + best_tx : MIN(max_tx_size, max_mode_tx_size); // Actually encode using the chosen mode if a model was used, but do not // update the r, d costs - txfm_rd_in_plane(x, &cpi->rdcost_stack, rate, distortion, skip, - &sse[mbmi->tx_size], ref_best_rd, 0, bs, mbmi->tx_size); + txfm_rd_in_plane(x, rate, distortion, skip, + &sse[mbmi->tx_size], ref_best_rd, 0, bs, mbmi->tx_size, + cpi->sf.use_fast_coef_costing); - if (max_tx_size == TX_32X32 && - rd[TX_32X32][1] <= rd[TX_16X16][1] && - rd[TX_32X32][1] <= rd[TX_8X8][1] && - rd[TX_32X32][1] <= rd[TX_4X4][1]) { + if (max_tx_size == TX_32X32 && best_tx == TX_32X32) { cpi->tx_stepdown_count[0]++; - } else if (max_tx_size >= TX_16X16 && - rd[TX_16X16][1] <= rd[TX_8X8][1] && - rd[TX_16X16][1] <= rd[TX_4X4][1]) { + } else if (max_tx_size >= TX_16X16 && best_tx == TX_16X16) { cpi->tx_stepdown_count[max_tx_size - TX_16X16]++; } else if (rd[TX_8X8][1] <= rd[TX_4X4][1]) { cpi->tx_stepdown_count[max_tx_size - TX_8X8]++; @@ -926,25 +939,23 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x, } } -static void super_block_yrd(VP9_COMP *cpi, - MACROBLOCK *x, int *rate, int64_t *distortion, - int *skip, int64_t *psse, BLOCK_SIZE bs, - int64_t txfm_cache[TX_MODES], - int64_t ref_best_rd) { +static void inter_super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, + int64_t *distortion, int *skip, + int64_t *psse, BLOCK_SIZE bs, + int64_t txfm_cache[TX_MODES], + int64_t ref_best_rd) { int r[TX_SIZES][2], s[TX_SIZES]; int64_t d[TX_SIZES], sse[TX_SIZES]; MACROBLOCKD *xd = &x->e_mbd; MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; - struct rdcost_block_args *rdcost_stack = &cpi->rdcost_stack; - const int b_inter_mode = is_inter_block(mbmi); + const TX_SIZE max_tx_size = max_txsize_lookup[bs]; + TX_SIZE tx_size; assert(bs == mbmi->sb_type); - if (b_inter_mode) - vp9_subtract_sby(x, bs); - if (cpi->sf.tx_size_search_method == USE_LARGESTALL || - (cpi->sf.tx_size_search_method != USE_FULL_RD && - !b_inter_mode)) { + vp9_subtract_plane(x, bs, 0); + + if (cpi->sf.tx_size_search_method == USE_LARGESTALL) { vpx_memset(txfm_cache, 0, TX_MODES * sizeof(int64_t)); choose_largest_txfm_size(cpi, x, rate, distortion, skip, sse, ref_best_rd, bs); @@ -953,36 +964,18 @@ static void super_block_yrd(VP9_COMP *cpi, return; } - if (cpi->sf.tx_size_search_method == USE_LARGESTINTRA_MODELINTER && - b_inter_mode) { - if (bs >= BLOCK_32X32) - model_rd_for_sb_y_tx(cpi, bs, TX_32X32, x, xd, - &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32]); - if (bs >= BLOCK_16X16) - model_rd_for_sb_y_tx(cpi, bs, TX_16X16, x, xd, - &r[TX_16X16][0], &d[TX_16X16], &s[TX_16X16]); - - model_rd_for_sb_y_tx(cpi, bs, TX_8X8, x, xd, - &r[TX_8X8][0], &d[TX_8X8], &s[TX_8X8]); - - model_rd_for_sb_y_tx(cpi, bs, TX_4X4, x, xd, - &r[TX_4X4][0], &d[TX_4X4], &s[TX_4X4]); - + if (cpi->sf.tx_size_search_method == USE_LARGESTINTRA_MODELINTER) { + for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) + model_rd_for_sb_y_tx(cpi, bs, tx_size, x, xd, + &r[tx_size][0], &d[tx_size], &s[tx_size]); choose_txfm_size_from_modelrd(cpi, x, r, rate, d, 
distortion, s, skip, sse, ref_best_rd, bs); } else { - if (bs >= BLOCK_32X32) - txfm_rd_in_plane(x, rdcost_stack, &r[TX_32X32][0], &d[TX_32X32], - &s[TX_32X32], &sse[TX_32X32], - ref_best_rd, 0, bs, TX_32X32); - if (bs >= BLOCK_16X16) - txfm_rd_in_plane(x, rdcost_stack, &r[TX_16X16][0], &d[TX_16X16], - &s[TX_16X16], &sse[TX_16X16], - ref_best_rd, 0, bs, TX_16X16); - txfm_rd_in_plane(x, rdcost_stack, &r[TX_8X8][0], &d[TX_8X8], &s[TX_8X8], - &sse[TX_8X8], ref_best_rd, 0, bs, TX_8X8); - txfm_rd_in_plane(x, rdcost_stack, &r[TX_4X4][0], &d[TX_4X4], &s[TX_4X4], - &sse[TX_4X4], ref_best_rd, 0, bs, TX_4X4); + for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) + txfm_rd_in_plane(x, &r[tx_size][0], &d[tx_size], + &s[tx_size], &sse[tx_size], + ref_best_rd, 0, bs, tx_size, + cpi->sf.use_fast_coef_costing); choose_txfm_size_from_rd(cpi, x, r, rate, d, distortion, s, skip, txfm_cache, bs); } @@ -990,6 +983,37 @@ static void super_block_yrd(VP9_COMP *cpi, *psse = sse[mbmi->tx_size]; } +static void intra_super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, + int64_t *distortion, int *skip, + int64_t *psse, BLOCK_SIZE bs, + int64_t txfm_cache[TX_MODES], + int64_t ref_best_rd) { + int64_t sse[TX_SIZES]; + MACROBLOCKD *xd = &x->e_mbd; + MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; + + assert(bs == mbmi->sb_type); + if (cpi->sf.tx_size_search_method != USE_FULL_RD) { + vpx_memset(txfm_cache, 0, TX_MODES * sizeof(int64_t)); + choose_largest_txfm_size(cpi, x, rate, distortion, skip, sse, + ref_best_rd, bs); + } else { + int r[TX_SIZES][2], s[TX_SIZES]; + int64_t d[TX_SIZES]; + TX_SIZE tx_size; + for (tx_size = TX_4X4; tx_size <= max_txsize_lookup[bs]; ++tx_size) + txfm_rd_in_plane(x, &r[tx_size][0], &d[tx_size], + &s[tx_size], &sse[tx_size], + ref_best_rd, 0, bs, tx_size, + cpi->sf.use_fast_coef_costing); + choose_txfm_size_from_rd(cpi, x, r, rate, d, distortion, s, + skip, txfm_cache, bs); + } + if (psse) + *psse = sse[mbmi->tx_size]; +} + + static int conditional_skipintra(MB_PREDICTION_MODE mode, MB_PREDICTION_MODE best_intra_mode) { if (mode == D117_PRED && @@ -1013,26 +1037,23 @@ static int conditional_skipintra(MB_PREDICTION_MODE mode, static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, MB_PREDICTION_MODE *best_mode, - int *bmode_costs, + const int *bmode_costs, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, int *bestrate, int *bestratey, int64_t *bestdistortion, BLOCK_SIZE bsize, int64_t rd_thresh) { MB_PREDICTION_MODE mode; - MACROBLOCKD *xd = &x->e_mbd; + MACROBLOCKD *const xd = &x->e_mbd; int64_t best_rd = rd_thresh; - int rate = 0; - int64_t distortion; + struct macroblock_plane *p = &x->plane[0]; struct macroblockd_plane *pd = &xd->plane[0]; const int src_stride = p->src.stride; const int dst_stride = pd->dst.stride; - uint8_t *src_init = raster_block_offset_uint8(BLOCK_8X8, ib, - p->src.buf, src_stride); - uint8_t *dst_init = raster_block_offset_uint8(BLOCK_8X8, ib, - pd->dst.buf, dst_stride); - int16_t *src_diff, *coeff; - + const uint8_t *src_init = &p->src.buf[raster_block_offset(BLOCK_8X8, ib, + src_stride)]; + uint8_t *dst_init = &pd->dst.buf[raster_block_offset(BLOCK_8X8, ib, + dst_stride)]; ENTROPY_CONTEXT ta[2], tempa[2]; ENTROPY_CONTEXT tl[2], templ[2]; @@ -1050,6 +1071,8 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, for (mode = DC_PRED; mode <= TM_PRED; ++mode) { int64_t this_rd; int ratey = 0; + int64_t distortion = 0; + int rate = bmode_costs[mode]; if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue; @@ 
-1061,56 +1084,52 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, continue; } - rate = bmode_costs[mode]; - distortion = 0; - vpx_memcpy(tempa, ta, sizeof(ta)); vpx_memcpy(templ, tl, sizeof(tl)); for (idy = 0; idy < num_4x4_blocks_high; ++idy) { for (idx = 0; idx < num_4x4_blocks_wide; ++idx) { - int64_t ssz; - const int16_t *scan; - const int16_t *nb; - uint8_t *src = src_init + idx * 4 + idy * 4 * src_stride; - uint8_t *dst = dst_init + idx * 4 + idy * 4 * dst_stride; const int block = ib + idy * 2 + idx; - TX_TYPE tx_type; + const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride]; + uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride]; + int16_t *const src_diff = raster_block_offset_int16(BLOCK_8X8, block, + p->src_diff); + int16_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block); xd->mi_8x8[0]->bmi[block].as_mode = mode; - src_diff = raster_block_offset_int16(BLOCK_8X8, block, p->src_diff); - coeff = BLOCK_OFFSET(x->plane[0].coeff, block); vp9_predict_intra_block(xd, block, 1, TX_4X4, mode, x->skip_encode ? src : dst, x->skip_encode ? src_stride : dst_stride, - dst, dst_stride); - vp9_subtract_block(4, 4, src_diff, 8, - src, src_stride, - dst, dst_stride); - - tx_type = get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC, xd, block); - get_scan_nb_4x4(tx_type, &scan, &nb); - - if (tx_type != DCT_DCT) - vp9_short_fht4x4(src_diff, coeff, 8, tx_type); - else - x->fwd_txm4x4(src_diff, coeff, 8); - - vp9_regular_quantize_b_4x4(x, 4, block, scan, get_iscan_4x4(tx_type)); - - ratey += cost_coeffs(x, 0, block, - tempa + idx, templ + idy, TX_4X4, scan, nb); - distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), - 16, &ssz) >> 2; - if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) - goto next; - - if (tx_type != DCT_DCT) - vp9_iht4x4_16_add(BLOCK_OFFSET(pd->dqcoeff, block), - dst, pd->dst.stride, tx_type); - else - xd->itxm_add(BLOCK_OFFSET(pd->dqcoeff, block), dst, pd->dst.stride, - 16); + dst, dst_stride, idx, idy, 0); + vp9_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride); + + if (xd->lossless) { + const scan_order *so = &vp9_default_scan_orders[TX_4X4]; + vp9_fwht4x4(src_diff, coeff, 8); + vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); + ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, + so->scan, so->neighbors, + cpi->sf.use_fast_coef_costing); + if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) + goto next; + vp9_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst, dst_stride, + p->eobs[block]); + } else { + int64_t unused; + const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block); + const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type]; + vp9_fht4x4(src_diff, coeff, 8, tx_type); + vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); + ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, + so->scan, so->neighbors, + cpi->sf.use_fast_coef_costing); + distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), + 16, &unused) >> 2; + if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) + goto next; + vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block), + dst, dst_stride, p->eobs[block]); + } } } @@ -1143,14 +1162,12 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, return best_rd; } -static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi, - MACROBLOCK * const mb, - int * const rate, - int * const rate_y, - int64_t * const distortion, +static int64_t 
rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb, + int *rate, int *rate_y, + int64_t *distortion, int64_t best_rd) { int i, j; - MACROBLOCKD *const xd = &mb->e_mbd; + const MACROBLOCKD *const xd = &mb->e_mbd; MODE_INFO *const mic = xd->mi_8x8[0]; const MODE_INFO *above_mi = xd->mi_8x8[-xd->mode_info_stride]; const MODE_INFO *left_mi = xd->left_available ? xd->mi_8x8[-1] : NULL; @@ -1163,13 +1180,11 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi, int tot_rate_y = 0; int64_t total_rd = 0; ENTROPY_CONTEXT t_above[4], t_left[4]; - int *bmode_costs; + const int *bmode_costs = mb->mbmode_cost; vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above)); vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left)); - bmode_costs = mb->mbmode_cost; - // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block. for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { @@ -1178,8 +1193,8 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi, int64_t d = INT64_MAX, this_rd = INT64_MAX; i = idy * 2 + idx; if (cpi->common.frame_type == KEY_FRAME) { - const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, i); - const MB_PREDICTION_MODE L = left_block_mode(mic, left_mi, i); + const MB_PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i); + const MB_PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i); bmode_costs = mb->y_mode_costs[A][L]; } @@ -1244,15 +1259,15 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, continue; if (cpi->common.frame_type == KEY_FRAME) { - const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, 0); - const MB_PREDICTION_MODE L = left_block_mode(mic, left_mi, 0); + const MB_PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0); + const MB_PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0); bmode_costs = x->y_mode_costs[A][L]; } mic->mbmi.mode = mode; - super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL, - bsize, local_tx_cache, best_rd); + intra_super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, + &s, NULL, bsize, local_tx_cache, best_rd); if (this_rate_tokenonly == INT_MAX) continue; @@ -1287,7 +1302,7 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, return best_rd; } -static void super_block_uvrd(VP9_COMP *const cpi, MACROBLOCK *x, +static void super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *distortion, int *skippable, int64_t *sse, BLOCK_SIZE bsize, int64_t ref_best_rd) { @@ -1301,8 +1316,11 @@ static void super_block_uvrd(VP9_COMP *const cpi, MACROBLOCK *x, if (ref_best_rd < 0) goto term; - if (is_inter_block(mbmi)) - vp9_subtract_sbuv(x, bsize); + if (is_inter_block(mbmi)) { + int plane; + for (plane = 1; plane < MAX_MB_PLANE; ++plane) + vp9_subtract_plane(x, bsize, plane); + } *rate = 0; *distortion = 0; @@ -1310,8 +1328,9 @@ static void super_block_uvrd(VP9_COMP *const cpi, MACROBLOCK *x, *skippable = 1; for (plane = 1; plane < MAX_MB_PLANE; ++plane) { - txfm_rd_in_plane(x, &cpi->rdcost_stack, &pnrate, &pndist, &pnskip, &pnsse, - ref_best_rd, plane, bsize, uv_txfm_size); + txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse, + ref_best_rd, plane, bsize, uv_txfm_size, + cpi->sf.use_fast_coef_costing); if (pnrate == INT_MAX) goto term; *rate += pnrate; @@ -1333,23 +1352,19 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int *rate, int *rate_tokenonly, int64_t *distortion, int 
*skippable, - BLOCK_SIZE bsize) { + BLOCK_SIZE bsize, TX_SIZE max_tx_size) { + MACROBLOCKD *xd = &x->e_mbd; MB_PREDICTION_MODE mode; MB_PREDICTION_MODE mode_selected = DC_PRED; int64_t best_rd = INT64_MAX, this_rd; int this_rate_tokenonly, this_rate, s; int64_t this_distortion, this_sse; - // int mode_mask = (bsize <= BLOCK_8X8) - // ? ALL_INTRA_MODES : cpi->sf.intra_uv_mode_mask; - - for (mode = DC_PRED; mode <= TM_PRED; mode ++) { - // if (!(mode_mask & (1 << mode))) - if (!(cpi->sf.intra_uv_mode_mask[max_uv_txsize_lookup[bsize]] - & (1 << mode))) + for (mode = DC_PRED; mode <= TM_PRED; ++mode) { + if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue; - x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode; + xd->mi_8x8[0]->mbmi.uv_mode = mode; super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, &this_sse, bsize, best_rd); @@ -1369,12 +1384,12 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x, if (!x->select_txfm_size) { int i; struct macroblock_plane *const p = x->plane; - struct macroblockd_plane *const pd = x->e_mbd.plane; + struct macroblockd_plane *const pd = xd->plane; for (i = 1; i < MAX_MB_PLANE; ++i) { p[i].coeff = ctx->coeff_pbuf[i][2]; - pd[i].qcoeff = ctx->qcoeff_pbuf[i][2]; + p[i].qcoeff = ctx->qcoeff_pbuf[i][2]; pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2]; - pd[i].eobs = ctx->eobs_pbuf[i][2]; + p[i].eobs = ctx->eobs_pbuf[i][2]; ctx->coeff_pbuf[i][2] = ctx->coeff_pbuf[i][0]; ctx->qcoeff_pbuf[i][2] = ctx->qcoeff_pbuf[i][0]; @@ -1382,39 +1397,35 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x, ctx->eobs_pbuf[i][2] = ctx->eobs_pbuf[i][0]; ctx->coeff_pbuf[i][0] = p[i].coeff; - ctx->qcoeff_pbuf[i][0] = pd[i].qcoeff; + ctx->qcoeff_pbuf[i][0] = p[i].qcoeff; ctx->dqcoeff_pbuf[i][0] = pd[i].dqcoeff; - ctx->eobs_pbuf[i][0] = pd[i].eobs; + ctx->eobs_pbuf[i][0] = p[i].eobs; } } } } - x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode_selected; - + xd->mi_8x8[0]->mbmi.uv_mode = mode_selected; return best_rd; } -static int64_t rd_sbuv_dcpred(VP9_COMP *cpi, MACROBLOCK *x, +static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly, int64_t *distortion, int *skippable, BLOCK_SIZE bsize) { - int64_t this_rd; - int64_t this_sse; + const VP9_COMMON *cm = &cpi->common; + int64_t unused; x->e_mbd.mi_8x8[0]->mbmi.uv_mode = DC_PRED; super_block_uvrd(cpi, x, rate_tokenonly, distortion, - skippable, &this_sse, bsize, INT64_MAX); - *rate = *rate_tokenonly + - x->intra_uv_mode_cost[cpi->common.frame_type][DC_PRED]; - this_rd = RDCOST(x->rdmult, x->rddiv, *rate, *distortion); - - return this_rd; + skippable, &unused, bsize, INT64_MAX); + *rate = *rate_tokenonly + x->intra_uv_mode_cost[cm->frame_type][DC_PRED]; + return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); } static void choose_intra_uv_mode(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, - BLOCK_SIZE bsize, int *rate_uv, - int *rate_uv_tokenonly, + BLOCK_SIZE bsize, TX_SIZE max_tx_size, + int *rate_uv, int *rate_uv_tokenonly, int64_t *dist_uv, int *skip_uv, MB_PREDICTION_MODE *mode_uv) { MACROBLOCK *const x = &cpi->mb; @@ -1422,14 +1433,14 @@ static void choose_intra_uv_mode(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, // Use an estimated rd for uv_intra based on DC_PRED if the // appropriate speed flag is set. if (cpi->sf.use_uv_intra_rd_estimate) { - rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv, - bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize); + rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, + skip_uv, bsize < BLOCK_8X8 ? 
BLOCK_8X8 : bsize); // Else do a proper rd search for each possible transform size that may // be considered in the main rd loop. } else { rd_pick_intra_sbuv_mode(cpi, x, ctx, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv, - bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize); + bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size); } *mode_uv = x->e_mbd.mi_8x8[0]->mbmi.uv_mode; } @@ -1437,8 +1448,7 @@ static void choose_intra_uv_mode(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode, int mode_context) { MACROBLOCK *const x = &cpi->mb; - MACROBLOCKD *const xd = &x->e_mbd; - const int segment_id = xd->mi_8x8[0]->mbmi.segment_id; + const int segment_id = x->e_mbd.mi_8x8[0]->mbmi.segment_id; // Don't account for mode here if segment skip is enabled. if (!vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) { @@ -1449,11 +1459,6 @@ static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode, } } -void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) { - x->e_mbd.mi_8x8[0]->mbmi.mode = mb; - x->e_mbd.mi_8x8[0]->mbmi.mv[0].as_int = mv->as_int; -} - static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int_mv *frame_mv, @@ -1461,79 +1466,66 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, int_mv single_newmv[MAX_REF_FRAMES], int *rate_mv); -static int labels2mode(MACROBLOCK *x, int i, - MB_PREDICTION_MODE this_mode, - int_mv *this_mv, int_mv *this_second_mv, +static int labels2mode(VP9_COMP *cpi, MACROBLOCKD *xd, int i, + MB_PREDICTION_MODE mode, + int_mv this_mv[2], int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int_mv seg_mvs[MAX_REF_FRAMES], - int_mv *best_ref_mv, - int_mv *second_best_ref_mv, - int *mvjcost, int *mvcost[2], VP9_COMP *cpi) { - MACROBLOCKD *const xd = &x->e_mbd; + int_mv *best_ref_mv[2], + const int *mvjcost, int *mvcost[2]) { MODE_INFO *const mic = xd->mi_8x8[0]; - MB_MODE_INFO *mbmi = &mic->mbmi; - int cost = 0, thismvcost = 0; + const MB_MODE_INFO *const mbmi = &mic->mbmi; + int thismvcost = 0; int idx, idy; const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type]; const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type]; - const int has_second_rf = has_second_ref(mbmi); - - /* We have to be careful retrieving previously-encoded motion vectors. - Ones from this macroblock have to be pulled from the BLOCKD array - as they have not yet made it to the bmi array in our MB_MODE_INFO. 
*/ - MB_PREDICTION_MODE m; + const int is_compound = has_second_ref(mbmi); // the only time we should do costing for new motion vector or mode // is when we are on a new label (jbb May 08, 2007) - switch (m = this_mode) { + switch (mode) { case NEWMV: - this_mv->as_int = seg_mvs[mbmi->ref_frame[0]].as_int; - thismvcost = vp9_mv_bit_cost(&this_mv->as_mv, &best_ref_mv->as_mv, + this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int; + thismvcost += vp9_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv, mvjcost, mvcost, MV_COST_WEIGHT_SUB); - if (has_second_rf) { - this_second_mv->as_int = seg_mvs[mbmi->ref_frame[1]].as_int; - thismvcost += vp9_mv_bit_cost(&this_second_mv->as_mv, - &second_best_ref_mv->as_mv, + if (is_compound) { + this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int; + thismvcost += vp9_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost, mvcost, MV_COST_WEIGHT_SUB); } break; case NEARESTMV: - this_mv->as_int = frame_mv[NEARESTMV][mbmi->ref_frame[0]].as_int; - if (has_second_rf) - this_second_mv->as_int = - frame_mv[NEARESTMV][mbmi->ref_frame[1]].as_int; + this_mv[0].as_int = frame_mv[NEARESTMV][mbmi->ref_frame[0]].as_int; + if (is_compound) + this_mv[1].as_int = frame_mv[NEARESTMV][mbmi->ref_frame[1]].as_int; break; case NEARMV: - this_mv->as_int = frame_mv[NEARMV][mbmi->ref_frame[0]].as_int; - if (has_second_rf) - this_second_mv->as_int = - frame_mv[NEARMV][mbmi->ref_frame[1]].as_int; + this_mv[0].as_int = frame_mv[NEARMV][mbmi->ref_frame[0]].as_int; + if (is_compound) + this_mv[1].as_int = frame_mv[NEARMV][mbmi->ref_frame[1]].as_int; break; case ZEROMV: - this_mv->as_int = 0; - if (has_second_rf) - this_second_mv->as_int = 0; + this_mv[0].as_int = 0; + if (is_compound) + this_mv[1].as_int = 0; break; default: break; } - cost = cost_mv_ref(cpi, this_mode, - mbmi->mode_context[mbmi->ref_frame[0]]); - - mic->bmi[i].as_mv[0].as_int = this_mv->as_int; - if (has_second_rf) - mic->bmi[i].as_mv[1].as_int = this_second_mv->as_int; + mic->bmi[i].as_mv[0].as_int = this_mv[0].as_int; + if (is_compound) + mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int; - mic->bmi[i].as_mode = m; + mic->bmi[i].as_mode = mode; for (idy = 0; idy < num_4x4_blocks_high; ++idy) for (idx = 0; idx < num_4x4_blocks_wide; ++idx) vpx_memcpy(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i])); - cost += thismvcost; - return cost; + return cost_mv_ref(cpi, mode, mbmi->mode_context[mbmi->ref_frame[0]]) + + thismvcost; } static int64_t encode_inter_mb_segment(VP9_COMP *cpi, @@ -1543,32 +1535,36 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi, int *labelyrate, int64_t *distortion, int64_t *sse, ENTROPY_CONTEXT *ta, - ENTROPY_CONTEXT *tl) { + ENTROPY_CONTEXT *tl, + int mi_row, int mi_col) { int k; MACROBLOCKD *xd = &x->e_mbd; struct macroblockd_plane *const pd = &xd->plane[0]; struct macroblock_plane *const p = &x->plane[0]; MODE_INFO *const mi = xd->mi_8x8[0]; - const BLOCK_SIZE bsize = mi->mbmi.sb_type; - const int width = plane_block_width(bsize, pd); - const int height = plane_block_height(bsize, pd); + const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd); + const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; + const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize]; int idx, idy; - uint8_t *const src = raster_block_offset_uint8(BLOCK_8X8, i, - p->src.buf, p->src.stride); - uint8_t *const dst = raster_block_offset_uint8(BLOCK_8X8, i, - pd->dst.buf, pd->dst.stride); + const uint8_t *const src = &p->src.buf[raster_block_offset(BLOCK_8X8, 
i, + p->src.stride)]; + uint8_t *const dst = &pd->dst.buf[raster_block_offset(BLOCK_8X8, i, + pd->dst.stride)]; int64_t thisdistortion = 0, thissse = 0; int thisrate = 0, ref; + const scan_order *so = &vp9_default_scan_orders[TX_4X4]; const int is_compound = has_second_ref(&mi->mbmi); for (ref = 0; ref < 1 + is_compound; ++ref) { - const uint8_t *pre = raster_block_offset_uint8(BLOCK_8X8, i, - pd->pre[ref].buf, pd->pre[ref].stride); + const uint8_t *pre = &pd->pre[ref].buf[raster_block_offset(BLOCK_8X8, i, + pd->pre[ref].stride)]; vp9_build_inter_predictor(pre, pd->pre[ref].stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv, - &xd->scale_factor[ref], - width, height, ref, &xd->subpix, MV_PRECISION_Q3); + &xd->block_refs[ref]->sf, width, height, ref, + xd->interp_kernel, MV_PRECISION_Q3, + mi_col * MI_SIZE + 4 * (i % 2), + mi_row * MI_SIZE + 4 * (i / 2)); } vp9_subtract_block(height, width, @@ -1586,16 +1582,13 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi, coeff = BLOCK_OFFSET(p->coeff, k); x->fwd_txm4x4(raster_block_offset_int16(BLOCK_8X8, k, p->src_diff), coeff, 8); - vp9_regular_quantize_b_4x4(x, 4, k, get_scan_4x4(DCT_DCT), - get_iscan_4x4(DCT_DCT)); + vp9_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan); thisdistortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz); thissse += ssz; - thisrate += cost_coeffs(x, 0, k, - ta + (k & 1), - tl + (k >> 1), TX_4X4, - vp9_default_scan_4x4, - vp9_default_scan_4x4_neighbors); + thisrate += cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4, + so->scan, so->neighbors, + cpi->sf.use_fast_coef_costing); rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2); rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2); rd = MIN(rd1, rd2); @@ -1624,7 +1617,7 @@ typedef struct { } SEG_RDSTAT; typedef struct { - int_mv *ref_mv, *second_ref_mv; + int_mv *ref_mv[2]; int_mv mvp; int64_t segment_rd; @@ -1637,13 +1630,11 @@ typedef struct { int mvthresh; } BEST_SEG_INFO; -static INLINE int mv_check_bounds(MACROBLOCK *x, int_mv *mv) { - int r = 0; - r |= (mv->as_mv.row >> 3) < x->mv_row_min; - r |= (mv->as_mv.row >> 3) > x->mv_row_max; - r |= (mv->as_mv.col >> 3) < x->mv_col_min; - r |= (mv->as_mv.col >> 3) > x->mv_col_max; - return r; +static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) { + return (mv->row >> 3) < x->mv_row_min || + (mv->row >> 3) > x->mv_row_max || + (mv->col >> 3) < x->mv_col_min || + (mv->col >> 3) > x->mv_col_max; } static INLINE void mi_buf_shift(MACROBLOCK *x, int i) { @@ -1651,14 +1642,13 @@ static INLINE void mi_buf_shift(MACROBLOCK *x, int i) { struct macroblock_plane *const p = &x->plane[0]; struct macroblockd_plane *const pd = &x->e_mbd.plane[0]; - p->src.buf = raster_block_offset_uint8(BLOCK_8X8, i, p->src.buf, - p->src.stride); + p->src.buf = &p->src.buf[raster_block_offset(BLOCK_8X8, i, p->src.stride)]; assert(((intptr_t)pd->pre[0].buf & 0x7) == 0); - pd->pre[0].buf = raster_block_offset_uint8(BLOCK_8X8, i, pd->pre[0].buf, - pd->pre[0].stride); + pd->pre[0].buf = &pd->pre[0].buf[raster_block_offset(BLOCK_8X8, i, + pd->pre[0].stride)]; if (has_second_ref(mbmi)) - pd->pre[1].buf = raster_block_offset_uint8(BLOCK_8X8, i, pd->pre[1].buf, - pd->pre[1].stride); + pd->pre[1].buf = &pd->pre[1].buf[raster_block_offset(BLOCK_8X8, i, + pd->pre[1].stride)]; } static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src, @@ -1670,17 +1660,24 @@ static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src, x->e_mbd.plane[0].pre[1] = orig_pre[1]; } +static 
INLINE int mv_has_subpel(const MV *mv) { + return (mv->row & 0x0F) || (mv->col & 0x0F); +} + static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, const TileInfo *const tile, BEST_SEG_INFO *bsi_buf, int filter_idx, int_mv seg_mvs[4][MAX_REF_FRAMES], int mi_row, int mi_col) { - int i, br = 0, idx, idy; + int k, br = 0, idx, idy; int64_t bd = 0, block_sse = 0; MB_PREDICTION_MODE this_mode; - MODE_INFO *mi = x->e_mbd.mi_8x8[0]; + MACROBLOCKD *xd = &x->e_mbd; + VP9_COMMON *cm = &cpi->common; + MODE_INFO *mi = xd->mi_8x8[0]; MB_MODE_INFO *const mbmi = &mi->mbmi; - struct macroblockd_plane *const pd = &x->e_mbd.plane[0]; + struct macroblock_plane *const p = &x->plane[0]; + struct macroblockd_plane *const pd = &xd->plane[0]; const int label_count = 4; int64_t this_segment_rd = 0; int label_mv_thresh; @@ -1688,18 +1685,17 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, const BLOCK_SIZE bsize = mbmi->sb_type; const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; - vp9_variance_fn_ptr_t *v_fn_ptr; + vp9_variance_fn_ptr_t *v_fn_ptr = &cpi->fn_ptr[bsize]; ENTROPY_CONTEXT t_above[2], t_left[2]; BEST_SEG_INFO *bsi = bsi_buf + filter_idx; int mode_idx; int subpelmv = 1, have_ref = 0; const int has_second_rf = has_second_ref(mbmi); + const int disable_inter_mode_mask = cpi->sf.disable_inter_mode_mask[bsize]; vpx_memcpy(t_above, pd->above_context, sizeof(t_above)); vpx_memcpy(t_left, pd->left_context, sizeof(t_left)); - v_fn_ptr = &cpi->fn_ptr[bsize]; - // 64 makes this threshold really big effectively // making it so that we very rarely check mvs on // segments. setting this to 1 would make mv thresh @@ -1711,24 +1707,21 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { // TODO(jingning,rbultje): rewrite the rate-distortion optimization // loop for 4x4/4x8/8x4 block coding. 
to be replaced with new rd loop - int_mv mode_mv[MB_MODE_COUNT], second_mode_mv[MB_MODE_COUNT]; + int_mv mode_mv[MB_MODE_COUNT][2]; int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES]; MB_PREDICTION_MODE mode_selected = ZEROMV; int64_t best_rd = INT64_MAX; - i = idy * 2 + idx; - - frame_mv[ZEROMV][mbmi->ref_frame[0]].as_int = 0; - vp9_append_sub8x8_mvs_for_idx(&cpi->common, &x->e_mbd, tile, - &frame_mv[NEARESTMV][mbmi->ref_frame[0]], - &frame_mv[NEARMV][mbmi->ref_frame[0]], - i, 0, mi_row, mi_col); - if (has_second_rf) { - frame_mv[ZEROMV][mbmi->ref_frame[1]].as_int = 0; - vp9_append_sub8x8_mvs_for_idx(&cpi->common, &x->e_mbd, tile, - &frame_mv[NEARESTMV][mbmi->ref_frame[1]], - &frame_mv[NEARMV][mbmi->ref_frame[1]], - i, 1, mi_row, mi_col); + const int i = idy * 2 + idx; + int ref; + + for (ref = 0; ref < 1 + has_second_rf; ++ref) { + const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref]; + frame_mv[ZEROMV][frame].as_int = 0; + vp9_append_sub8x8_mvs_for_idx(cm, xd, tile, i, ref, mi_row, mi_col, + &frame_mv[NEARESTMV][frame], + &frame_mv[NEARMV][frame]); } + // search for the best motion vector on this segment for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) { const struct buf_2d orig_src = x->plane[0].src; @@ -1736,9 +1729,12 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, mode_idx = INTER_OFFSET(this_mode); bsi->rdstat[i][mode_idx].brdcost = INT64_MAX; + if (disable_inter_mode_mask & (1 << mode_idx)) + continue; // if we're near/nearest and mv == 0,0, compare to zeromv - if ((this_mode == NEARMV || this_mode == NEARESTMV || + if (!(disable_inter_mode_mask & (1 << INTER_OFFSET(ZEROMV))) && + (this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) && frame_mv[this_mode][mbmi->ref_frame[0]].as_int == 0 && (!has_second_rf || @@ -1783,11 +1779,12 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, // motion search for newmv (single predictor case only) if (!has_second_rf && this_mode == NEWMV && seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV) { + int_mv *const new_mv = &mode_mv[NEWMV][0]; int step_param = 0; int further_steps; int thissme, bestsme = INT_MAX; int sadpb = x->sadperbit4; - int_mv mvp_full; + MV mvp_full; int max_mv; /* Is the best so far sufficiently good that we cant justify doing @@ -1795,7 +1792,8 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, if (best_rd < label_mv_thresh) break; - if (cpi->compressor_speed) { + if (cpi->oxcf.mode != MODE_SECONDPASS_BEST && + cpi->oxcf.mode != MODE_BESTQUALITY) { // use previous block's result as next block's MV predictor. if (i > 0) { bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int; @@ -1808,7 +1806,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, else max_mv = MAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3; - if (cpi->sf.auto_mv_step_size && cpi->common.show_frame) { + if (cpi->sf.auto_mv_step_size && cm->show_frame) { // Take wtd average of the step_params based on the last frame's // max mv magnitude and the best ref mvs of the current block for // the given reference. 
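The "wtd average" in the comment above is a plain mean of two search-range estimates. A minimal sketch, assuming vp9_init_search_range() maps a maximum MV magnitude to a first-step size for the diamond search (the helper body below is illustrative, not the libvpx implementation):

/* Illustrative stand-in for vp9_init_search_range(): larger observed
 * motion yields a smaller step_param, i.e. a larger first search step.
 * The cap of 7 assumes a MAX_MVSEARCH_STEPS-style pyramid of 8 levels. */
static int init_search_range_sketch(int max_mv_magnitude) {
  int lvl = 0;
  while ((max_mv_magnitude >> lvl) > 1 && lvl < 7) ++lvl;  /* coarse log2 */
  return 7 - lvl;
}

/* The auto_mv_step_size branch above: blend the per-block estimate
 * (driven by this block's best ref MVs / max_mv_context) with the
 * frame-level cpi->mv_step_param. */
static int pick_step_param(int block_max_mv_context, int frame_step_param) {
  return (init_search_range_sketch(block_max_mv_context) +
          frame_step_param) >> 1;
}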
@@ -1818,81 +1816,99 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, step_param = cpi->mv_step_param; } - mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3; - mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3; + mvp_full.row = bsi->mvp.as_mv.row >> 3; + mvp_full.col = bsi->mvp.as_mv.col >> 3; - if (cpi->sf.adaptive_motion_search && cpi->common.show_frame) { - mvp_full.as_mv.row = x->pred_mv[mbmi->ref_frame[0]].as_mv.row >> 3; - mvp_full.as_mv.col = x->pred_mv[mbmi->ref_frame[0]].as_mv.col >> 3; + if (cpi->sf.adaptive_motion_search && cm->show_frame) { + mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].as_mv.row >> 3; + mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].as_mv.col >> 3; step_param = MAX(step_param, 8); } further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; // adjust src pointer for this block mi_buf_shift(x, i); + + vp9_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv); + if (cpi->sf.search_method == HEX) { - bestsme = vp9_hex_search(x, &mvp_full.as_mv, + bestsme = vp9_hex_search(x, &mvp_full, step_param, sadpb, 1, v_fn_ptr, 1, - &bsi->ref_mv->as_mv, - &mode_mv[NEWMV].as_mv); + &bsi->ref_mv[0]->as_mv, + &new_mv->as_mv); + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_var(x, &new_mv->as_mv, + &bsi->ref_mv[0]->as_mv, + v_fn_ptr, 1); } else if (cpi->sf.search_method == SQUARE) { - bestsme = vp9_square_search(x, &mvp_full.as_mv, + bestsme = vp9_square_search(x, &mvp_full, step_param, sadpb, 1, v_fn_ptr, 1, - &bsi->ref_mv->as_mv, - &mode_mv[NEWMV].as_mv); + &bsi->ref_mv[0]->as_mv, + &new_mv->as_mv); + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_var(x, &new_mv->as_mv, + &bsi->ref_mv[0]->as_mv, + v_fn_ptr, 1); } else if (cpi->sf.search_method == BIGDIA) { - bestsme = vp9_bigdia_search(x, &mvp_full.as_mv, + bestsme = vp9_bigdia_search(x, &mvp_full, step_param, sadpb, 1, v_fn_ptr, 1, - &bsi->ref_mv->as_mv, - &mode_mv[NEWMV].as_mv); + &bsi->ref_mv[0]->as_mv, + &new_mv->as_mv); + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_var(x, &new_mv->as_mv, + &bsi->ref_mv[0]->as_mv, + v_fn_ptr, 1); } else { bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param, sadpb, further_steps, 0, v_fn_ptr, - bsi->ref_mv, &mode_mv[NEWMV]); + &bsi->ref_mv[0]->as_mv, + &new_mv->as_mv); } // Should we do a full search (best quality only) - if (cpi->compressor_speed == 0) { + if (cpi->oxcf.mode == MODE_BESTQUALITY || + cpi->oxcf.mode == MODE_SECONDPASS_BEST) { + int_mv *const best_mv = &mi->bmi[i].as_mv[0]; /* Check if mvp_full is within the range. 
*/ - clamp_mv(&mvp_full.as_mv, x->mv_col_min, x->mv_col_max, + clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - thissme = cpi->full_search_sad(x, &mvp_full, sadpb, 16, v_fn_ptr, x->nmvjointcost, x->mvcost, - bsi->ref_mv, i); - + &bsi->ref_mv[0]->as_mv, + &best_mv->as_mv); if (thissme < bestsme) { bestsme = thissme; - mode_mv[NEWMV].as_int = mi->bmi[i].as_mv[0].as_int; + new_mv->as_int = best_mv->as_int; } else { - /* The full search result is actually worse so re-instate the - * previous best vector */ - mi->bmi[i].as_mv[0].as_int = mode_mv[NEWMV].as_int; + // The full search result is actually worse so re-instate the + // previous best vector + best_mv->as_int = new_mv->as_int; } } if (bestsme < INT_MAX) { int distortion; - unsigned int sse; cpi->find_fractional_mv_step(x, - &mode_mv[NEWMV].as_mv, - &bsi->ref_mv->as_mv, - cpi->common.allow_high_precision_mv, + &new_mv->as_mv, + &bsi->ref_mv[0]->as_mv, + cm->allow_high_precision_mv, x->errorperbit, v_fn_ptr, - 0, cpi->sf.subpel_iters_per_step, + cpi->sf.subpel_force_stop, + cpi->sf.subpel_iters_per_step, x->nmvjointcost, x->mvcost, - &distortion, &sse); + &distortion, + &x->pred_sse[mbmi->ref_frame[0]]); // save motion search result for use in compound prediction - seg_mvs[i][mbmi->ref_frame[0]].as_int = mode_mv[NEWMV].as_int; + seg_mvs[i][mbmi->ref_frame[0]].as_int = new_mv->as_int; } if (cpi->sf.adaptive_motion_search) - x->pred_mv[mbmi->ref_frame[0]].as_int = mode_mv[NEWMV].as_int; + x->pred_mv[mbmi->ref_frame[0]].as_int = new_mv->as_int; // restore src pointers mi_buf_restore(x, orig_src, orig_pre); @@ -1923,58 +1939,43 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, } bsi->rdstat[i][mode_idx].brate = - labels2mode(x, i, this_mode, &mode_mv[this_mode], - &second_mode_mv[this_mode], frame_mv, seg_mvs[i], - bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost, - x->mvcost, cpi); - - - bsi->rdstat[i][mode_idx].mvs[0].as_int = mode_mv[this_mode].as_int; - if (num_4x4_blocks_wide > 1) - bsi->rdstat[i + 1][mode_idx].mvs[0].as_int = - mode_mv[this_mode].as_int; - if (num_4x4_blocks_high > 1) - bsi->rdstat[i + 2][mode_idx].mvs[0].as_int = - mode_mv[this_mode].as_int; - if (has_second_rf) { - bsi->rdstat[i][mode_idx].mvs[1].as_int = - second_mode_mv[this_mode].as_int; + labels2mode(cpi, xd, i, this_mode, mode_mv[this_mode], frame_mv, + seg_mvs[i], bsi->ref_mv, x->nmvjointcost, x->mvcost); + + for (ref = 0; ref < 1 + has_second_rf; ++ref) { + bsi->rdstat[i][mode_idx].mvs[ref].as_int = + mode_mv[this_mode][ref].as_int; if (num_4x4_blocks_wide > 1) - bsi->rdstat[i + 1][mode_idx].mvs[1].as_int = - second_mode_mv[this_mode].as_int; + bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int = + mode_mv[this_mode][ref].as_int; if (num_4x4_blocks_high > 1) - bsi->rdstat[i + 2][mode_idx].mvs[1].as_int = - second_mode_mv[this_mode].as_int; + bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int = + mode_mv[this_mode][ref].as_int; } // Trap vectors that reach beyond the UMV borders - if (mv_check_bounds(x, &mode_mv[this_mode])) - continue; - if (has_second_rf && - mv_check_bounds(x, &second_mode_mv[this_mode])) + if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) || + (has_second_rf && + mv_check_bounds(x, &mode_mv[this_mode][1].as_mv))) continue; if (filter_idx > 0) { BEST_SEG_INFO *ref_bsi = bsi_buf; - subpelmv = (mode_mv[this_mode].as_mv.row & 0x0f) || - (mode_mv[this_mode].as_mv.col & 0x0f); - have_ref = mode_mv[this_mode].as_int == - ref_bsi->rdstat[i][mode_idx].mvs[0].as_int; - if (has_second_rf) { - subpelmv |= 
(second_mode_mv[this_mode].as_mv.row & 0x0f) || - (second_mode_mv[this_mode].as_mv.col & 0x0f); - have_ref &= second_mode_mv[this_mode].as_int == - ref_bsi->rdstat[i][mode_idx].mvs[1].as_int; + subpelmv = 0; + have_ref = 1; + + for (ref = 0; ref < 1 + has_second_rf; ++ref) { + subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv); + have_ref &= mode_mv[this_mode][ref].as_int == + ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int; } if (filter_idx > 1 && !subpelmv && !have_ref) { ref_bsi = bsi_buf + 1; - have_ref = mode_mv[this_mode].as_int == - ref_bsi->rdstat[i][mode_idx].mvs[0].as_int; - if (has_second_rf) { - have_ref &= second_mode_mv[this_mode].as_int == - ref_bsi->rdstat[i][mode_idx].mvs[1].as_int; - } + have_ref = 1; + for (ref = 0; ref < 1 + has_second_rf; ++ref) + have_ref &= mode_mv[this_mode][ref].as_int == + ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int; } if (!subpelmv && have_ref && @@ -2003,16 +2004,17 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, &bsi->rdstat[i][mode_idx].bdist, &bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta, - bsi->rdstat[i][mode_idx].tl); + bsi->rdstat[i][mode_idx].tl, + mi_row, mi_col); if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) { bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0); bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate; - bsi->rdstat[i][mode_idx].eobs = pd->eobs[i]; + bsi->rdstat[i][mode_idx].eobs = p->eobs[i]; if (num_4x4_blocks_wide > 1) - bsi->rdstat[i + 1][mode_idx].eobs = pd->eobs[i + 1]; + bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1]; if (num_4x4_blocks_high > 1) - bsi->rdstat[i + 2][mode_idx].eobs = pd->eobs[i + 2]; + bsi->rdstat[i + 2][mode_idx].eobs = p->eobs[i + 2]; } if (bsi->rdstat[i][mode_idx].brdcost < best_rd) { @@ -2034,10 +2036,9 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, vpx_memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above)); vpx_memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left)); - labels2mode(x, i, mode_selected, &mode_mv[mode_selected], - &second_mode_mv[mode_selected], frame_mv, seg_mvs[i], - bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost, - x->mvcost, cpi); + labels2mode(cpi, xd, i, mode_selected, mode_mv[mode_selected], + frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost, + x->mvcost); br += bsi->rdstat[i][mode_idx].brate; bd += bsi->rdstat[i][mode_idx].bdist; @@ -2063,8 +2064,8 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, bsi->sse = block_sse; // update the coding decisions - for (i = 0; i < 4; ++i) - bsi->modes[i] = mi->bmi[i].as_mode; + for (k = 0; k < 4; ++k) + bsi->modes[k] = mi->bmi[k].as_mode; } static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x, @@ -2091,8 +2092,8 @@ static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x, vp9_zero(*bsi); bsi->segment_rd = best_rd; - bsi->ref_mv = best_ref_mv; - bsi->second_ref_mv = second_best_ref_mv; + bsi->ref_mv[0] = best_ref_mv; + bsi->ref_mv[1] = second_best_ref_mv; bsi->mvp.as_int = best_ref_mv->as_int; bsi->mvthresh = mvthresh; @@ -2110,7 +2111,7 @@ static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x, mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int; if (has_second_ref(mbmi)) mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int; - xd->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs; + x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs; mi->bmi[i].as_mode = bsi->modes[i]; } @@ -2120,7 +2121,7 @@ 
static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x, *returntotrate = bsi->r; *returndistortion = bsi->d; *returnyrate = bsi->segment_yrate; - *skippable = vp9_is_skippable_in_plane(&x->e_mbd, BLOCK_8X8, 0); + *skippable = vp9_is_skippable_in_plane(x, BLOCK_8X8, 0); *psse = bsi->sse; mbmi->mode = bsi->modes[3]; @@ -2138,7 +2139,7 @@ static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x, int best_index = 0; int best_sad = INT_MAX; int this_sad = INT_MAX; - unsigned int max_mv = 0; + int max_mv = 0; uint8_t *src_y_ptr = x->plane[0].src.buf; uint8_t *ref_y_ptr; @@ -2148,16 +2149,22 @@ static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x, cpi->common.show_frame && block_size < cpi->sf.max_partition_size); + int_mv pred_mv[3]; + pred_mv[0] = mbmi->ref_mvs[ref_frame][0]; + pred_mv[1] = mbmi->ref_mvs[ref_frame][1]; + pred_mv[2] = x->pred_mv[ref_frame]; + // Get the sad for each candidate reference mv for (i = 0; i < num_mv_refs; i++) { - this_mv.as_int = (i < MAX_MV_REF_CANDIDATES) ? - mbmi->ref_mvs[ref_frame][i].as_int : x->pred_mv[ref_frame].as_int; + this_mv.as_int = pred_mv[i].as_int; max_mv = MAX(max_mv, MAX(abs(this_mv.as_mv.row), abs(this_mv.as_mv.col)) >> 3); - // The list is at an end if we see 0 for a second time. - if (!this_mv.as_int && zero_seen) - break; + // only need to check zero mv once + if (!this_mv.as_int && zero_seen) { + x->mode_sad[ref_frame][i] = x->mode_sad[ref_frame][INTER_OFFSET(ZEROMV)]; + continue; + } zero_seen = zero_seen || !this_mv.as_int; row_offset = this_mv.as_mv.row >> 3; @@ -2168,6 +2175,9 @@ static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x, this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride, ref_y_ptr, ref_y_stride, 0x7fffffff); + x->mode_sad[ref_frame][i] = this_sad; + if (this_mv.as_int == 0) + x->mode_sad[ref_frame][INTER_OFFSET(ZEROMV)] = this_sad; // Note if it is the best so far. if (this_sad < best_sad) { @@ -2176,9 +2186,16 @@ static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x, } } + if (!zero_seen) + x->mode_sad[ref_frame][INTER_OFFSET(ZEROMV)] = + cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride, + ref_y_buffer, ref_y_stride, + 0x7fffffff); + // Note the index of the mv that worked best in the reference list. 
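The candidate loop above reduces to a SAD scan over the predicted MVs, with the zero vector measured at most once and its score reused, mirroring the zero_seen logic. A self-contained sketch with simplified types; sad() stands in for cpi->fn_ptr[block_size].sdf and is not the libvpx function:

#include <limits.h>

typedef struct { short row, col; } mv_t;

/* Placeholder for the block SAD function (cpi->fn_ptr[...].sdf). */
static int sad(const unsigned char *src, int src_stride,
               const unsigned char *ref, int ref_stride) {
  (void)src; (void)src_stride; (void)ref; (void)ref_stride;
  return 0;  /* real code sums |src - ref| over the block */
}

/* Score each candidate MV by the SAD at the position it predicts and
 * return the index of the best one, which seeds the motion search. */
static int best_pred_mv(const mv_t *cand, int n,
                        const unsigned char *src, int src_stride,
                        const unsigned char *ref, int ref_stride) {
  int i, zero_seen = 0, best = 0, best_sad = INT_MAX, zero_sad = INT_MAX;
  for (i = 0; i < n; ++i) {
    const int is_zero = cand[i].row == 0 && cand[i].col == 0;
    int s;
    if (is_zero && zero_seen) {
      s = zero_sad;                    /* reuse, don't re-measure */
    } else {
      const int ro = cand[i].row >> 3; /* MVs are in 1/8-pel units */
      const int co = cand[i].col >> 3;
      s = sad(src, src_stride, ref + ro * ref_stride + co, ref_stride);
      if (is_zero) { zero_seen = 1; zero_sad = s; }
    }
    if (s < best_sad) { best_sad = s; best = i; }
  }
  return best;
}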
x->mv_best_ref_index[ref_frame] = best_index; x->max_mv_context[ref_frame] = max_mv; + x->pred_mv_sad[ref_frame] = best_sad; } static void estimate_ref_frame_costs(VP9_COMP *cpi, int segment_id, @@ -2194,11 +2211,11 @@ static void estimate_ref_frame_costs(VP9_COMP *cpi, int segment_id, vpx_memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp)); *comp_mode_p = 128; } else { - vp9_prob intra_inter_p = vp9_get_pred_prob_intra_inter(cm, xd); + vp9_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd); vp9_prob comp_inter_p = 128; - if (cm->comp_pred_mode == HYBRID_PREDICTION) { - comp_inter_p = vp9_get_pred_prob_comp_inter_inter(cm, xd); + if (cm->reference_mode == REFERENCE_MODE_SELECT) { + comp_inter_p = vp9_get_reference_mode_prob(cm, xd); *comp_mode_p = comp_inter_p; } else { *comp_mode_p = 128; @@ -2206,12 +2223,12 @@ static void estimate_ref_frame_costs(VP9_COMP *cpi, int segment_id, ref_costs_single[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0); - if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) { + if (cm->reference_mode != COMPOUND_REFERENCE) { vp9_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd); vp9_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd); unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1); - if (cm->comp_pred_mode == HYBRID_PREDICTION) + if (cm->reference_mode == REFERENCE_MODE_SELECT) base_cost += vp9_cost_bit(comp_inter_p, 0); ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] = @@ -2226,11 +2243,11 @@ static void estimate_ref_frame_costs(VP9_COMP *cpi, int segment_id, ref_costs_single[GOLDEN_FRAME] = 512; ref_costs_single[ALTREF_FRAME] = 512; } - if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) { + if (cm->reference_mode != SINGLE_REFERENCE) { vp9_prob ref_comp_p = vp9_get_pred_prob_comp_ref_p(cm, xd); unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1); - if (cm->comp_pred_mode == HYBRID_PREDICTION) + if (cm->reference_mode == REFERENCE_MODE_SELECT) base_cost += vp9_cost_bit(comp_inter_p, 1); ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0); @@ -2246,7 +2263,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index, int_mv *ref_mv, int_mv *second_ref_mv, - int64_t comp_pred_diff[NB_PREDICTION_TYPES], + int64_t comp_pred_diff[REFERENCE_MODES], int64_t tx_size_diff[TX_MODES], int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS]) { MACROBLOCKD *const xd = &x->e_mbd; @@ -2257,12 +2274,12 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, ctx->best_mode_index = mode_index; ctx->mic = *xd->mi_8x8[0]; - ctx->best_ref_mv.as_int = ref_mv->as_int; - ctx->second_best_ref_mv.as_int = second_ref_mv->as_int; + ctx->best_ref_mv[0].as_int = ref_mv->as_int; + ctx->best_ref_mv[1].as_int = second_ref_mv->as_int; - ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_PREDICTION_ONLY]; - ctx->comp_pred_diff = (int)comp_pred_diff[COMP_PREDICTION_ONLY]; - ctx->hybrid_pred_diff = (int)comp_pred_diff[HYBRID_PREDICTION]; + ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE]; + ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE]; + ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT]; vpx_memcpy(ctx->tx_rd_diff, tx_size_diff, sizeof(ctx->tx_rd_diff)); vpx_memcpy(ctx->best_filter_diff, best_filter_diff, @@ -2295,58 +2312,48 @@ static void setup_pred_block(const MACROBLOCKD *xd, } } -static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x, - const TileInfo *const tile, - int idx, MV_REFERENCE_FRAME frame_type, - BLOCK_SIZE 
block_size, - int mi_row, int mi_col, - int_mv frame_nearest_mv[MAX_REF_FRAMES], - int_mv frame_near_mv[MAX_REF_FRAMES], - struct buf_2d yv12_mb[4][MAX_MB_PLANE], - struct scale_factors scale[MAX_REF_FRAMES]) { - VP9_COMMON *cm = &cpi->common; - YV12_BUFFER_CONFIG *yv12 = &cm->yv12_fb[cpi->common.ref_frame_map[idx]]; +void vp9_setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x, + const TileInfo *const tile, + MV_REFERENCE_FRAME ref_frame, + BLOCK_SIZE block_size, + int mi_row, int mi_col, + int_mv frame_nearest_mv[MAX_REF_FRAMES], + int_mv frame_near_mv[MAX_REF_FRAMES], + struct buf_2d yv12_mb[4][MAX_MB_PLANE]) { + const VP9_COMMON *cm = &cpi->common; + const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame); MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; - - // set up scaling factors - scale[frame_type] = cpi->common.active_ref_scale[frame_type - 1]; - - scale[frame_type].sfc->set_scaled_offsets(&scale[frame_type], - mi_row * MI_SIZE, mi_col * MI_SIZE); + MODE_INFO *const mi = xd->mi_8x8[0]; + int_mv *const candidates = mi->mbmi.ref_mvs[ref_frame]; + const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf; // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this // use the UV scaling factors. - setup_pred_block(xd, yv12_mb[frame_type], yv12, mi_row, mi_col, - &scale[frame_type], &scale[frame_type]); + setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf); // Gets an initial list of candidate vectors from neighbours and orders them - vp9_find_mv_refs(cm, xd, tile, xd->mi_8x8[0], - xd->last_mi, - frame_type, - mbmi->ref_mvs[frame_type], mi_row, mi_col); + vp9_find_mv_refs(cm, xd, tile, mi, xd->last_mi, ref_frame, candidates, + mi_row, mi_col); // Candidate refinement carried out at encoder and decoder - vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, - mbmi->ref_mvs[frame_type], - &frame_nearest_mv[frame_type], - &frame_near_mv[frame_type]); + vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates, + &frame_nearest_mv[ref_frame], + &frame_near_mv[ref_frame]); // Further refinement that is encode side only to test the top few candidates // in full and choose the best as the centre point for subsequent searches. // The current implementation doesn't support scaling. - if (!vp9_is_scaled(scale[frame_type].sfc) && block_size >= BLOCK_8X8) - mv_pred(cpi, x, yv12_mb[frame_type][0].buf, yv12->y_stride, - frame_type, block_size); + if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8) + mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, + ref_frame, block_size); } -static YV12_BUFFER_CONFIG *get_scaled_ref_frame(VP9_COMP *cpi, int ref_frame) { - YV12_BUFFER_CONFIG *scaled_ref_frame = NULL; - int fb = get_ref_frame_idx(cpi, ref_frame); - int fb_scale = get_scale_ref_frame_idx(cpi, ref_frame); - if (cpi->scaled_ref_idx[fb_scale] != cpi->common.ref_frame_map[fb]) - scaled_ref_frame = &cpi->common.yv12_fb[cpi->scaled_ref_idx[fb_scale]]; - return scaled_ref_frame; +const YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const VP9_COMP *cpi, + int ref_frame) { + const VP9_COMMON *const cm = &cpi->common; + const int ref_idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)]; + const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1]; + return (scaled_idx != ref_idx) ? 
&cm->frame_bufs[scaled_idx].buf : NULL; } static INLINE int get_switchable_rate(const MACROBLOCK *x) { @@ -2369,17 +2376,22 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, int bestsme = INT_MAX; int further_steps, step_param; int sadpb = x->sadperbit16; - int_mv mvp_full; + MV mvp_full; int ref = mbmi->ref_frame[0]; - int_mv ref_mv = mbmi->ref_mvs[ref][0]; - const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]); + MV ref_mv = mbmi->ref_mvs[ref][0].as_mv; int tmp_col_min = x->mv_col_min; int tmp_col_max = x->mv_col_max; int tmp_row_min = x->mv_row_min; int tmp_row_max = x->mv_row_max; - YV12_BUFFER_CONFIG *scaled_ref_frame = get_scaled_ref_frame(cpi, ref); + const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi, + ref); + + MV pred_mv[3]; + pred_mv[0] = mbmi->ref_mvs[ref][0].as_mv; + pred_mv[1] = mbmi->ref_mvs[ref][1].as_mv; + pred_mv[2] = x->pred_mv[ref].as_mv; if (scaled_ref_frame) { int i; @@ -2389,40 +2401,21 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0]; - setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL); + vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL); } - vp9_clamp_mv_min_max(x, &ref_mv.as_mv); - - // Adjust search parameters based on small partitions' result. - if (x->fast_ms) { - // && abs(mvp_full.as_mv.row - x->pred_mv.as_mv.row) < 24 && - // abs(mvp_full.as_mv.col - x->pred_mv.as_mv.col) < 24) { - // adjust search range - step_param = 6; - if (x->fast_ms > 1) - step_param = 8; + vp9_set_mv_search_range(x, &ref_mv); - // Get prediction MV. - mvp_full.as_int = x->pred_mv[ref].as_int; - - // Adjust MV sign if needed. - if (cm->ref_frame_sign_bias[ref]) { - mvp_full.as_mv.col *= -1; - mvp_full.as_mv.row *= -1; - } + // Work out the size of the first step in the mv step search. + // 0 here is maximum length first step. 1 is MAX >> 1 etc. + if (cpi->sf.auto_mv_step_size && cpi->common.show_frame) { + // Take wtd average of the step_params based on the last frame's + // max mv magnitude and that based on the best ref mvs of the current + // block for the given reference. + step_param = (vp9_init_search_range(cpi, x->max_mv_context[ref]) + + cpi->mv_step_param) >> 1; } else { - // Work out the size of the first step in the mv step search. - // 0 here is maximum length first step. 1 is MAX >> 1 etc. - if (cpi->sf.auto_mv_step_size && cpi->common.show_frame) { - // Take wtd average of the step_params based on the last frame's - // max mv magnitude and that based on the best ref mvs of the current - // block for the given reference. - step_param = (vp9_init_search_range(cpi, x->max_mv_context[ref]) + - cpi->mv_step_param) >> 1; - } else { - step_param = cpi->mv_step_param; - } + step_param = cpi->mv_step_param; } if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64 && @@ -2432,39 +2425,78 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, step_param = MAX(step_param, boffset); } - mvp_full.as_int = x->mv_best_ref_index[ref] < MAX_MV_REF_CANDIDATES ? 
- mbmi->ref_mvs[ref][x->mv_best_ref_index[ref]].as_int : - x->pred_mv[ref].as_int; + if (cpi->sf.adaptive_motion_search) { + int bwl = b_width_log2_lookup[bsize]; + int bhl = b_height_log2_lookup[bsize]; + int i; + int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4); + + if (tlevel < 5) + step_param += 2; + + for (i = LAST_FRAME; i <= ALTREF_FRAME && cpi->common.show_frame; ++i) { + if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) { + x->pred_mv[ref].as_int = 0; + tmp_mv->as_int = INVALID_MV; + + if (scaled_ref_frame) { + int i; + for (i = 0; i < MAX_MB_PLANE; i++) + xd->plane[i].pre[0] = backup_yv12[i]; + } + return; + } + } + } + + mvp_full = pred_mv[x->mv_best_ref_index[ref]]; - mvp_full.as_mv.col >>= 3; - mvp_full.as_mv.row >>= 3; + mvp_full.col >>= 3; + mvp_full.row >>= 3; // Further step/diamond searches as necessary further_steps = (cpi->sf.max_step_search_steps - 1) - step_param; - if (cpi->sf.search_method == HEX) { - bestsme = vp9_hex_search(x, &mvp_full.as_mv, - step_param, - sadpb, 1, - &cpi->fn_ptr[block_size], 1, - &ref_mv.as_mv, &tmp_mv->as_mv); + if (cpi->sf.search_method == FAST_DIAMOND) { + bestsme = vp9_fast_dia_search(x, &mvp_full, step_param, sadpb, 0, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_var(x, &tmp_mv->as_mv, &ref_mv, + &cpi->fn_ptr[bsize], 1); + } else if (cpi->sf.search_method == FAST_HEX) { + bestsme = vp9_fast_hex_search(x, &mvp_full, step_param, sadpb, 0, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_var(x, &tmp_mv->as_mv, &ref_mv, + &cpi->fn_ptr[bsize], 1); + } else if (cpi->sf.search_method == HEX) { + bestsme = vp9_hex_search(x, &mvp_full, step_param, sadpb, 1, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_var(x, &tmp_mv->as_mv, &ref_mv, + &cpi->fn_ptr[bsize], 1); } else if (cpi->sf.search_method == SQUARE) { - bestsme = vp9_square_search(x, &mvp_full.as_mv, - step_param, - sadpb, 1, - &cpi->fn_ptr[block_size], 1, - &ref_mv.as_mv, &tmp_mv->as_mv); + bestsme = vp9_square_search(x, &mvp_full, step_param, sadpb, 1, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_var(x, &tmp_mv->as_mv, &ref_mv, + &cpi->fn_ptr[bsize], 1); } else if (cpi->sf.search_method == BIGDIA) { - bestsme = vp9_bigdia_search(x, &mvp_full.as_mv, - step_param, - sadpb, 1, - &cpi->fn_ptr[block_size], 1, - &ref_mv.as_mv, &tmp_mv->as_mv); + bestsme = vp9_bigdia_search(x, &mvp_full, step_param, sadpb, 1, + &cpi->fn_ptr[bsize], 1, + &ref_mv, &tmp_mv->as_mv); + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_var(x, &tmp_mv->as_mv, &ref_mv, + &cpi->fn_ptr[bsize], 1); } else { bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param, sadpb, further_steps, 1, - &cpi->fn_ptr[block_size], - &ref_mv, tmp_mv); + &cpi->fn_ptr[bsize], + &ref_mv, &tmp_mv->as_mv); } x->mv_col_min = tmp_col_min; @@ -2474,16 +2506,16 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, if (bestsme < INT_MAX) { int dis; /* TODO: use dis in distortion calculation later. 
*/ - unsigned int sse; - cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv.as_mv, + cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit, - &cpi->fn_ptr[block_size], - 0, cpi->sf.subpel_iters_per_step, + &cpi->fn_ptr[bsize], + cpi->sf.subpel_force_stop, + cpi->sf.subpel_iters_per_step, x->nmvjointcost, x->mvcost, - &dis, &sse); + &dis, &x->pred_sse[ref]); } - *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv.as_mv, + *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); if (cpi->sf.adaptive_motion_search && cpi->common.show_frame) @@ -2502,13 +2534,13 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col, int_mv single_newmv[MAX_REF_FRAMES], int *rate_mv) { - int pw = 4 << b_width_log2(bsize), ph = 4 << b_height_log2(bsize); + const int pw = 4 * num_4x4_blocks_wide_lookup[bsize]; + const int ph = 4 * num_4x4_blocks_high_lookup[bsize]; MACROBLOCKD *xd = &x->e_mbd; MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; const int refs[2] = { mbmi->ref_frame[0], mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1] }; int_mv ref_mv[2]; - const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]); int ite, ref; // Prediction buffer from second frame. uint8_t *second_pred = vpx_memalign(16, pw * ph * sizeof(uint8_t)); @@ -2517,9 +2549,9 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, struct buf_2d backup_yv12[2][MAX_MB_PLANE]; struct buf_2d scaled_first_yv12 = xd->plane[0].pre[0]; int last_besterr[2] = {INT_MAX, INT_MAX}; - YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = { - get_scaled_ref_frame(cpi, mbmi->ref_frame[0]), - get_scaled_ref_frame(cpi, mbmi->ref_frame[1]) + const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = { + vp9_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]), + vp9_get_scaled_ref_frame(cpi, mbmi->ref_frame[1]) }; for (ref = 0; ref < 2; ++ref) { @@ -2532,11 +2564,10 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, // motion search code to be used without additional modifications. for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[ref][i] = xd->plane[i].pre[ref]; - setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col, NULL); + vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col, + NULL); } - xd->scale_factor[ref].sfc->set_scaled_offsets(&xd->scale_factor[ref], - mi_row, mi_col); frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int; } @@ -2564,14 +2595,15 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, ref_yv12[!id].stride, second_pred, pw, &frame_mv[refs[!id]].as_mv, - &xd->scale_factor[!id], + &xd->block_refs[!id]->sf, pw, ph, 0, - &xd->subpix, MV_PRECISION_Q3); + xd->interp_kernel, MV_PRECISION_Q3, + mi_col * MI_SIZE, mi_row * MI_SIZE); // Compound motion search on first ref frame. if (id) xd->plane[0].pre[0] = ref_yv12[id]; - vp9_clamp_mv_min_max(x, &ref_mv[id].as_mv); + vp9_set_mv_search_range(x, &ref_mv[id].as_mv); // Use mv result from single mode as mvp. 
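The iteration this seeds alternates between the two references: one MV is held fixed and rendered into second_pred, and the other is refined against the compound (averaged) prediction. A structural sketch; build_pred() and refine() are placeholders for vp9_build_inter_predictor() and the 8-point refining search, and the bound of 4 passes is an assumption about the loop:

typedef struct { short row, col; } mv_t;

/* Placeholders for prediction and the refining search. */
static void build_pred(unsigned char *dst, const mv_t *mv) { (void)dst; (void)mv; }
static int refine(mv_t *mv, const unsigned char *second_pred, int *err) {
  (void)mv; (void)second_pred; *err = 0; return 1;
}

/* Alternate refinement of a compound MV pair: each pass fixes one
 * reference's MV, predicts it into second_pred, refines the other
 * against the average, and stops once a pass fails to improve. */
static void joint_refine(mv_t mv[2], unsigned char *second_pred) {
  int last_err[2] = { 0x7fffffff, 0x7fffffff };  /* like last_besterr[] */
  int ite;
  for (ite = 0; ite < 4; ++ite) {
    const int id = ite % 2;            /* which MV to refine this pass */
    int err;
    build_pred(second_pred, &mv[!id]); /* fixed reference's prediction */
    if (refine(&mv[id], second_pred, &err) && err < last_err[id])
      last_err[id] = err;
    else
      break;                           /* no improvement: stop iterating */
  }
}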
tmp_mv.as_int = frame_mv[refs[id]].as_int; @@ -2580,12 +2612,15 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, tmp_mv.as_mv.row >>= 3; // Small-range full-pixel motion search - bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb, + bestsme = vp9_refining_search_8p_c(x, &tmp_mv.as_mv, sadpb, search_range, - &cpi->fn_ptr[block_size], + &cpi->fn_ptr[bsize], x->nmvjointcost, x->mvcost, - &ref_mv[id], second_pred, + &ref_mv[id].as_mv, second_pred, pw, ph); + if (bestsme < INT_MAX) + bestsme = vp9_get_mvpred_av_var(x, &tmp_mv.as_mv, &ref_mv[id].as_mv, + second_pred, &cpi->fn_ptr[bsize], 1); x->mv_col_min = tmp_col_min; x->mv_col_max = tmp_col_max; @@ -2595,13 +2630,12 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, if (bestsme < INT_MAX) { int dis; /* TODO: use dis in distortion calculation later. */ unsigned int sse; - bestsme = cpi->find_fractional_mv_step_comp( x, &tmp_mv.as_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv, x->errorperbit, - &cpi->fn_ptr[block_size], + &cpi->fn_ptr[bsize], 0, cpi->sf.subpel_iters_per_step, x->nmvjointcost, x->mvcost, &dis, &sse, second_pred, @@ -2637,6 +2671,16 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, vpx_free(second_pred); } +static INLINE void restore_dst_buf(MACROBLOCKD *xd, + uint8_t *orig_dst[MAX_MB_PLANE], + int orig_dst_stride[MAX_MB_PLANE]) { + int i; + for (i = 0; i < MAX_MB_PLANE; i++) { + xd->plane[i].dst.buf = orig_dst[i]; + xd->plane[i].dst.stride = orig_dst_stride[i]; + } +} + static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, const TileInfo *const tile, BLOCK_SIZE bsize, @@ -2646,7 +2690,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, int *rate_y, int64_t *distortion_y, int *rate_uv, int64_t *distortion_uv, int *mode_excluded, int *disable_skip, - INTERPOLATION_TYPE *best_filter, + INTERP_FILTER *best_filter, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row, int mi_col, int_mv single_newmv[MAX_REF_FRAMES], @@ -2702,6 +2746,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, int_mv tmp_mv; single_motion_search(cpi, x, tile, bsize, mi_row, mi_col, &tmp_mv, &rate_mv); + if (tmp_mv.as_int == INVALID_MV) + return INT64_MAX; *rate2 += rate_mv; frame_mv[refs[0]].as_int = xd->mi_8x8[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int; @@ -2709,49 +2755,13 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, } } - // if we're near/nearest and mv == 0,0, compare to zeromv - if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) && - frame_mv[refs[0]].as_int == 0 && - !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) && - (num_refs == 1 || frame_mv[refs[1]].as_int == 0)) { - int rfc = mbmi->mode_context[mbmi->ref_frame[0]]; - int c1 = cost_mv_ref(cpi, NEARMV, rfc); - int c2 = cost_mv_ref(cpi, NEARESTMV, rfc); - int c3 = cost_mv_ref(cpi, ZEROMV, rfc); - - if (this_mode == NEARMV) { - if (c1 > c3) - return INT64_MAX; - } else if (this_mode == NEARESTMV) { - if (c2 > c3) - return INT64_MAX; - } else { - assert(this_mode == ZEROMV); - if (num_refs == 1) { - if ((c3 >= c2 && - mode_mv[NEARESTMV][mbmi->ref_frame[0]].as_int == 0) || - (c3 >= c1 && - mode_mv[NEARMV][mbmi->ref_frame[0]].as_int == 0)) - return INT64_MAX; - } else { - if ((c3 >= c2 && - mode_mv[NEARESTMV][mbmi->ref_frame[0]].as_int == 0 && - mode_mv[NEARESTMV][mbmi->ref_frame[1]].as_int == 0) || - (c3 >= c1 && - mode_mv[NEARMV][mbmi->ref_frame[0]].as_int == 0 && - mode_mv[NEARMV][mbmi->ref_frame[1]].as_int == 0)) - return INT64_MAX; - } - } - } - 
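The block removed above (the same test is kept, in extended form, in the sub-8x8 loop earlier in this diff) is a dominance check: a NEAR/NEAREST mode whose vector is zero anyway is only worth coding if its mode cost beats ZEROMV's. A compact sketch of the single-reference case:

enum { ZEROMV_S, NEARESTMV_S, NEARMV_S };  /* sketch-local mode ids */

/* Returns 1 if `mode` should be pruned because coding ZEROMV would
 * describe the same all-zero motion for fewer bits. c_near, c_nearest
 * and c_zero are the mode signaling costs for the current context. */
static int prune_zero_mv_mode(int mode, int c_near, int c_nearest,
                              int c_zero, int nearest_is_zero,
                              int near_is_zero) {
  switch (mode) {
    case NEARMV_S:    return c_near > c_zero;
    case NEARESTMV_S: return c_nearest > c_zero;
    case ZEROMV_S:    /* ZEROMV is redundant if a cheaper synonym exists */
      return (c_zero >= c_nearest && nearest_is_zero) ||
             (c_zero >= c_near && near_is_zero);
    default:          return 0;
  }
}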
for (i = 0; i < num_refs; ++i) { cur_mv[i] = frame_mv[refs[i]]; // Clip "next_nearest" so that it does not extend to far out of image if (this_mode != NEWMV) clamp_mv2(&cur_mv[i].as_mv, xd); - if (mv_check_bounds(x, &cur_mv[i])) + if (mv_check_bounds(x, &cur_mv[i].as_mv)) return INT64_MAX; mbmi->mv[i].as_int = cur_mv[i].as_int; } @@ -2770,67 +2780,59 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, * are only three options: Last/Golden, ARF/Last or Golden/ARF, or in other * words if you present them in that order, the second one is always known * if the first is known */ - *rate2 += cost_mv_ref(cpi, this_mode, - mbmi->mode_context[mbmi->ref_frame[0]]); + *rate2 += cost_mv_ref(cpi, this_mode, mbmi->mode_context[refs[0]]); - if (!(*mode_excluded)) { - if (is_comp_pred) { - *mode_excluded = (cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY); - } else { - *mode_excluded = (cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY); - } - } + if (!(*mode_excluded)) + *mode_excluded = is_comp_pred ? cm->reference_mode == SINGLE_REFERENCE + : cm->reference_mode == COMPOUND_REFERENCE; pred_exists = 0; // Are all MVs integer pel for Y and UV - intpel_mv = (mbmi->mv[0].as_mv.row & 15) == 0 && - (mbmi->mv[0].as_mv.col & 15) == 0; + intpel_mv = !mv_has_subpel(&mbmi->mv[0].as_mv); if (is_comp_pred) - intpel_mv &= (mbmi->mv[1].as_mv.row & 15) == 0 && - (mbmi->mv[1].as_mv.col & 15) == 0; + intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv); + // Search for best switchable filter by checking the variance of // pred error irrespective of whether the filter will be used - if (cm->mcomp_filter_type != BILINEAR) { + cpi->mask_filter_rd = 0; + for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) + cpi->rd_filter_cache[i] = INT64_MAX; + + if (cm->interp_filter != BILINEAR) { *best_filter = EIGHTTAP; if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) { *best_filter = EIGHTTAP; - vp9_zero(cpi->rd_filter_cache); } else { - int i, newbest; + int newbest; int tmp_rate_sum = 0; int64_t tmp_dist_sum = 0; - cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX; for (i = 0; i < SWITCHABLE_FILTERS; ++i) { int j; int64_t rs_rd; mbmi->interp_filter = i; - vp9_setup_interp_filters(xd, mbmi->interp_filter, cm); + xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); rs = get_switchable_rate(x); rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0); if (i > 0 && intpel_mv) { - cpi->rd_filter_cache[i] = RDCOST(x->rdmult, x->rddiv, - tmp_rate_sum, tmp_dist_sum); + rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum); + cpi->rd_filter_cache[i] = rd; cpi->rd_filter_cache[SWITCHABLE_FILTERS] = - MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS], - cpi->rd_filter_cache[i] + rs_rd); - rd = cpi->rd_filter_cache[i]; - if (cm->mcomp_filter_type == SWITCHABLE) + MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); + if (cm->interp_filter == SWITCHABLE) rd += rs_rd; + cpi->mask_filter_rd = MAX(cpi->mask_filter_rd, rd); } else { int rate_sum = 0; int64_t dist_sum = 0; - if ((cm->mcomp_filter_type == SWITCHABLE && + if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) || - (cm->mcomp_filter_type != SWITCHABLE && - (cm->mcomp_filter_type == mbmi->interp_filter || + (cm->interp_filter != SWITCHABLE && + (cm->interp_filter == mbmi->interp_filter || (i == 0 && intpel_mv)))) { - for (j = 0; j < MAX_MB_PLANE; j++) { - xd->plane[j].dst.buf = orig_dst[j]; - xd->plane[j].dst.stride = orig_dst_stride[j]; - } + restore_dst_buf(xd, orig_dst, orig_dst_stride); } else { for (j = 0; j < MAX_MB_PLANE; j++) { 
xd->plane[j].dst.buf = tmp_buf + j * 64 * 64; @@ -2839,25 +2841,24 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, } vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum); - cpi->rd_filter_cache[i] = RDCOST(x->rdmult, x->rddiv, - rate_sum, dist_sum); + + rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum); + cpi->rd_filter_cache[i] = rd; cpi->rd_filter_cache[SWITCHABLE_FILTERS] = - MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS], - cpi->rd_filter_cache[i] + rs_rd); - rd = cpi->rd_filter_cache[i]; - if (cm->mcomp_filter_type == SWITCHABLE) + MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); + if (cm->interp_filter == SWITCHABLE) rd += rs_rd; + cpi->mask_filter_rd = MAX(cpi->mask_filter_rd, rd); + if (i == 0 && intpel_mv) { tmp_rate_sum = rate_sum; tmp_dist_sum = dist_sum; } } + if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) { if (rd / 2 > ref_best_rd) { - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = orig_dst[i]; - xd->plane[i].dst.stride = orig_dst_stride[i]; - } + restore_dst_buf(xd, orig_dst, orig_dst_stride); return INT64_MAX; } } @@ -2866,28 +2867,24 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, if (newbest) { best_rd = rd; *best_filter = mbmi->interp_filter; - if (cm->mcomp_filter_type == SWITCHABLE && i && !intpel_mv) + if (cm->interp_filter == SWITCHABLE && i && !intpel_mv) best_needs_copy = !best_needs_copy; } - if ((cm->mcomp_filter_type == SWITCHABLE && newbest) || - (cm->mcomp_filter_type != SWITCHABLE && - cm->mcomp_filter_type == mbmi->interp_filter)) { + if ((cm->interp_filter == SWITCHABLE && newbest) || + (cm->interp_filter != SWITCHABLE && + cm->interp_filter == mbmi->interp_filter)) { pred_exists = 1; } } - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = orig_dst[i]; - xd->plane[i].dst.stride = orig_dst_stride[i]; - } + restore_dst_buf(xd, orig_dst, orig_dst_stride); } } // Set the appropriate filter - mbmi->interp_filter = cm->mcomp_filter_type != SWITCHABLE ? - cm->mcomp_filter_type : *best_filter; - vp9_setup_interp_filters(xd, mbmi->interp_filter, cm); - rs = cm->mcomp_filter_type == SWITCHABLE ? get_switchable_rate(x) : 0; + mbmi->interp_filter = cm->interp_filter != SWITCHABLE ? + cm->interp_filter : *best_filter; + xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); + rs = cm->interp_filter == SWITCHABLE ? 
get_switchable_rate(x) : 0; if (pred_exists) { if (best_needs_copy) { @@ -2903,7 +2900,6 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); } - if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) { int tmp_rate; int64_t tmp_dist; @@ -2912,44 +2908,34 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, // if current pred_error modeled rd is substantially more than the best // so far, do not bother doing full rd if (rd / 2 > ref_best_rd) { - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = orig_dst[i]; - xd->plane[i].dst.stride = orig_dst_stride[i]; - } + restore_dst_buf(xd, orig_dst, orig_dst_stride); return INT64_MAX; } } - if (cpi->common.mcomp_filter_type == SWITCHABLE) + if (cm->interp_filter == SWITCHABLE) *rate2 += get_switchable_rate(x); - if (!is_comp_pred && cpi->enable_encode_breakout) { + if (!is_comp_pred) { if (cpi->active_map_enabled && x->active_ptr[0] == 0) x->skip = 1; - else if (x->encode_breakout) { + else if (cpi->allow_encode_breakout && x->encode_breakout) { const BLOCK_SIZE y_size = get_plane_block_size(bsize, &xd->plane[0]); const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]); unsigned int var, sse; // Skipping threshold for ac. unsigned int thresh_ac; - // The encode_breakout input - unsigned int encode_breakout = x->encode_breakout << 4; - unsigned int max_thresh = 36000; - + // Set a maximum for threshold to avoid big PSNR loss in low bitrate case. // Use extreme low threshold for static frames to limit skipping. - if (cpi->enable_encode_breakout == 2) - max_thresh = 128; + const unsigned int max_thresh = (cpi->allow_encode_breakout == + ENCODE_BREAKOUT_LIMITED) ? 128 : 36000; + // The encode_breakout input + const unsigned int min_thresh = + MIN(((unsigned int)x->encode_breakout << 4), max_thresh); // Calculate threshold according to dequant value. thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) / 9; - - // Use encode_breakout input if it is bigger than internal threshold. - if (thresh_ac < encode_breakout) - thresh_ac = encode_breakout; - - // Set a maximum for threshold to avoid big PSNR loss in low bitrate case. - if (thresh_ac > max_thresh) - thresh_ac = max_thresh; + thresh_ac = clamp(thresh_ac, min_thresh, max_thresh); var = cpi->fn_ptr[y_size].vf(x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].dst.buf, @@ -2990,7 +2976,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, x->skip = 1; // The cost of skip bit needs to be added. - *rate2 += vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), 1); + *rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1); // Scaling factor for SSE from spatial domain to frequency domain // is 16. Adjust distortion accordingly. 
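The rewritten breakout logic above is a single clamp once the bounds are named. A direct sketch of the threshold computation; the /9 scaling of the squared AC dequant step and the 128/36000 caps are taken from the diff as-is:

/* AC skip threshold as assembled above: start from the squared AC
 * dequantizer step scaled by 1/9, then clamp between the user floor
 * (encode_breakout << 4) and the cap (128 in the limited mode that
 * protects static frames, 36000 otherwise). */
static unsigned int ac_skip_thresh(unsigned int dequant_ac,
                                   unsigned int encode_breakout,
                                   int breakout_limited) {
  const unsigned int max_thresh = breakout_limited ? 128 : 36000;
  unsigned int min_thresh = encode_breakout << 4;
  unsigned int t = dequant_ac * dequant_ac / 9;
  if (min_thresh > max_thresh) min_thresh = max_thresh;
  if (t < min_thresh) t = min_thresh;
  if (t > max_thresh) t = max_thresh;
  return t;
}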
@@ -3012,16 +2998,13 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, int64_t rdcosty = INT64_MAX; // Y cost and distortion - super_block_yrd(cpi, x, rate_y, distortion_y, &skippable_y, psse, - bsize, txfm_cache, ref_best_rd); + inter_super_block_yrd(cpi, x, rate_y, distortion_y, &skippable_y, psse, + bsize, txfm_cache, ref_best_rd); if (*rate_y == INT_MAX) { *rate2 = INT_MAX; *distortion = INT64_MAX; - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = orig_dst[i]; - xd->plane[i].dst.stride = orig_dst_stride[i]; - } + restore_dst_buf(xd, orig_dst, orig_dst_stride); return INT64_MAX; } @@ -3036,10 +3019,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, if (*rate_uv == INT_MAX) { *rate2 = INT_MAX; *distortion = INT64_MAX; - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = orig_dst[i]; - xd->plane[i].dst.stride = orig_dst_stride[i]; - } + restore_dst_buf(xd, orig_dst, orig_dst_stride); return INT64_MAX; } @@ -3049,11 +3029,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, *skippable = skippable_y && skippable_uv; } - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = orig_dst[i]; - xd->plane[i].dst.stride = orig_dst_stride[i]; - } - + restore_dst_buf(xd, orig_dst, orig_dst_stride); return this_rd; // if 0, this will be re-calculated by caller } @@ -3065,9 +3041,9 @@ static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, for (i = 0; i < max_plane; ++i) { p[i].coeff = ctx->coeff_pbuf[i][1]; - pd[i].qcoeff = ctx->qcoeff_pbuf[i][1]; + p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; - pd[i].eobs = ctx->eobs_pbuf[i][1]; + p[i].eobs = ctx->eobs_pbuf[i][1]; ctx->coeff_pbuf[i][1] = ctx->coeff_pbuf[i][0]; ctx->qcoeff_pbuf[i][1] = ctx->qcoeff_pbuf[i][0]; @@ -3075,9 +3051,9 @@ static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, ctx->eobs_pbuf[i][1] = ctx->eobs_pbuf[i][0]; ctx->coeff_pbuf[i][0] = p[i].coeff; - ctx->qcoeff_pbuf[i][0] = pd[i].qcoeff; + ctx->qcoeff_pbuf[i][0] = p[i].qcoeff; ctx->dqcoeff_pbuf[i][0] = pd[i].dqcoeff; - ctx->eobs_pbuf[i][0] = pd[i].eobs; + ctx->eobs_pbuf[i][0] = p[i].eobs; } } @@ -3090,9 +3066,11 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0; int y_skip = 0, uv_skip = 0; int64_t dist_y = 0, dist_uv = 0, tx_cache[TX_MODES] = { 0 }; + TX_SIZE max_uv_tx_size; x->skip_encode = 0; ctx->skip = 0; xd->mi_8x8[0]->mbmi.ref_frame[0] = INTRA_FRAME; + if (bsize >= BLOCK_8X8) { if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y, &y_skip, bsize, tx_cache, @@ -3100,8 +3078,9 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, *returnrate = INT_MAX; return; } + max_uv_tx_size = get_uv_tx_size_impl(xd->mi_8x8[0]->mbmi.tx_size, bsize); rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, - &dist_uv, &uv_skip, bsize); + &dist_uv, &uv_skip, bsize, max_uv_tx_size); } else { y_skip = 0; if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly, @@ -3109,19 +3088,19 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, *returnrate = INT_MAX; return; } + max_uv_tx_size = get_uv_tx_size_impl(xd->mi_8x8[0]->mbmi.tx_size, bsize); rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, - &dist_uv, &uv_skip, BLOCK_8X8); + &dist_uv, &uv_skip, BLOCK_8X8, max_uv_tx_size); } if (y_skip && uv_skip) { *returnrate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly + - 
vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), 1); + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1); *returndist = dist_y + dist_uv; vp9_zero(ctx->tx_rd_diff); } else { int i; - *returnrate = rate_y + rate_uv + - vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), 0); + *returnrate = rate_y + rate_uv + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0); *returndist = dist_y + dist_uv; if (cpi->sf.tx_size_search_method == USE_FULL_RD) for (i = 0; i < TX_MODES; i++) { @@ -3143,10 +3122,10 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) { - VP9_COMMON *cm = &cpi->common; - MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; - const struct segmentation *seg = &cm->seg; + VP9_COMMON *const cm = &cpi->common; + MACROBLOCKD *const xd = &x->e_mbd; + MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; + const struct segmentation *const seg = &cm->seg; const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]); MB_PREDICTION_MODE this_mode; MV_REFERENCE_FRAME ref_frame, second_ref_frame; @@ -3157,19 +3136,14 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } }; static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG, VP9_ALT_FLAG }; - int idx_list[4] = {0, - cpi->lst_fb_idx, - cpi->gld_fb_idx, - cpi->alt_fb_idx}; int64_t best_rd = best_rd_so_far; int64_t best_tx_rd[TX_MODES]; int64_t best_tx_diff[TX_MODES]; - int64_t best_pred_diff[NB_PREDICTION_TYPES]; - int64_t best_pred_rd[NB_PREDICTION_TYPES]; + int64_t best_pred_diff[REFERENCE_MODES]; + int64_t best_pred_rd[REFERENCE_MODES]; int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS]; int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS]; MB_MODE_INFO best_mbmode = { 0 }; - int j; int mode_index, best_mode_index = 0; unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES]; vp9_prob comp_mode_p; @@ -3177,31 +3151,31 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, int64_t best_inter_rd = INT64_MAX; MB_PREDICTION_MODE best_intra_mode = DC_PRED; MV_REFERENCE_FRAME best_inter_ref_frame = LAST_FRAME; - INTERPOLATION_TYPE tmp_best_filter = SWITCHABLE; + INTERP_FILTER tmp_best_filter = SWITCHABLE; int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES]; int64_t dist_uv[TX_SIZES]; int skip_uv[TX_SIZES]; MB_PREDICTION_MODE mode_uv[TX_SIZES]; - struct scale_factors scale_factor[4]; - unsigned int ref_frame_mask = 0; - unsigned int mode_mask = 0; int64_t mode_distortions[MB_MODE_COUNT] = {-1}; - int64_t frame_distortions[MAX_REF_FRAMES] = {-1}; int intra_cost_penalty = 20 * vp9_dc_quant(cm->base_qindex, cm->y_dc_delta_q); const int bws = num_8x8_blocks_wide_lookup[bsize] / 2; const int bhs = num_8x8_blocks_high_lookup[bsize] / 2; int best_skip2 = 0; + int mode_skip_mask = 0; + const int mode_skip_start = cpi->sf.mode_skip_start + 1; + const int *const rd_threshes = cpi->rd_threshes[segment_id][bsize]; + const int *const rd_thresh_freq_fact = cpi->rd_thresh_freq_fact[bsize]; + const int mode_search_skip_flags = cpi->sf.mode_search_skip_flags; + const int intra_y_mode_mask = + cpi->sf.intra_y_mode_mask[max_txsize_lookup[bsize]]; + const int disable_inter_mode_mask = cpi->sf.disable_inter_mode_mask[bsize]; x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH; - // Everywhere the flag is set the error is much higher than its neighbors. 
- ctx->frames_with_high_error = 0; - ctx->modes_with_high_error = 0; - estimate_ref_frame_costs(cpi, segment_id, ref_costs_single, ref_costs_comp, &comp_mode_p); - for (i = 0; i < NB_PREDICTION_TYPES; ++i) + for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX; for (i = 0; i < TX_MODES; i++) best_tx_rd[i] = INT64_MAX; @@ -3209,51 +3183,92 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, best_filter_rd[i] = INT64_MAX; for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX; + for (i = 0; i < MAX_REF_FRAMES; ++i) + x->pred_sse[i] = INT_MAX; *returnrate = INT_MAX; - // Create a mask set to 1 for each reference frame used by a smaller - // resolution. - if (cpi->sf.use_avoid_tested_higherror) { - switch (block_size) { - case BLOCK_64X64: - for (i = 0; i < 4; i++) { - for (j = 0; j < 4; j++) { - ref_frame_mask |= x->mb_context[i][j].frames_with_high_error; - mode_mask |= x->mb_context[i][j].modes_with_high_error; - } - } - for (i = 0; i < 4; i++) { - ref_frame_mask |= x->sb32_context[i].frames_with_high_error; - mode_mask |= x->sb32_context[i].modes_with_high_error; - } - break; - case BLOCK_32X32: - for (i = 0; i < 4; i++) { - ref_frame_mask |= - x->mb_context[x->sb_index][i].frames_with_high_error; - mode_mask |= x->mb_context[x->sb_index][i].modes_with_high_error; + for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { + x->pred_mv_sad[ref_frame] = INT_MAX; + if (cpi->ref_frame_flags & flag_list[ref_frame]) { + vp9_setup_buffer_inter(cpi, x, tile, + ref_frame, block_size, mi_row, mi_col, + frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb); + } + frame_mv[NEWMV][ref_frame].as_int = INVALID_MV; + frame_mv[ZEROMV][ref_frame].as_int = 0; + } + + for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { + // All modes from vp9_mode_order that use this frame as any ref + static const int ref_frame_mask_all[] = { + 0x0, 0x123291, 0x25c444, 0x39b722 + }; + // Fixed mv modes (NEARESTMV, NEARMV, ZEROMV) from vp9_mode_order that use + // this frame as their primary ref + static const int ref_frame_mask_fixedmv[] = { + 0x0, 0x121281, 0x24c404, 0x080102 + }; + if (!(cpi->ref_frame_flags & flag_list[ref_frame])) { + // Skip modes for missing references + mode_skip_mask |= ref_frame_mask_all[ref_frame]; + } else if (cpi->sf.reference_masking) { + for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { + // Skip fixed mv modes for poor references + if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) { + mode_skip_mask |= ref_frame_mask_fixedmv[ref_frame]; + break; } - break; - default: - // Until we handle all block sizes set it to present; - ref_frame_mask = 0; - mode_mask = 0; - break; + } + } + // If the segment reference frame feature is enabled.... + // then do nothing if the current ref frame is not allowed.. + if (vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) && + vp9_get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) { + mode_skip_mask |= ref_frame_mask_all[ref_frame]; } - ref_frame_mask = ~ref_frame_mask; - mode_mask = ~mode_mask; } - for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) { - if (cpi->ref_frame_flags & flag_list[ref_frame]) { - setup_buffer_inter(cpi, x, tile, idx_list[ref_frame], ref_frame, - block_size, mi_row, mi_col, - frame_mv[NEARESTMV], frame_mv[NEARMV], - yv12_mb, scale_factor); + // If the segment skip feature is enabled.... + // then do nothing if the current mode is not allowed.. 
+ if (vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { + const int inter_non_zero_mode_mask = 0x1F7F7; + mode_skip_mask |= inter_non_zero_mode_mask; + } + + // Disable this drop out case if the ref frame + // segment level feature is enabled for this segment. This is to + // prevent the possibility that we end up unable to pick any mode. + if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) { + // Only consider ZEROMV/ALTREF_FRAME for alt ref frame, + // unless ARNR filtering is enabled in which case we want + // an unfiltered alternative. We allow near/nearest as well + // because they may result in zero-zero MVs but be cheaper. + if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) { + const int altref_zero_mask = + ~((1 << THR_NEARESTA) | (1 << THR_NEARA) | (1 << THR_ZEROA)); + mode_skip_mask |= altref_zero_mask; + if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0) + mode_skip_mask |= (1 << THR_NEARA); + if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0) + mode_skip_mask |= (1 << THR_NEARESTA); } - frame_mv[NEWMV][ref_frame].as_int = INVALID_MV; - frame_mv[ZEROMV][ref_frame].as_int = 0; + } + + // TODO(JBB): This is to make up for the fact that we don't have sad + // functions that work when the block size reads outside the umv. We + // should fix this either by making the motion search just work on + // a representative block in the boundary ( first ) and then implement a + // function that does sads when inside the border.. + if ((mi_row + bhs) > cm->mi_rows || (mi_col + bws) > cm->mi_cols) { + const int new_modes_mask = + (1 << THR_NEWMV) | (1 << THR_NEWG) | (1 << THR_NEWA) | + (1 << THR_COMP_NEWLA) | (1 << THR_COMP_NEWGA); + mode_skip_mask |= new_modes_mask; + } + + if (bsize > cpi->sf.max_intra_bsize) { + mode_skip_mask |= 0xFF30808; } for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) { @@ -3267,125 +3282,134 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, int64_t tx_cache[TX_MODES]; int i; int this_skip2 = 0; - int64_t total_sse = INT_MAX; + int64_t total_sse = INT64_MAX; int early_term = 0; - for (i = 0; i < TX_MODES; ++i) - tx_cache[i] = INT64_MAX; - - x->skip = 0; - this_mode = vp9_mode_order[mode_index].mode; - ref_frame = vp9_mode_order[mode_index].ref_frame; - second_ref_frame = vp9_mode_order[mode_index].second_ref_frame; - // Look at the reference frame of the best mode so far and set the // skip mask to look at a subset of the remaining modes. 
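All of the pruning above, missing references, segment features, altref zero-MV restrictions, border NEWMV, and large-block intra, now funnels into one integer, mode_skip_mask, in which bit i corresponds to index i of vp9_mode_order[]. The rewritten switch below only ORs in further bits, and a pruned mode is then rejected with a single AND. A standalone sketch of the idiom, with purely illustrative candidate numbers:

#include <stdio.h>

int main(void) {
  unsigned mode_skip_mask = 0;
  int mode_index;
  // Pruning passes OR in bits for whole classes of candidates, e.g. every
  // mode that uses an unavailable reference frame (bits here are made up).
  mode_skip_mask |= (1u << 2) | (1u << 5);
  for (mode_index = 0; mode_index < 8; ++mode_index) {
    if (mode_skip_mask & (1u << mode_index))
      continue;  // rejected without any rate-distortion work
    printf("evaluate candidate %d\n", mode_index);
  }
  return 0;
}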
- if (mode_index > cpi->sf.mode_skip_start) { - if (mode_index == (cpi->sf.mode_skip_start + 1)) { - switch (vp9_mode_order[best_mode_index].ref_frame) { - case INTRA_FRAME: - cpi->mode_skip_mask = 0; - break; - case LAST_FRAME: - cpi->mode_skip_mask = LAST_FRAME_MODE_MASK; - break; - case GOLDEN_FRAME: - cpi->mode_skip_mask = GOLDEN_FRAME_MODE_MASK; - break; - case ALTREF_FRAME: - cpi->mode_skip_mask = ALT_REF_MODE_MASK; - break; - case NONE: - case MAX_REF_FRAMES: - assert(!"Invalid Reference frame"); - } + if (mode_index == mode_skip_start) { + switch (vp9_mode_order[best_mode_index].ref_frame[0]) { + case INTRA_FRAME: + break; + case LAST_FRAME: + mode_skip_mask |= LAST_FRAME_MODE_MASK; + break; + case GOLDEN_FRAME: + mode_skip_mask |= GOLDEN_FRAME_MODE_MASK; + break; + case ALTREF_FRAME: + mode_skip_mask |= ALT_REF_MODE_MASK; + break; + case NONE: + case MAX_REF_FRAMES: + assert(0 && "Invalid Reference frame"); } - if (cpi->mode_skip_mask & ((int64_t)1 << mode_index)) - continue; } - - // Skip if the current reference frame has been masked off - if (cpi->sf.reference_masking && !cpi->set_ref_frame_mask && - (cpi->ref_frame_mask & (1 << ref_frame))) + if (mode_skip_mask & (1 << mode_index)) continue; // Test best rd so far against threshold for trying this mode. - if ((best_rd < ((int64_t)cpi->rd_threshes[segment_id][bsize][mode_index] * - cpi->rd_thresh_freq_fact[bsize][mode_index] >> 5)) || - cpi->rd_threshes[segment_id][bsize][mode_index] == INT_MAX) - continue; - - // Do not allow compound prediction if the segment level reference - // frame feature is in use as in this case there can only be one reference. - if ((second_ref_frame > INTRA_FRAME) && - vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) - continue; + if (best_rd < ((int64_t)rd_threshes[mode_index] * + rd_thresh_freq_fact[mode_index] >> 5) || + rd_threshes[mode_index] == INT_MAX) + continue; - // Skip some checking based on small partitions' result. 
- if (x->fast_ms > 1 && !ref_frame) - continue; - if (x->fast_ms > 2 && ref_frame != x->subblock_ref) + this_mode = vp9_mode_order[mode_index].mode; + ref_frame = vp9_mode_order[mode_index].ref_frame[0]; + if (ref_frame != INTRA_FRAME && + disable_inter_mode_mask & (1 << INTER_OFFSET(this_mode))) continue; + second_ref_frame = vp9_mode_order[mode_index].ref_frame[1]; - if (cpi->sf.use_avoid_tested_higherror && bsize >= BLOCK_8X8) { - if (!(ref_frame_mask & (1 << ref_frame))) { - continue; - } - if (!(mode_mask & (1 << this_mode))) { + comp_pred = second_ref_frame > INTRA_FRAME; + if (comp_pred) { + if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) && + vp9_mode_order[best_mode_index].ref_frame[0] == INTRA_FRAME) continue; - } - if (second_ref_frame != NONE - && !(ref_frame_mask & (1 << second_ref_frame))) { + if ((mode_search_skip_flags & FLAG_SKIP_COMP_REFMISMATCH) && + ref_frame != best_inter_ref_frame && + second_ref_frame != best_inter_ref_frame) continue; - } - } - - mbmi->ref_frame[0] = ref_frame; - mbmi->ref_frame[1] = second_ref_frame; - - if (!(ref_frame == INTRA_FRAME - || (cpi->ref_frame_flags & flag_list[ref_frame]))) { - continue; - } - if (!(second_ref_frame == NONE - || (cpi->ref_frame_flags & flag_list[second_ref_frame]))) { - continue; + mode_excluded = cm->reference_mode == SINGLE_REFERENCE; + } else { + if (ref_frame != INTRA_FRAME) + mode_excluded = cm->reference_mode == COMPOUND_REFERENCE; } - comp_pred = second_ref_frame > INTRA_FRAME; - if (comp_pred) { - if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) - if (vp9_mode_order[best_mode_index].ref_frame == INTRA_FRAME) - continue; - if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_REFMISMATCH) - if (ref_frame != best_inter_ref_frame && - second_ref_frame != best_inter_ref_frame) + if (ref_frame == INTRA_FRAME) { + if (!(intra_y_mode_mask & (1 << this_mode))) + continue; + if (this_mode != DC_PRED) { + // Disable intra modes other than DC_PRED for blocks with low variance + // Threshold for intra skipping based on source variance + // TODO(debargha): Specialize the threshold for super block sizes + const unsigned int skip_intra_var_thresh = 64; + if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) && + x->source_variance < skip_intra_var_thresh) continue; + // Only search the oblique modes if the best so far is + // one of the neighboring directional modes + if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) && + (this_mode >= D45_PRED && this_mode <= TM_PRED)) { + if (vp9_mode_order[best_mode_index].ref_frame[0] > INTRA_FRAME) + continue; + } + if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) { + if (conditional_skipintra(this_mode, best_intra_mode)) + continue; + } + } + } else { + // if we're near/nearest and mv == 0,0, compare to zeromv + if (!(disable_inter_mode_mask & (1 << INTER_OFFSET(ZEROMV))) && + (this_mode == NEARMV || this_mode == NEARESTMV || + this_mode == ZEROMV) && + frame_mv[this_mode][ref_frame].as_int == 0 && + !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) && + (!comp_pred || frame_mv[this_mode][second_ref_frame].as_int == 0)) { + int rfc = mbmi->mode_context[ref_frame]; + int c1 = cost_mv_ref(cpi, NEARMV, rfc); + int c2 = cost_mv_ref(cpi, NEARESTMV, rfc); + int c3 = cost_mv_ref(cpi, ZEROMV, rfc); + + if (this_mode == NEARMV) { + if (c1 > c3) + continue; + } else if (this_mode == NEARESTMV) { + if (c2 > c3) + continue; + } else { + assert(this_mode == ZEROMV); + if (!comp_pred) { + if ((c3 >= c2 && + frame_mv[NEARESTMV][ref_frame].as_int == 0) 
|| + (c3 >= c1 && + frame_mv[NEARMV][ref_frame].as_int == 0)) + continue; + } else { + if ((c3 >= c2 && + frame_mv[NEARESTMV][ref_frame].as_int == 0 && + frame_mv[NEARESTMV][second_ref_frame].as_int == 0) || + (c3 >= c1 && + frame_mv[NEARMV][ref_frame].as_int == 0 && + frame_mv[NEARMV][second_ref_frame].as_int == 0)) + continue; + } + } + } } - set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor); + mbmi->mode = this_mode; mbmi->uv_mode = DC_PRED; - + mbmi->ref_frame[0] = ref_frame; + mbmi->ref_frame[1] = second_ref_frame; // Evaluate all sub-pel filters irrespective of whether we can use // them for this frame. - mbmi->interp_filter = cm->mcomp_filter_type; - vp9_setup_interp_filters(xd, mbmi->interp_filter, cm); - - if (comp_pred) { - if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) - continue; - set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor); - - mode_excluded = mode_excluded - ? mode_excluded - : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY; - } else { - if (ref_frame != INTRA_FRAME && second_ref_frame != INTRA_FRAME) { - mode_excluded = - mode_excluded ? - mode_excluded : cm->comp_pred_mode == COMP_PREDICTION_ONLY; - } - } + mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP + : cm->interp_filter; + x->skip = 0; + set_ref_ptrs(cm, xd, ref_frame, second_ref_frame); + xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); // Select prediction reference frames. for (i = 0; i < MAX_MB_PLANE; i++) { @@ -3394,46 +3418,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i]; } - // If the segment reference frame feature is enabled.... - // then do nothing if the current ref frame is not allowed.. - if (vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) && - vp9_get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != - (int)ref_frame) { - continue; - // If the segment skip feature is enabled.... - // then do nothing if the current mode is not allowed.. - } else if (vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) && - (this_mode != ZEROMV && ref_frame != INTRA_FRAME)) { - continue; - // Disable this drop out case if the ref frame - // segment level feature is enabled for this segment. This is to - // prevent the possibility that we end up unable to pick any mode. - } else if (!vp9_segfeature_active(seg, segment_id, - SEG_LVL_REF_FRAME)) { - // Only consider ZEROMV/ALTREF_FRAME for alt ref frame, - // unless ARNR filtering is enabled in which case we want - // an unfiltered alternative. We allow near/nearest as well - // because they may result in zero-zero MVs but be cheaper. - if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) { - if ((this_mode != ZEROMV && - !(this_mode == NEARMV && - frame_mv[NEARMV][ALTREF_FRAME].as_int == 0) && - !(this_mode == NEARESTMV && - frame_mv[NEARESTMV][ALTREF_FRAME].as_int == 0)) || - ref_frame != ALTREF_FRAME) { - continue; - } - } - } - // TODO(JBB): This is to make up for the fact that we don't have sad - // functions that work when the block size reads outside the umv. We - // should fix this either by making the motion search just work on - // a representative block in the boundary ( first ) and then implement a - // function that does sads when inside the border.. 
- if (((mi_row + bhs) > cm->mi_rows || (mi_col + bws) > cm->mi_cols) && - this_mode == NEWMV) { - continue; - } + for (i = 0; i < TX_MODES; ++i) + tx_cache[i] = INT64_MAX; #ifdef MODE_TEST_HIT_STATS // TEST/DEBUG CODE @@ -3441,44 +3427,19 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, cpi->mode_test_hits[bsize]++; #endif - if (ref_frame == INTRA_FRAME) { TX_SIZE uv_tx; - // Disable intra modes other than DC_PRED for blocks with low variance - // Threshold for intra skipping based on source variance - // TODO(debargha): Specialize the threshold for super block sizes - static const unsigned int skip_intra_var_thresh[BLOCK_SIZES] = { - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - }; - if ((cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) && - this_mode != DC_PRED && - x->source_variance < skip_intra_var_thresh[mbmi->sb_type]) - continue; - // Only search the oblique modes if the best so far is - // one of the neighboring directional modes - if ((cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) && - (this_mode >= D45_PRED && this_mode <= TM_PRED)) { - if (vp9_mode_order[best_mode_index].ref_frame > INTRA_FRAME) - continue; - } - mbmi->mode = this_mode; - if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) { - if (conditional_skipintra(mbmi->mode, best_intra_mode)) - continue; - } - - super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, - bsize, tx_cache, best_rd); + intra_super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, + bsize, tx_cache, best_rd); if (rate_y == INT_MAX) continue; - uv_tx = MIN(mbmi->tx_size, max_uv_txsize_lookup[bsize]); + uv_tx = get_uv_tx_size_impl(mbmi->tx_size, bsize); if (rate_uv_intra[uv_tx] == INT_MAX) { - choose_intra_uv_mode(cpi, ctx, bsize, &rate_uv_intra[uv_tx], - &rate_uv_tokenonly[uv_tx], - &dist_uv[uv_tx], &skip_uv[uv_tx], - &mode_uv[uv_tx]); + choose_intra_uv_mode(cpi, ctx, bsize, uv_tx, + &rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx], + &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]); } rate_uv = rate_uv_tokenonly[uv_tx]; @@ -3491,8 +3452,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, rate2 += intra_cost_penalty; distortion2 = distortion_y + distortion_uv; } else { - mbmi->mode = this_mode; - compmode_cost = vp9_cost_bit(comp_mode_p, second_ref_frame > INTRA_FRAME); this_rd = handle_inter_mode(cpi, x, tile, bsize, tx_cache, &rate2, &distortion2, &skippable, @@ -3504,15 +3463,16 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, single_newmv, &total_sse, best_rd); if (this_rd == INT64_MAX) continue; - } - if (cm->comp_pred_mode == HYBRID_PREDICTION) { - rate2 += compmode_cost; + compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred); + + if (cm->reference_mode == REFERENCE_MODE_SELECT) + rate2 += compmode_cost; } // Estimate the reference frame signaling cost and add it // to the rolling cost variable. 
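The skip decision a little further down weighs coding the residual against signaling skip by comparing RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) with RDCOST(x->rdmult, x->rddiv, 0, total_sse); a tie falls through to the skip branch, since the comparison is strict. Using the macro exactly as the vp9_rdopt.h hunk later in this patch defines it, a worked instance with made-up inputs:

#include <stdint.h>
#include <stdio.h>

// RDCOST as defined in the vp9_rdopt.h hunk later in this diff; RM is the
// rate multiplier (lambda) and DM a distortion shift.
#define RDCOST(RM, DM, R, D) \
  (((128 + ((int64_t)R) * (RM)) >> 8) + (D << DM))

int main(void) {
  const int rdmult = 256, rddiv = 0;  // illustrative values only
  const int64_t code = RDCOST(rdmult, rddiv, 700, 900);  // 700 + 900 = 1600
  const int64_t skip = RDCOST(rdmult, rddiv, 0, 1600);   // 0 + 1600 = 1600
  // Equal costs here, so the strict '<' test would take the skip branch.
  printf("code=%lld skip=%lld\n", (long long)code, (long long)skip);
  return 0;
}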
- if (second_ref_frame > INTRA_FRAME) { + if (comp_pred) { rate2 += ref_costs_comp[ref_frame]; } else { rate2 += ref_costs_single[ref_frame]; @@ -3537,9 +3497,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, int prob_skip_cost; // Cost the skip mb case - vp9_prob skip_prob = - vp9_get_pred_prob_mbskip(cm, xd); - + vp9_prob skip_prob = vp9_get_skip_prob(cm, xd); if (skip_prob) { prob_skip_cost = vp9_cost_bit(skip_prob, 1); rate2 += prob_skip_cost; @@ -3549,14 +3507,10 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) < RDCOST(x->rdmult, x->rddiv, 0, total_sse)) { // Add in the cost of the no skip flag. - int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), - 0); - rate2 += prob_skip_cost; + rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0); } else { // FIXME(rbultje) make this work for splitmv also - int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), - 1); - rate2 += prob_skip_cost; + rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1); distortion2 = total_sse; assert(total_sse >= 0); rate2 -= (rate_y + rate_uv); @@ -3566,32 +3520,29 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, } } else if (mb_skip_allowed) { // Add in the cost of the no skip flag. - int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), - 0); - rate2 += prob_skip_cost; + rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0); } // Calculate the final RD estimate for this mode. this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2); } + if (ref_frame == INTRA_FRAME) { // Keep record of best intra rd - if (!is_inter_block(&xd->mi_8x8[0]->mbmi) && - this_rd < best_intra_rd) { - best_intra_rd = this_rd; - best_intra_mode = xd->mi_8x8[0]->mbmi.mode; - } - - // Keep record of best inter rd with single reference - if (is_inter_block(&xd->mi_8x8[0]->mbmi) && - !has_second_ref(&xd->mi_8x8[0]->mbmi) && - !mode_excluded && this_rd < best_inter_rd) { - best_inter_rd = this_rd; - best_inter_ref_frame = ref_frame; + if (this_rd < best_intra_rd) { + best_intra_rd = this_rd; + best_intra_mode = mbmi->mode; + } + } else { + // Keep record of best inter rd with single reference + if (!comp_pred && !mode_excluded && this_rd < best_inter_rd) { + best_inter_rd = this_rd; + best_inter_ref_frame = ref_frame; + } } if (!disable_skip && ref_frame == INTRA_FRAME) { - for (i = 0; i < NB_PREDICTION_TYPES; ++i) + for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = MIN(best_pred_rd[i], this_rd); for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) best_filter_rd[i] = MIN(best_filter_rd[i], this_rd); @@ -3602,10 +3553,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, || distortion2 < mode_distortions[this_mode]) { mode_distortions[this_mode] = distortion2; } - if (frame_distortions[ref_frame] == -1 - || distortion2 < frame_distortions[ref_frame]) { - frame_distortions[ref_frame] = distortion2; - } // Did this mode help.. i.e. 
is it the new best mode if (this_rd < best_rd || x->skip) { @@ -3632,7 +3579,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, // TODO(debargha): enhance this test with a better distortion prediction // based on qp, activity mask and history - if ((cpi->sf.mode_search_skip_flags & FLAG_EARLY_TERMINATE) && + if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) && (mode_index > MIN_EARLY_TERM_INDEX)) { const int qstep = xd->plane[0].dequant[1]; // TODO(debargha): Enhance this by specializing for each mode_index @@ -3651,9 +3598,9 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, /* keep record of best compound/single-only prediction */ if (!disable_skip && ref_frame != INTRA_FRAME) { - int single_rd, hybrid_rd, single_rate, hybrid_rate; + int64_t single_rd, hybrid_rd, single_rate, hybrid_rate; - if (cm->comp_pred_mode == HYBRID_PREDICTION) { + if (cm->reference_mode == REFERENCE_MODE_SELECT) { single_rate = rate2 - compmode_cost; hybrid_rate = rate2; } else { @@ -3664,40 +3611,39 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2); hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2); - if (second_ref_frame <= INTRA_FRAME && - single_rd < best_pred_rd[SINGLE_PREDICTION_ONLY]) { - best_pred_rd[SINGLE_PREDICTION_ONLY] = single_rd; - } else if (second_ref_frame > INTRA_FRAME && - single_rd < best_pred_rd[COMP_PREDICTION_ONLY]) { - best_pred_rd[COMP_PREDICTION_ONLY] = single_rd; + if (!comp_pred) { + if (single_rd < best_pred_rd[SINGLE_REFERENCE]) { + best_pred_rd[SINGLE_REFERENCE] = single_rd; + } + } else { + if (single_rd < best_pred_rd[COMPOUND_REFERENCE]) { + best_pred_rd[COMPOUND_REFERENCE] = single_rd; + } } - if (hybrid_rd < best_pred_rd[HYBRID_PREDICTION]) - best_pred_rd[HYBRID_PREDICTION] = hybrid_rd; - } + if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT]) + best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd; + + /* keep record of best filter type */ + if (!mode_excluded && cm->interp_filter != BILINEAR) { + int64_t ref = cpi->rd_filter_cache[cm->interp_filter == SWITCHABLE ? + SWITCHABLE_FILTERS : cm->interp_filter]; + + for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { + int64_t adj_rd; + if (ref == INT64_MAX) + adj_rd = 0; + else if (cpi->rd_filter_cache[i] == INT64_MAX) + // when early termination is triggered, the encoder does not have + // access to the rate-distortion cost. it only knows that the cost + // should be above the maximum valid value. hence it takes the known + // maximum plus an arbitrary constant as the rate-distortion cost. + adj_rd = cpi->mask_filter_rd - ref + 10; + else + adj_rd = cpi->rd_filter_cache[i] - ref; - /* keep record of best filter type */ - if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME && - cm->mcomp_filter_type != BILINEAR) { - int64_t ref = cpi->rd_filter_cache[cm->mcomp_filter_type == SWITCHABLE ? - SWITCHABLE_FILTERS : cm->mcomp_filter_type]; - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { - int64_t adj_rd; - // In cases of poor prediction, filter_cache[] can contain really big - // values, which actually are bigger than this_rd itself. This can - // cause negative best_filter_rd[] values, which is obviously silly. - // Therefore, if filter_cache < ref, we do an adjusted calculation. 
- if (cpi->rd_filter_cache[i] >= ref) { - adj_rd = this_rd + cpi->rd_filter_cache[i] - ref; - } else { - // FIXME(rbultje) do this for comppsred also - // - // To prevent out-of-range computation in - // adj_rd = cpi->rd_filter_cache[i] * this_rd / ref - // cpi->rd_filter_cache[i] / ref is converted to a 256 based ratio. - int tmp = cpi->rd_filter_cache[i] * 256 / ref; - adj_rd = (this_rd * tmp) >> 8; + adj_rd += this_rd; + best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd); } - best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd); } } @@ -3731,43 +3677,22 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, // If we used an estimate for the uv intra rd in the loop above... if (cpi->sf.use_uv_intra_rd_estimate) { // Do Intra UV best rd mode selection if best mode choice above was intra. - if (vp9_mode_order[best_mode_index].ref_frame == INTRA_FRAME) { - TX_SIZE uv_tx_size = get_uv_tx_size(mbmi); + if (vp9_mode_order[best_mode_index].ref_frame[0] == INTRA_FRAME) { + TX_SIZE uv_tx_size; + *mbmi = best_mbmode; + uv_tx_size = get_uv_tx_size(mbmi); rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size], &rate_uv_tokenonly[uv_tx_size], &dist_uv[uv_tx_size], &skip_uv[uv_tx_size], - bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize); + bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, + uv_tx_size); } } - // If we are using reference masking and the set mask flag is set then - // create the reference frame mask. - if (cpi->sf.reference_masking && cpi->set_ref_frame_mask) - cpi->ref_frame_mask = ~(1 << vp9_mode_order[best_mode_index].ref_frame); - - // Flag all modes that have a distortion thats > 2x the best we found at - // this level. - for (mode_index = 0; mode_index < MB_MODE_COUNT; ++mode_index) { - if (mode_index == NEARESTMV || mode_index == NEARMV || mode_index == NEWMV) - continue; - - if (mode_distortions[mode_index] > 2 * *returndistortion) { - ctx->modes_with_high_error |= (1 << mode_index); - } - } - - // Flag all ref frames that have a distortion thats > 2x the best we found at - // this level. - for (ref_frame = INTRA_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) { - if (frame_distortions[ref_frame] > 2 * *returndistortion) { - ctx->frames_with_high_error |= (1 << ref_frame); - } - } - - assert((cm->mcomp_filter_type == SWITCHABLE) || - (cm->mcomp_filter_type == best_mbmode.interp_filter) || - (best_mbmode.ref_frame[0] == INTRA_FRAME)); + assert((cm->interp_filter == SWITCHABLE) || + (cm->interp_filter == best_mbmode.interp_filter) || + !is_inter_block(&best_mbmode)); // Updating rd_thresh_freq_fact[] here means that the different // partition/block sizes are handled independently based on the best @@ -3776,16 +3701,13 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, // combination that wins out. 
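On the bookkeeping side, the adaptive threshold update in the hunk just below becomes symmetric: the winning mode's factor decays by one eighth, while every other mode's factor creeps up by RD_THRESH_INC until it hits adaptive_rd_thresh * RD_THRESH_MAX_FACT (the constants, 1 and 64, come from the vp9_rdopt.h hunk later in this diff). Because the mode loop's early-out test scales the static threshold by fact >> 5, fact acts as Q5 fixed point, with 32 meaning a 1.0x threshold. A compact sketch under those assumptions:

#define MIN(a, b) ((a) < (b) ? (a) : (b))

// Per-mode update applied after a block's best mode is chosen.
static int update_thresh_fact(int fact, int is_best_mode,
                              int adaptive_rd_thresh) {
  if (is_best_mode)
    return fact - (fact >> 3);  // e.g. 40 -> 35; threshold eases off ~12.5%
  return MIN(fact + 1 /* RD_THRESH_INC */,
             adaptive_rd_thresh * 64 /* RD_THRESH_MAX_FACT */);
}

The sub8x8 picker later in the diff applies the identical rewrite to rd_thresh_freq_sub8x8.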
if (cpi->sf.adaptive_rd_thresh) { for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) { + int *const fact = &cpi->rd_thresh_freq_fact[bsize][mode_index]; + if (mode_index == best_mode_index) { - cpi->rd_thresh_freq_fact[bsize][mode_index] -= - (cpi->rd_thresh_freq_fact[bsize][mode_index] >> 3); + *fact -= (*fact >> 3); } else { - cpi->rd_thresh_freq_fact[bsize][mode_index] += RD_THRESH_INC; - if (cpi->rd_thresh_freq_fact[bsize][mode_index] > - (cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT)) { - cpi->rd_thresh_freq_fact[bsize][mode_index] = - cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT; - } + *fact = MIN(*fact + RD_THRESH_INC, + cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT); } } } @@ -3794,7 +3716,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, *mbmi = best_mbmode; x->skip |= best_skip2; - for (i = 0; i < NB_PREDICTION_TYPES; ++i) { + for (i = 0; i < REFERENCE_MODES; ++i) { if (best_pred_rd[i] == INT64_MAX) best_pred_diff[i] = INT_MIN; else @@ -3808,13 +3730,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, else best_filter_diff[i] = best_rd - best_filter_rd[i]; } - if (cm->mcomp_filter_type == SWITCHABLE) + if (cm->interp_filter == SWITCHABLE) assert(best_filter_diff[SWITCHABLE_FILTERS] == 0); - } else { - vp9_zero(best_filter_diff); - } - - if (!x->skip) { for (i = 0; i < TX_MODES; i++) { if (best_tx_rd[i] == INT64_MAX) best_tx_diff[i] = 0; @@ -3822,11 +3739,11 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, best_tx_diff[i] = best_rd - best_tx_rd[i]; } } else { + vp9_zero(best_filter_diff); vp9_zero(best_tx_diff); } - set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1], - scale_factor); + set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); store_coding_context(x, ctx, best_mode_index, &mbmi->ref_mvs[mbmi->ref_frame[0]][0], &mbmi->ref_mvs[mbmi->ref_frame[1] < 0 ? 
0 : @@ -3857,16 +3774,12 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, struct buf_2d yv12_mb[4][MAX_MB_PLANE]; static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG, VP9_ALT_FLAG }; - int idx_list[4] = {0, - cpi->lst_fb_idx, - cpi->gld_fb_idx, - cpi->alt_fb_idx}; int64_t best_rd = best_rd_so_far; int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise int64_t best_tx_rd[TX_MODES]; int64_t best_tx_diff[TX_MODES]; - int64_t best_pred_diff[NB_PREDICTION_TYPES]; - int64_t best_pred_rd[NB_PREDICTION_TYPES]; + int64_t best_pred_diff[REFERENCE_MODES]; + int64_t best_pred_rd[REFERENCE_MODES]; int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS]; int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS]; MB_MODE_INFO best_mbmode = { 0 }; @@ -3875,19 +3788,17 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, vp9_prob comp_mode_p; int64_t best_inter_rd = INT64_MAX; MV_REFERENCE_FRAME best_inter_ref_frame = LAST_FRAME; - INTERPOLATION_TYPE tmp_best_filter = SWITCHABLE; + INTERP_FILTER tmp_best_filter = SWITCHABLE; int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES]; int64_t dist_uv[TX_SIZES]; int skip_uv[TX_SIZES]; MB_PREDICTION_MODE mode_uv[TX_SIZES] = { 0 }; - struct scale_factors scale_factor[4]; - unsigned int ref_frame_mask = 0; - unsigned int mode_mask = 0; - int intra_cost_penalty = 20 * vp9_dc_quant(cpi->common.base_qindex, - cpi->common.y_dc_delta_q); + int intra_cost_penalty = 20 * vp9_dc_quant(cm->base_qindex, cm->y_dc_delta_q); int_mv seg_mvs[4][MAX_REF_FRAMES]; b_mode_info best_bmodes[4]; int best_skip2 = 0; + int ref_frame_mask = 0; + int mode_skip_mask = 0; x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH; vpx_memset(x->zcoeff_blk[TX_4X4], 0, 4); @@ -3901,7 +3812,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, estimate_ref_frame_costs(cpi, segment_id, ref_costs_single, ref_costs_comp, &comp_mode_p); - for (i = 0; i < NB_PREDICTION_TYPES; ++i) + for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX; for (i = 0; i < TX_MODES; i++) best_tx_rd[i] = INT64_MAX; @@ -3912,26 +3823,28 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, *returnrate = INT_MAX; - // Create a mask set to 1 for each reference frame used by a smaller - // resolution. 
- if (cpi->sf.use_avoid_tested_higherror) { - ref_frame_mask = 0; - mode_mask = 0; - ref_frame_mask = ~ref_frame_mask; - mode_mask = ~mode_mask; - } - for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) { if (cpi->ref_frame_flags & flag_list[ref_frame]) { - setup_buffer_inter(cpi, x, tile, idx_list[ref_frame], ref_frame, - block_size, mi_row, mi_col, - frame_mv[NEARESTMV], frame_mv[NEARMV], - yv12_mb, scale_factor); + vp9_setup_buffer_inter(cpi, x, tile, + ref_frame, block_size, mi_row, mi_col, + frame_mv[NEARESTMV], frame_mv[NEARMV], + yv12_mb); } frame_mv[NEWMV][ref_frame].as_int = INVALID_MV; frame_mv[ZEROMV][ref_frame].as_int = 0; } + for (ref_frame = LAST_FRAME; + ref_frame <= ALTREF_FRAME && cpi->sf.reference_masking; ++ref_frame) { + int i; + for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { + if ((x->pred_mv_sad[ref_frame] >> 1) > x->pred_mv_sad[i]) { + ref_frame_mask |= (1 << ref_frame); + break; + } + } + } + for (mode_index = 0; mode_index < MAX_REFS; ++mode_index) { int mode_excluded = 0; int64_t this_rd = INT64_MAX; @@ -3950,40 +3863,35 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, tx_cache[i] = INT64_MAX; x->skip = 0; - ref_frame = vp9_ref_order[mode_index].ref_frame; - second_ref_frame = vp9_ref_order[mode_index].second_ref_frame; + ref_frame = vp9_ref_order[mode_index].ref_frame[0]; + second_ref_frame = vp9_ref_order[mode_index].ref_frame[1]; // Look at the reference frame of the best mode so far and set the // skip mask to look at a subset of the remaining modes. if (mode_index > 2 && cpi->sf.mode_skip_start < MAX_MODES) { if (mode_index == 3) { - switch (vp9_ref_order[best_mode_index].ref_frame) { + switch (vp9_ref_order[best_mode_index].ref_frame[0]) { case INTRA_FRAME: - cpi->mode_skip_mask = 0; + mode_skip_mask = 0; break; case LAST_FRAME: - cpi->mode_skip_mask = 0x0010; + mode_skip_mask = 0x0010; break; case GOLDEN_FRAME: - cpi->mode_skip_mask = 0x0008; + mode_skip_mask = 0x0008; break; case ALTREF_FRAME: - cpi->mode_skip_mask = 0x0000; + mode_skip_mask = 0x0000; break; case NONE: case MAX_REF_FRAMES: - assert(!"Invalid Reference frame"); + assert(0 && "Invalid Reference frame"); } } - if (cpi->mode_skip_mask & ((int64_t)1 << mode_index)) + if (mode_skip_mask & (1 << mode_index)) continue; } - // Skip if the current reference frame has been masked off - if (cpi->sf.reference_masking && !cpi->set_ref_frame_mask && - (cpi->ref_frame_mask & (1 << ref_frame))) - continue; - // Test best rd so far against threshold for trying this mode. if ((best_rd < ((int64_t)cpi->rd_thresh_sub8x8[segment_id][bsize][mode_index] * @@ -4012,7 +3920,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, comp_pred = second_ref_frame > INTRA_FRAME; if (comp_pred) { if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) - if (vp9_ref_order[best_mode_index].ref_frame == INTRA_FRAME) + if (vp9_ref_order[best_mode_index].ref_frame[0] == INTRA_FRAME) continue; if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_REFMISMATCH) if (ref_frame != best_inter_ref_frame && @@ -4022,35 +3930,32 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, // TODO(jingning, jkoleszar): scaling reference frame not supported for // sub8x8 blocks. 
- if (ref_frame > 0 && - vp9_is_scaled(scale_factor[ref_frame].sfc)) + if (ref_frame > 0 && vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf)) continue; if (second_ref_frame > 0 && - vp9_is_scaled(scale_factor[second_ref_frame].sfc)) + vp9_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf)) continue; - set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor); + set_ref_ptrs(cm, xd, ref_frame, second_ref_frame); mbmi->uv_mode = DC_PRED; // Evaluate all sub-pel filters irrespective of whether we can use // them for this frame. - mbmi->interp_filter = cm->mcomp_filter_type; - vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common); + mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP + : cm->interp_filter; + xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); if (comp_pred) { if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue; - set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor); - mode_excluded = mode_excluded - ? mode_excluded - : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY; + mode_excluded = mode_excluded ? mode_excluded + : cm->reference_mode == SINGLE_REFERENCE; } else { if (ref_frame != INTRA_FRAME && second_ref_frame != INTRA_FRAME) { - mode_excluded = - mode_excluded ? - mode_excluded : cm->comp_pred_mode == COMP_PREDICTION_ONLY; + mode_excluded = mode_excluded ? + mode_excluded : cm->reference_mode == COMPOUND_REFERENCE; } } @@ -4081,7 +3986,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, // unless ARNR filtering is enabled in which case we want // an unfiltered alternative. We allow near/nearest as well // because they may result in zero-zero MVs but be cheaper. - if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) + if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) continue; } @@ -4102,7 +4007,8 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, distortion2 += distortion_y; if (rate_uv_intra[TX_4X4] == INT_MAX) { - choose_intra_uv_mode(cpi, ctx, bsize, &rate_uv_intra[TX_4X4], + choose_intra_uv_mode(cpi, ctx, bsize, TX_4X4, + &rate_uv_intra[TX_4X4], &rate_uv_tokenonly[TX_4X4], &dist_uv[TX_4X4], &skip_uv[TX_4X4], &mode_uv[TX_4X4]); @@ -4139,13 +4045,21 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, cpi->rd_thresh_sub8x8[segment_id][bsize][THR_GOLD] : this_rd_thresh; xd->mi_8x8[0]->mbmi.tx_size = TX_4X4; - cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX; - if (cm->mcomp_filter_type != BILINEAR) { + cpi->mask_filter_rd = 0; + for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) + cpi->rd_filter_cache[i] = INT64_MAX; + + if (cm->interp_filter != BILINEAR) { tmp_best_filter = EIGHTTAP; if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) { tmp_best_filter = EIGHTTAP; - vp9_zero(cpi->rd_filter_cache); + } else if (cpi->sf.adaptive_pred_interp_filter == 1 && + ctx->pred_interp_filter < SWITCHABLE) { + tmp_best_filter = ctx->pred_interp_filter; + } else if (cpi->sf.adaptive_pred_interp_filter == 2) { + tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ? 
+ ctx->pred_interp_filter : 0; } else { for (switchable_filter_index = 0; switchable_filter_index < SWITCHABLE_FILTERS; @@ -4153,8 +4067,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int newbest, rs; int64_t rs_rd; mbmi->interp_filter = switchable_filter_index; - vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common); - + xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); tmp_rd = rd_pick_best_mbsegmentation(cpi, x, tile, &mbmi->ref_mvs[ref_frame][0], second_ref, @@ -4167,23 +4080,25 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, if (tmp_rd == INT64_MAX) continue; - cpi->rd_filter_cache[switchable_filter_index] = tmp_rd; rs = get_switchable_rate(x); rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0); + cpi->rd_filter_cache[switchable_filter_index] = tmp_rd; cpi->rd_filter_cache[SWITCHABLE_FILTERS] = MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd); - if (cm->mcomp_filter_type == SWITCHABLE) + if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd; + cpi->mask_filter_rd = MAX(cpi->mask_filter_rd, tmp_rd); + newbest = (tmp_rd < tmp_best_rd); if (newbest) { tmp_best_filter = mbmi->interp_filter; tmp_best_rd = tmp_rd; } - if ((newbest && cm->mcomp_filter_type == SWITCHABLE) || - (mbmi->interp_filter == cm->mcomp_filter_type && - cm->mcomp_filter_type != SWITCHABLE)) { + if ((newbest && cm->interp_filter == SWITCHABLE) || + (mbmi->interp_filter == cm->interp_filter && + cm->interp_filter != SWITCHABLE)) { tmp_best_rdu = tmp_rd; tmp_best_rate = rate; tmp_best_ratey = rate_y; @@ -4193,7 +4108,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, tmp_best_mbmode = *mbmi; for (i = 0; i < 4; i++) { tmp_best_bmodes[i] = xd->mi_8x8[0]->bmi[i]; - x->zcoeff_blk[TX_4X4][i] = !xd->plane[0].eobs[i]; + x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i]; } pred_exists = 1; if (switchable_filter_index == 0 && @@ -4212,12 +4127,12 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, } } - if (tmp_best_rdu == INT64_MAX) + if (tmp_best_rdu == INT64_MAX && pred_exists) continue; - mbmi->interp_filter = (cm->mcomp_filter_type == SWITCHABLE ? - tmp_best_filter : cm->mcomp_filter_type); - vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common); + mbmi->interp_filter = (cm->interp_filter == SWITCHABLE ? + tmp_best_filter : cm->interp_filter); + xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); if (!pred_exists) { // Handles the special case when a filter that is not in the // switchable list (bilinear, 6-tap) is indicated at the frame level @@ -4233,11 +4148,6 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, if (tmp_rd == INT64_MAX) continue; } else { - if (cpi->common.mcomp_filter_type == SWITCHABLE) { - int rs = get_switchable_rate(x); - tmp_best_rdu -= RDCOST(x->rdmult, x->rddiv, rs, 0); - } - tmp_rd = tmp_best_rdu; total_sse = tmp_best_sse; rate = tmp_best_rate; rate_y = tmp_best_ratey; @@ -4251,15 +4161,13 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, rate2 += rate; distortion2 += distortion; - if (cpi->common.mcomp_filter_type == SWITCHABLE) + if (cm->interp_filter == SWITCHABLE) rate2 += get_switchable_rate(x); - if (!mode_excluded) { - if (comp_pred) - mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY; - else - mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY; - } + if (!mode_excluded) + mode_excluded = comp_pred ? 
cm->reference_mode == SINGLE_REFERENCE + : cm->reference_mode == COMPOUND_REFERENCE; + compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred); tmp_best_rdu = best_rd - @@ -4286,9 +4194,8 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, } } - if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) { + if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost; - } // Estimate the reference frame signaling cost and add it // to the rolling cost variable. @@ -4311,14 +4218,10 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) < RDCOST(x->rdmult, x->rddiv, 0, total_sse)) { // Add in the cost of the no skip flag. - int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), - 0); - rate2 += prob_skip_cost; + rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0); } else { // FIXME(rbultje) make this work for splitmv also - int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), - 1); - rate2 += prob_skip_cost; + rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1); distortion2 = total_sse; assert(total_sse >= 0); rate2 -= (rate_y + rate_uv); @@ -4328,9 +4231,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, } } else if (mb_skip_allowed) { // Add in the cost of the no skip flag. - int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), - 0); - rate2 += prob_skip_cost; + rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0); } // Calculate the final RD estimate for this mode. @@ -4338,8 +4239,8 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, } // Keep record of best inter rd with single reference - if (xd->mi_8x8[0]->mbmi.ref_frame[0] > INTRA_FRAME && - xd->mi_8x8[0]->mbmi.ref_frame[1] == NONE && + if (is_inter_block(&xd->mi_8x8[0]->mbmi) && + !has_second_ref(&xd->mi_8x8[0]->mbmi) && !mode_excluded && this_rd < best_inter_rd) { best_inter_rd = this_rd; @@ -4347,7 +4248,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, } if (!disable_skip && ref_frame == INTRA_FRAME) { - for (i = 0; i < NB_PREDICTION_TYPES; ++i) + for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = MIN(best_pred_rd[i], this_rd); for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) best_filter_rd[i] = MIN(best_filter_rd[i], this_rd); @@ -4402,9 +4303,9 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, /* keep record of best compound/single-only prediction */ if (!disable_skip && ref_frame != INTRA_FRAME) { - int single_rd, hybrid_rd, single_rate, hybrid_rate; + int64_t single_rd, hybrid_rd, single_rate, hybrid_rate; - if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) { + if (cm->reference_mode == REFERENCE_MODE_SELECT) { single_rate = rate2 - compmode_cost; hybrid_rate = rate2; } else { @@ -4416,31 +4317,35 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2); if (second_ref_frame <= INTRA_FRAME && - single_rd < best_pred_rd[SINGLE_PREDICTION_ONLY]) { - best_pred_rd[SINGLE_PREDICTION_ONLY] = single_rd; + single_rd < best_pred_rd[SINGLE_REFERENCE]) { + best_pred_rd[SINGLE_REFERENCE] = single_rd; } else if (second_ref_frame > INTRA_FRAME && - single_rd < best_pred_rd[COMP_PREDICTION_ONLY]) { - best_pred_rd[COMP_PREDICTION_ONLY] = single_rd; + single_rd < best_pred_rd[COMPOUND_REFERENCE]) { + best_pred_rd[COMPOUND_REFERENCE] = single_rd; } - if (hybrid_rd < best_pred_rd[HYBRID_PREDICTION]) - best_pred_rd[HYBRID_PREDICTION] = 
hybrid_rd; + if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT]) + best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd; } /* keep record of best filter type */ if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME && - cm->mcomp_filter_type != BILINEAR) { - int64_t ref = cpi->rd_filter_cache[cm->mcomp_filter_type == SWITCHABLE ? - SWITCHABLE_FILTERS : cm->mcomp_filter_type]; + cm->interp_filter != BILINEAR) { + int64_t ref = cpi->rd_filter_cache[cm->interp_filter == SWITCHABLE ? + SWITCHABLE_FILTERS : cm->interp_filter]; + int64_t adj_rd; for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { - int64_t adj_rd; - // In cases of poor prediction, filter_cache[] can contain really big - // values, which actually are bigger than this_rd itself. This can - // cause negative best_filter_rd[] values, which is obviously silly. - // Therefore, if filter_cache < ref, we do an adjusted calculation. - if (cpi->rd_filter_cache[i] >= ref) - adj_rd = this_rd + cpi->rd_filter_cache[i] - ref; - else // FIXME(rbultje) do this for comppred also - adj_rd = this_rd - (ref - cpi->rd_filter_cache[i]) * this_rd / ref; + if (ref == INT64_MAX) + adj_rd = 0; + else if (cpi->rd_filter_cache[i] == INT64_MAX) + // when early termination is triggered, the encoder does not have + // access to the rate-distortion cost. it only knows that the cost + // should be above the maximum valid value. hence it takes the known + // maximum plus an arbitrary constant as the rate-distortion cost. + adj_rd = cpi->mask_filter_rd - ref + 10; + else + adj_rd = cpi->rd_filter_cache[i] - ref; + + adj_rd += this_rd; best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd); } } @@ -4479,30 +4384,27 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, // If we used an estimate for the uv intra rd in the loop above... if (cpi->sf.use_uv_intra_rd_estimate) { // Do Intra UV best rd mode selection if best mode choice above was intra. - if (vp9_ref_order[best_mode_index].ref_frame == INTRA_FRAME) { - TX_SIZE uv_tx_size = get_uv_tx_size(mbmi); + if (vp9_ref_order[best_mode_index].ref_frame[0] == INTRA_FRAME) { + TX_SIZE uv_tx_size; + *mbmi = best_mbmode; + uv_tx_size = get_uv_tx_size(mbmi); rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size], &rate_uv_tokenonly[uv_tx_size], &dist_uv[uv_tx_size], &skip_uv[uv_tx_size], - BLOCK_8X8); + BLOCK_8X8, uv_tx_size); } } - // If we are using reference masking and the set mask flag is set then - // create the reference frame mask. - if (cpi->sf.reference_masking && cpi->set_ref_frame_mask) - cpi->ref_frame_mask = ~(1 << vp9_ref_order[best_mode_index].ref_frame); - if (best_rd == INT64_MAX && bsize < BLOCK_8X8) { *returnrate = INT_MAX; - *returndistortion = INT_MAX; + *returndistortion = INT64_MAX; return best_rd; } - assert((cm->mcomp_filter_type == SWITCHABLE) || - (cm->mcomp_filter_type == best_mbmode.interp_filter) || - (best_mbmode.ref_frame[0] == INTRA_FRAME)); + assert((cm->interp_filter == SWITCHABLE) || + (cm->interp_filter == best_mbmode.interp_filter) || + !is_inter_block(&best_mbmode)); // Updating rd_thresh_freq_fact[] here means that the different // partition/block sizes are handled independently based on the best @@ -4511,16 +4413,13 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, // combination that wins out. 
if (cpi->sf.adaptive_rd_thresh) { for (mode_index = 0; mode_index < MAX_REFS; ++mode_index) { + int *const fact = &cpi->rd_thresh_freq_sub8x8[bsize][mode_index]; + if (mode_index == best_mode_index) { - cpi->rd_thresh_freq_sub8x8[bsize][mode_index] -= - (cpi->rd_thresh_freq_sub8x8[bsize][mode_index] >> 3); + *fact -= (*fact >> 3); } else { - cpi->rd_thresh_freq_sub8x8[bsize][mode_index] += RD_THRESH_INC; - if (cpi->rd_thresh_freq_sub8x8[bsize][mode_index] > - (cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT)) { - cpi->rd_thresh_freq_sub8x8[bsize][mode_index] = - cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT; - } + *fact = MIN(*fact + RD_THRESH_INC, + cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT); } } } @@ -4528,7 +4427,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, // macroblock modes *mbmi = best_mbmode; x->skip |= best_skip2; - if (best_mbmode.ref_frame[0] == INTRA_FRAME) { + if (!is_inter_block(&best_mbmode)) { for (i = 0; i < 4; i++) xd->mi_8x8[0]->bmi[i].as_mode = best_bmodes[i].as_mode; } else { @@ -4539,7 +4438,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, mbmi->mv[1].as_int = xd->mi_8x8[0]->bmi[3].as_mv[1].as_int; } - for (i = 0; i < NB_PREDICTION_TYPES; ++i) { + for (i = 0; i < REFERENCE_MODES; ++i) { if (best_pred_rd[i] == INT64_MAX) best_pred_diff[i] = INT_MIN; else @@ -4553,7 +4452,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, else best_filter_diff[i] = best_rd - best_filter_rd[i]; } - if (cm->mcomp_filter_type == SWITCHABLE) + if (cm->interp_filter == SWITCHABLE) assert(best_filter_diff[SWITCHABLE_FILTERS] == 0); } else { vp9_zero(best_filter_diff); @@ -4570,8 +4469,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, vp9_zero(best_tx_diff); } - set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1], - scale_factor); + set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); store_coding_context(x, ctx, best_mode_index, &mbmi->ref_mvs[mbmi->ref_frame[0]][0], &mbmi->ref_mvs[mbmi->ref_frame[1] < 0 ? 0 : diff --git a/libvpx/vp9/encoder/vp9_rdopt.h b/libvpx/vp9/encoder/vp9_rdopt.h index 92fb235..7d7aeed 100644 --- a/libvpx/vp9/encoder/vp9_rdopt.h +++ b/libvpx/vp9/encoder/vp9_rdopt.h @@ -8,24 +8,55 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_ENCODER_VP9_RDOPT_H_ #define VP9_ENCODER_VP9_RDOPT_H_ +#include "vp9/encoder/vp9_onyx_int.h" + +#ifdef __cplusplus +extern "C" { +#endif + #define RDDIV_BITS 7 #define RDCOST(RM, DM, R, D) \ (((128 + ((int64_t)R) * (RM)) >> 8) + (D << DM)) #define QIDX_SKIP_THRESH 115 +#define RD_THRESH_MAX_FACT 64 +#define RD_THRESH_INC 1 +#define RD_THRESH_POW 1.25 +#define RD_MULT_EPB_RATIO 64 + +#define MV_COST_WEIGHT 108 +#define MV_COST_WEIGHT_SUB 120 + +#define INVALID_MV 0x80008000 + struct TileInfo; -int vp9_compute_rd_mult(VP9_COMP *cpi, int qindex); +int vp9_compute_rd_mult(const VP9_COMP *cpi, int qindex); void vp9_initialize_rd_consts(VP9_COMP *cpi); void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex); +void vp9_model_rd_from_var_lapndz(unsigned int var, unsigned int n, + unsigned int qstep, int *rate, + int64_t *dist); + +void vp9_setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x, + const TileInfo *const tile, + MV_REFERENCE_FRAME ref_frame, + BLOCK_SIZE block_size, + int mi_row, int mi_col, + int_mv frame_nearest_mv[MAX_REF_FRAMES], + int_mv frame_near_mv[MAX_REF_FRAMES], + struct buf_2d yv12_mb[4][MAX_MB_PLANE]); + +const YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const VP9_COMP *cpi, + int ref_frame); + void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, int *r, int64_t *d, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, int64_t best_rd); @@ -50,12 +81,13 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, void vp9_init_me_luts(); -void vp9_set_mbmode_and_mvs(MACROBLOCK *x, - MB_PREDICTION_MODE mb, int_mv *mv); +void vp9_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size, + const struct macroblockd_plane *pd, + ENTROPY_CONTEXT t_above[16], + ENTROPY_CONTEXT t_left[16]); -void vp9_get_entropy_contexts(TX_SIZE tx_size, - ENTROPY_CONTEXT t_above[16], ENTROPY_CONTEXT t_left[16], - const ENTROPY_CONTEXT *above, const ENTROPY_CONTEXT *left, - int num_4x4_w, int num_4x4_h); +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_ENCODER_VP9_RDOPT_H_ diff --git a/libvpx/vp9/encoder/vp9_resize.c b/libvpx/vp9/encoder/vp9_resize.c new file mode 100644 index 0000000..4e6efae --- /dev/null +++ b/libvpx/vp9/encoder/vp9_resize.c @@ -0,0 +1,576 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <assert.h> +#include <limits.h> +#include <math.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "vp9/common/vp9_common.h" +#include "vp9/encoder/vp9_resize.h" + +#define FILTER_BITS 7 + +#define INTERP_TAPS 8 +#define SUBPEL_BITS 5 +#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1) +#define INTERP_PRECISION_BITS 32 + +typedef int16_t interp_kernel[INTERP_TAPS]; + +// Filters for interpolation (0.5-band) - note this also filters integer pels. 
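+// [Editor's note] Sanity check, not part of the original patch: each kernel
+// row in the tables below has INTERP_TAPS == 8 taps summing to
+// 1 << FILTER_BITS == 128 (e.g. -3 + 0 + 35 + 64 + 35 + 0 - 3 + 0 == 128 for
+// the first 0.5-band row), and each table holds 1 << SUBPEL_BITS == 32
+// phases; interpolate() below renormalizes each filtered sum with
+// ROUND_POWER_OF_TWO(sum, FILTER_BITS).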
+const interp_kernel vp9_filteredinterp_filters500[(1 << SUBPEL_BITS)] = { + {-3, 0, 35, 64, 35, 0, -3, 0}, + {-3, -1, 34, 64, 36, 1, -3, 0}, + {-3, -1, 32, 64, 38, 1, -3, 0}, + {-2, -2, 31, 63, 39, 2, -3, 0}, + {-2, -2, 29, 63, 41, 2, -3, 0}, + {-2, -2, 28, 63, 42, 3, -4, 0}, + {-2, -3, 27, 63, 43, 4, -4, 0}, + {-2, -3, 25, 62, 45, 5, -4, 0}, + {-2, -3, 24, 62, 46, 5, -4, 0}, + {-2, -3, 23, 61, 47, 6, -4, 0}, + {-2, -3, 21, 60, 49, 7, -4, 0}, + {-1, -4, 20, 60, 50, 8, -4, -1}, + {-1, -4, 19, 59, 51, 9, -4, -1}, + {-1, -4, 17, 58, 52, 10, -4, 0}, + {-1, -4, 16, 57, 53, 12, -4, -1}, + {-1, -4, 15, 56, 54, 13, -4, -1}, + {-1, -4, 14, 55, 55, 14, -4, -1}, + {-1, -4, 13, 54, 56, 15, -4, -1}, + {-1, -4, 12, 53, 57, 16, -4, -1}, + {0, -4, 10, 52, 58, 17, -4, -1}, + {-1, -4, 9, 51, 59, 19, -4, -1}, + {-1, -4, 8, 50, 60, 20, -4, -1}, + {0, -4, 7, 49, 60, 21, -3, -2}, + {0, -4, 6, 47, 61, 23, -3, -2}, + {0, -4, 5, 46, 62, 24, -3, -2}, + {0, -4, 5, 45, 62, 25, -3, -2}, + {0, -4, 4, 43, 63, 27, -3, -2}, + {0, -4, 3, 42, 63, 28, -2, -2}, + {0, -3, 2, 41, 63, 29, -2, -2}, + {0, -3, 2, 39, 63, 31, -2, -2}, + {0, -3, 1, 38, 64, 32, -1, -3}, + {0, -3, 1, 36, 64, 34, -1, -3} +}; + +// Filters for interpolation (0.625-band) - note this also filters integer pels. +const interp_kernel vp9_filteredinterp_filters625[(1 << SUBPEL_BITS)] = { + {-1, -8, 33, 80, 33, -8, -1, 0}, + {-1, -8, 30, 80, 35, -8, -1, 1}, + {-1, -8, 28, 80, 37, -7, -2, 1}, + {0, -8, 26, 79, 39, -7, -2, 1}, + {0, -8, 24, 79, 41, -7, -2, 1}, + {0, -8, 22, 78, 43, -6, -2, 1}, + {0, -8, 20, 78, 45, -5, -3, 1}, + {0, -8, 18, 77, 48, -5, -3, 1}, + {0, -8, 16, 76, 50, -4, -3, 1}, + {0, -8, 15, 75, 52, -3, -4, 1}, + {0, -7, 13, 74, 54, -3, -4, 1}, + {0, -7, 11, 73, 56, -2, -4, 1}, + {0, -7, 10, 71, 58, -1, -4, 1}, + {1, -7, 8, 70, 60, 0, -5, 1}, + {1, -6, 6, 68, 62, 1, -5, 1}, + {1, -6, 5, 67, 63, 2, -5, 1}, + {1, -6, 4, 65, 65, 4, -6, 1}, + {1, -5, 2, 63, 67, 5, -6, 1}, + {1, -5, 1, 62, 68, 6, -6, 1}, + {1, -5, 0, 60, 70, 8, -7, 1}, + {1, -4, -1, 58, 71, 10, -7, 0}, + {1, -4, -2, 56, 73, 11, -7, 0}, + {1, -4, -3, 54, 74, 13, -7, 0}, + {1, -4, -3, 52, 75, 15, -8, 0}, + {1, -3, -4, 50, 76, 16, -8, 0}, + {1, -3, -5, 48, 77, 18, -8, 0}, + {1, -3, -5, 45, 78, 20, -8, 0}, + {1, -2, -6, 43, 78, 22, -8, 0}, + {1, -2, -7, 41, 79, 24, -8, 0}, + {1, -2, -7, 39, 79, 26, -8, 0}, + {1, -2, -7, 37, 80, 28, -8, -1}, + {1, -1, -8, 35, 80, 30, -8, -1}, +}; + +// Filters for interpolation (0.75-band) - note this also filters integer pels. 
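+// [Editor's note] Illustrative arithmetic (a sketch, not in the patch): the
+// band is picked by choose_interp_filter() later in this file from the ratio
+// outlength / inlength measured in sixteenths. For example, a 1920 -> 1280
+// downscale gives 1280 * 16 == 20480, which is below 1920 * 11 == 21120 but
+// at least 1920 * 9 == 17280, so the 0.625-band table above is selected.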
+const interp_kernel vp9_filteredinterp_filters750[(1 << SUBPEL_BITS)] = { + {2, -11, 25, 96, 25, -11, 2, 0}, + {2, -11, 22, 96, 28, -11, 2, 0}, + {2, -10, 19, 95, 31, -11, 2, 0}, + {2, -10, 17, 95, 34, -12, 2, 0}, + {2, -9, 14, 94, 37, -12, 2, 0}, + {2, -8, 12, 93, 40, -12, 1, 0}, + {2, -8, 9, 92, 43, -12, 1, 1}, + {2, -7, 7, 91, 46, -12, 1, 0}, + {2, -7, 5, 90, 49, -12, 1, 0}, + {2, -6, 3, 88, 52, -12, 0, 1}, + {2, -5, 1, 86, 55, -12, 0, 1}, + {2, -5, -1, 84, 58, -11, 0, 1}, + {2, -4, -2, 82, 61, -11, -1, 1}, + {2, -4, -4, 80, 64, -10, -1, 1}, + {1, -3, -5, 77, 67, -9, -1, 1}, + {1, -3, -6, 75, 70, -8, -2, 1}, + {1, -2, -7, 72, 72, -7, -2, 1}, + {1, -2, -8, 70, 75, -6, -3, 1}, + {1, -1, -9, 67, 77, -5, -3, 1}, + {1, -1, -10, 64, 80, -4, -4, 2}, + {1, -1, -11, 61, 82, -2, -4, 2}, + {1, 0, -11, 58, 84, -1, -5, 2}, + {1, 0, -12, 55, 86, 1, -5, 2}, + {1, 0, -12, 52, 88, 3, -6, 2}, + {0, 1, -12, 49, 90, 5, -7, 2}, + {0, 1, -12, 46, 91, 7, -7, 2}, + {1, 1, -12, 43, 92, 9, -8, 2}, + {0, 1, -12, 40, 93, 12, -8, 2}, + {0, 2, -12, 37, 94, 14, -9, 2}, + {0, 2, -12, 34, 95, 17, -10, 2}, + {0, 2, -11, 31, 95, 19, -10, 2}, + {0, 2, -11, 28, 96, 22, -11, 2} +}; + +// Filters for interpolation (0.875-band) - note this also filters integer pels. +const interp_kernel vp9_filteredinterp_filters875[(1 << SUBPEL_BITS)] = { + {3, -8, 13, 112, 13, -8, 3, 0}, + {3, -7, 10, 112, 17, -9, 3, -1}, + {2, -6, 7, 111, 21, -9, 3, -1}, + {2, -5, 4, 111, 24, -10, 3, -1}, + {2, -4, 1, 110, 28, -11, 3, -1}, + {1, -3, -1, 108, 32, -12, 4, -1}, + {1, -2, -3, 106, 36, -13, 4, -1}, + {1, -1, -6, 105, 40, -14, 4, -1}, + {1, -1, -7, 102, 44, -14, 4, -1}, + {1, 0, -9, 100, 48, -15, 4, -1}, + {1, 1, -11, 97, 53, -16, 4, -1}, + {0, 1, -12, 95, 57, -16, 4, -1}, + {0, 2, -13, 91, 61, -16, 4, -1}, + {0, 2, -14, 88, 65, -16, 4, -1}, + {0, 3, -15, 84, 69, -17, 4, 0}, + {0, 3, -16, 81, 73, -16, 3, 0}, + {0, 3, -16, 77, 77, -16, 3, 0}, + {0, 3, -16, 73, 81, -16, 3, 0}, + {0, 4, -17, 69, 84, -15, 3, 0}, + {-1, 4, -16, 65, 88, -14, 2, 0}, + {-1, 4, -16, 61, 91, -13, 2, 0}, + {-1, 4, -16, 57, 95, -12, 1, 0}, + {-1, 4, -16, 53, 97, -11, 1, 1}, + {-1, 4, -15, 48, 100, -9, 0, 1}, + {-1, 4, -14, 44, 102, -7, -1, 1}, + {-1, 4, -14, 40, 105, -6, -1, 1}, + {-1, 4, -13, 36, 106, -3, -2, 1}, + {-1, 4, -12, 32, 108, -1, -3, 1}, + {-1, 3, -11, 28, 110, 1, -4, 2}, + {-1, 3, -10, 24, 111, 4, -5, 2}, + {-1, 3, -9, 21, 111, 7, -6, 2}, + {-1, 3, -9, 17, 112, 10, -7, 3} +}; + +// Filters for interpolation (full-band) - no filtering for integer pixels +const interp_kernel vp9_filteredinterp_filters1000[(1 << SUBPEL_BITS)] = { + {0, 0, 0, 128, 0, 0, 0, 0}, + {0, 1, -3, 128, 3, -1, 0, 0}, + {-1, 2, -6, 127, 7, -2, 1, 0}, + {-1, 3, -9, 126, 12, -4, 1, 0}, + {-1, 4, -12, 125, 16, -5, 1, 0}, + {-1, 4, -14, 123, 20, -6, 2, 0}, + {-1, 5, -15, 120, 25, -8, 2, 0}, + {-1, 5, -17, 118, 30, -9, 3, -1}, + {-1, 6, -18, 114, 35, -10, 3, -1}, + {-1, 6, -19, 111, 41, -12, 3, -1}, + {-1, 6, -20, 107, 46, -13, 4, -1}, + {-1, 6, -21, 103, 52, -14, 4, -1}, + {-1, 6, -21, 99, 57, -16, 5, -1}, + {-1, 6, -21, 94, 63, -17, 5, -1}, + {-1, 6, -20, 89, 68, -18, 5, -1}, + {-1, 6, -20, 84, 73, -19, 6, -1}, + {-1, 6, -20, 79, 79, -20, 6, -1}, + {-1, 6, -19, 73, 84, -20, 6, -1}, + {-1, 5, -18, 68, 89, -20, 6, -1}, + {-1, 5, -17, 63, 94, -21, 6, -1}, + {-1, 5, -16, 57, 99, -21, 6, -1}, + {-1, 4, -14, 52, 103, -21, 6, -1}, + {-1, 4, -13, 46, 107, -20, 6, -1}, + {-1, 3, -12, 41, 111, -19, 6, -1}, + {-1, 3, -10, 35, 114, -18, 6, -1}, + {-1, 3, -9, 30, 118, -17, 5, -1}, + {0, 2, -8, 25, 120, 
-15, 5, -1}, + {0, 2, -6, 20, 123, -14, 4, -1}, + {0, 1, -5, 16, 125, -12, 4, -1}, + {0, 1, -4, 12, 126, -9, 3, -1}, + {0, 1, -2, 7, 127, -6, 2, -1}, + {0, 0, -1, 3, 128, -3, 1, 0} +}; + +// Filters for factor of 2 downsampling. +static const int16_t vp9_down2_symeven_half_filter[] = {56, 12, -3, -1}; +static const int16_t vp9_down2_symodd_half_filter[] = {64, 35, 0, -3}; + +static const interp_kernel *choose_interp_filter(int inlength, int outlength) { + int outlength16 = outlength * 16; + if (outlength16 >= inlength * 16) + return vp9_filteredinterp_filters1000; + else if (outlength16 >= inlength * 13) + return vp9_filteredinterp_filters875; + else if (outlength16 >= inlength * 11) + return vp9_filteredinterp_filters750; + else if (outlength16 >= inlength * 9) + return vp9_filteredinterp_filters625; + else + return vp9_filteredinterp_filters500; +} + +static void interpolate(const uint8_t *const input, int inlength, + uint8_t *output, int outlength) { + const int64_t delta = (((uint64_t)inlength << 32) + outlength / 2) / + outlength; + const int64_t offset = inlength > outlength ? + (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength : + -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength; + uint8_t *optr = output; + int x, x1, x2, sum, k, int_pel, sub_pel; + int64_t y; + + const interp_kernel *interp_filters = + choose_interp_filter(inlength, outlength); + + x = 0; + y = offset; + while ((y >> INTERP_PRECISION_BITS) < (INTERP_TAPS / 2 - 1)) { + x++; + y += delta; + } + x1 = x; + x = outlength - 1; + y = delta * x + offset; + while ((y >> INTERP_PRECISION_BITS) + + (int64_t)(INTERP_TAPS / 2) >= inlength) { + x--; + y -= delta; + } + x2 = x; + if (x1 > x2) { + for (x = 0, y = offset; x < outlength; ++x, y += delta) { + const int16_t *filter; + int_pel = y >> INTERP_PRECISION_BITS; + sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; + filter = interp_filters[sub_pel]; + sum = 0; + for (k = 0; k < INTERP_TAPS; ++k) { + const int pk = int_pel - INTERP_TAPS / 2 + 1 + k; + sum += filter[k] * input[(pk < 0 ? 0 : + (pk >= inlength ? inlength - 1 : pk))]; + } + *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); + } + } else { + // Initial part. + for (x = 0, y = offset; x < x1; ++x, y += delta) { + const int16_t *filter; + int_pel = y >> INTERP_PRECISION_BITS; + sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; + filter = interp_filters[sub_pel]; + sum = 0; + for (k = 0; k < INTERP_TAPS; ++k) + sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ? + 0 : + int_pel - INTERP_TAPS / 2 + 1 + k)]; + *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); + } + // Middle part. + for (; x <= x2; ++x, y += delta) { + const int16_t *filter; + int_pel = y >> INTERP_PRECISION_BITS; + sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; + filter = interp_filters[sub_pel]; + sum = 0; + for (k = 0; k < INTERP_TAPS; ++k) + sum += filter[k] * input[int_pel - INTERP_TAPS / 2 + 1 + k]; + *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); + } + // End part. + for (; x < outlength; ++x, y += delta) { + const int16_t *filter; + int_pel = y >> INTERP_PRECISION_BITS; + sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; + filter = interp_filters[sub_pel]; + sum = 0; + for (k = 0; k < INTERP_TAPS; ++k) + sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= + inlength ? 
inlength - 1 : + int_pel - INTERP_TAPS / 2 + 1 + k)]; + *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); + } + } +} + +static void down2_symeven(const uint8_t *const input, int length, + uint8_t *output) { + // Actual filter len = 2 * filter_len_half. + static const int16_t *filter = vp9_down2_symeven_half_filter; + const int filter_len_half = sizeof(vp9_down2_symeven_half_filter) / 2; + int i, j; + uint8_t *optr = output; + int l1 = filter_len_half; + int l2 = (length - filter_len_half); + l1 += (l1 & 1); + l2 += (l2 & 1); + if (l1 > l2) { + // Short input length. + for (i = 0; i < length; i += 2) { + int sum = (1 << (FILTER_BITS - 1)); + for (j = 0; j < filter_len_half; ++j) { + sum += (input[(i - j < 0 ? 0 : i - j)] + + input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * + filter[j]; + } + sum >>= FILTER_BITS; + *optr++ = clip_pixel(sum); + } + } else { + // Initial part. + for (i = 0; i < l1; i += 2) { + int sum = (1 << (FILTER_BITS - 1)); + for (j = 0; j < filter_len_half; ++j) { + sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + 1 + j]) * filter[j]; + } + sum >>= FILTER_BITS; + *optr++ = clip_pixel(sum); + } + // Middle part. + for (; i < l2; i += 2) { + int sum = (1 << (FILTER_BITS - 1)); + for (j = 0; j < filter_len_half; ++j) { + sum += (input[i - j] + input[i + 1 + j]) * filter[j]; + } + sum >>= FILTER_BITS; + *optr++ = clip_pixel(sum); + } + // End part. + for (; i < length; i += 2) { + int sum = (1 << (FILTER_BITS - 1)); + for (j = 0; j < filter_len_half; ++j) { + sum += (input[i - j] + + input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * + filter[j]; + } + sum >>= FILTER_BITS; + *optr++ = clip_pixel(sum); + } + } +} + +static void down2_symodd(const uint8_t *const input, int length, + uint8_t *output) { + // Actual filter len = 2 * filter_len_half - 1. + static const int16_t *filter = vp9_down2_symodd_half_filter; + const int filter_len_half = sizeof(vp9_down2_symodd_half_filter) / 2; + int i, j; + uint8_t *optr = output; + int l1 = filter_len_half - 1; + int l2 = (length - filter_len_half + 1); + l1 += (l1 & 1); + l2 += (l2 & 1); + if (l1 > l2) { + // Short input length. + for (i = 0; i < length; i += 2) { + int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; + for (j = 1; j < filter_len_half; ++j) { + sum += (input[(i - j < 0 ? 0 : i - j)] + + input[(i + j >= length ? length - 1 : i + j)]) * + filter[j]; + } + sum >>= FILTER_BITS; + *optr++ = clip_pixel(sum); + } + } else { + // Initial part. + for (i = 0; i < l1; i += 2) { + int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; + for (j = 1; j < filter_len_half; ++j) { + sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + j]) * filter[j]; + } + sum >>= FILTER_BITS; + *optr++ = clip_pixel(sum); + } + // Middle part. + for (; i < l2; i += 2) { + int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; + for (j = 1; j < filter_len_half; ++j) { + sum += (input[i - j] + input[i + j]) * filter[j]; + } + sum >>= FILTER_BITS; + *optr++ = clip_pixel(sum); + } + // End part. + for (; i < length; i += 2) { + int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; + for (j = 1; j < filter_len_half; ++j) { + sum += (input[i - j] + input[(i + j >= length ? 
length - 1 : i + j)]) * + filter[j]; + } + sum >>= FILTER_BITS; + *optr++ = clip_pixel(sum); + } + } +} + +static int get_down2_length(int length, int steps) { + int s; + for (s = 0; s < steps; ++s) + length = (length + 1) >> 1; + return length; +} + +int get_down2_steps(int in_length, int out_length) { + int steps = 0; + int proj_in_length; + while ((proj_in_length = get_down2_length(in_length, 1)) >= out_length) { + ++steps; + in_length = proj_in_length; + } + return steps; +} + +static void resize_multistep(const uint8_t *const input, + int length, + uint8_t *output, + int olength, + uint8_t *buf) { + int steps; + if (length == olength) { + memcpy(output, input, sizeof(uint8_t) * length); + return; + } + steps = get_down2_steps(length, olength); + + if (steps > 0) { + int s; + uint8_t *out = NULL; + uint8_t *tmpbuf = NULL; + uint8_t *otmp, *otmp2; + int filteredlength = length; + if (!tmpbuf) { + tmpbuf = (uint8_t *)malloc(sizeof(uint8_t) * length); + otmp = tmpbuf; + } else { + otmp = buf; + } + otmp2 = otmp + get_down2_length(length, 1); + for (s = 0; s < steps; ++s) { + const int proj_filteredlength = get_down2_length(filteredlength, 1); + const uint8_t *const in = (s == 0 ? input : out); + if (s == steps - 1 && proj_filteredlength == olength) + out = output; + else + out = (s & 1 ? otmp2 : otmp); + if (filteredlength & 1) + down2_symodd(in, filteredlength, out); + else + down2_symeven(in, filteredlength, out); + filteredlength = proj_filteredlength; + } + if (filteredlength != olength) { + interpolate(out, filteredlength, output, olength); + } + if (tmpbuf) + free(tmpbuf); + } else { + interpolate(input, length, output, olength); + } +} + +static void fill_col_to_arr(uint8_t *img, int stride, int len, uint8_t *arr) { + int i; + uint8_t *iptr = img; + uint8_t *aptr = arr; + for (i = 0; i < len; ++i, iptr += stride) { + *aptr++ = *iptr; + } +} + +static void fill_arr_to_col(uint8_t *img, int stride, int len, uint8_t *arr) { + int i; + uint8_t *iptr = img; + uint8_t *aptr = arr; + for (i = 0; i < len; ++i, iptr += stride) { + *iptr = *aptr++; + } +} + +void vp9_resize_plane(const uint8_t *const input, + int height, + int width, + int in_stride, + uint8_t *output, + int height2, + int width2, + int out_stride) { + int i; + uint8_t *intbuf = (uint8_t *)malloc(sizeof(uint8_t) * width2 * height); + uint8_t *tmpbuf = (uint8_t *)malloc(sizeof(uint8_t) * + (width < height ? 
height : width)); + uint8_t *arrbuf = (uint8_t *)malloc(sizeof(uint8_t) * (height + height2)); + for (i = 0; i < height; ++i) + resize_multistep(input + in_stride * i, width, + intbuf + width2 * i, width2, tmpbuf); + for (i = 0; i < width2; ++i) { + fill_col_to_arr(intbuf + i, width2, height, arrbuf); + resize_multistep(arrbuf, height, arrbuf + height, height2, tmpbuf); + fill_arr_to_col(output + i, out_stride, height2, arrbuf + height); + } + free(intbuf); + free(tmpbuf); + free(arrbuf); +} + +void vp9_resize_frame420(const uint8_t *const y, + int y_stride, + const uint8_t *const u, const uint8_t *const v, + int uv_stride, + int height, int width, + uint8_t *oy, int oy_stride, + uint8_t *ou, uint8_t *ov, int ouv_stride, + int oheight, int owidth) { + vp9_resize_plane(y, height, width, y_stride, + oy, oheight, owidth, oy_stride); + vp9_resize_plane(u, height / 2, width / 2, uv_stride, + ou, oheight / 2, owidth / 2, ouv_stride); + vp9_resize_plane(v, height / 2, width / 2, uv_stride, + ov, oheight / 2, owidth / 2, ouv_stride); +} + +void vp9_resize_frame422(const uint8_t *const y, int y_stride, + const uint8_t *const u, const uint8_t *const v, + int uv_stride, + int height, int width, + uint8_t *oy, int oy_stride, + uint8_t *ou, uint8_t *ov, int ouv_stride, + int oheight, int owidth) { + vp9_resize_plane(y, height, width, y_stride, + oy, oheight, owidth, oy_stride); + vp9_resize_plane(u, height, width / 2, uv_stride, + ou, oheight, owidth / 2, ouv_stride); + vp9_resize_plane(v, height, width / 2, uv_stride, + ov, oheight, owidth / 2, ouv_stride); +} + +void vp9_resize_frame444(const uint8_t *const y, int y_stride, + const uint8_t *const u, const uint8_t *const v, + int uv_stride, + int height, int width, + uint8_t *oy, int oy_stride, + uint8_t *ou, uint8_t *ov, int ouv_stride, + int oheight, int owidth) { + vp9_resize_plane(y, height, width, y_stride, + oy, oheight, owidth, oy_stride); + vp9_resize_plane(u, height, width, uv_stride, + ou, oheight, owidth, ouv_stride); + vp9_resize_plane(v, height, width, uv_stride, + ov, oheight, owidth, ouv_stride); +} diff --git a/libvpx/vp9/encoder/vp9_resize.h b/libvpx/vp9/encoder/vp9_resize.h new file mode 100644 index 0000000..1818cd4 --- /dev/null +++ b/libvpx/vp9/encoder/vp9_resize.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef VP9_ENCODER_VP9_RESIZE_H_ +#define VP9_ENCODER_VP9_RESIZE_H_ + +#include <stdio.h> +#include "vpx/vpx_integer.h" + +void vp9_resize_plane(const uint8_t *const input, + int height, + int width, + int in_stride, + uint8_t *output, + int height2, + int width2, + int out_stride); +void vp9_resize_frame420(const uint8_t *const y, + int y_stride, + const uint8_t *const u, + const uint8_t *const v, + int uv_stride, + int height, + int width, + uint8_t *oy, + int oy_stride, + uint8_t *ou, + uint8_t *ov, + int ouv_stride, + int oheight, + int owidth); +void vp9_resize_frame422(const uint8_t *const y, + int y_stride, + const uint8_t *const u, + const uint8_t *const v, + int uv_stride, + int height, + int width, + uint8_t *oy, + int oy_stride, + uint8_t *ou, + uint8_t *ov, + int ouv_stride, + int oheight, + int owidth); +void vp9_resize_frame444(const uint8_t *const y, + int y_stride, + const uint8_t *const u, + const uint8_t *const v, + int uv_stride, + int height, + int width, + uint8_t *oy, + int oy_stride, + uint8_t *ou, + uint8_t *ov, + int ouv_stride, + int oheight, + int owidth); + +#endif // VP9_ENCODER_VP9_RESIZE_H_ diff --git a/libvpx/vp9/encoder/vp9_sad.c b/libvpx/vp9/encoder/vp9_sad.c new file mode 100644 index 0000000..58c5df4 --- /dev/null +++ b/libvpx/vp9/encoder/vp9_sad.c @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <stdlib.h> + +#include "./vp9_rtcd.h" +#include "./vpx_config.h" + +#include "vpx/vpx_integer.h" +#include "vp9/encoder/vp9_variance.h" + +static INLINE unsigned int sad(const uint8_t *a, int a_stride, + const uint8_t *b, int b_stride, + int width, int height) { + int y, x; + unsigned int sad = 0; + + for (y = 0; y < height; y++) { + for (x = 0; x < width; x++) + sad += abs(a[x] - b[x]); + + a += a_stride; + b += b_stride; + } + + return sad; +} + +#define sad_mxn_func(m, n) \ +unsigned int vp9_sad##m##x##n##_c(const uint8_t *src_ptr, int src_stride, \ + const uint8_t *ref_ptr, int ref_stride, \ + unsigned int max_sad) { \ + return sad(src_ptr, src_stride, ref_ptr, ref_stride, m, n); \ +} \ +unsigned int vp9_sad##m##x##n##_avg_c(const uint8_t *src_ptr, int src_stride, \ + const uint8_t *ref_ptr, int ref_stride, \ + const uint8_t *second_pred, \ + unsigned int max_sad) { \ + uint8_t comp_pred[m * n]; \ + comp_avg_pred(comp_pred, second_pred, m, n, ref_ptr, ref_stride); \ + return sad(src_ptr, src_stride, comp_pred, m, m, n); \ +} + +sad_mxn_func(64, 64) +sad_mxn_func(64, 32) +sad_mxn_func(32, 64) +sad_mxn_func(32, 32) +sad_mxn_func(32, 16) +sad_mxn_func(16, 32) +sad_mxn_func(16, 16) +sad_mxn_func(16, 8) +sad_mxn_func(8, 16) +sad_mxn_func(8, 8) +sad_mxn_func(8, 4) +sad_mxn_func(4, 8) +sad_mxn_func(4, 4) + +void vp9_sad64x32x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad64x32(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad32x64x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = 
vp9_sad32x64(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad32x16x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad32x16(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad16x32x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad16x32(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad64x64x3_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 3; ++i) + sad_array[i] = vp9_sad64x64(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad32x32x3_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 3; ++i) + sad_array[i] = vp9_sad32x32(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad64x64x8_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 8; ++i) + sad_array[i] = vp9_sad64x64(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad32x32x8_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 8; ++i) + sad_array[i] = vp9_sad32x32(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad16x16x3_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 3; ++i) + sad_array[i] = vp9_sad16x16(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad16x16x8_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + uint32_t *sad_array) { + int i; + for (i = 0; i < 8; ++i) + sad_array[i] = vp9_sad16x16(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad16x8x3_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 3; ++i) + sad_array[i] = vp9_sad16x8(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad16x8x8_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + uint32_t *sad_array) { + int i; + for (i = 0; i < 8; ++i) + sad_array[i] = vp9_sad16x8(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad8x8x3_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 3; ++i) + sad_array[i] = vp9_sad8x8(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad8x8x8_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + uint32_t *sad_array) { + int i; + for (i = 0; i < 8; ++i) + sad_array[i] = vp9_sad8x8(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad8x16x3_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 3; ++i) + sad_array[i] = vp9_sad8x16(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad8x16x8_c(const uint8_t 
*src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + uint32_t *sad_array) { + int i; + for (i = 0; i < 8; ++i) + sad_array[i] = vp9_sad8x16(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad4x4x3_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 3; ++i) + sad_array[i] = vp9_sad4x4(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad4x4x8_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + uint32_t *sad_array) { + int i; + for (i = 0; i < 8; ++i) + sad_array[i] = vp9_sad4x4(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad64x64x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad64x64(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad32x32x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad32x32(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad16x16x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad16x16(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad16x8x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad16x8(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad8x8x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad8x8(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad8x16x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad8x16(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad8x4x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad8x4(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad8x4x8_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + uint32_t *sad_array) { + int i; + for (i = 0; i < 8; ++i) + sad_array[i] = vp9_sad8x4(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad4x8x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int *sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad4x8(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} + +void vp9_sad4x8x8_c(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + uint32_t *sad_array) { + int i; + for (i = 0; i < 8; ++i) + sad_array[i] = vp9_sad4x8(src_ptr, src_stride, ref_ptr + i, ref_stride, + 0x7fffffff); +} + +void vp9_sad4x4x4d_c(const uint8_t *src_ptr, int src_stride, + const uint8_t* const ref_ptr[], int ref_stride, + unsigned int 
*sad_array) { + int i; + for (i = 0; i < 4; ++i) + sad_array[i] = vp9_sad4x4(src_ptr, src_stride, ref_ptr[i], ref_stride, + 0x7fffffff); +} diff --git a/libvpx/vp9/encoder/vp9_sad_c.c b/libvpx/vp9/encoder/vp9_sad_c.c deleted file mode 100644 index 42ddb21..0000000 --- a/libvpx/vp9/encoder/vp9_sad_c.c +++ /dev/null @@ -1,615 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#include <stdlib.h> -#include "vp9/common/vp9_sadmxn.h" -#include "vp9/encoder/vp9_variance.h" -#include "./vpx_config.h" -#include "vpx/vpx_integer.h" -#include "./vp9_rtcd.h" - -#define sad_mxn_func(m, n) \ -unsigned int vp9_sad##m##x##n##_c(const uint8_t *src_ptr, \ - int src_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int max_sad) { \ - return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, m, n); \ -} \ -unsigned int vp9_sad##m##x##n##_avg_c(const uint8_t *src_ptr, \ - int src_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - const uint8_t *second_pred, \ - unsigned int max_sad) { \ - uint8_t comp_pred[m * n]; \ - comp_avg_pred(comp_pred, second_pred, m, n, ref_ptr, ref_stride); \ - return sad_mx_n_c(src_ptr, src_stride, comp_pred, m, m, n); \ -} - -sad_mxn_func(64, 64) -sad_mxn_func(64, 32) -sad_mxn_func(32, 64) -sad_mxn_func(32, 32) -sad_mxn_func(32, 16) -sad_mxn_func(16, 32) -sad_mxn_func(16, 16) -sad_mxn_func(16, 8) -sad_mxn_func(8, 16) -sad_mxn_func(8, 8) -sad_mxn_func(8, 4) -sad_mxn_func(4, 8) -sad_mxn_func(4, 4) - -void vp9_sad64x32x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad64x32(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad64x32(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad64x32(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad64x32(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad32x64x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad32x64(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad32x64(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad32x64(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad32x64(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad32x16x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad32x16(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad32x16(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad32x16(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad32x16(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad16x32x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = 
vp9_sad16x32(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad16x32(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad16x32(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad16x32(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad64x64x3_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad64x64(src_ptr, src_stride, ref_ptr, ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad64x64(src_ptr, src_stride, ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad64x64(src_ptr, src_stride, ref_ptr + 2, ref_stride, - 0x7fffffff); -} - -void vp9_sad32x32x3_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr, ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr + 1, ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr + 2, ref_stride, 0x7fffffff); -} - -void vp9_sad64x64x8_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr, ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr + 2, ref_stride, - 0x7fffffff); - sad_array[3] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr + 3, ref_stride, - 0x7fffffff); - sad_array[4] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr + 4, ref_stride, - 0x7fffffff); - sad_array[5] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr + 5, ref_stride, - 0x7fffffff); - sad_array[6] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr + 6, ref_stride, - 0x7fffffff); - sad_array[7] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr + 7, ref_stride, - 0x7fffffff); -} - -void vp9_sad32x32x8_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr, ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr + 2, ref_stride, - 0x7fffffff); - sad_array[3] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr + 3, ref_stride, - 0x7fffffff); - sad_array[4] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr + 4, ref_stride, - 0x7fffffff); - sad_array[5] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr + 5, ref_stride, - 0x7fffffff); - sad_array[6] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr + 6, ref_stride, - 0x7fffffff); - sad_array[7] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr + 7, ref_stride, - 0x7fffffff); -} - -void vp9_sad16x16x3_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr, ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr + 1, ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr + 2, ref_stride, 0x7fffffff); -} - -void vp9_sad16x16x8_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - uint32_t *sad_array) { - sad_array[0] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr, 
ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr + 2, ref_stride, - 0x7fffffff); - sad_array[3] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr + 3, ref_stride, - 0x7fffffff); - sad_array[4] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr + 4, ref_stride, - 0x7fffffff); - sad_array[5] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr + 5, ref_stride, - 0x7fffffff); - sad_array[6] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr + 6, ref_stride, - 0x7fffffff); - sad_array[7] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr + 7, ref_stride, - 0x7fffffff); -} - -void vp9_sad16x8x3_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr, ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr + 1, ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr + 2, ref_stride, 0x7fffffff); -} - -void vp9_sad16x8x8_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - uint32_t *sad_array) { - sad_array[0] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr, ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr + 2, ref_stride, - 0x7fffffff); - sad_array[3] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr + 3, ref_stride, - 0x7fffffff); - sad_array[4] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr + 4, ref_stride, - 0x7fffffff); - sad_array[5] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr + 5, ref_stride, - 0x7fffffff); - sad_array[6] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr + 6, ref_stride, - 0x7fffffff); - sad_array[7] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr + 7, ref_stride, - 0x7fffffff); -} - -void vp9_sad8x8x3_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr, ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr + 1, ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr + 2, ref_stride, 0x7fffffff); -} - -void vp9_sad8x8x8_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - uint32_t *sad_array) { - sad_array[0] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr, ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr + 2, ref_stride, - 0x7fffffff); - sad_array[3] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr + 3, ref_stride, - 0x7fffffff); - sad_array[4] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr + 4, ref_stride, - 0x7fffffff); - sad_array[5] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr + 5, ref_stride, - 0x7fffffff); - sad_array[6] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr + 6, ref_stride, - 0x7fffffff); - sad_array[7] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr + 7, ref_stride, - 0x7fffffff); -} - -void vp9_sad8x16x3_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr, ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr + 1, ref_stride, 0x7fffffff); - sad_array[2] 
= vp9_sad8x16(src_ptr, src_stride, - ref_ptr + 2, ref_stride, 0x7fffffff); -} - -void vp9_sad8x16x8_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - uint32_t *sad_array) { - sad_array[0] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr, ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr + 2, ref_stride, - 0x7fffffff); - sad_array[3] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr + 3, ref_stride, - 0x7fffffff); - sad_array[4] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr + 4, ref_stride, - 0x7fffffff); - sad_array[5] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr + 5, ref_stride, - 0x7fffffff); - sad_array[6] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr + 6, ref_stride, - 0x7fffffff); - sad_array[7] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr + 7, ref_stride, - 0x7fffffff); -} - -void vp9_sad4x4x3_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr, ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr + 1, ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr + 2, ref_stride, 0x7fffffff); -} - -void vp9_sad4x4x8_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - uint32_t *sad_array) { - sad_array[0] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr, ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr + 2, ref_stride, - 0x7fffffff); - sad_array[3] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr + 3, ref_stride, - 0x7fffffff); - sad_array[4] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr + 4, ref_stride, - 0x7fffffff); - sad_array[5] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr + 5, ref_stride, - 0x7fffffff); - sad_array[6] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr + 6, ref_stride, - 0x7fffffff); - sad_array[7] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr + 7, ref_stride, - 0x7fffffff); -} - -void vp9_sad64x64x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad64x64(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad32x32x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad32x32(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad16x16x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr[1], ref_stride, 
0x7fffffff); - sad_array[2] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad16x16(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad16x8x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad16x8(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad8x8x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad8x8(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad8x16x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad8x16(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad8x4x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} - -void vp9_sad8x4x8_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - uint32_t *sad_array) { - sad_array[0] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr, ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr + 2, ref_stride, - 0x7fffffff); - sad_array[3] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr + 3, ref_stride, - 0x7fffffff); - sad_array[4] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr + 4, ref_stride, - 0x7fffffff); - sad_array[5] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr + 5, ref_stride, - 0x7fffffff); - sad_array[6] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr + 6, ref_stride, - 0x7fffffff); - sad_array[7] = vp9_sad8x4(src_ptr, src_stride, - ref_ptr + 7, ref_stride, - 0x7fffffff); -} - -void vp9_sad4x8x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr[3], 
ref_stride, 0x7fffffff); -} - -void vp9_sad4x8x8_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - uint32_t *sad_array) { - sad_array[0] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr, ref_stride, - 0x7fffffff); - sad_array[1] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr + 1, ref_stride, - 0x7fffffff); - sad_array[2] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr + 2, ref_stride, - 0x7fffffff); - sad_array[3] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr + 3, ref_stride, - 0x7fffffff); - sad_array[4] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr + 4, ref_stride, - 0x7fffffff); - sad_array[5] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr + 5, ref_stride, - 0x7fffffff); - sad_array[6] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr + 6, ref_stride, - 0x7fffffff); - sad_array[7] = vp9_sad4x8(src_ptr, src_stride, - ref_ptr + 7, ref_stride, - 0x7fffffff); -} - -void vp9_sad4x4x4d_c(const uint8_t *src_ptr, - int src_stride, - const uint8_t* const ref_ptr[], - int ref_stride, - unsigned int *sad_array) { - sad_array[0] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr[0], ref_stride, 0x7fffffff); - sad_array[1] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr[1], ref_stride, 0x7fffffff); - sad_array[2] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr[2], ref_stride, 0x7fffffff); - sad_array[3] = vp9_sad4x4(src_ptr, src_stride, - ref_ptr[3], ref_stride, 0x7fffffff); -} diff --git a/libvpx/vp9/encoder/vp9_segmentation.c b/libvpx/vp9/encoder/vp9_segmentation.c index 24f011f..fd8fa53 100644 --- a/libvpx/vp9/encoder/vp9_segmentation.c +++ b/libvpx/vp9/encoder/vp9_segmentation.c @@ -10,29 +10,26 @@ #include <limits.h> + #include "vpx_mem/vpx_mem.h" -#include "vp9/encoder/vp9_segmentation.h" + #include "vp9/common/vp9_pred_common.h" #include "vp9/common/vp9_tile_common.h" -void vp9_enable_segmentation(VP9_PTR ptr) { - VP9_COMP *cpi = (VP9_COMP *)ptr; - struct segmentation *const seg = &cpi->common.seg; +#include "vp9/encoder/vp9_cost.h" +#include "vp9/encoder/vp9_segmentation.h" +void vp9_enable_segmentation(struct segmentation *seg) { seg->enabled = 1; seg->update_map = 1; seg->update_data = 1; } -void vp9_disable_segmentation(VP9_PTR ptr) { - VP9_COMP *cpi = (VP9_COMP *)ptr; - struct segmentation *const seg = &cpi->common.seg; +void vp9_disable_segmentation(struct segmentation *seg) { seg->enabled = 0; } -void vp9_set_segmentation_map(VP9_PTR ptr, - unsigned char *segmentation_map) { - VP9_COMP *cpi = (VP9_COMP *)ptr; +void vp9_set_segmentation_map(VP9_COMP *cpi, unsigned char *segmentation_map) { struct segmentation *const seg = &cpi->common.seg; // Copy in the new segmentation map @@ -44,12 +41,9 @@ void vp9_set_segmentation_map(VP9_PTR ptr, seg->update_data = 1; } -void vp9_set_segment_data(VP9_PTR ptr, +void vp9_set_segment_data(struct segmentation *seg, signed char *feature_data, unsigned char abs_delta) { - VP9_COMP *cpi = (VP9_COMP *)ptr; - struct segmentation *const seg = &cpi->common.seg; - seg->abs_delta = abs_delta; vpx_memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data)); @@ -58,6 +52,15 @@ void vp9_set_segment_data(VP9_PTR ptr, // vpx_memcpy(cpi->mb.e_mbd.segment_feature_mask, 0, // sizeof(cpi->mb.e_mbd.segment_feature_mask)); } +void vp9_disable_segfeature(struct segmentation *seg, int segment_id, + SEG_LVL_FEATURES feature_id) { + seg->feature_mask[segment_id] &= ~(1 << feature_id); +} + +void vp9_clear_segdata(struct segmentation *seg, int segment_id, + SEG_LVL_FEATURES feature_id) { + seg->feature_data[segment_id][feature_id] = 0; +} // Based on set of segment 
counts, calculate a probability tree static void calc_segtree_probs(int *segcounts, vp9_prob *segment_tree_probs) { @@ -149,7 +152,7 @@ static void count_segs(VP9_COMP *cpi, const TileInfo *const tile, // Store the prediction status for this mb and update counts // as appropriate - vp9_set_pred_flag_seg_id(xd, pred_flag); + xd->mi_8x8[0]->mbmi.seg_id_predicted = pred_flag; temporal_predictor_count[pred_context][pred_flag]++; if (!pred_flag) @@ -287,3 +290,12 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) { vpx_memcpy(seg->tree_probs, no_pred_tree, sizeof(no_pred_tree)); } } + +void vp9_reset_segment_features(struct segmentation *seg) { + // Set up default state for MB feature flags + seg->enabled = 0; + seg->update_map = 0; + seg->update_data = 0; + vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs)); + vp9_clearall_segfeatures(seg); +} diff --git a/libvpx/vp9/encoder/vp9_segmentation.h b/libvpx/vp9/encoder/vp9_segmentation.h index 2183771..66c51a2 100644 --- a/libvpx/vp9/encoder/vp9_segmentation.h +++ b/libvpx/vp9/encoder/vp9_segmentation.h @@ -15,12 +15,22 @@ #include "vp9/common/vp9_blockd.h" #include "vp9/encoder/vp9_onyx_int.h" -void vp9_enable_segmentation(VP9_PTR ptr); -void vp9_disable_segmentation(VP9_PTR ptr); - +#ifdef __cplusplus +extern "C" { +#endif + +void vp9_enable_segmentation(struct segmentation *seg); +void vp9_disable_segmentation(struct segmentation *seg); + +void vp9_disable_segfeature(struct segmentation *seg, + int segment_id, + SEG_LVL_FEATURES feature_id); +void vp9_clear_segdata(struct segmentation *seg, + int segment_id, + SEG_LVL_FEATURES feature_id); // Valid values for a segment are 0 to 3 // Segmentation map is arranged as [Rows][Columns] -void vp9_set_segmentation_map(VP9_PTR ptr, unsigned char *segmentation_map); +void vp9_set_segmentation_map(VP9_COMP *cpi, unsigned char *segmentation_map); // The values given for each segment can be either deltas (from the default // value chosen for the frame) or absolute values. @@ -32,9 +42,15 @@ void vp9_set_segmentation_map(VP9_PTR ptr, unsigned char *segmentation_map); // // abs_delta = SEGMENT_DELTADATA (deltas) or abs_delta = SEGMENT_ABSDATA (use // the absolute values given).
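// [Editor's sketch] Hypothetical caller for the reworked API (not part of the
// patch): callers now pass the segmentation struct instead of an opaque
// VP9_PTR, e.g.
//   struct segmentation *const seg = &cpi->common.seg;
//   vp9_enable_segmentation(seg);
//   vp9_set_segment_data(seg, feature_data, SEGMENT_DELTADATA);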
-void vp9_set_segment_data(VP9_PTR ptr, signed char *feature_data, +void vp9_set_segment_data(struct segmentation *seg, signed char *feature_data, unsigned char abs_delta); void vp9_choose_segmap_coding_method(VP9_COMP *cpi); +void vp9_reset_segment_features(struct segmentation *seg); + +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_ENCODER_VP9_SEGMENTATION_H_ diff --git a/libvpx/vp9/encoder/vp9_subexp.c b/libvpx/vp9/encoder/vp9_subexp.c index 387fc90..fd82fa3 100644 --- a/libvpx/vp9/encoder/vp9_subexp.c +++ b/libvpx/vp9/encoder/vp9_subexp.c @@ -11,25 +11,13 @@ #include "vp9/common/vp9_common.h" #include "vp9/common/vp9_entropy.h" -#include "vp9/encoder/vp9_boolhuff.h" -#include "vp9/encoder/vp9_treewriter.h" +#include "vp9/encoder/vp9_cost.h" +#include "vp9/encoder/vp9_writer.h" -#define vp9_cost_upd ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd)) >> 8) #define vp9_cost_upd256 ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd))) static int update_bits[255]; -static int count_uniform(int v, int n) { - int l = get_unsigned_bits(n); - int m; - if (l == 0) return 0; - m = (1 << l) - n; - if (v < m) - return l - 1; - else - return l; -} - static int split_index(int i, int n, int modulus) { int max1 = (n - 1 - modulus / 2) / modulus + 1; if (i % modulus == modulus / 2) @@ -82,29 +70,16 @@ static int remap_prob(int v, int m) { return i; } -static int count_term_subexp(int word, int k, int num_syms) { - int count = 0; - int i = 0; - int mk = 0; - while (1) { - int b = (i ? k + i - 1 : k); - int a = (1 << b); - if (num_syms <= mk + 3 * a) { - count += count_uniform(word - mk, num_syms - mk); - break; - } else { - int t = (word >= mk + a); - count++; - if (t) { - i = i + 1; - mk += a; - } else { - count += b; - break; - } - } - } - return count; +static int count_term_subexp(int word) { + if (word < 16) + return 5; + if (word < 32) + return 6; + if (word < 64) + return 8; + if (word < 129) + return 10; + return 11; } static int prob_diff_update_cost(vp9_prob newp, vp9_prob oldp) { @@ -112,12 +87,9 @@ static int prob_diff_update_cost(vp9_prob newp, vp9_prob oldp) { return update_bits[delp] * 256; } -static void encode_uniform(vp9_writer *w, int v, int n) { - int l = get_unsigned_bits(n); - int m; - if (l == 0) - return; - m = (1 << l) - n; +static void encode_uniform(vp9_writer *w, int v) { + const int l = 8; + const int m = (1 << l) - 191; if (v < m) { vp9_write_literal(w, v, l - 1); } else { @@ -126,38 +98,32 @@ static void encode_uniform(vp9_writer *w, int v, int n) { } } -static void encode_term_subexp(vp9_writer *w, int word, int k, int num_syms) { - int i = 0; - int mk = 0; - while (1) { - int b = (i ? 
k + i - 1 : k); - int a = (1 << b); - if (num_syms <= mk + 3 * a) { - encode_uniform(w, word - mk, num_syms - mk); - break; - } else { - int t = (word >= mk + a); - vp9_write_literal(w, t, 1); - if (t) { - i = i + 1; - mk += a; - } else { - vp9_write_literal(w, word - mk, b); - break; - } - } +static INLINE int write_bit_gte(vp9_writer *w, int word, int test) { + vp9_write_literal(w, word >= test, 1); + return word >= test; +} + +static void encode_term_subexp(vp9_writer *w, int word) { + if (!write_bit_gte(w, word, 16)) { + vp9_write_literal(w, word, 4); + } else if (!write_bit_gte(w, word, 32)) { + vp9_write_literal(w, word - 16, 4); + } else if (!write_bit_gte(w, word, 64)) { + vp9_write_literal(w, word - 32, 5); + } else { + encode_uniform(w, word - 64); } } void vp9_write_prob_diff_update(vp9_writer *w, vp9_prob newp, vp9_prob oldp) { const int delp = remap_prob(newp, oldp); - encode_term_subexp(w, delp, SUBEXP_PARAM, 255); + encode_term_subexp(w, delp); } void vp9_compute_update_table() { int i; for (i = 0; i < 254; i++) - update_bits[i] = count_term_subexp(i, SUBEXP_PARAM, 255); + update_bits[i] = count_term_subexp(i); } int vp9_prob_diff_update_savings_search(const unsigned int *ct, @@ -184,8 +150,7 @@ int vp9_prob_diff_update_savings_search(const unsigned int *ct, int vp9_prob_diff_update_savings_search_model(const unsigned int *ct, const vp9_prob *oldp, vp9_prob *bestp, - vp9_prob upd, - int b, int r) { + vp9_prob upd) { int i, old_b, new_b, update_b, savings, bestsavings, step; int newp; vp9_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES]; diff --git a/libvpx/vp9/encoder/vp9_subexp.h b/libvpx/vp9/encoder/vp9_subexp.h index 521c777..8e9c0c6 100644 --- a/libvpx/vp9/encoder/vp9_subexp.h +++ b/libvpx/vp9/encoder/vp9_subexp.h @@ -9,8 +9,12 @@ */ -#ifndef VP9_DECODER_VP9_SUBEXP_H_ -#define VP9_DECODER_VP9_SUBEXP_H_ +#ifndef VP9_ENCODER_VP9_SUBEXP_H_ +#define VP9_ENCODER_VP9_SUBEXP_H_ + +#ifdef __cplusplus +extern "C" { +#endif void vp9_compute_update_table(); @@ -29,7 +33,10 @@ int vp9_prob_diff_update_savings_search(const unsigned int *ct, int vp9_prob_diff_update_savings_search_model(const unsigned int *ct, const vp9_prob *oldp, vp9_prob *bestp, - vp9_prob upd, - int b, int r); + vp9_prob upd); + +#ifdef __cplusplus +} // extern "C" +#endif -#endif // VP9_DECODER_VP9_SUBEXP_H_ +#endif // VP9_ENCODER_VP9_SUBEXP_H_ diff --git a/libvpx/vp9/encoder/vp9_temporal_filter.c b/libvpx/vp9/encoder/vp9_temporal_filter.c index 2cace03..6233116 100644 --- a/libvpx/vp9/encoder/vp9_temporal_filter.c +++ b/libvpx/vp9/encoder/vp9_temporal_filter.c @@ -11,37 +11,46 @@ #include <math.h> #include <limits.h> +#include "vp9/common/vp9_alloccommon.h" #include "vp9/common/vp9_onyxc_int.h" +#include "vp9/common/vp9_quant_common.h" #include "vp9/common/vp9_reconinter.h" -#include "vp9/encoder/vp9_onyx_int.h" #include "vp9/common/vp9_systemdependent.h" -#include "vp9/encoder/vp9_quantize.h" -#include "vp9/common/vp9_alloccommon.h" -#include "vp9/encoder/vp9_mcomp.h" +#include "vp9/encoder/vp9_extend.h" #include "vp9/encoder/vp9_firstpass.h" -#include "vp9/encoder/vp9_psnr.h" -#include "vpx_scale/vpx_scale.h" -#include "vp9/common/vp9_extend.h" +#include "vp9/encoder/vp9_mcomp.h" +#include "vp9/encoder/vp9_onyx_int.h" +#include "vp9/encoder/vp9_quantize.h" #include "vp9/encoder/vp9_ratectrl.h" -#include "vp9/common/vp9_quant_common.h" #include "vp9/encoder/vp9_segmentation.h" #include "vpx_mem/vpx_mem.h" #include "vpx_ports/vpx_timer.h" +#include "vpx_scale/vpx_scale.h" #define 
ALT_REF_MC_ENABLED 1 // dis/enable MC in AltRef filtering -#define ALT_REF_SUBPEL_ENABLED 1 // dis/enable subpel in MC AltRef filtering static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr, int stride, + int uv_block_size, int mv_row, int mv_col, uint8_t *pred, - struct scale_factors *scale) { + struct scale_factors *scale, + int x, int y) { const int which_mv = 0; MV mv = { mv_row, mv_col }; + enum mv_precision mv_precision_uv; + int uv_stride; + if (uv_block_size == 8) { + uv_stride = (stride + 1) >> 1; + mv_precision_uv = MV_PRECISION_Q4; + } else { + uv_stride = stride; + mv_precision_uv = MV_PRECISION_Q3; + } vp9_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, @@ -49,25 +58,23 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd, scale, 16, 16, which_mv, - &xd->subpix, MV_PRECISION_Q3); - - stride = (stride + 1) >> 1; + xd->interp_kernel, MV_PRECISION_Q3, x, y); - vp9_build_inter_predictor(u_mb_ptr, stride, - &pred[256], 8, + vp9_build_inter_predictor(u_mb_ptr, uv_stride, + &pred[256], uv_block_size, &mv, scale, - 8, 8, + uv_block_size, uv_block_size, which_mv, - &xd->subpix, MV_PRECISION_Q4); + xd->interp_kernel, mv_precision_uv, x, y); - vp9_build_inter_predictor(v_mb_ptr, stride, - &pred[320], 8, + vp9_build_inter_predictor(v_mb_ptr, uv_stride, + &pred[512], uv_block_size, &mv, scale, - 8, 8, + uv_block_size, uv_block_size, which_mv, - &xd->subpix, MV_PRECISION_Q4); + xd->interp_kernel, mv_precision_uv, x, y); } void vp9_temporal_filter_apply_c(uint8_t *frame1, @@ -117,25 +124,23 @@ void vp9_temporal_filter_apply_c(uint8_t *frame1, static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, uint8_t *arf_frame_buf, uint8_t *frame_ptr_buf, - int stride, - int error_thresh) { + int stride) { MACROBLOCK *x = &cpi->mb; MACROBLOCKD* const xd = &x->e_mbd; int step_param; int sadpb = x->sadperbit16; int bestsme = INT_MAX; - int_mv best_ref_mv1; - int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */ - int_mv *ref_mv; + MV best_ref_mv1 = {0, 0}; + MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */ + MV *ref_mv = &x->e_mbd.mi_8x8[0]->bmi[0].as_mv[0].as_mv; // Save input state struct buf_2d src = x->plane[0].src; struct buf_2d pre = xd->plane[0].pre[0]; - best_ref_mv1.as_int = 0; - best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >> 3; - best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >> 3; + best_ref_mv1_full.col = best_ref_mv1.col >> 3; + best_ref_mv1_full.row = best_ref_mv1.row >> 3; // Setup frame pointers x->plane[0].src.buf = arf_frame_buf; @@ -152,21 +157,17 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, /*cpi->sf.search_method == HEX*/ // Ignore mv costing by sending NULL pointer instead of cost arrays - ref_mv = &x->e_mbd.mi_8x8[0]->bmi[0].as_mv[0]; - bestsme = vp9_hex_search(x, &best_ref_mv1_full.as_mv, - step_param, sadpb, 1, - &cpi->fn_ptr[BLOCK_16X16], - 0, &best_ref_mv1.as_mv, &ref_mv->as_mv); + vp9_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1, + &cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1, ref_mv); -#if ALT_REF_SUBPEL_ENABLED // Try sub-pixel MC? 
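[Editor's aside on the uv_block_size plumbing introduced above, before the hunk continues.] With 4:2:0 subsampling the chroma planes are half the luma resolution, so the unscaled luma motion vector has to be reinterpreted rather than divided: one Q3 step is 1/8 of a luma pel, which at half-resolution chroma is exactly 1/16 of a chroma pel, i.e. Q4. Passing the same MV with MV_PRECISION_Q4 therefore lands it on the same spatial point in the smaller plane. A standalone sketch of that mapping; the function and enum names are mine, not from the patch (the real code passes MV_PRECISION_Q3/Q4 into vp9_build_inter_predictor):

#include <assert.h>
#include <stdio.h>

enum mv_precision_sketch { PRECISION_Q3 = 3, PRECISION_Q4 = 4 };

/* Pick the chroma stride and MV precision from the chroma block size,
 * mirroring the uv_block_size == 8 (4:2:0) vs 16 (4:4:4) split above. */
static void uv_params(int uv_block_size, int y_stride,
                      int *uv_stride, enum mv_precision_sketch *precision) {
  assert(uv_block_size == 8 || uv_block_size == 16);
  if (uv_block_size == 8) {
    /* 4:2:0: chroma is half resolution. Round the stride up, and read
     * the luma MV in 1/16-pel units so the same integer value maps to
     * the same spatial position in the smaller plane. */
    *uv_stride = (y_stride + 1) >> 1;
    *precision = PRECISION_Q4;
  } else {
    /* 4:4:4: chroma matches luma; keep the stride and Q3 (1/8 pel). */
    *uv_stride = y_stride;
    *precision = PRECISION_Q3;
  }
}

int main(void) {
  int uv_stride;
  enum mv_precision_sketch precision;
  uv_params(8, 33, &uv_stride, &precision);          /* 4:2:0 case */
  printf("uv_stride=%d, mv unit=1/%d pel\n", uv_stride, 1 << (int)precision);
  return 0;
}

For 4:4:4 material the chroma blocks are 16x16 and nothing changes, which is why the else branch keeps the luma stride and Q3.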
// if (bestsme > error_thresh && bestsme < INT_MAX)
   {
     int distortion;
     unsigned int sse;
     // Ignore mv costing by sending NULL pointer instead of cost array
-    bestsme = cpi->find_fractional_mv_step(x, &ref_mv->as_mv,
-                                           &best_ref_mv1.as_mv,
+    bestsme = cpi->find_fractional_mv_step(x, ref_mv,
+                                           &best_ref_mv1,
                                            cpi->common.allow_high_precision_mv,
                                            x->errorperbit,
                                            &cpi->fn_ptr[BLOCK_16X16],
@@ -174,7 +175,6 @@
                                            NULL, NULL,
                                            &distortion, &sse);
   }
-#endif

   // Restore input state
   x->plane[0].src = src;
@@ -197,24 +197,28 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
   int mb_rows = cpi->common.mb_rows;
   int mb_y_offset = 0;
   int mb_uv_offset = 0;
-  DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 + 8 * 8 + 8 * 8);
-  DECLARE_ALIGNED_ARRAY(16, uint16_t, count, 16 * 16 + 8 * 8 + 8 * 8);
+  DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 * 3);
+  DECLARE_ALIGNED_ARRAY(16, uint16_t, count, 16 * 16 * 3);
   MACROBLOCKD *mbd = &cpi->mb.e_mbd;
   YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index];
   uint8_t *dst1, *dst2;
-  DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 + 8 * 8 + 8 * 8);
+  DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 * 3);
+  const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y;

   // Save input state
   uint8_t* input_buffer[MAX_MB_PLANE];
   int i;
+  // TODO(aconverse): Add 4:2:2 support
+  assert(mbd->plane[1].subsampling_x == mbd->plane[1].subsampling_y);
+
   for (i = 0; i < MAX_MB_PLANE; i++)
     input_buffer[i] = mbd->plane[i].pre[0].buf;

   for (mb_row = 0; mb_row < mb_rows; mb_row++) {
 #if ALT_REF_MC_ENABLED
     // Source frames are extended to 16 pixels. This is different than
-    // L/A/G reference frames that have a border of 32 (VP9BORDERINPIXELS)
+    // L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS)
     // A 6/8 tap filter is used for motion search. This requires 2 pixels
     // before and 3 pixels after. So the largest Y mv on a border would
     // then be 16 - VP9_INTERP_EXTEND. The UV blocks are half the size of the
@@ -233,8 +237,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
       int i, j, k;
       int stride;

-      vpx_memset(accumulator, 0, 384 * sizeof(unsigned int));
-      vpx_memset(count, 0, 384 * sizeof(uint16_t));
+      vpx_memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
+      vpx_memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));

 #if ALT_REF_MC_ENABLED
       cpi->mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND));
@@ -262,8 +266,7 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
               (cpi,
                cpi->frames[alt_ref_index]->y_buffer + mb_y_offset,
                cpi->frames[frame]->y_buffer + mb_y_offset,
-               cpi->frames[frame]->y_stride,
-               THRESH_LOW);
+               cpi->frames[frame]->y_stride);
 #endif
           // Assign higher weight to matching MB if its error
If not applying MC default behavior @@ -280,9 +283,11 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, cpi->frames[frame]->u_buffer + mb_uv_offset, cpi->frames[frame]->v_buffer + mb_uv_offset, cpi->frames[frame]->y_stride, + mb_uv_height, mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row, mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col, - predictor, scale); + predictor, scale, + mb_col * 16, mb_row * 16); // Apply the filter (YUV) vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride, @@ -290,12 +295,14 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, accumulator, count); vp9_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride, - predictor + 256, 8, strength, filter_weight, - accumulator + 256, count + 256); + predictor + 256, mb_uv_height, strength, + filter_weight, accumulator + 256, + count + 256); vp9_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride, - predictor + 320, 8, strength, filter_weight, - accumulator + 320, count + 320); + predictor + 512, mb_uv_height, strength, + filter_weight, accumulator + 512, + count + 512); } } @@ -322,9 +329,9 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, dst2 = cpi->alt_ref_buffer.v_buffer; stride = cpi->alt_ref_buffer.uv_stride; byte = mb_uv_offset; - for (i = 0, k = 256; i < 8; i++) { - for (j = 0; j < 8; j++, k++) { - int m = k + 64; + for (i = 0, k = 256; i < mb_uv_height; i++) { + for (j = 0; j < mb_uv_height; j++, k++) { + int m = k + 256; // U unsigned int pval = accumulator[k] + (count[k] >> 1); @@ -342,15 +349,15 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, byte++; } - byte += stride - 8; + byte += stride - mb_uv_height; } mb_y_offset += 16; - mb_uv_offset += 8; + mb_uv_offset += mb_uv_height; } mb_y_offset += 16 * (f->y_stride - mb_cols); - mb_uv_offset += 8 * (f->uv_stride - mb_cols); + mb_uv_offset += mb_uv_height * (f->uv_stride - mb_cols); } // Restore input state @@ -375,9 +382,7 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) { const int num_frames_backward = distance; const int num_frames_forward = vp9_lookahead_depth(cpi->lookahead) - (num_frames_backward + 1); - - struct scale_factors scale; - struct scale_factors_common scale_comm; + struct scale_factors sf; switch (blur_type) { case 1: @@ -392,7 +397,6 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) { case 2: // Forward Blur - frames_to_blur_forward = num_frames_forward; if (frames_to_blur_forward >= max_frames) @@ -437,7 +441,7 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) { #endif // Setup scaling factors. Scaling on each of the arnr frames is not supported - vp9_setup_scale_factors_for_frame(&scale, &scale_comm, + vp9_setup_scale_factors_for_frame(&sf, get_frame_new_buffer(cm)->y_crop_width, get_frame_new_buffer(cm)->y_crop_height, cm->width, cm->height); @@ -452,25 +456,27 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) { } temporal_filter_iterate_c(cpi, frames_to_blur, frames_to_blur_backward, - strength, &scale); + strength, &sf); } -void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame, - const int group_boost) { +void vp9_configure_arnr_filter(VP9_COMP *cpi, + const unsigned int frames_to_arnr, + const int group_boost) { int half_gf_int; int frames_after_arf; int frames_bwd = cpi->oxcf.arnr_max_frames - 1; int frames_fwd = cpi->oxcf.arnr_max_frames - 1; int q; - // Define the arnr filter width for this group of frames: - // We only filter frames that lie within a distance of half - // the GF interval from the ARF frame. 
We also have to trap - // cases where the filter extends beyond the end of clip. - // Note: this_frame->frame has been updated in the loop - // so it now points at the ARF frame. - half_gf_int = cpi->baseline_gf_interval >> 1; - frames_after_arf = (int)(cpi->twopass.total_stats.count - this_frame - 1); + // Define the arnr filter width for this group of frames. We only + // filter frames that lie within a distance of half the GF interval + // from the ARF frame. We also have to trap cases where the filter + // extends beyond the end of the lookahead buffer. + // Note: frames_to_arnr parameter is the offset of the arnr + // frame from the current frame. + half_gf_int = cpi->rc.baseline_gf_interval >> 1; + frames_after_arf = vp9_lookahead_depth(cpi->lookahead) + - frames_to_arnr - 1; switch (cpi->oxcf.arnr_type) { case 1: // Backward filter @@ -507,11 +513,16 @@ void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame, cpi->active_arnr_frames = frames_bwd + 1 + frames_fwd; // Adjust the strength based on active max q - q = ((int)vp9_convert_qindex_to_q(cpi->active_worst_quality) >> 1); - if (q > 8) { + if (cpi->common.current_video_frame > 1) + q = ((int)vp9_convert_qindex_to_q( + cpi->rc.avg_frame_qindex[INTER_FRAME])); + else + q = ((int)vp9_convert_qindex_to_q( + cpi->rc.avg_frame_qindex[KEY_FRAME])); + if (q > 16) { cpi->active_arnr_strength = cpi->oxcf.arnr_strength; } else { - cpi->active_arnr_strength = cpi->oxcf.arnr_strength - (8 - q); + cpi->active_arnr_strength = cpi->oxcf.arnr_strength - ((16 - q) / 2); if (cpi->active_arnr_strength < 0) cpi->active_arnr_strength = 0; } diff --git a/libvpx/vp9/encoder/vp9_temporal_filter.h b/libvpx/vp9/encoder/vp9_temporal_filter.h index c5f3b46..3028d78 100644 --- a/libvpx/vp9/encoder/vp9_temporal_filter.h +++ b/libvpx/vp9/encoder/vp9_temporal_filter.h @@ -11,8 +11,17 @@ #ifndef VP9_ENCODER_VP9_TEMPORAL_FILTER_H_ #define VP9_ENCODER_VP9_TEMPORAL_FILTER_H_ +#ifdef __cplusplus +extern "C" { +#endif + void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance); -void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame, - const int group_boost); +void vp9_configure_arnr_filter(VP9_COMP *cpi, + const unsigned int frames_to_arnr, + const int group_boost); + +#ifdef __cplusplus +} // extern "C" +#endif #endif // VP9_ENCODER_VP9_TEMPORAL_FILTER_H_ diff --git a/libvpx/vp9/encoder/vp9_tokenize.c b/libvpx/vp9/encoder/vp9_tokenize.c index c7336d0..a293dd8 100644 --- a/libvpx/vp9/encoder/vp9_tokenize.c +++ b/libvpx/vp9/encoder/vp9_tokenize.c @@ -8,23 +8,105 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - +#include <assert.h> #include <math.h> #include <stdio.h> #include <string.h> -#include <assert.h> -#include "vp9/encoder/vp9_onyx_int.h" -#include "vp9/encoder/vp9_tokenize.h" + #include "vpx_mem/vpx_mem.h" +#include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_pred_common.h" #include "vp9/common/vp9_seg_common.h" -#include "vp9/common/vp9_entropy.h" + +#include "vp9/encoder/vp9_cost.h" +#include "vp9/encoder/vp9_onyx_int.h" +#include "vp9/encoder/vp9_tokenize.h" static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2]; const TOKENVALUE *vp9_dct_value_tokens_ptr; -static int dct_value_cost[DCT_MAX_VALUE * 2]; -const int *vp9_dct_value_cost_ptr; +static int16_t dct_value_cost[DCT_MAX_VALUE * 2]; +const int16_t *vp9_dct_value_cost_ptr; + +// Array indices are identical to previously-existing CONTEXT_NODE indices +const vp9_tree_index vp9_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = { + -EOB_TOKEN, 2, // 0 = EOB + -ZERO_TOKEN, 4, // 1 = ZERO + -ONE_TOKEN, 6, // 2 = ONE + 8, 12, // 3 = LOW_VAL + -TWO_TOKEN, 10, // 4 = TWO + -THREE_TOKEN, -FOUR_TOKEN, // 5 = THREE + 14, 16, // 6 = HIGH_LOW + -CATEGORY1_TOKEN, -CATEGORY2_TOKEN, // 7 = CAT_ONE + 18, 20, // 8 = CAT_THREEFOUR + -CATEGORY3_TOKEN, -CATEGORY4_TOKEN, // 9 = CAT_THREE + -CATEGORY5_TOKEN, -CATEGORY6_TOKEN // 10 = CAT_FIVE +}; + +// Unconstrained Node Tree +const vp9_tree_index vp9_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = { + 2, 6, // 0 = LOW_VAL + -TWO_TOKEN, 4, // 1 = TWO + -THREE_TOKEN, -FOUR_TOKEN, // 2 = THREE + 8, 10, // 3 = HIGH_LOW + -CATEGORY1_TOKEN, -CATEGORY2_TOKEN, // 4 = CAT_ONE + 12, 14, // 5 = CAT_THREEFOUR + -CATEGORY3_TOKEN, -CATEGORY4_TOKEN, // 6 = CAT_THREE + -CATEGORY5_TOKEN, -CATEGORY6_TOKEN // 7 = CAT_FIVE +}; + +static const vp9_prob Pcat1[] = { 159}; +static const vp9_prob Pcat2[] = { 165, 145}; +static const vp9_prob Pcat3[] = { 173, 148, 140}; +static const vp9_prob Pcat4[] = { 176, 155, 140, 135}; +static const vp9_prob Pcat5[] = { 180, 157, 141, 134, 130}; +static const vp9_prob Pcat6[] = { + 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129 +}; + +static vp9_tree_index cat1[2], cat2[4], cat3[6], cat4[8], cat5[10], cat6[28]; + +static void init_bit_tree(vp9_tree_index *p, int n) { + int i = 0; + + while (++i < n) { + p[0] = p[1] = i << 1; + p += 2; + } + + p[0] = p[1] = 0; +} + +static void init_bit_trees() { + init_bit_tree(cat1, 1); + init_bit_tree(cat2, 2); + init_bit_tree(cat3, 3); + init_bit_tree(cat4, 4); + init_bit_tree(cat5, 5); + init_bit_tree(cat6, 14); +} + +const vp9_extra_bit vp9_extra_bits[ENTROPY_TOKENS] = { + {0, 0, 0, 0}, // ZERO_TOKEN + {0, 0, 0, 1}, // ONE_TOKEN + {0, 0, 0, 2}, // TWO_TOKEN + {0, 0, 0, 3}, // THREE_TOKEN + {0, 0, 0, 4}, // FOUR_TOKEN + {cat1, Pcat1, 1, 5}, // CATEGORY1_TOKEN + {cat2, Pcat2, 2, 7}, // CATEGORY2_TOKEN + {cat3, Pcat3, 3, 11}, // CATEGORY3_TOKEN + {cat4, Pcat4, 4, 19}, // CATEGORY4_TOKEN + {cat5, Pcat5, 5, 35}, // CATEGORY5_TOKEN + {cat6, Pcat6, 14, 67}, // CATEGORY6_TOKEN + {0, 0, 0, 0} // EOB_TOKEN +}; + +struct vp9_token vp9_coef_encodings[ENTROPY_TOKENS]; + +void vp9_coef_tree_initialize() { + init_bit_trees(); + vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree); +} static void fill_value_tokens() { TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE; @@ -80,7 +162,6 @@ struct tokenize_b_args { VP9_COMP *cpi; MACROBLOCKD *xd; TOKENEXTRA **tp; - TX_SIZE tx_size; uint8_t *token_cache; }; @@ -88,10 +169,42 @@ static void set_entropy_context_b(int plane, int block, BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) { struct 
tokenize_b_args* const args = arg; MACROBLOCKD *const xd = args->xd; + struct macroblock_plane *p = &args->cpi->mb.plane[plane]; struct macroblockd_plane *pd = &xd->plane[plane]; int aoff, loff; txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff); - set_contexts(xd, pd, plane_bsize, tx_size, pd->eobs[block] > 0, aoff, loff); + vp9_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, + aoff, loff); +} + +static INLINE void add_token(TOKENEXTRA **t, const vp9_prob *context_tree, + int16_t extra, uint8_t token, + uint8_t skip_eob_node, + unsigned int *counts) { + (*t)->token = token; + (*t)->extra = extra; + (*t)->context_tree = context_tree; + (*t)->skip_eob_node = skip_eob_node; + (*t)++; + ++counts[token]; +} + +static INLINE void add_token_no_extra(TOKENEXTRA **t, + const vp9_prob *context_tree, + uint8_t token, + uint8_t skip_eob_node, + unsigned int *counts) { + (*t)->token = token; + (*t)->context_tree = context_tree; + (*t)->skip_eob_node = skip_eob_node; + (*t)++; + ++counts[token]; +} + +static INLINE int get_tx_eob(const struct segmentation *seg, int segment_id, + TX_SIZE tx_size) { + const int eob_max = 16 << (tx_size << 1); + return vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max; } static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize, @@ -101,69 +214,80 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize, MACROBLOCKD *xd = args->xd; TOKENEXTRA **tp = args->tp; uint8_t *token_cache = args->token_cache; + struct macroblock_plane *p = &cpi->mb.plane[plane]; struct macroblockd_plane *pd = &xd->plane[plane]; MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; int pt; /* near block/prev token context index */ - int c = 0, rc = 0; + int c; TOKENEXTRA *t = *tp; /* store tokens starting here */ - const int eob = pd->eobs[block]; + int eob = p->eobs[block]; const PLANE_TYPE type = pd->plane_type; - const int16_t *qcoeff_ptr = BLOCK_OFFSET(pd->qcoeff, block); - + const int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); const int segment_id = mbmi->segment_id; const int16_t *scan, *nb; - vp9_coeff_count *const counts = cpi->coef_counts[tx_size]; - vp9_coeff_probs_model *const coef_probs = cpi->common.fc.coef_probs[tx_size]; + const scan_order *so; const int ref = is_inter_block(mbmi); - const uint8_t *const band_translate = get_band_translate(tx_size); + unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] = + cpi->coef_counts[tx_size][type][ref]; + vp9_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] = + cpi->common.fc.coef_probs[tx_size][type][ref]; + unsigned int (*const eob_branch)[COEFF_CONTEXTS] = + cpi->common.counts.eob_branch[tx_size][type][ref]; + + const uint8_t *const band = get_band_translate(tx_size); const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size); + int aoff, loff; txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff); - assert((!type && !plane) || (type && plane)); - pt = get_entropy_context(tx_size, pd->above_context + aoff, - pd->left_context + loff); - get_scan(xd, tx_size, type, block, &scan, &nb); + pd->left_context + loff); + so = get_scan(xd, tx_size, type, block); + scan = so->scan; + nb = so->neighbors; c = 0; - do { - const int band = band_translate[c]; - int token; + while (c < eob) { int v = 0; - rc = scan[c]; - if (c) - pt = get_coef_context(nb, token_cache, c); - if (c < eob) { - v = qcoeff_ptr[rc]; - assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE); - - t->extra = vp9_dct_value_tokens_ptr[v].extra; - token = 
vp9_dct_value_tokens_ptr[v].token; - } else { - token = DCT_EOB_TOKEN; - } - - t->token = token; - t->context_tree = coef_probs[type][ref][band][pt]; - t->skip_eob_node = (c > 0) && (token_cache[scan[c - 1]] == 0); + int skip_eob = 0; + v = qcoeff[scan[c]]; - assert(vp9_coef_encodings[t->token].len - t->skip_eob_node > 0); + while (!v) { + add_token_no_extra(&t, coef_probs[band[c]][pt], ZERO_TOKEN, skip_eob, + counts[band[c]][pt]); + eob_branch[band[c]][pt] += !skip_eob; - ++counts[type][ref][band][pt][token]; - if (!t->skip_eob_node) - ++cpi->common.counts.eob_branch[tx_size][type][ref][band][pt]; + skip_eob = 1; + token_cache[scan[c]] = 0; + ++c; + pt = get_coef_context(nb, token_cache, c); + v = qcoeff[scan[c]]; + } - token_cache[rc] = vp9_pt_energy_class[token]; - ++t; - } while (c < eob && ++c < seg_eob); + add_token(&t, coef_probs[band[c]][pt], + vp9_dct_value_tokens_ptr[v].extra, + (uint8_t)vp9_dct_value_tokens_ptr[v].token, + (uint8_t)skip_eob, + counts[band[c]][pt]); + eob_branch[band[c]][pt] += !skip_eob; + + token_cache[scan[c]] = + vp9_pt_energy_class[vp9_dct_value_tokens_ptr[v].token]; + ++c; + pt = get_coef_context(nb, token_cache, c); + } + if (c < seg_eob) { + add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, 0, + counts[band[c]][pt]); + ++eob_branch[band[c]][pt]; + } *tp = t; - set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff); + vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff); } struct is_skippable_args { - MACROBLOCKD *xd; + MACROBLOCK *x; int *skippable; }; @@ -171,21 +295,21 @@ static void is_skippable(int plane, int block, BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *argv) { struct is_skippable_args *args = argv; - args->skippable[0] &= (!args->xd->plane[plane].eobs[block]); + args->skippable[0] &= (!args->x->plane[plane].eobs[block]); } -int vp9_sb_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE bsize) { +static int sb_is_skippable(MACROBLOCK *x, BLOCK_SIZE bsize) { int result = 1; - struct is_skippable_args args = {xd, &result}; - foreach_transformed_block(xd, bsize, is_skippable, &args); + struct is_skippable_args args = {x, &result}; + vp9_foreach_transformed_block(&x->e_mbd, bsize, is_skippable, &args); return result; } -int vp9_is_skippable_in_plane(MACROBLOCKD *xd, BLOCK_SIZE bsize, - int plane) { +int vp9_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { int result = 1; - struct is_skippable_args args = {xd, &result}; - foreach_transformed_block_in_plane(xd, bsize, plane, is_skippable, &args); + struct is_skippable_args args = {x, &result}; + vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable, + &args); return result; } @@ -195,15 +319,13 @@ void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run, MACROBLOCKD *const xd = &cpi->mb.e_mbd; MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; TOKENEXTRA *t_backup = *t; - const int mb_skip_context = vp9_get_pred_context_mbskip(xd); + const int ctx = vp9_get_skip_context(xd); const int skip_inc = !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP); - struct tokenize_b_args arg = {cpi, xd, t, mbmi->tx_size, cpi->mb.token_cache}; - - mbmi->skip_coeff = vp9_sb_is_skippable(xd, bsize); - if (mbmi->skip_coeff) { + struct tokenize_b_args arg = {cpi, xd, t, cpi->mb.token_cache}; + if (mbmi->skip) { if (!dry_run) - cm->counts.mbskip[mb_skip_context][1] += skip_inc; + cm->counts.skip[ctx][1] += skip_inc; reset_skip_context(xd, bsize); if (dry_run) *t = t_backup; @@ -211,10 +333,10 @@ void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA 
**t, int dry_run, } if (!dry_run) { - cm->counts.mbskip[mb_skip_context][0] += skip_inc; - foreach_transformed_block(xd, bsize, tokenize_b, &arg); + cm->counts.skip[ctx][0] += skip_inc; + vp9_foreach_transformed_block(xd, bsize, tokenize_b, &arg); } else { - foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg); + vp9_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg); *t = t_backup; } } diff --git a/libvpx/vp9/encoder/vp9_tokenize.h b/libvpx/vp9/encoder/vp9_tokenize.h index e24e31b..063c0ba 100644 --- a/libvpx/vp9/encoder/vp9_tokenize.h +++ b/libvpx/vp9/encoder/vp9_tokenize.h @@ -12,10 +12,18 @@ #define VP9_ENCODER_VP9_TOKENIZE_H_ #include "vp9/common/vp9_entropy.h" + #include "vp9/encoder/vp9_block.h" +#include "vp9/encoder/vp9_treewriter.h" + +#ifdef __cplusplus +extern "C" { +#endif void vp9_tokenize_initialize(); +#define EOSB_TOKEN 127 // Not signalled, encoder only + typedef struct { int16_t token; int16_t extra; @@ -28,19 +36,26 @@ typedef struct { uint8_t skip_eob_node; } TOKENEXTRA; -int vp9_sb_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE bsize); -int vp9_is_skippable_in_plane(MACROBLOCKD *xd, BLOCK_SIZE bsize, - int plane); +extern const vp9_tree_index vp9_coef_tree[]; +extern const vp9_tree_index vp9_coef_con_tree[]; +extern struct vp9_token vp9_coef_encodings[]; + +int vp9_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane); + struct VP9_COMP; void vp9_tokenize_sb(struct VP9_COMP *cpi, TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize); -extern const int *vp9_dct_value_cost_ptr; +extern const int16_t *vp9_dct_value_cost_ptr; /* TODO: The Token field should be broken out into a separate char array to * improve cache locality, since it's needed for costing when the rest of the * fields are not. */ extern const TOKENVALUE *vp9_dct_value_tokens_ptr; +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_ENCODER_VP9_TOKENIZE_H_ diff --git a/libvpx/vp9/encoder/vp9_treewriter.c b/libvpx/vp9/encoder/vp9_treewriter.c index e4aed53..bb04b40 100644 --- a/libvpx/vp9/encoder/vp9_treewriter.c +++ b/libvpx/vp9/encoder/vp9_treewriter.c @@ -10,29 +10,49 @@ #include "vp9/encoder/vp9_treewriter.h" -static void cost(int *costs, vp9_tree tree, const vp9_prob *probs, - int i, int c) { - const vp9_prob prob = probs[i / 2]; - int b; - - for (b = 0; b <= 1; ++b) { - const int cc = c + vp9_cost_bit(prob, b); - const vp9_tree_index ii = tree[i + b]; - - if (ii <= 0) - costs[-ii] = cc; - else - cost(costs, tree, probs, ii, cc); - } +static void tree2tok(struct vp9_token *tokens, const vp9_tree_index *tree, + int i, int v, int l) { + v += v; + ++l; + + do { + const vp9_tree_index j = tree[i++]; + if (j <= 0) { + tokens[-j].value = v; + tokens[-j].len = l; + } else { + tree2tok(tokens, tree, j, v, l); + } + } while (++v & 1); } -void vp9_cost_tokens(int *costs, const vp9_prob *probs, vp9_tree tree) { - cost(costs, tree, probs, 0, 0); +void vp9_tokens_from_tree(struct vp9_token *tokens, + const vp9_tree_index *tree) { + tree2tok(tokens, tree, 0, 0, 0); } -void vp9_cost_tokens_skip(int *costs, const vp9_prob *probs, vp9_tree tree) { - assert(tree[0] <= 0 && tree[1] > 0); +static unsigned int convert_distribution(unsigned int i, vp9_tree tree, + unsigned int branch_ct[][2], + const unsigned int num_events[]) { + unsigned int left, right; + + if (tree[i] <= 0) + left = num_events[-tree[i]]; + else + left = convert_distribution(tree[i], tree, branch_ct, num_events); + + if (tree[i + 1] <= 0) + right = num_events[-tree[i + 1]]; + else + right = convert_distribution(tree[i + 
1], tree, branch_ct, num_events); + + branch_ct[i >> 1][0] = left; + branch_ct[i >> 1][1] = right; + return left + right; +} - costs[-tree[0]] = vp9_cost_bit(probs[0], 0); - cost(costs, tree, probs, 2, 0); +void vp9_tree_probs_from_distribution(vp9_tree tree, + unsigned int branch_ct[/* n-1 */][2], + const unsigned int num_events[/* n */]) { + convert_distribution(0, tree, branch_ct, num_events); } diff --git a/libvpx/vp9/encoder/vp9_treewriter.h b/libvpx/vp9/encoder/vp9_treewriter.h index eeda5cd..4a76d87 100644 --- a/libvpx/vp9/encoder/vp9_treewriter.h +++ b/libvpx/vp9/encoder/vp9_treewriter.h @@ -8,47 +8,29 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_ENCODER_VP9_TREEWRITER_H_ #define VP9_ENCODER_VP9_TREEWRITER_H_ -/* Trees map alphabets into huffman-like codes suitable for an arithmetic - bit coder. Timothy S Murphy 11 October 2004 */ - -#include "vp9/common/vp9_treecoder.h" - -#include "vp9/encoder/vp9_boolhuff.h" /* for now */ - - -#define vp9_write_prob(w, v) vp9_write_literal((w), (v), 8) - -/* Approximate length of an encoded bool in 256ths of a bit at given prob */ +#include "vp9/encoder/vp9_writer.h" -#define vp9_cost_zero(x) (vp9_prob_cost[x]) -#define vp9_cost_one(x) vp9_cost_zero(vp9_complement(x)) - -#define vp9_cost_bit(x, b) vp9_cost_zero((b) ? vp9_complement(x) : (x)) - -/* VP8BC version is scaled by 2^20 rather than 2^8; see bool_coder.h */ - - -/* Both of these return bits, not scaled bits. */ -static INLINE unsigned int cost_branch256(const unsigned int ct[2], - vp9_prob p) { - return ct[0] * vp9_cost_zero(p) + ct[1] * vp9_cost_one(p); -} +#ifdef __cplusplus +extern "C" { +#endif -static INLINE unsigned int cost_branch(const unsigned int ct[2], - vp9_prob p) { - return cost_branch256(ct, p) >> 8; -} +void vp9_tree_probs_from_distribution(vp9_tree tree, + unsigned int branch_ct[ /* n - 1 */ ][2], + const unsigned int num_events[ /* n */ ]); +struct vp9_token { + int value; + int len; +}; -static INLINE void treed_write(vp9_writer *w, - vp9_tree tree, const vp9_prob *probs, - int bits, int len) { - vp9_tree_index i = 0; +void vp9_tokens_from_tree(struct vp9_token*, const vp9_tree_index *); +static INLINE void vp9_write_tree(vp9_writer *w, const vp9_tree_index *tree, + const vp9_prob *probs, int bits, int len, + vp9_tree_index i) { do { const int bit = (bits >> --len) & 1; vp9_write(w, bit, probs[i >> 1]); @@ -56,32 +38,14 @@ static INLINE void treed_write(vp9_writer *w, } while (len); } -static INLINE void write_token(vp9_writer *w, vp9_tree tree, - const vp9_prob *probs, - const struct vp9_token *token) { - treed_write(w, tree, probs, token->value, token->len); -} - -static INLINE int treed_cost(vp9_tree tree, const vp9_prob *probs, - int bits, int len) { - int cost = 0; - vp9_tree_index i = 0; - - do { - const int bit = (bits >> --len) & 1; - cost += vp9_cost_bit(probs[i >> 1], bit); - i = tree[i + bit]; - } while (len); - - return cost; -} - -static INLINE int cost_token(vp9_tree tree, const vp9_prob *probs, - const struct vp9_token *token) { - return treed_cost(tree, probs, token->value, token->len); +static INLINE void vp9_write_token(vp9_writer *w, const vp9_tree_index *tree, + const vp9_prob *probs, + const struct vp9_token *token) { + vp9_write_tree(w, tree, probs, token->value, token->len, 0); } -void vp9_cost_tokens(int *costs, const vp9_prob *probs, vp9_tree tree); -void vp9_cost_tokens_skip(int *costs, const vp9_prob *probs, vp9_tree tree); +#ifdef __cplusplus +} // extern "C" +#endif #endif // 
VP9_ENCODER_VP9_TREEWRITER_H_ diff --git a/libvpx/vp9/encoder/vp9_vaq.c b/libvpx/vp9/encoder/vp9_vaq.c index 1f9cb87..c71c171 100644 --- a/libvpx/vp9/encoder/vp9_vaq.c +++ b/libvpx/vp9/encoder/vp9_vaq.c @@ -19,8 +19,8 @@ #include "vp9/encoder/vp9_segmentation.h" #include "vp9/common/vp9_systemdependent.h" -#define ENERGY_MIN (-3) -#define ENERGY_MAX (3) +#define ENERGY_MIN (-1) +#define ENERGY_MAX (1) #define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1) #define ENERGY_IN_BOUNDS(energy)\ assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX) @@ -44,7 +44,7 @@ unsigned int vp9_vaq_segment_id(int energy) { double vp9_vaq_rdmult_ratio(int energy) { ENERGY_IN_BOUNDS(energy); - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); return RDMULT_RATIO(energy); } @@ -52,7 +52,7 @@ double vp9_vaq_rdmult_ratio(int energy) { double vp9_vaq_inv_q_ratio(int energy) { ENERGY_IN_BOUNDS(energy); - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); return Q_RATIO(-energy); } @@ -63,9 +63,9 @@ void vp9_vaq_init() { assert(ENERGY_SPAN <= MAX_SEGMENTS); - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); - base_ratio = 1.8; + base_ratio = 1.5; for (i = ENERGY_MIN; i <= ENERGY_MAX; i++) { Q_RATIO(i) = pow(base_ratio, i/3.0); @@ -75,35 +75,39 @@ void vp9_vaq_init() { void vp9_vaq_frame_setup(VP9_COMP *cpi) { VP9_COMMON *cm = &cpi->common; struct segmentation *seg = &cm->seg; - int base_q = vp9_convert_qindex_to_q(cm->base_qindex); - int base_rdmult = vp9_compute_rd_mult(cpi, cm->base_qindex + - cm->y_dc_delta_q); + const double base_q = vp9_convert_qindex_to_q(cm->base_qindex); + const int base_rdmult = vp9_compute_rd_mult(cpi, cm->base_qindex + + cm->y_dc_delta_q); int i; - vp9_enable_segmentation((VP9_PTR)cpi); - vp9_clearall_segfeatures(seg); + if (cm->frame_type == KEY_FRAME || + cpi->refresh_alt_ref_frame || + (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { + vp9_enable_segmentation(seg); + vp9_clearall_segfeatures(seg); - seg->abs_delta = SEGMENT_DELTADATA; + seg->abs_delta = SEGMENT_DELTADATA; - vp9_clear_system_state(); // __asm emms; + vp9_clear_system_state(); - for (i = ENERGY_MIN; i <= ENERGY_MAX; i++) { - int qindex_delta, segment_rdmult; + for (i = ENERGY_MIN; i <= ENERGY_MAX; i++) { + int qindex_delta, segment_rdmult; - if (Q_RATIO(i) == 1) { - // No need to enable SEG_LVL_ALT_Q for this segment - RDMULT_RATIO(i) = 1; - continue; - } + if (Q_RATIO(i) == 1) { + // No need to enable SEG_LVL_ALT_Q for this segment + RDMULT_RATIO(i) = 1; + continue; + } - qindex_delta = vp9_compute_qdelta(cpi, base_q, base_q * Q_RATIO(i)); - vp9_set_segdata(seg, SEGMENT_ID(i), SEG_LVL_ALT_Q, qindex_delta); - vp9_enable_segfeature(seg, SEGMENT_ID(i), SEG_LVL_ALT_Q); + qindex_delta = vp9_compute_qdelta(cpi, base_q, base_q * Q_RATIO(i)); + vp9_set_segdata(seg, SEGMENT_ID(i), SEG_LVL_ALT_Q, qindex_delta); + vp9_enable_segfeature(seg, SEGMENT_ID(i), SEG_LVL_ALT_Q); - segment_rdmult = vp9_compute_rd_mult(cpi, cm->base_qindex + qindex_delta + - cm->y_dc_delta_q); + segment_rdmult = vp9_compute_rd_mult(cpi, cm->base_qindex + qindex_delta + + cm->y_dc_delta_q); - RDMULT_RATIO(i) = (double) segment_rdmult / base_rdmult; + RDMULT_RATIO(i) = (double) segment_rdmult / base_rdmult; + } } } @@ -137,11 +141,8 @@ int vp9_block_energy(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) { double energy; unsigned int var = block_variance(cpi, x, bs); - vp9_clear_system_state(); // __asm emms; - - // if (var <= 1000) - // return 0; + vp9_clear_system_state(); - energy = 
0.9*(logf(var + 1) - 10.0); - return clamp(round(energy), ENERGY_MIN, ENERGY_MAX); + energy = 0.9 * (log(var + 1.0) - 10.0); + return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX); } diff --git a/libvpx/vp9/encoder/vp9_vaq.h b/libvpx/vp9/encoder/vp9_vaq.h index dc18b22..c73114a 100644 --- a/libvpx/vp9/encoder/vp9_vaq.h +++ b/libvpx/vp9/encoder/vp9_vaq.h @@ -9,11 +9,15 @@ */ -#ifndef VP9_ENCODER_VP9_CONFIG_VAQ_H_ -#define VP9_ENCODER_VP9_CONFIG_VAQ_H_ +#ifndef VP9_ENCODER_VP9_VAQ_H_ +#define VP9_ENCODER_VP9_VAQ_H_ #include "vp9/encoder/vp9_onyx_int.h" +#ifdef __cplusplus +extern "C" { +#endif + unsigned int vp9_vaq_segment_id(int energy); double vp9_vaq_rdmult_ratio(int energy); double vp9_vaq_inv_q_ratio(int energy); @@ -23,4 +27,8 @@ void vp9_vaq_frame_setup(VP9_COMP *cpi); int vp9_block_energy(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs); -#endif // VP9_ENCODER_VP9_CONFIG_VAQ_H_ +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_ENCODER_VP9_VAQ_H_ diff --git a/libvpx/vp9/encoder/vp9_variance_c.c b/libvpx/vp9/encoder/vp9_variance.c index 8bc3850..8bc3850 100644 --- a/libvpx/vp9/encoder/vp9_variance_c.c +++ b/libvpx/vp9/encoder/vp9_variance.c diff --git a/libvpx/vp9/encoder/vp9_variance.h b/libvpx/vp9/encoder/vp9_variance.h index 2ded97c..3bc2091 100644 --- a/libvpx/vp9/encoder/vp9_variance.h +++ b/libvpx/vp9/encoder/vp9_variance.h @@ -12,7 +12,10 @@ #define VP9_ENCODER_VP9_VARIANCE_H_ #include "vpx/vpx_integer.h" -// #include "./vpx_config.h" + +#ifdef __cplusplus +extern "C" { +#endif void variance(const uint8_t *src_ptr, int source_stride, @@ -112,4 +115,8 @@ static void comp_avg_pred(uint8_t *comp_pred, const uint8_t *pred, int width, ref += ref_stride; } } +#ifdef __cplusplus +} // extern "C" +#endif + #endif // VP9_ENCODER_VP9_VARIANCE_H_ diff --git a/libvpx/vp9/encoder/vp9_write_bit_buffer.h b/libvpx/vp9/encoder/vp9_write_bit_buffer.h index 6f91cfc..1795e05 100644 --- a/libvpx/vp9/encoder/vp9_write_bit_buffer.h +++ b/libvpx/vp9/encoder/vp9_write_bit_buffer.h @@ -8,13 +8,17 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef VP9_BIT_WRITE_BUFFER_H_ -#define VP9_BIT_WRITE_BUFFER_H_ +#ifndef VP9_ENCODER_VP9_WRITE_BIT_BUFFER_H_ +#define VP9_ENCODER_VP9_WRITE_BIT_BUFFER_H_ #include <limits.h> #include "vpx/vpx_integer.h" +#ifdef __cplusplus +extern "C" { +#endif + struct vp9_write_bit_buffer { uint8_t *bit_buffer; size_t bit_offset; @@ -25,7 +29,7 @@ static size_t vp9_rb_bytes_written(struct vp9_write_bit_buffer *wb) { } static void vp9_wb_write_bit(struct vp9_write_bit_buffer *wb, int bit) { - const int off = wb->bit_offset; + const int off = (int)wb->bit_offset; const int p = off / CHAR_BIT; const int q = CHAR_BIT - 1 - off % CHAR_BIT; if (q == CHAR_BIT -1) { @@ -45,4 +49,8 @@ static void vp9_wb_write_literal(struct vp9_write_bit_buffer *wb, } -#endif // VP9_BIT_WRITE_BUFFER_H_ +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_ENCODER_VP9_WRITE_BIT_BUFFER_H_ diff --git a/libvpx/vp9/encoder/vp9_writer.c b/libvpx/vp9/encoder/vp9_writer.c new file mode 100644 index 0000000..8398fc0 --- /dev/null +++ b/libvpx/vp9/encoder/vp9_writer.c @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include "vp9/encoder/vp9_writer.h"
+#include "vp9/common/vp9_entropy.h"
+
+void vp9_start_encode(vp9_writer *br, uint8_t *source) {
+  br->lowvalue = 0;
+  br->range    = 255;
+  br->value    = 0;
+  br->count    = -24;
+  br->buffer   = source;
+  br->pos      = 0;
+  vp9_write_bit(br, 0);
+}
+
+void vp9_stop_encode(vp9_writer *br) {
+  int i;
+
+  for (i = 0; i < 32; i++)
+    vp9_write_bit(br, 0);
+
+  // Ensure there's no ambiguous collision with any index marker bytes
+  if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0)
+    br->buffer[br->pos++] = 0;
+}
+
diff --git a/libvpx/vp9/encoder/vp9_boolhuff.h b/libvpx/vp9/encoder/vp9_writer.h
index c3f340d..7f4fa1e 100644
--- a/libvpx/vp9/encoder/vp9_boolhuff.h
+++ b/libvpx/vp9/encoder/vp9_writer.h
@@ -8,19 +8,17 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
-
-/****************************************************************************
-*
-*   Module Title :     vp9_boolhuff.h
-*
-*   Description  :     Bool Coder header file.
-*
-****************************************************************************/
-#ifndef VP9_ENCODER_VP9_BOOLHUFF_H_
-#define VP9_ENCODER_VP9_BOOLHUFF_H_
+#ifndef VP9_ENCODER_VP9_WRITER_H_
+#define VP9_ENCODER_VP9_WRITER_H_

 #include "vpx_ports/mem.h"

+#include "vp9/common/vp9_prob.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 typedef struct {
   unsigned int lowvalue;
   unsigned int range;
@@ -31,16 +29,12 @@ typedef struct {

   // Variables used to track bit costs without outputting to the bitstream
   unsigned int measure_cost;
-  unsigned long bit_counter;
+  uint64_t bit_counter;
 } vp9_writer;

-extern const unsigned int vp9_prob_cost[256];
-
 void vp9_start_encode(vp9_writer *bc, uint8_t *buffer);
 void vp9_stop_encode(vp9_writer *bc);

-DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]);
-
 static void vp9_write(vp9_writer *br, int bit, int probability) {
   unsigned int split;
   int count = br->count;
@@ -48,17 +42,6 @@ static void vp9_write(vp9_writer *br, int bit, int probability) {
   unsigned int lowvalue = br->lowvalue;
   register unsigned int shift;

-#ifdef ENTROPY_STATS
-#if defined(SECTIONBITS_OUTPUT)
-
-  if (bit)
-    Sectionbits[active_section] += vp9_prob_cost[255 - probability];
-  else
-    Sectionbits[active_section] += vp9_prob_cost[probability];
-
-#endif
-#endif
-
   split = 1 + (((range - 1) * probability) >> 8);

   range = split;
@@ -111,5 +94,10 @@ static void vp9_write_literal(vp9_writer *w, int data, int bits) {
     vp9_write_bit(w, 1 & (data >> bit));
 }

+#define vp9_write_prob(w, v) vp9_write_literal((w), (v), 8)
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif

-#endif  // VP9_ENCODER_VP9_BOOLHUFF_H_
+#endif  // VP9_ENCODER_VP9_WRITER_H_
diff --git a/libvpx/vp9/encoder/x86/vp9_dct32x32_avx2.c b/libvpx/vp9/encoder/x86/vp9_dct32x32_avx2.c
new file mode 100644
index 0000000..9ea22fe
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_dct32x32_avx2.c
@@ -0,0 +1,2710 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */ + +#include <immintrin.h> // AVX2 +#include "vp9/common/vp9_idct.h" // for cospi constants +#include "vpx_ports/mem.h" + +#define pair256_set_epi16(a, b) \ + _mm256_set_epi16(b, a, b, a, b, a, b, a, b, a, b, a, b, a, b, a) + +#define pair256_set_epi32(a, b) \ + _mm256_set_epi32(b, a, b, a, b, a, b, a) + + + + +#if FDCT32x32_HIGH_PRECISION +static INLINE __m256i k_madd_epi32_avx2(__m256i a, __m256i b) { + __m256i buf0, buf1; + buf0 = _mm256_mul_epu32(a, b); + a = _mm256_srli_epi64(a, 32); + b = _mm256_srli_epi64(b, 32); + buf1 = _mm256_mul_epu32(a, b); + return _mm256_add_epi64(buf0, buf1); +} + +static INLINE __m256i k_packs_epi64_avx2(__m256i a, __m256i b) { + __m256i buf0 = _mm256_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 2, 0)); + __m256i buf1 = _mm256_shuffle_epi32(b, _MM_SHUFFLE(0, 0, 2, 0)); + return _mm256_unpacklo_epi64(buf0, buf1); +} +#endif + +void FDCT32x32_2D_AVX2(const int16_t *input, + int16_t *output_org, int stride) { + // Calculate pre-multiplied strides + const int str1 = stride; + const int str2 = 2 * stride; + const int str3 = 2 * stride + str1; + // We need an intermediate buffer between passes. + DECLARE_ALIGNED(32, int16_t, intermediate[32 * 32]); + // Constants + // When we use them, in one case, they are all the same. In all others + // it's a pair of them that we need to repeat four times. This is done + // by constructing the 32 bit constant corresponding to that pair. + const __m256i k__cospi_p16_p16 = _mm256_set1_epi16(+cospi_16_64); + const __m256i k__cospi_p16_m16 = pair256_set_epi16(+cospi_16_64, -cospi_16_64); + const __m256i k__cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64); + const __m256i k__cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64); + const __m256i k__cospi_p24_p08 = pair256_set_epi16(+cospi_24_64, cospi_8_64); + const __m256i k__cospi_p12_p20 = pair256_set_epi16(+cospi_12_64, cospi_20_64); + const __m256i k__cospi_m20_p12 = pair256_set_epi16(-cospi_20_64, cospi_12_64); + const __m256i k__cospi_m04_p28 = pair256_set_epi16(-cospi_4_64, cospi_28_64); + const __m256i k__cospi_p28_p04 = pair256_set_epi16(+cospi_28_64, cospi_4_64); + const __m256i k__cospi_m28_m04 = pair256_set_epi16(-cospi_28_64, -cospi_4_64); + const __m256i k__cospi_m12_m20 = pair256_set_epi16(-cospi_12_64, -cospi_20_64); + const __m256i k__cospi_p30_p02 = pair256_set_epi16(+cospi_30_64, cospi_2_64); + const __m256i k__cospi_p14_p18 = pair256_set_epi16(+cospi_14_64, cospi_18_64); + const __m256i k__cospi_p22_p10 = pair256_set_epi16(+cospi_22_64, cospi_10_64); + const __m256i k__cospi_p06_p26 = pair256_set_epi16(+cospi_6_64, cospi_26_64); + const __m256i k__cospi_m26_p06 = pair256_set_epi16(-cospi_26_64, cospi_6_64); + const __m256i k__cospi_m10_p22 = pair256_set_epi16(-cospi_10_64, cospi_22_64); + const __m256i k__cospi_m18_p14 = pair256_set_epi16(-cospi_18_64, cospi_14_64); + const __m256i k__cospi_m02_p30 = pair256_set_epi16(-cospi_2_64, cospi_30_64); + const __m256i k__cospi_p31_p01 = pair256_set_epi16(+cospi_31_64, cospi_1_64); + const __m256i k__cospi_p15_p17 = pair256_set_epi16(+cospi_15_64, cospi_17_64); + const __m256i k__cospi_p23_p09 = pair256_set_epi16(+cospi_23_64, cospi_9_64); + const __m256i k__cospi_p07_p25 = pair256_set_epi16(+cospi_7_64, cospi_25_64); + const __m256i k__cospi_m25_p07 = pair256_set_epi16(-cospi_25_64, cospi_7_64); + const __m256i k__cospi_m09_p23 = pair256_set_epi16(-cospi_9_64, cospi_23_64); + const __m256i k__cospi_m17_p15 = pair256_set_epi16(-cospi_17_64, cospi_15_64); + const __m256i k__cospi_m01_p31 = 
pair256_set_epi16(-cospi_1_64, cospi_31_64); + const __m256i k__cospi_p27_p05 = pair256_set_epi16(+cospi_27_64, cospi_5_64); + const __m256i k__cospi_p11_p21 = pair256_set_epi16(+cospi_11_64, cospi_21_64); + const __m256i k__cospi_p19_p13 = pair256_set_epi16(+cospi_19_64, cospi_13_64); + const __m256i k__cospi_p03_p29 = pair256_set_epi16(+cospi_3_64, cospi_29_64); + const __m256i k__cospi_m29_p03 = pair256_set_epi16(-cospi_29_64, cospi_3_64); + const __m256i k__cospi_m13_p19 = pair256_set_epi16(-cospi_13_64, cospi_19_64); + const __m256i k__cospi_m21_p11 = pair256_set_epi16(-cospi_21_64, cospi_11_64); + const __m256i k__cospi_m05_p27 = pair256_set_epi16(-cospi_5_64, cospi_27_64); + const __m256i k__DCT_CONST_ROUNDING = _mm256_set1_epi32(DCT_CONST_ROUNDING); + const __m256i kZero = _mm256_set1_epi16(0); + const __m256i kOne = _mm256_set1_epi16(1); + // Do the two transform/transpose passes + int pass; + for (pass = 0; pass < 2; ++pass) { + // We process sixteen columns (transposed rows in second pass) at a time. + int column_start; + for (column_start = 0; column_start < 32; column_start += 16) { + __m256i step1[32]; + __m256i step2[32]; + __m256i step3[32]; + __m256i out[32]; + // Stage 1 + // Note: even though all the loads below are aligned, using the aligned + // intrinsic make the code slightly slower. + if (0 == pass) { + const int16_t *in = &input[column_start]; + // step1[i] = (in[ 0 * stride] + in[(32 - 1) * stride]) << 2; + // Note: the next four blocks could be in a loop. That would help the + // instruction cache but is actually slower. + { + const int16_t *ina = in + 0 * str1; + const int16_t *inb = in + 31 * str1; + __m256i *step1a = &step1[ 0]; + __m256i *step1b = &step1[31]; + const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); + const __m256i ina1 = _mm256_loadu_si256((const __m256i *)(ina + str1)); + const __m256i ina2 = _mm256_loadu_si256((const __m256i *)(ina + str2)); + const __m256i ina3 = _mm256_loadu_si256((const __m256i *)(ina + str3)); + const __m256i inb3 = _mm256_loadu_si256((const __m256i *)(inb - str3)); + const __m256i inb2 = _mm256_loadu_si256((const __m256i *)(inb - str2)); + const __m256i inb1 = _mm256_loadu_si256((const __m256i *)(inb - str1)); + const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); + step1a[ 0] = _mm256_add_epi16(ina0, inb0); + step1a[ 1] = _mm256_add_epi16(ina1, inb1); + step1a[ 2] = _mm256_add_epi16(ina2, inb2); + step1a[ 3] = _mm256_add_epi16(ina3, inb3); + step1b[-3] = _mm256_sub_epi16(ina3, inb3); + step1b[-2] = _mm256_sub_epi16(ina2, inb2); + step1b[-1] = _mm256_sub_epi16(ina1, inb1); + step1b[-0] = _mm256_sub_epi16(ina0, inb0); + step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2); + step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2); + step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2); + step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2); + step1b[-3] = _mm256_slli_epi16(step1b[-3], 2); + step1b[-2] = _mm256_slli_epi16(step1b[-2], 2); + step1b[-1] = _mm256_slli_epi16(step1b[-1], 2); + step1b[-0] = _mm256_slli_epi16(step1b[-0], 2); + } + { + const int16_t *ina = in + 4 * str1; + const int16_t *inb = in + 27 * str1; + __m256i *step1a = &step1[ 4]; + __m256i *step1b = &step1[27]; + const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); + const __m256i ina1 = _mm256_loadu_si256((const __m256i *)(ina + str1)); + const __m256i ina2 = _mm256_loadu_si256((const __m256i *)(ina + str2)); + const __m256i ina3 = _mm256_loadu_si256((const __m256i *)(ina + str3)); + const __m256i inb3 = _mm256_loadu_si256((const __m256i *)(inb - 
str3)); + const __m256i inb2 = _mm256_loadu_si256((const __m256i *)(inb - str2)); + const __m256i inb1 = _mm256_loadu_si256((const __m256i *)(inb - str1)); + const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); + step1a[ 0] = _mm256_add_epi16(ina0, inb0); + step1a[ 1] = _mm256_add_epi16(ina1, inb1); + step1a[ 2] = _mm256_add_epi16(ina2, inb2); + step1a[ 3] = _mm256_add_epi16(ina3, inb3); + step1b[-3] = _mm256_sub_epi16(ina3, inb3); + step1b[-2] = _mm256_sub_epi16(ina2, inb2); + step1b[-1] = _mm256_sub_epi16(ina1, inb1); + step1b[-0] = _mm256_sub_epi16(ina0, inb0); + step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2); + step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2); + step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2); + step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2); + step1b[-3] = _mm256_slli_epi16(step1b[-3], 2); + step1b[-2] = _mm256_slli_epi16(step1b[-2], 2); + step1b[-1] = _mm256_slli_epi16(step1b[-1], 2); + step1b[-0] = _mm256_slli_epi16(step1b[-0], 2); + } + { + const int16_t *ina = in + 8 * str1; + const int16_t *inb = in + 23 * str1; + __m256i *step1a = &step1[ 8]; + __m256i *step1b = &step1[23]; + const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); + const __m256i ina1 = _mm256_loadu_si256((const __m256i *)(ina + str1)); + const __m256i ina2 = _mm256_loadu_si256((const __m256i *)(ina + str2)); + const __m256i ina3 = _mm256_loadu_si256((const __m256i *)(ina + str3)); + const __m256i inb3 = _mm256_loadu_si256((const __m256i *)(inb - str3)); + const __m256i inb2 = _mm256_loadu_si256((const __m256i *)(inb - str2)); + const __m256i inb1 = _mm256_loadu_si256((const __m256i *)(inb - str1)); + const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); + step1a[ 0] = _mm256_add_epi16(ina0, inb0); + step1a[ 1] = _mm256_add_epi16(ina1, inb1); + step1a[ 2] = _mm256_add_epi16(ina2, inb2); + step1a[ 3] = _mm256_add_epi16(ina3, inb3); + step1b[-3] = _mm256_sub_epi16(ina3, inb3); + step1b[-2] = _mm256_sub_epi16(ina2, inb2); + step1b[-1] = _mm256_sub_epi16(ina1, inb1); + step1b[-0] = _mm256_sub_epi16(ina0, inb0); + step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2); + step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2); + step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2); + step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2); + step1b[-3] = _mm256_slli_epi16(step1b[-3], 2); + step1b[-2] = _mm256_slli_epi16(step1b[-2], 2); + step1b[-1] = _mm256_slli_epi16(step1b[-1], 2); + step1b[-0] = _mm256_slli_epi16(step1b[-0], 2); + } + { + const int16_t *ina = in + 12 * str1; + const int16_t *inb = in + 19 * str1; + __m256i *step1a = &step1[12]; + __m256i *step1b = &step1[19]; + const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); + const __m256i ina1 = _mm256_loadu_si256((const __m256i *)(ina + str1)); + const __m256i ina2 = _mm256_loadu_si256((const __m256i *)(ina + str2)); + const __m256i ina3 = _mm256_loadu_si256((const __m256i *)(ina + str3)); + const __m256i inb3 = _mm256_loadu_si256((const __m256i *)(inb - str3)); + const __m256i inb2 = _mm256_loadu_si256((const __m256i *)(inb - str2)); + const __m256i inb1 = _mm256_loadu_si256((const __m256i *)(inb - str1)); + const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); + step1a[ 0] = _mm256_add_epi16(ina0, inb0); + step1a[ 1] = _mm256_add_epi16(ina1, inb1); + step1a[ 2] = _mm256_add_epi16(ina2, inb2); + step1a[ 3] = _mm256_add_epi16(ina3, inb3); + step1b[-3] = _mm256_sub_epi16(ina3, inb3); + step1b[-2] = _mm256_sub_epi16(ina2, inb2); + step1b[-1] = _mm256_sub_epi16(ina1, inb1); + step1b[-0] = _mm256_sub_epi16(ina0, inb0); + step1a[ 0] 
= _mm256_slli_epi16(step1a[ 0], 2); + step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2); + step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2); + step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2); + step1b[-3] = _mm256_slli_epi16(step1b[-3], 2); + step1b[-2] = _mm256_slli_epi16(step1b[-2], 2); + step1b[-1] = _mm256_slli_epi16(step1b[-1], 2); + step1b[-0] = _mm256_slli_epi16(step1b[-0], 2); + } + } else { + int16_t *in = &intermediate[column_start]; + // step1[i] = in[ 0 * 32] + in[(32 - 1) * 32]; + // Note: using the same approach as above to have common offset is + // counter-productive as all offsets can be calculated at compile + // time. + // Note: the next four blocks could be in a loop. That would help the + // instruction cache but is actually slower. + { + __m256i in00 = _mm256_loadu_si256((const __m256i *)(in + 0 * 32)); + __m256i in01 = _mm256_loadu_si256((const __m256i *)(in + 1 * 32)); + __m256i in02 = _mm256_loadu_si256((const __m256i *)(in + 2 * 32)); + __m256i in03 = _mm256_loadu_si256((const __m256i *)(in + 3 * 32)); + __m256i in28 = _mm256_loadu_si256((const __m256i *)(in + 28 * 32)); + __m256i in29 = _mm256_loadu_si256((const __m256i *)(in + 29 * 32)); + __m256i in30 = _mm256_loadu_si256((const __m256i *)(in + 30 * 32)); + __m256i in31 = _mm256_loadu_si256((const __m256i *)(in + 31 * 32)); + step1[ 0] = _mm256_add_epi16(in00, in31); + step1[ 1] = _mm256_add_epi16(in01, in30); + step1[ 2] = _mm256_add_epi16(in02, in29); + step1[ 3] = _mm256_add_epi16(in03, in28); + step1[28] = _mm256_sub_epi16(in03, in28); + step1[29] = _mm256_sub_epi16(in02, in29); + step1[30] = _mm256_sub_epi16(in01, in30); + step1[31] = _mm256_sub_epi16(in00, in31); + } + { + __m256i in04 = _mm256_loadu_si256((const __m256i *)(in + 4 * 32)); + __m256i in05 = _mm256_loadu_si256((const __m256i *)(in + 5 * 32)); + __m256i in06 = _mm256_loadu_si256((const __m256i *)(in + 6 * 32)); + __m256i in07 = _mm256_loadu_si256((const __m256i *)(in + 7 * 32)); + __m256i in24 = _mm256_loadu_si256((const __m256i *)(in + 24 * 32)); + __m256i in25 = _mm256_loadu_si256((const __m256i *)(in + 25 * 32)); + __m256i in26 = _mm256_loadu_si256((const __m256i *)(in + 26 * 32)); + __m256i in27 = _mm256_loadu_si256((const __m256i *)(in + 27 * 32)); + step1[ 4] = _mm256_add_epi16(in04, in27); + step1[ 5] = _mm256_add_epi16(in05, in26); + step1[ 6] = _mm256_add_epi16(in06, in25); + step1[ 7] = _mm256_add_epi16(in07, in24); + step1[24] = _mm256_sub_epi16(in07, in24); + step1[25] = _mm256_sub_epi16(in06, in25); + step1[26] = _mm256_sub_epi16(in05, in26); + step1[27] = _mm256_sub_epi16(in04, in27); + } + { + __m256i in08 = _mm256_loadu_si256((const __m256i *)(in + 8 * 32)); + __m256i in09 = _mm256_loadu_si256((const __m256i *)(in + 9 * 32)); + __m256i in10 = _mm256_loadu_si256((const __m256i *)(in + 10 * 32)); + __m256i in11 = _mm256_loadu_si256((const __m256i *)(in + 11 * 32)); + __m256i in20 = _mm256_loadu_si256((const __m256i *)(in + 20 * 32)); + __m256i in21 = _mm256_loadu_si256((const __m256i *)(in + 21 * 32)); + __m256i in22 = _mm256_loadu_si256((const __m256i *)(in + 22 * 32)); + __m256i in23 = _mm256_loadu_si256((const __m256i *)(in + 23 * 32)); + step1[ 8] = _mm256_add_epi16(in08, in23); + step1[ 9] = _mm256_add_epi16(in09, in22); + step1[10] = _mm256_add_epi16(in10, in21); + step1[11] = _mm256_add_epi16(in11, in20); + step1[20] = _mm256_sub_epi16(in11, in20); + step1[21] = _mm256_sub_epi16(in10, in21); + step1[22] = _mm256_sub_epi16(in09, in22); + step1[23] = _mm256_sub_epi16(in08, in23); + } + { + __m256i in12 = _mm256_loadu_si256((const 
__m256i *)(in + 12 * 32)); + __m256i in13 = _mm256_loadu_si256((const __m256i *)(in + 13 * 32)); + __m256i in14 = _mm256_loadu_si256((const __m256i *)(in + 14 * 32)); + __m256i in15 = _mm256_loadu_si256((const __m256i *)(in + 15 * 32)); + __m256i in16 = _mm256_loadu_si256((const __m256i *)(in + 16 * 32)); + __m256i in17 = _mm256_loadu_si256((const __m256i *)(in + 17 * 32)); + __m256i in18 = _mm256_loadu_si256((const __m256i *)(in + 18 * 32)); + __m256i in19 = _mm256_loadu_si256((const __m256i *)(in + 19 * 32)); + step1[12] = _mm256_add_epi16(in12, in19); + step1[13] = _mm256_add_epi16(in13, in18); + step1[14] = _mm256_add_epi16(in14, in17); + step1[15] = _mm256_add_epi16(in15, in16); + step1[16] = _mm256_sub_epi16(in15, in16); + step1[17] = _mm256_sub_epi16(in14, in17); + step1[18] = _mm256_sub_epi16(in13, in18); + step1[19] = _mm256_sub_epi16(in12, in19); + } + } + // Stage 2 + { + step2[ 0] = _mm256_add_epi16(step1[0], step1[15]); + step2[ 1] = _mm256_add_epi16(step1[1], step1[14]); + step2[ 2] = _mm256_add_epi16(step1[2], step1[13]); + step2[ 3] = _mm256_add_epi16(step1[3], step1[12]); + step2[ 4] = _mm256_add_epi16(step1[4], step1[11]); + step2[ 5] = _mm256_add_epi16(step1[5], step1[10]); + step2[ 6] = _mm256_add_epi16(step1[6], step1[ 9]); + step2[ 7] = _mm256_add_epi16(step1[7], step1[ 8]); + step2[ 8] = _mm256_sub_epi16(step1[7], step1[ 8]); + step2[ 9] = _mm256_sub_epi16(step1[6], step1[ 9]); + step2[10] = _mm256_sub_epi16(step1[5], step1[10]); + step2[11] = _mm256_sub_epi16(step1[4], step1[11]); + step2[12] = _mm256_sub_epi16(step1[3], step1[12]); + step2[13] = _mm256_sub_epi16(step1[2], step1[13]); + step2[14] = _mm256_sub_epi16(step1[1], step1[14]); + step2[15] = _mm256_sub_epi16(step1[0], step1[15]); + } + { + const __m256i s2_20_0 = _mm256_unpacklo_epi16(step1[27], step1[20]); + const __m256i s2_20_1 = _mm256_unpackhi_epi16(step1[27], step1[20]); + const __m256i s2_21_0 = _mm256_unpacklo_epi16(step1[26], step1[21]); + const __m256i s2_21_1 = _mm256_unpackhi_epi16(step1[26], step1[21]); + const __m256i s2_22_0 = _mm256_unpacklo_epi16(step1[25], step1[22]); + const __m256i s2_22_1 = _mm256_unpackhi_epi16(step1[25], step1[22]); + const __m256i s2_23_0 = _mm256_unpacklo_epi16(step1[24], step1[23]); + const __m256i s2_23_1 = _mm256_unpackhi_epi16(step1[24], step1[23]); + const __m256i s2_20_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_m16); + const __m256i s2_20_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_m16); + const __m256i s2_21_2 = _mm256_madd_epi16(s2_21_0, k__cospi_p16_m16); + const __m256i s2_21_3 = _mm256_madd_epi16(s2_21_1, k__cospi_p16_m16); + const __m256i s2_22_2 = _mm256_madd_epi16(s2_22_0, k__cospi_p16_m16); + const __m256i s2_22_3 = _mm256_madd_epi16(s2_22_1, k__cospi_p16_m16); + const __m256i s2_23_2 = _mm256_madd_epi16(s2_23_0, k__cospi_p16_m16); + const __m256i s2_23_3 = _mm256_madd_epi16(s2_23_1, k__cospi_p16_m16); + const __m256i s2_24_2 = _mm256_madd_epi16(s2_23_0, k__cospi_p16_p16); + const __m256i s2_24_3 = _mm256_madd_epi16(s2_23_1, k__cospi_p16_p16); + const __m256i s2_25_2 = _mm256_madd_epi16(s2_22_0, k__cospi_p16_p16); + const __m256i s2_25_3 = _mm256_madd_epi16(s2_22_1, k__cospi_p16_p16); + const __m256i s2_26_2 = _mm256_madd_epi16(s2_21_0, k__cospi_p16_p16); + const __m256i s2_26_3 = _mm256_madd_epi16(s2_21_1, k__cospi_p16_p16); + const __m256i s2_27_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_p16); + const __m256i s2_27_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_p16); + // dct_const_round_shift + const __m256i s2_20_4 = _mm256_add_epi32(s2_20_2, 
k__DCT_CONST_ROUNDING);
+      const __m256i s2_20_5 = _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
+      const __m256i s2_21_4 = _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
+      const __m256i s2_21_5 = _mm256_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
+      const __m256i s2_22_4 = _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
+      const __m256i s2_22_5 = _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
+      const __m256i s2_23_4 = _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
+      const __m256i s2_23_5 = _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
+      const __m256i s2_24_4 = _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
+      const __m256i s2_24_5 = _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
+      const __m256i s2_25_4 = _mm256_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
+      const __m256i s2_25_5 = _mm256_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
+      const __m256i s2_26_4 = _mm256_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
+      const __m256i s2_26_5 = _mm256_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
+      const __m256i s2_27_4 = _mm256_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
+      const __m256i s2_27_5 = _mm256_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
+      const __m256i s2_20_6 = _mm256_srai_epi32(s2_20_4, DCT_CONST_BITS);
+      const __m256i s2_20_7 = _mm256_srai_epi32(s2_20_5, DCT_CONST_BITS);
+      const __m256i s2_21_6 = _mm256_srai_epi32(s2_21_4, DCT_CONST_BITS);
+      const __m256i s2_21_7 = _mm256_srai_epi32(s2_21_5, DCT_CONST_BITS);
+      const __m256i s2_22_6 = _mm256_srai_epi32(s2_22_4, DCT_CONST_BITS);
+      const __m256i s2_22_7 = _mm256_srai_epi32(s2_22_5, DCT_CONST_BITS);
+      const __m256i s2_23_6 = _mm256_srai_epi32(s2_23_4, DCT_CONST_BITS);
+      const __m256i s2_23_7 = _mm256_srai_epi32(s2_23_5, DCT_CONST_BITS);
+      const __m256i s2_24_6 = _mm256_srai_epi32(s2_24_4, DCT_CONST_BITS);
+      const __m256i s2_24_7 = _mm256_srai_epi32(s2_24_5, DCT_CONST_BITS);
+      const __m256i s2_25_6 = _mm256_srai_epi32(s2_25_4, DCT_CONST_BITS);
+      const __m256i s2_25_7 = _mm256_srai_epi32(s2_25_5, DCT_CONST_BITS);
+      const __m256i s2_26_6 = _mm256_srai_epi32(s2_26_4, DCT_CONST_BITS);
+      const __m256i s2_26_7 = _mm256_srai_epi32(s2_26_5, DCT_CONST_BITS);
+      const __m256i s2_27_6 = _mm256_srai_epi32(s2_27_4, DCT_CONST_BITS);
+      const __m256i s2_27_7 = _mm256_srai_epi32(s2_27_5, DCT_CONST_BITS);
+      // Combine
+      step2[20] = _mm256_packs_epi32(s2_20_6, s2_20_7);
+      step2[21] = _mm256_packs_epi32(s2_21_6, s2_21_7);
+      step2[22] = _mm256_packs_epi32(s2_22_6, s2_22_7);
+      step2[23] = _mm256_packs_epi32(s2_23_6, s2_23_7);
+      step2[24] = _mm256_packs_epi32(s2_24_6, s2_24_7);
+      step2[25] = _mm256_packs_epi32(s2_25_6, s2_25_7);
+      step2[26] = _mm256_packs_epi32(s2_26_6, s2_26_7);
+      step2[27] = _mm256_packs_epi32(s2_27_6, s2_27_7);
+    }
+
+#if !FDCT32x32_HIGH_PRECISION
+  // damp the magnitude by half so that the intermediate values stay
+  // within the range of 16 bits.
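+  // Per 16-bit lane, the block below matches a scalar helper along these
+  // lines (a sketch for illustration; the name is hypothetical):
+  //   static INLINE int16_t half_round_shift(int16_t x) {
+  //     return (int16_t)((x + 1 + (x < 0)) >> 2);
+  //   }
+  // _mm256_cmpgt_epi16(kZero, x) is all ones (-1) in negative lanes, so
+  // subtracting that mask adds 1 to negative values; adding kOne and
+  // shifting right by 2 then completes the rounded divide by 4.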
+ if (1 == pass) { + __m256i s3_00_0 = _mm256_cmpgt_epi16(kZero,step2[ 0]); + __m256i s3_01_0 = _mm256_cmpgt_epi16(kZero,step2[ 1]); + __m256i s3_02_0 = _mm256_cmpgt_epi16(kZero,step2[ 2]); + __m256i s3_03_0 = _mm256_cmpgt_epi16(kZero,step2[ 3]); + __m256i s3_04_0 = _mm256_cmpgt_epi16(kZero,step2[ 4]); + __m256i s3_05_0 = _mm256_cmpgt_epi16(kZero,step2[ 5]); + __m256i s3_06_0 = _mm256_cmpgt_epi16(kZero,step2[ 6]); + __m256i s3_07_0 = _mm256_cmpgt_epi16(kZero,step2[ 7]); + __m256i s2_08_0 = _mm256_cmpgt_epi16(kZero,step2[ 8]); + __m256i s2_09_0 = _mm256_cmpgt_epi16(kZero,step2[ 9]); + __m256i s3_10_0 = _mm256_cmpgt_epi16(kZero,step2[10]); + __m256i s3_11_0 = _mm256_cmpgt_epi16(kZero,step2[11]); + __m256i s3_12_0 = _mm256_cmpgt_epi16(kZero,step2[12]); + __m256i s3_13_0 = _mm256_cmpgt_epi16(kZero,step2[13]); + __m256i s2_14_0 = _mm256_cmpgt_epi16(kZero,step2[14]); + __m256i s2_15_0 = _mm256_cmpgt_epi16(kZero,step2[15]); + __m256i s3_16_0 = _mm256_cmpgt_epi16(kZero,step1[16]); + __m256i s3_17_0 = _mm256_cmpgt_epi16(kZero,step1[17]); + __m256i s3_18_0 = _mm256_cmpgt_epi16(kZero,step1[18]); + __m256i s3_19_0 = _mm256_cmpgt_epi16(kZero,step1[19]); + __m256i s3_20_0 = _mm256_cmpgt_epi16(kZero,step2[20]); + __m256i s3_21_0 = _mm256_cmpgt_epi16(kZero,step2[21]); + __m256i s3_22_0 = _mm256_cmpgt_epi16(kZero,step2[22]); + __m256i s3_23_0 = _mm256_cmpgt_epi16(kZero,step2[23]); + __m256i s3_24_0 = _mm256_cmpgt_epi16(kZero,step2[24]); + __m256i s3_25_0 = _mm256_cmpgt_epi16(kZero,step2[25]); + __m256i s3_26_0 = _mm256_cmpgt_epi16(kZero,step2[26]); + __m256i s3_27_0 = _mm256_cmpgt_epi16(kZero,step2[27]); + __m256i s3_28_0 = _mm256_cmpgt_epi16(kZero,step1[28]); + __m256i s3_29_0 = _mm256_cmpgt_epi16(kZero,step1[29]); + __m256i s3_30_0 = _mm256_cmpgt_epi16(kZero,step1[30]); + __m256i s3_31_0 = _mm256_cmpgt_epi16(kZero,step1[31]); + + step2[ 0] = _mm256_sub_epi16(step2[ 0], s3_00_0); + step2[ 1] = _mm256_sub_epi16(step2[ 1], s3_01_0); + step2[ 2] = _mm256_sub_epi16(step2[ 2], s3_02_0); + step2[ 3] = _mm256_sub_epi16(step2[ 3], s3_03_0); + step2[ 4] = _mm256_sub_epi16(step2[ 4], s3_04_0); + step2[ 5] = _mm256_sub_epi16(step2[ 5], s3_05_0); + step2[ 6] = _mm256_sub_epi16(step2[ 6], s3_06_0); + step2[ 7] = _mm256_sub_epi16(step2[ 7], s3_07_0); + step2[ 8] = _mm256_sub_epi16(step2[ 8], s2_08_0); + step2[ 9] = _mm256_sub_epi16(step2[ 9], s2_09_0); + step2[10] = _mm256_sub_epi16(step2[10], s3_10_0); + step2[11] = _mm256_sub_epi16(step2[11], s3_11_0); + step2[12] = _mm256_sub_epi16(step2[12], s3_12_0); + step2[13] = _mm256_sub_epi16(step2[13], s3_13_0); + step2[14] = _mm256_sub_epi16(step2[14], s2_14_0); + step2[15] = _mm256_sub_epi16(step2[15], s2_15_0); + step1[16] = _mm256_sub_epi16(step1[16], s3_16_0); + step1[17] = _mm256_sub_epi16(step1[17], s3_17_0); + step1[18] = _mm256_sub_epi16(step1[18], s3_18_0); + step1[19] = _mm256_sub_epi16(step1[19], s3_19_0); + step2[20] = _mm256_sub_epi16(step2[20], s3_20_0); + step2[21] = _mm256_sub_epi16(step2[21], s3_21_0); + step2[22] = _mm256_sub_epi16(step2[22], s3_22_0); + step2[23] = _mm256_sub_epi16(step2[23], s3_23_0); + step2[24] = _mm256_sub_epi16(step2[24], s3_24_0); + step2[25] = _mm256_sub_epi16(step2[25], s3_25_0); + step2[26] = _mm256_sub_epi16(step2[26], s3_26_0); + step2[27] = _mm256_sub_epi16(step2[27], s3_27_0); + step1[28] = _mm256_sub_epi16(step1[28], s3_28_0); + step1[29] = _mm256_sub_epi16(step1[29], s3_29_0); + step1[30] = _mm256_sub_epi16(step1[30], s3_30_0); + step1[31] = _mm256_sub_epi16(step1[31], s3_31_0); + + step2[ 0] = _mm256_add_epi16(step2[ 0], 
kOne); + step2[ 1] = _mm256_add_epi16(step2[ 1], kOne); + step2[ 2] = _mm256_add_epi16(step2[ 2], kOne); + step2[ 3] = _mm256_add_epi16(step2[ 3], kOne); + step2[ 4] = _mm256_add_epi16(step2[ 4], kOne); + step2[ 5] = _mm256_add_epi16(step2[ 5], kOne); + step2[ 6] = _mm256_add_epi16(step2[ 6], kOne); + step2[ 7] = _mm256_add_epi16(step2[ 7], kOne); + step2[ 8] = _mm256_add_epi16(step2[ 8], kOne); + step2[ 9] = _mm256_add_epi16(step2[ 9], kOne); + step2[10] = _mm256_add_epi16(step2[10], kOne); + step2[11] = _mm256_add_epi16(step2[11], kOne); + step2[12] = _mm256_add_epi16(step2[12], kOne); + step2[13] = _mm256_add_epi16(step2[13], kOne); + step2[14] = _mm256_add_epi16(step2[14], kOne); + step2[15] = _mm256_add_epi16(step2[15], kOne); + step1[16] = _mm256_add_epi16(step1[16], kOne); + step1[17] = _mm256_add_epi16(step1[17], kOne); + step1[18] = _mm256_add_epi16(step1[18], kOne); + step1[19] = _mm256_add_epi16(step1[19], kOne); + step2[20] = _mm256_add_epi16(step2[20], kOne); + step2[21] = _mm256_add_epi16(step2[21], kOne); + step2[22] = _mm256_add_epi16(step2[22], kOne); + step2[23] = _mm256_add_epi16(step2[23], kOne); + step2[24] = _mm256_add_epi16(step2[24], kOne); + step2[25] = _mm256_add_epi16(step2[25], kOne); + step2[26] = _mm256_add_epi16(step2[26], kOne); + step2[27] = _mm256_add_epi16(step2[27], kOne); + step1[28] = _mm256_add_epi16(step1[28], kOne); + step1[29] = _mm256_add_epi16(step1[29], kOne); + step1[30] = _mm256_add_epi16(step1[30], kOne); + step1[31] = _mm256_add_epi16(step1[31], kOne); + + step2[ 0] = _mm256_srai_epi16(step2[ 0], 2); + step2[ 1] = _mm256_srai_epi16(step2[ 1], 2); + step2[ 2] = _mm256_srai_epi16(step2[ 2], 2); + step2[ 3] = _mm256_srai_epi16(step2[ 3], 2); + step2[ 4] = _mm256_srai_epi16(step2[ 4], 2); + step2[ 5] = _mm256_srai_epi16(step2[ 5], 2); + step2[ 6] = _mm256_srai_epi16(step2[ 6], 2); + step2[ 7] = _mm256_srai_epi16(step2[ 7], 2); + step2[ 8] = _mm256_srai_epi16(step2[ 8], 2); + step2[ 9] = _mm256_srai_epi16(step2[ 9], 2); + step2[10] = _mm256_srai_epi16(step2[10], 2); + step2[11] = _mm256_srai_epi16(step2[11], 2); + step2[12] = _mm256_srai_epi16(step2[12], 2); + step2[13] = _mm256_srai_epi16(step2[13], 2); + step2[14] = _mm256_srai_epi16(step2[14], 2); + step2[15] = _mm256_srai_epi16(step2[15], 2); + step1[16] = _mm256_srai_epi16(step1[16], 2); + step1[17] = _mm256_srai_epi16(step1[17], 2); + step1[18] = _mm256_srai_epi16(step1[18], 2); + step1[19] = _mm256_srai_epi16(step1[19], 2); + step2[20] = _mm256_srai_epi16(step2[20], 2); + step2[21] = _mm256_srai_epi16(step2[21], 2); + step2[22] = _mm256_srai_epi16(step2[22], 2); + step2[23] = _mm256_srai_epi16(step2[23], 2); + step2[24] = _mm256_srai_epi16(step2[24], 2); + step2[25] = _mm256_srai_epi16(step2[25], 2); + step2[26] = _mm256_srai_epi16(step2[26], 2); + step2[27] = _mm256_srai_epi16(step2[27], 2); + step1[28] = _mm256_srai_epi16(step1[28], 2); + step1[29] = _mm256_srai_epi16(step1[29], 2); + step1[30] = _mm256_srai_epi16(step1[30], 2); + step1[31] = _mm256_srai_epi16(step1[31], 2); + } +#endif + +#if FDCT32x32_HIGH_PRECISION + if (pass == 0) { +#endif + // Stage 3 + { + step3[0] = _mm256_add_epi16(step2[(8 - 1)], step2[0]); + step3[1] = _mm256_add_epi16(step2[(8 - 2)], step2[1]); + step3[2] = _mm256_add_epi16(step2[(8 - 3)], step2[2]); + step3[3] = _mm256_add_epi16(step2[(8 - 4)], step2[3]); + step3[4] = _mm256_sub_epi16(step2[(8 - 5)], step2[4]); + step3[5] = _mm256_sub_epi16(step2[(8 - 6)], step2[5]); + step3[6] = _mm256_sub_epi16(step2[(8 - 7)], step2[6]); + step3[7] = 
_mm256_sub_epi16(step2[(8 - 8)], step2[7]); + } + { + const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]); + const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]); + const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]); + const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]); + const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16); + const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16); + const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16); + const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16); + const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16); + const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16); + const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16); + const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16); + // dct_const_round_shift + const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); + const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); + const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); + const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); + const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); + const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); + const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); + const __m256i s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); + const __m256i s3_10_6 = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS); + const __m256i s3_10_7 = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS); + const __m256i s3_11_6 = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS); + const __m256i s3_11_7 = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS); + const __m256i s3_12_6 = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS); + const __m256i s3_12_7 = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS); + const __m256i s3_13_6 = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS); + const __m256i s3_13_7 = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS); + // Combine + step3[10] = _mm256_packs_epi32(s3_10_6, s3_10_7); + step3[11] = _mm256_packs_epi32(s3_11_6, s3_11_7); + step3[12] = _mm256_packs_epi32(s3_12_6, s3_12_7); + step3[13] = _mm256_packs_epi32(s3_13_6, s3_13_7); + } + { + step3[16] = _mm256_add_epi16(step2[23], step1[16]); + step3[17] = _mm256_add_epi16(step2[22], step1[17]); + step3[18] = _mm256_add_epi16(step2[21], step1[18]); + step3[19] = _mm256_add_epi16(step2[20], step1[19]); + step3[20] = _mm256_sub_epi16(step1[19], step2[20]); + step3[21] = _mm256_sub_epi16(step1[18], step2[21]); + step3[22] = _mm256_sub_epi16(step1[17], step2[22]); + step3[23] = _mm256_sub_epi16(step1[16], step2[23]); + step3[24] = _mm256_sub_epi16(step1[31], step2[24]); + step3[25] = _mm256_sub_epi16(step1[30], step2[25]); + step3[26] = _mm256_sub_epi16(step1[29], step2[26]); + step3[27] = _mm256_sub_epi16(step1[28], step2[27]); + step3[28] = _mm256_add_epi16(step2[27], step1[28]); + step3[29] = _mm256_add_epi16(step2[26], step1[29]); + step3[30] = _mm256_add_epi16(step2[25], step1[30]); + step3[31] = _mm256_add_epi16(step2[24], step1[31]); + } + + // Stage 4 + { + step1[ 0] = _mm256_add_epi16(step3[ 3], step3[ 0]); + step1[ 1] = _mm256_add_epi16(step3[ 2], step3[ 1]); + step1[ 2] = _mm256_sub_epi16(step3[ 1], step3[ 2]); + step1[ 3] = _mm256_sub_epi16(step3[ 0], step3[ 3]); + step1[ 8] = _mm256_add_epi16(step3[11], step2[ 8]); + step1[ 9] = _mm256_add_epi16(step3[10], step2[ 9]); + 
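+      // (Stage 4 continues: step1[8..15] butterfly step2[8, 9, 14, 15]
+      // against the rotated step3[10..13] produced in stage 3.)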
step1[10] = _mm256_sub_epi16(step2[ 9], step3[10]); + step1[11] = _mm256_sub_epi16(step2[ 8], step3[11]); + step1[12] = _mm256_sub_epi16(step2[15], step3[12]); + step1[13] = _mm256_sub_epi16(step2[14], step3[13]); + step1[14] = _mm256_add_epi16(step3[13], step2[14]); + step1[15] = _mm256_add_epi16(step3[12], step2[15]); + } + { + const __m256i s1_05_0 = _mm256_unpacklo_epi16(step3[6], step3[5]); + const __m256i s1_05_1 = _mm256_unpackhi_epi16(step3[6], step3[5]); + const __m256i s1_05_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_m16); + const __m256i s1_05_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_m16); + const __m256i s1_06_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_p16); + const __m256i s1_06_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_p16); + // dct_const_round_shift + const __m256i s1_05_4 = _mm256_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING); + const __m256i s1_05_5 = _mm256_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING); + const __m256i s1_06_4 = _mm256_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING); + const __m256i s1_06_5 = _mm256_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING); + const __m256i s1_05_6 = _mm256_srai_epi32(s1_05_4, DCT_CONST_BITS); + const __m256i s1_05_7 = _mm256_srai_epi32(s1_05_5, DCT_CONST_BITS); + const __m256i s1_06_6 = _mm256_srai_epi32(s1_06_4, DCT_CONST_BITS); + const __m256i s1_06_7 = _mm256_srai_epi32(s1_06_5, DCT_CONST_BITS); + // Combine + step1[5] = _mm256_packs_epi32(s1_05_6, s1_05_7); + step1[6] = _mm256_packs_epi32(s1_06_6, s1_06_7); + } + { + const __m256i s1_18_0 = _mm256_unpacklo_epi16(step3[18], step3[29]); + const __m256i s1_18_1 = _mm256_unpackhi_epi16(step3[18], step3[29]); + const __m256i s1_19_0 = _mm256_unpacklo_epi16(step3[19], step3[28]); + const __m256i s1_19_1 = _mm256_unpackhi_epi16(step3[19], step3[28]); + const __m256i s1_20_0 = _mm256_unpacklo_epi16(step3[20], step3[27]); + const __m256i s1_20_1 = _mm256_unpackhi_epi16(step3[20], step3[27]); + const __m256i s1_21_0 = _mm256_unpacklo_epi16(step3[21], step3[26]); + const __m256i s1_21_1 = _mm256_unpackhi_epi16(step3[21], step3[26]); + const __m256i s1_18_2 = _mm256_madd_epi16(s1_18_0, k__cospi_m08_p24); + const __m256i s1_18_3 = _mm256_madd_epi16(s1_18_1, k__cospi_m08_p24); + const __m256i s1_19_2 = _mm256_madd_epi16(s1_19_0, k__cospi_m08_p24); + const __m256i s1_19_3 = _mm256_madd_epi16(s1_19_1, k__cospi_m08_p24); + const __m256i s1_20_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m24_m08); + const __m256i s1_20_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m24_m08); + const __m256i s1_21_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m24_m08); + const __m256i s1_21_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m24_m08); + const __m256i s1_26_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m08_p24); + const __m256i s1_26_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m08_p24); + const __m256i s1_27_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m08_p24); + const __m256i s1_27_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m08_p24); + const __m256i s1_28_2 = _mm256_madd_epi16(s1_19_0, k__cospi_p24_p08); + const __m256i s1_28_3 = _mm256_madd_epi16(s1_19_1, k__cospi_p24_p08); + const __m256i s1_29_2 = _mm256_madd_epi16(s1_18_0, k__cospi_p24_p08); + const __m256i s1_29_3 = _mm256_madd_epi16(s1_18_1, k__cospi_p24_p08); + // dct_const_round_shift + const __m256i s1_18_4 = _mm256_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING); + const __m256i s1_18_5 = _mm256_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING); + const __m256i s1_19_4 = _mm256_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING); + const __m256i s1_19_5 = _mm256_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING); + const 
__m256i s1_20_4 = _mm256_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING); + const __m256i s1_20_5 = _mm256_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING); + const __m256i s1_21_4 = _mm256_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING); + const __m256i s1_21_5 = _mm256_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING); + const __m256i s1_26_4 = _mm256_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING); + const __m256i s1_26_5 = _mm256_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING); + const __m256i s1_27_4 = _mm256_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING); + const __m256i s1_27_5 = _mm256_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING); + const __m256i s1_28_4 = _mm256_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING); + const __m256i s1_28_5 = _mm256_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING); + const __m256i s1_29_4 = _mm256_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING); + const __m256i s1_29_5 = _mm256_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING); + const __m256i s1_18_6 = _mm256_srai_epi32(s1_18_4, DCT_CONST_BITS); + const __m256i s1_18_7 = _mm256_srai_epi32(s1_18_5, DCT_CONST_BITS); + const __m256i s1_19_6 = _mm256_srai_epi32(s1_19_4, DCT_CONST_BITS); + const __m256i s1_19_7 = _mm256_srai_epi32(s1_19_5, DCT_CONST_BITS); + const __m256i s1_20_6 = _mm256_srai_epi32(s1_20_4, DCT_CONST_BITS); + const __m256i s1_20_7 = _mm256_srai_epi32(s1_20_5, DCT_CONST_BITS); + const __m256i s1_21_6 = _mm256_srai_epi32(s1_21_4, DCT_CONST_BITS); + const __m256i s1_21_7 = _mm256_srai_epi32(s1_21_5, DCT_CONST_BITS); + const __m256i s1_26_6 = _mm256_srai_epi32(s1_26_4, DCT_CONST_BITS); + const __m256i s1_26_7 = _mm256_srai_epi32(s1_26_5, DCT_CONST_BITS); + const __m256i s1_27_6 = _mm256_srai_epi32(s1_27_4, DCT_CONST_BITS); + const __m256i s1_27_7 = _mm256_srai_epi32(s1_27_5, DCT_CONST_BITS); + const __m256i s1_28_6 = _mm256_srai_epi32(s1_28_4, DCT_CONST_BITS); + const __m256i s1_28_7 = _mm256_srai_epi32(s1_28_5, DCT_CONST_BITS); + const __m256i s1_29_6 = _mm256_srai_epi32(s1_29_4, DCT_CONST_BITS); + const __m256i s1_29_7 = _mm256_srai_epi32(s1_29_5, DCT_CONST_BITS); + // Combine + step1[18] = _mm256_packs_epi32(s1_18_6, s1_18_7); + step1[19] = _mm256_packs_epi32(s1_19_6, s1_19_7); + step1[20] = _mm256_packs_epi32(s1_20_6, s1_20_7); + step1[21] = _mm256_packs_epi32(s1_21_6, s1_21_7); + step1[26] = _mm256_packs_epi32(s1_26_6, s1_26_7); + step1[27] = _mm256_packs_epi32(s1_27_6, s1_27_7); + step1[28] = _mm256_packs_epi32(s1_28_6, s1_28_7); + step1[29] = _mm256_packs_epi32(s1_29_6, s1_29_7); + } + // Stage 5 + { + step2[4] = _mm256_add_epi16(step1[5], step3[4]); + step2[5] = _mm256_sub_epi16(step3[4], step1[5]); + step2[6] = _mm256_sub_epi16(step3[7], step1[6]); + step2[7] = _mm256_add_epi16(step1[6], step3[7]); + } + { + const __m256i out_00_0 = _mm256_unpacklo_epi16(step1[0], step1[1]); + const __m256i out_00_1 = _mm256_unpackhi_epi16(step1[0], step1[1]); + const __m256i out_08_0 = _mm256_unpacklo_epi16(step1[2], step1[3]); + const __m256i out_08_1 = _mm256_unpackhi_epi16(step1[2], step1[3]); + const __m256i out_00_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_p16); + const __m256i out_00_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_p16); + const __m256i out_16_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_m16); + const __m256i out_16_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_m16); + const __m256i out_08_2 = _mm256_madd_epi16(out_08_0, k__cospi_p24_p08); + const __m256i out_08_3 = _mm256_madd_epi16(out_08_1, k__cospi_p24_p08); + const __m256i out_24_2 = _mm256_madd_epi16(out_08_0, k__cospi_m08_p24); + const __m256i out_24_3 = _mm256_madd_epi16(out_08_1, 
k__cospi_m08_p24); + // dct_const_round_shift + const __m256i out_00_4 = _mm256_add_epi32(out_00_2, k__DCT_CONST_ROUNDING); + const __m256i out_00_5 = _mm256_add_epi32(out_00_3, k__DCT_CONST_ROUNDING); + const __m256i out_16_4 = _mm256_add_epi32(out_16_2, k__DCT_CONST_ROUNDING); + const __m256i out_16_5 = _mm256_add_epi32(out_16_3, k__DCT_CONST_ROUNDING); + const __m256i out_08_4 = _mm256_add_epi32(out_08_2, k__DCT_CONST_ROUNDING); + const __m256i out_08_5 = _mm256_add_epi32(out_08_3, k__DCT_CONST_ROUNDING); + const __m256i out_24_4 = _mm256_add_epi32(out_24_2, k__DCT_CONST_ROUNDING); + const __m256i out_24_5 = _mm256_add_epi32(out_24_3, k__DCT_CONST_ROUNDING); + const __m256i out_00_6 = _mm256_srai_epi32(out_00_4, DCT_CONST_BITS); + const __m256i out_00_7 = _mm256_srai_epi32(out_00_5, DCT_CONST_BITS); + const __m256i out_16_6 = _mm256_srai_epi32(out_16_4, DCT_CONST_BITS); + const __m256i out_16_7 = _mm256_srai_epi32(out_16_5, DCT_CONST_BITS); + const __m256i out_08_6 = _mm256_srai_epi32(out_08_4, DCT_CONST_BITS); + const __m256i out_08_7 = _mm256_srai_epi32(out_08_5, DCT_CONST_BITS); + const __m256i out_24_6 = _mm256_srai_epi32(out_24_4, DCT_CONST_BITS); + const __m256i out_24_7 = _mm256_srai_epi32(out_24_5, DCT_CONST_BITS); + // Combine + out[ 0] = _mm256_packs_epi32(out_00_6, out_00_7); + out[16] = _mm256_packs_epi32(out_16_6, out_16_7); + out[ 8] = _mm256_packs_epi32(out_08_6, out_08_7); + out[24] = _mm256_packs_epi32(out_24_6, out_24_7); + } + { + const __m256i s2_09_0 = _mm256_unpacklo_epi16(step1[ 9], step1[14]); + const __m256i s2_09_1 = _mm256_unpackhi_epi16(step1[ 9], step1[14]); + const __m256i s2_10_0 = _mm256_unpacklo_epi16(step1[10], step1[13]); + const __m256i s2_10_1 = _mm256_unpackhi_epi16(step1[10], step1[13]); + const __m256i s2_09_2 = _mm256_madd_epi16(s2_09_0, k__cospi_m08_p24); + const __m256i s2_09_3 = _mm256_madd_epi16(s2_09_1, k__cospi_m08_p24); + const __m256i s2_10_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m24_m08); + const __m256i s2_10_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m24_m08); + const __m256i s2_13_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m08_p24); + const __m256i s2_13_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m08_p24); + const __m256i s2_14_2 = _mm256_madd_epi16(s2_09_0, k__cospi_p24_p08); + const __m256i s2_14_3 = _mm256_madd_epi16(s2_09_1, k__cospi_p24_p08); + // dct_const_round_shift + const __m256i s2_09_4 = _mm256_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING); + const __m256i s2_09_5 = _mm256_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING); + const __m256i s2_10_4 = _mm256_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING); + const __m256i s2_10_5 = _mm256_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING); + const __m256i s2_13_4 = _mm256_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING); + const __m256i s2_13_5 = _mm256_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING); + const __m256i s2_14_4 = _mm256_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING); + const __m256i s2_14_5 = _mm256_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING); + const __m256i s2_09_6 = _mm256_srai_epi32(s2_09_4, DCT_CONST_BITS); + const __m256i s2_09_7 = _mm256_srai_epi32(s2_09_5, DCT_CONST_BITS); + const __m256i s2_10_6 = _mm256_srai_epi32(s2_10_4, DCT_CONST_BITS); + const __m256i s2_10_7 = _mm256_srai_epi32(s2_10_5, DCT_CONST_BITS); + const __m256i s2_13_6 = _mm256_srai_epi32(s2_13_4, DCT_CONST_BITS); + const __m256i s2_13_7 = _mm256_srai_epi32(s2_13_5, DCT_CONST_BITS); + const __m256i s2_14_6 = _mm256_srai_epi32(s2_14_4, DCT_CONST_BITS); + const __m256i s2_14_7 = _mm256_srai_epi32(s2_14_5, DCT_CONST_BITS); + // Combine + step2[ 9] 
= _mm256_packs_epi32(s2_09_6, s2_09_7); + step2[10] = _mm256_packs_epi32(s2_10_6, s2_10_7); + step2[13] = _mm256_packs_epi32(s2_13_6, s2_13_7); + step2[14] = _mm256_packs_epi32(s2_14_6, s2_14_7); + } + { + step2[16] = _mm256_add_epi16(step1[19], step3[16]); + step2[17] = _mm256_add_epi16(step1[18], step3[17]); + step2[18] = _mm256_sub_epi16(step3[17], step1[18]); + step2[19] = _mm256_sub_epi16(step3[16], step1[19]); + step2[20] = _mm256_sub_epi16(step3[23], step1[20]); + step2[21] = _mm256_sub_epi16(step3[22], step1[21]); + step2[22] = _mm256_add_epi16(step1[21], step3[22]); + step2[23] = _mm256_add_epi16(step1[20], step3[23]); + step2[24] = _mm256_add_epi16(step1[27], step3[24]); + step2[25] = _mm256_add_epi16(step1[26], step3[25]); + step2[26] = _mm256_sub_epi16(step3[25], step1[26]); + step2[27] = _mm256_sub_epi16(step3[24], step1[27]); + step2[28] = _mm256_sub_epi16(step3[31], step1[28]); + step2[29] = _mm256_sub_epi16(step3[30], step1[29]); + step2[30] = _mm256_add_epi16(step1[29], step3[30]); + step2[31] = _mm256_add_epi16(step1[28], step3[31]); + } + // Stage 6 + { + const __m256i out_04_0 = _mm256_unpacklo_epi16(step2[4], step2[7]); + const __m256i out_04_1 = _mm256_unpackhi_epi16(step2[4], step2[7]); + const __m256i out_20_0 = _mm256_unpacklo_epi16(step2[5], step2[6]); + const __m256i out_20_1 = _mm256_unpackhi_epi16(step2[5], step2[6]); + const __m256i out_12_0 = _mm256_unpacklo_epi16(step2[5], step2[6]); + const __m256i out_12_1 = _mm256_unpackhi_epi16(step2[5], step2[6]); + const __m256i out_28_0 = _mm256_unpacklo_epi16(step2[4], step2[7]); + const __m256i out_28_1 = _mm256_unpackhi_epi16(step2[4], step2[7]); + const __m256i out_04_2 = _mm256_madd_epi16(out_04_0, k__cospi_p28_p04); + const __m256i out_04_3 = _mm256_madd_epi16(out_04_1, k__cospi_p28_p04); + const __m256i out_20_2 = _mm256_madd_epi16(out_20_0, k__cospi_p12_p20); + const __m256i out_20_3 = _mm256_madd_epi16(out_20_1, k__cospi_p12_p20); + const __m256i out_12_2 = _mm256_madd_epi16(out_12_0, k__cospi_m20_p12); + const __m256i out_12_3 = _mm256_madd_epi16(out_12_1, k__cospi_m20_p12); + const __m256i out_28_2 = _mm256_madd_epi16(out_28_0, k__cospi_m04_p28); + const __m256i out_28_3 = _mm256_madd_epi16(out_28_1, k__cospi_m04_p28); + // dct_const_round_shift + const __m256i out_04_4 = _mm256_add_epi32(out_04_2, k__DCT_CONST_ROUNDING); + const __m256i out_04_5 = _mm256_add_epi32(out_04_3, k__DCT_CONST_ROUNDING); + const __m256i out_20_4 = _mm256_add_epi32(out_20_2, k__DCT_CONST_ROUNDING); + const __m256i out_20_5 = _mm256_add_epi32(out_20_3, k__DCT_CONST_ROUNDING); + const __m256i out_12_4 = _mm256_add_epi32(out_12_2, k__DCT_CONST_ROUNDING); + const __m256i out_12_5 = _mm256_add_epi32(out_12_3, k__DCT_CONST_ROUNDING); + const __m256i out_28_4 = _mm256_add_epi32(out_28_2, k__DCT_CONST_ROUNDING); + const __m256i out_28_5 = _mm256_add_epi32(out_28_3, k__DCT_CONST_ROUNDING); + const __m256i out_04_6 = _mm256_srai_epi32(out_04_4, DCT_CONST_BITS); + const __m256i out_04_7 = _mm256_srai_epi32(out_04_5, DCT_CONST_BITS); + const __m256i out_20_6 = _mm256_srai_epi32(out_20_4, DCT_CONST_BITS); + const __m256i out_20_7 = _mm256_srai_epi32(out_20_5, DCT_CONST_BITS); + const __m256i out_12_6 = _mm256_srai_epi32(out_12_4, DCT_CONST_BITS); + const __m256i out_12_7 = _mm256_srai_epi32(out_12_5, DCT_CONST_BITS); + const __m256i out_28_6 = _mm256_srai_epi32(out_28_4, DCT_CONST_BITS); + const __m256i out_28_7 = _mm256_srai_epi32(out_28_5, DCT_CONST_BITS); + // Combine + out[ 4] = _mm256_packs_epi32(out_04_6, out_04_7); + out[20] = 
_mm256_packs_epi32(out_20_6, out_20_7); + out[12] = _mm256_packs_epi32(out_12_6, out_12_7); + out[28] = _mm256_packs_epi32(out_28_6, out_28_7); + } + { + step3[ 8] = _mm256_add_epi16(step2[ 9], step1[ 8]); + step3[ 9] = _mm256_sub_epi16(step1[ 8], step2[ 9]); + step3[10] = _mm256_sub_epi16(step1[11], step2[10]); + step3[11] = _mm256_add_epi16(step2[10], step1[11]); + step3[12] = _mm256_add_epi16(step2[13], step1[12]); + step3[13] = _mm256_sub_epi16(step1[12], step2[13]); + step3[14] = _mm256_sub_epi16(step1[15], step2[14]); + step3[15] = _mm256_add_epi16(step2[14], step1[15]); + } + { + const __m256i s3_17_0 = _mm256_unpacklo_epi16(step2[17], step2[30]); + const __m256i s3_17_1 = _mm256_unpackhi_epi16(step2[17], step2[30]); + const __m256i s3_18_0 = _mm256_unpacklo_epi16(step2[18], step2[29]); + const __m256i s3_18_1 = _mm256_unpackhi_epi16(step2[18], step2[29]); + const __m256i s3_21_0 = _mm256_unpacklo_epi16(step2[21], step2[26]); + const __m256i s3_21_1 = _mm256_unpackhi_epi16(step2[21], step2[26]); + const __m256i s3_22_0 = _mm256_unpacklo_epi16(step2[22], step2[25]); + const __m256i s3_22_1 = _mm256_unpackhi_epi16(step2[22], step2[25]); + const __m256i s3_17_2 = _mm256_madd_epi16(s3_17_0, k__cospi_m04_p28); + const __m256i s3_17_3 = _mm256_madd_epi16(s3_17_1, k__cospi_m04_p28); + const __m256i s3_18_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m28_m04); + const __m256i s3_18_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m28_m04); + const __m256i s3_21_2 = _mm256_madd_epi16(s3_21_0, k__cospi_m20_p12); + const __m256i s3_21_3 = _mm256_madd_epi16(s3_21_1, k__cospi_m20_p12); + const __m256i s3_22_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m12_m20); + const __m256i s3_22_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m12_m20); + const __m256i s3_25_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m20_p12); + const __m256i s3_25_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m20_p12); + const __m256i s3_26_2 = _mm256_madd_epi16(s3_21_0, k__cospi_p12_p20); + const __m256i s3_26_3 = _mm256_madd_epi16(s3_21_1, k__cospi_p12_p20); + const __m256i s3_29_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m04_p28); + const __m256i s3_29_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m04_p28); + const __m256i s3_30_2 = _mm256_madd_epi16(s3_17_0, k__cospi_p28_p04); + const __m256i s3_30_3 = _mm256_madd_epi16(s3_17_1, k__cospi_p28_p04); + // dct_const_round_shift + const __m256i s3_17_4 = _mm256_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING); + const __m256i s3_17_5 = _mm256_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING); + const __m256i s3_18_4 = _mm256_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING); + const __m256i s3_18_5 = _mm256_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING); + const __m256i s3_21_4 = _mm256_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING); + const __m256i s3_21_5 = _mm256_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING); + const __m256i s3_22_4 = _mm256_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING); + const __m256i s3_22_5 = _mm256_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING); + const __m256i s3_17_6 = _mm256_srai_epi32(s3_17_4, DCT_CONST_BITS); + const __m256i s3_17_7 = _mm256_srai_epi32(s3_17_5, DCT_CONST_BITS); + const __m256i s3_18_6 = _mm256_srai_epi32(s3_18_4, DCT_CONST_BITS); + const __m256i s3_18_7 = _mm256_srai_epi32(s3_18_5, DCT_CONST_BITS); + const __m256i s3_21_6 = _mm256_srai_epi32(s3_21_4, DCT_CONST_BITS); + const __m256i s3_21_7 = _mm256_srai_epi32(s3_21_5, DCT_CONST_BITS); + const __m256i s3_22_6 = _mm256_srai_epi32(s3_22_4, DCT_CONST_BITS); + const __m256i s3_22_7 = _mm256_srai_epi32(s3_22_5, DCT_CONST_BITS); + const __m256i s3_25_4 = 
_mm256_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING); + const __m256i s3_25_5 = _mm256_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING); + const __m256i s3_26_4 = _mm256_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING); + const __m256i s3_26_5 = _mm256_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING); + const __m256i s3_29_4 = _mm256_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING); + const __m256i s3_29_5 = _mm256_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING); + const __m256i s3_30_4 = _mm256_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING); + const __m256i s3_30_5 = _mm256_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING); + const __m256i s3_25_6 = _mm256_srai_epi32(s3_25_4, DCT_CONST_BITS); + const __m256i s3_25_7 = _mm256_srai_epi32(s3_25_5, DCT_CONST_BITS); + const __m256i s3_26_6 = _mm256_srai_epi32(s3_26_4, DCT_CONST_BITS); + const __m256i s3_26_7 = _mm256_srai_epi32(s3_26_5, DCT_CONST_BITS); + const __m256i s3_29_6 = _mm256_srai_epi32(s3_29_4, DCT_CONST_BITS); + const __m256i s3_29_7 = _mm256_srai_epi32(s3_29_5, DCT_CONST_BITS); + const __m256i s3_30_6 = _mm256_srai_epi32(s3_30_4, DCT_CONST_BITS); + const __m256i s3_30_7 = _mm256_srai_epi32(s3_30_5, DCT_CONST_BITS); + // Combine + step3[17] = _mm256_packs_epi32(s3_17_6, s3_17_7); + step3[18] = _mm256_packs_epi32(s3_18_6, s3_18_7); + step3[21] = _mm256_packs_epi32(s3_21_6, s3_21_7); + step3[22] = _mm256_packs_epi32(s3_22_6, s3_22_7); + // Combine + step3[25] = _mm256_packs_epi32(s3_25_6, s3_25_7); + step3[26] = _mm256_packs_epi32(s3_26_6, s3_26_7); + step3[29] = _mm256_packs_epi32(s3_29_6, s3_29_7); + step3[30] = _mm256_packs_epi32(s3_30_6, s3_30_7); + } + // Stage 7 + { + const __m256i out_02_0 = _mm256_unpacklo_epi16(step3[ 8], step3[15]); + const __m256i out_02_1 = _mm256_unpackhi_epi16(step3[ 8], step3[15]); + const __m256i out_18_0 = _mm256_unpacklo_epi16(step3[ 9], step3[14]); + const __m256i out_18_1 = _mm256_unpackhi_epi16(step3[ 9], step3[14]); + const __m256i out_10_0 = _mm256_unpacklo_epi16(step3[10], step3[13]); + const __m256i out_10_1 = _mm256_unpackhi_epi16(step3[10], step3[13]); + const __m256i out_26_0 = _mm256_unpacklo_epi16(step3[11], step3[12]); + const __m256i out_26_1 = _mm256_unpackhi_epi16(step3[11], step3[12]); + const __m256i out_02_2 = _mm256_madd_epi16(out_02_0, k__cospi_p30_p02); + const __m256i out_02_3 = _mm256_madd_epi16(out_02_1, k__cospi_p30_p02); + const __m256i out_18_2 = _mm256_madd_epi16(out_18_0, k__cospi_p14_p18); + const __m256i out_18_3 = _mm256_madd_epi16(out_18_1, k__cospi_p14_p18); + const __m256i out_10_2 = _mm256_madd_epi16(out_10_0, k__cospi_p22_p10); + const __m256i out_10_3 = _mm256_madd_epi16(out_10_1, k__cospi_p22_p10); + const __m256i out_26_2 = _mm256_madd_epi16(out_26_0, k__cospi_p06_p26); + const __m256i out_26_3 = _mm256_madd_epi16(out_26_1, k__cospi_p06_p26); + const __m256i out_06_2 = _mm256_madd_epi16(out_26_0, k__cospi_m26_p06); + const __m256i out_06_3 = _mm256_madd_epi16(out_26_1, k__cospi_m26_p06); + const __m256i out_22_2 = _mm256_madd_epi16(out_10_0, k__cospi_m10_p22); + const __m256i out_22_3 = _mm256_madd_epi16(out_10_1, k__cospi_m10_p22); + const __m256i out_14_2 = _mm256_madd_epi16(out_18_0, k__cospi_m18_p14); + const __m256i out_14_3 = _mm256_madd_epi16(out_18_1, k__cospi_m18_p14); + const __m256i out_30_2 = _mm256_madd_epi16(out_02_0, k__cospi_m02_p30); + const __m256i out_30_3 = _mm256_madd_epi16(out_02_1, k__cospi_m02_p30); + // dct_const_round_shift + const __m256i out_02_4 = _mm256_add_epi32(out_02_2, k__DCT_CONST_ROUNDING); + const __m256i out_02_5 = _mm256_add_epi32(out_02_3, k__DCT_CONST_ROUNDING); + 
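+      // Each unpacklo/unpackhi + _mm256_madd_epi16 pair above computes,
+      // per 16-bit coefficient pair (a, b), the fixed-point rotation
+      //   (int32_t)a * c1 + (int32_t)b * c2
+      // and dct_const_round_shift then returns it to 16-bit precision;
+      // roughly, (x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS per 32-bit
+      // lane, with DCT_CONST_ROUNDING == 1 << (DCT_CONST_BITS - 1) in
+      // the scalar reference.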
const __m256i out_18_4 = _mm256_add_epi32(out_18_2, k__DCT_CONST_ROUNDING); + const __m256i out_18_5 = _mm256_add_epi32(out_18_3, k__DCT_CONST_ROUNDING); + const __m256i out_10_4 = _mm256_add_epi32(out_10_2, k__DCT_CONST_ROUNDING); + const __m256i out_10_5 = _mm256_add_epi32(out_10_3, k__DCT_CONST_ROUNDING); + const __m256i out_26_4 = _mm256_add_epi32(out_26_2, k__DCT_CONST_ROUNDING); + const __m256i out_26_5 = _mm256_add_epi32(out_26_3, k__DCT_CONST_ROUNDING); + const __m256i out_06_4 = _mm256_add_epi32(out_06_2, k__DCT_CONST_ROUNDING); + const __m256i out_06_5 = _mm256_add_epi32(out_06_3, k__DCT_CONST_ROUNDING); + const __m256i out_22_4 = _mm256_add_epi32(out_22_2, k__DCT_CONST_ROUNDING); + const __m256i out_22_5 = _mm256_add_epi32(out_22_3, k__DCT_CONST_ROUNDING); + const __m256i out_14_4 = _mm256_add_epi32(out_14_2, k__DCT_CONST_ROUNDING); + const __m256i out_14_5 = _mm256_add_epi32(out_14_3, k__DCT_CONST_ROUNDING); + const __m256i out_30_4 = _mm256_add_epi32(out_30_2, k__DCT_CONST_ROUNDING); + const __m256i out_30_5 = _mm256_add_epi32(out_30_3, k__DCT_CONST_ROUNDING); + const __m256i out_02_6 = _mm256_srai_epi32(out_02_4, DCT_CONST_BITS); + const __m256i out_02_7 = _mm256_srai_epi32(out_02_5, DCT_CONST_BITS); + const __m256i out_18_6 = _mm256_srai_epi32(out_18_4, DCT_CONST_BITS); + const __m256i out_18_7 = _mm256_srai_epi32(out_18_5, DCT_CONST_BITS); + const __m256i out_10_6 = _mm256_srai_epi32(out_10_4, DCT_CONST_BITS); + const __m256i out_10_7 = _mm256_srai_epi32(out_10_5, DCT_CONST_BITS); + const __m256i out_26_6 = _mm256_srai_epi32(out_26_4, DCT_CONST_BITS); + const __m256i out_26_7 = _mm256_srai_epi32(out_26_5, DCT_CONST_BITS); + const __m256i out_06_6 = _mm256_srai_epi32(out_06_4, DCT_CONST_BITS); + const __m256i out_06_7 = _mm256_srai_epi32(out_06_5, DCT_CONST_BITS); + const __m256i out_22_6 = _mm256_srai_epi32(out_22_4, DCT_CONST_BITS); + const __m256i out_22_7 = _mm256_srai_epi32(out_22_5, DCT_CONST_BITS); + const __m256i out_14_6 = _mm256_srai_epi32(out_14_4, DCT_CONST_BITS); + const __m256i out_14_7 = _mm256_srai_epi32(out_14_5, DCT_CONST_BITS); + const __m256i out_30_6 = _mm256_srai_epi32(out_30_4, DCT_CONST_BITS); + const __m256i out_30_7 = _mm256_srai_epi32(out_30_5, DCT_CONST_BITS); + // Combine + out[ 2] = _mm256_packs_epi32(out_02_6, out_02_7); + out[18] = _mm256_packs_epi32(out_18_6, out_18_7); + out[10] = _mm256_packs_epi32(out_10_6, out_10_7); + out[26] = _mm256_packs_epi32(out_26_6, out_26_7); + out[ 6] = _mm256_packs_epi32(out_06_6, out_06_7); + out[22] = _mm256_packs_epi32(out_22_6, out_22_7); + out[14] = _mm256_packs_epi32(out_14_6, out_14_7); + out[30] = _mm256_packs_epi32(out_30_6, out_30_7); + } + { + step1[16] = _mm256_add_epi16(step3[17], step2[16]); + step1[17] = _mm256_sub_epi16(step2[16], step3[17]); + step1[18] = _mm256_sub_epi16(step2[19], step3[18]); + step1[19] = _mm256_add_epi16(step3[18], step2[19]); + step1[20] = _mm256_add_epi16(step3[21], step2[20]); + step1[21] = _mm256_sub_epi16(step2[20], step3[21]); + step1[22] = _mm256_sub_epi16(step2[23], step3[22]); + step1[23] = _mm256_add_epi16(step3[22], step2[23]); + step1[24] = _mm256_add_epi16(step3[25], step2[24]); + step1[25] = _mm256_sub_epi16(step2[24], step3[25]); + step1[26] = _mm256_sub_epi16(step2[27], step3[26]); + step1[27] = _mm256_add_epi16(step3[26], step2[27]); + step1[28] = _mm256_add_epi16(step3[29], step2[28]); + step1[29] = _mm256_sub_epi16(step2[28], step3[29]); + step1[30] = _mm256_sub_epi16(step2[31], step3[30]); + step1[31] = _mm256_add_epi16(step3[30], step2[31]); + } + // 
Final stage: output indices are bit-reversed.
+    {
+      const __m256i out_01_0 = _mm256_unpacklo_epi16(step1[16], step1[31]);
+      const __m256i out_01_1 = _mm256_unpackhi_epi16(step1[16], step1[31]);
+      const __m256i out_17_0 = _mm256_unpacklo_epi16(step1[17], step1[30]);
+      const __m256i out_17_1 = _mm256_unpackhi_epi16(step1[17], step1[30]);
+      const __m256i out_09_0 = _mm256_unpacklo_epi16(step1[18], step1[29]);
+      const __m256i out_09_1 = _mm256_unpackhi_epi16(step1[18], step1[29]);
+      const __m256i out_25_0 = _mm256_unpacklo_epi16(step1[19], step1[28]);
+      const __m256i out_25_1 = _mm256_unpackhi_epi16(step1[19], step1[28]);
+      const __m256i out_01_2 = _mm256_madd_epi16(out_01_0, k__cospi_p31_p01);
+      const __m256i out_01_3 = _mm256_madd_epi16(out_01_1, k__cospi_p31_p01);
+      const __m256i out_17_2 = _mm256_madd_epi16(out_17_0, k__cospi_p15_p17);
+      const __m256i out_17_3 = _mm256_madd_epi16(out_17_1, k__cospi_p15_p17);
+      const __m256i out_09_2 = _mm256_madd_epi16(out_09_0, k__cospi_p23_p09);
+      const __m256i out_09_3 = _mm256_madd_epi16(out_09_1, k__cospi_p23_p09);
+      const __m256i out_25_2 = _mm256_madd_epi16(out_25_0, k__cospi_p07_p25);
+      const __m256i out_25_3 = _mm256_madd_epi16(out_25_1, k__cospi_p07_p25);
+      const __m256i out_07_2 = _mm256_madd_epi16(out_25_0, k__cospi_m25_p07);
+      const __m256i out_07_3 = _mm256_madd_epi16(out_25_1, k__cospi_m25_p07);
+      const __m256i out_23_2 = _mm256_madd_epi16(out_09_0, k__cospi_m09_p23);
+      const __m256i out_23_3 = _mm256_madd_epi16(out_09_1, k__cospi_m09_p23);
+      const __m256i out_15_2 = _mm256_madd_epi16(out_17_0, k__cospi_m17_p15);
+      const __m256i out_15_3 = _mm256_madd_epi16(out_17_1, k__cospi_m17_p15);
+      const __m256i out_31_2 = _mm256_madd_epi16(out_01_0, k__cospi_m01_p31);
+      const __m256i out_31_3 = _mm256_madd_epi16(out_01_1, k__cospi_m01_p31);
+      // dct_const_round_shift
+      const __m256i out_01_4 = _mm256_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_01_5 = _mm256_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_17_4 = _mm256_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_17_5 = _mm256_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_09_4 = _mm256_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_09_5 = _mm256_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_25_4 = _mm256_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_25_5 = _mm256_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_07_4 = _mm256_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_07_5 = _mm256_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_23_4 = _mm256_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_23_5 = _mm256_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_15_4 = _mm256_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_15_5 = _mm256_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_31_4 = _mm256_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_31_5 = _mm256_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_01_6 = _mm256_srai_epi32(out_01_4, DCT_CONST_BITS);
+      const __m256i out_01_7 = _mm256_srai_epi32(out_01_5, DCT_CONST_BITS);
+      const __m256i out_17_6 = _mm256_srai_epi32(out_17_4, DCT_CONST_BITS);
+      const __m256i out_17_7 = _mm256_srai_epi32(out_17_5, DCT_CONST_BITS);
+      const __m256i out_09_6 = _mm256_srai_epi32(out_09_4, DCT_CONST_BITS);
+      const __m256i out_09_7 = _mm256_srai_epi32(out_09_5, DCT_CONST_BITS);
+      const __m256i out_25_6 =
_mm256_srai_epi32(out_25_4, DCT_CONST_BITS); + const __m256i out_25_7 = _mm256_srai_epi32(out_25_5, DCT_CONST_BITS); + const __m256i out_07_6 = _mm256_srai_epi32(out_07_4, DCT_CONST_BITS); + const __m256i out_07_7 = _mm256_srai_epi32(out_07_5, DCT_CONST_BITS); + const __m256i out_23_6 = _mm256_srai_epi32(out_23_4, DCT_CONST_BITS); + const __m256i out_23_7 = _mm256_srai_epi32(out_23_5, DCT_CONST_BITS); + const __m256i out_15_6 = _mm256_srai_epi32(out_15_4, DCT_CONST_BITS); + const __m256i out_15_7 = _mm256_srai_epi32(out_15_5, DCT_CONST_BITS); + const __m256i out_31_6 = _mm256_srai_epi32(out_31_4, DCT_CONST_BITS); + const __m256i out_31_7 = _mm256_srai_epi32(out_31_5, DCT_CONST_BITS); + // Combine + out[ 1] = _mm256_packs_epi32(out_01_6, out_01_7); + out[17] = _mm256_packs_epi32(out_17_6, out_17_7); + out[ 9] = _mm256_packs_epi32(out_09_6, out_09_7); + out[25] = _mm256_packs_epi32(out_25_6, out_25_7); + out[ 7] = _mm256_packs_epi32(out_07_6, out_07_7); + out[23] = _mm256_packs_epi32(out_23_6, out_23_7); + out[15] = _mm256_packs_epi32(out_15_6, out_15_7); + out[31] = _mm256_packs_epi32(out_31_6, out_31_7); + } + { + const __m256i out_05_0 = _mm256_unpacklo_epi16(step1[20], step1[27]); + const __m256i out_05_1 = _mm256_unpackhi_epi16(step1[20], step1[27]); + const __m256i out_21_0 = _mm256_unpacklo_epi16(step1[21], step1[26]); + const __m256i out_21_1 = _mm256_unpackhi_epi16(step1[21], step1[26]); + const __m256i out_13_0 = _mm256_unpacklo_epi16(step1[22], step1[25]); + const __m256i out_13_1 = _mm256_unpackhi_epi16(step1[22], step1[25]); + const __m256i out_29_0 = _mm256_unpacklo_epi16(step1[23], step1[24]); + const __m256i out_29_1 = _mm256_unpackhi_epi16(step1[23], step1[24]); + const __m256i out_05_2 = _mm256_madd_epi16(out_05_0, k__cospi_p27_p05); + const __m256i out_05_3 = _mm256_madd_epi16(out_05_1, k__cospi_p27_p05); + const __m256i out_21_2 = _mm256_madd_epi16(out_21_0, k__cospi_p11_p21); + const __m256i out_21_3 = _mm256_madd_epi16(out_21_1, k__cospi_p11_p21); + const __m256i out_13_2 = _mm256_madd_epi16(out_13_0, k__cospi_p19_p13); + const __m256i out_13_3 = _mm256_madd_epi16(out_13_1, k__cospi_p19_p13); + const __m256i out_29_2 = _mm256_madd_epi16(out_29_0, k__cospi_p03_p29); + const __m256i out_29_3 = _mm256_madd_epi16(out_29_1, k__cospi_p03_p29); + const __m256i out_03_2 = _mm256_madd_epi16(out_29_0, k__cospi_m29_p03); + const __m256i out_03_3 = _mm256_madd_epi16(out_29_1, k__cospi_m29_p03); + const __m256i out_19_2 = _mm256_madd_epi16(out_13_0, k__cospi_m13_p19); + const __m256i out_19_3 = _mm256_madd_epi16(out_13_1, k__cospi_m13_p19); + const __m256i out_11_2 = _mm256_madd_epi16(out_21_0, k__cospi_m21_p11); + const __m256i out_11_3 = _mm256_madd_epi16(out_21_1, k__cospi_m21_p11); + const __m256i out_27_2 = _mm256_madd_epi16(out_05_0, k__cospi_m05_p27); + const __m256i out_27_3 = _mm256_madd_epi16(out_05_1, k__cospi_m05_p27); + // dct_const_round_shift + const __m256i out_05_4 = _mm256_add_epi32(out_05_2, k__DCT_CONST_ROUNDING); + const __m256i out_05_5 = _mm256_add_epi32(out_05_3, k__DCT_CONST_ROUNDING); + const __m256i out_21_4 = _mm256_add_epi32(out_21_2, k__DCT_CONST_ROUNDING); + const __m256i out_21_5 = _mm256_add_epi32(out_21_3, k__DCT_CONST_ROUNDING); + const __m256i out_13_4 = _mm256_add_epi32(out_13_2, k__DCT_CONST_ROUNDING); + const __m256i out_13_5 = _mm256_add_epi32(out_13_3, k__DCT_CONST_ROUNDING); + const __m256i out_29_4 = _mm256_add_epi32(out_29_2, k__DCT_CONST_ROUNDING); + const __m256i out_29_5 = _mm256_add_epi32(out_29_3, k__DCT_CONST_ROUNDING); + const 
__m256i out_03_4 = _mm256_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_03_5 = _mm256_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_19_4 = _mm256_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_19_5 = _mm256_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_11_4 = _mm256_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_11_5 = _mm256_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_27_4 = _mm256_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
+      const __m256i out_27_5 = _mm256_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
+      const __m256i out_05_6 = _mm256_srai_epi32(out_05_4, DCT_CONST_BITS);
+      const __m256i out_05_7 = _mm256_srai_epi32(out_05_5, DCT_CONST_BITS);
+      const __m256i out_21_6 = _mm256_srai_epi32(out_21_4, DCT_CONST_BITS);
+      const __m256i out_21_7 = _mm256_srai_epi32(out_21_5, DCT_CONST_BITS);
+      const __m256i out_13_6 = _mm256_srai_epi32(out_13_4, DCT_CONST_BITS);
+      const __m256i out_13_7 = _mm256_srai_epi32(out_13_5, DCT_CONST_BITS);
+      const __m256i out_29_6 = _mm256_srai_epi32(out_29_4, DCT_CONST_BITS);
+      const __m256i out_29_7 = _mm256_srai_epi32(out_29_5, DCT_CONST_BITS);
+      const __m256i out_03_6 = _mm256_srai_epi32(out_03_4, DCT_CONST_BITS);
+      const __m256i out_03_7 = _mm256_srai_epi32(out_03_5, DCT_CONST_BITS);
+      const __m256i out_19_6 = _mm256_srai_epi32(out_19_4, DCT_CONST_BITS);
+      const __m256i out_19_7 = _mm256_srai_epi32(out_19_5, DCT_CONST_BITS);
+      const __m256i out_11_6 = _mm256_srai_epi32(out_11_4, DCT_CONST_BITS);
+      const __m256i out_11_7 = _mm256_srai_epi32(out_11_5, DCT_CONST_BITS);
+      const __m256i out_27_6 = _mm256_srai_epi32(out_27_4, DCT_CONST_BITS);
+      const __m256i out_27_7 = _mm256_srai_epi32(out_27_5, DCT_CONST_BITS);
+      // Combine
+      out[ 5] = _mm256_packs_epi32(out_05_6, out_05_7);
+      out[21] = _mm256_packs_epi32(out_21_6, out_21_7);
+      out[13] = _mm256_packs_epi32(out_13_6, out_13_7);
+      out[29] = _mm256_packs_epi32(out_29_6, out_29_7);
+      out[ 3] = _mm256_packs_epi32(out_03_6, out_03_7);
+      out[19] = _mm256_packs_epi32(out_19_6, out_19_7);
+      out[11] = _mm256_packs_epi32(out_11_6, out_11_7);
+      out[27] = _mm256_packs_epi32(out_27_6, out_27_7);
+    }
+#if FDCT32x32_HIGH_PRECISION
+    } else {
+      __m256i lstep1[64], lstep2[64], lstep3[64];
+      __m256i u[32], v[32], sign[16];
+      const __m256i K32One = _mm256_set_epi32(1, 1, 1, 1, 1, 1, 1, 1);
+      // start using 32-bit operations
+      // stage 3
+      {
+        // expanding to 32-bit length prior to addition operations
+        lstep2[ 0] = _mm256_unpacklo_epi16(step2[ 0], kZero);
+        lstep2[ 1] = _mm256_unpackhi_epi16(step2[ 0], kZero);
+        lstep2[ 2] = _mm256_unpacklo_epi16(step2[ 1], kZero);
+        lstep2[ 3] = _mm256_unpackhi_epi16(step2[ 1], kZero);
+        lstep2[ 4] = _mm256_unpacklo_epi16(step2[ 2], kZero);
+        lstep2[ 5] = _mm256_unpackhi_epi16(step2[ 2], kZero);
+        lstep2[ 6] = _mm256_unpacklo_epi16(step2[ 3], kZero);
+        lstep2[ 7] = _mm256_unpackhi_epi16(step2[ 3], kZero);
+        lstep2[ 8] = _mm256_unpacklo_epi16(step2[ 4], kZero);
+        lstep2[ 9] = _mm256_unpackhi_epi16(step2[ 4], kZero);
+        lstep2[10] = _mm256_unpacklo_epi16(step2[ 5], kZero);
+        lstep2[11] = _mm256_unpackhi_epi16(step2[ 5], kZero);
+        lstep2[12] = _mm256_unpacklo_epi16(step2[ 6], kZero);
+        lstep2[13] = _mm256_unpackhi_epi16(step2[ 6], kZero);
+        lstep2[14] = _mm256_unpacklo_epi16(step2[ 7], kZero);
+        lstep2[15] = _mm256_unpackhi_epi16(step2[ 7], kZero);
+        lstep2[ 0] = _mm256_madd_epi16(lstep2[ 0], kOne);
+        lstep2[ 1] = _mm256_madd_epi16(lstep2[ 1], kOne);
+        lstep2[ 2] = _mm256_madd_epi16(lstep2[ 2], kOne);
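+        // (Note on the widening pattern here: unpacking against kZero
+        // leaves (x, 0) 16-bit pairs in each 32-bit slot, and
+        // _mm256_madd_epi16 with kOne computes x*1 + 0*1 with x read as
+        // signed 16-bit; that is, a sign-extending widen, roughly
+        // (int32_t)(int16_t)x per lane.)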
+ lstep2[ 3] = _mm256_madd_epi16(lstep2[ 3], kOne); + lstep2[ 4] = _mm256_madd_epi16(lstep2[ 4], kOne); + lstep2[ 5] = _mm256_madd_epi16(lstep2[ 5], kOne); + lstep2[ 6] = _mm256_madd_epi16(lstep2[ 6], kOne); + lstep2[ 7] = _mm256_madd_epi16(lstep2[ 7], kOne); + lstep2[ 8] = _mm256_madd_epi16(lstep2[ 8], kOne); + lstep2[ 9] = _mm256_madd_epi16(lstep2[ 9], kOne); + lstep2[10] = _mm256_madd_epi16(lstep2[10], kOne); + lstep2[11] = _mm256_madd_epi16(lstep2[11], kOne); + lstep2[12] = _mm256_madd_epi16(lstep2[12], kOne); + lstep2[13] = _mm256_madd_epi16(lstep2[13], kOne); + lstep2[14] = _mm256_madd_epi16(lstep2[14], kOne); + lstep2[15] = _mm256_madd_epi16(lstep2[15], kOne); + + lstep3[ 0] = _mm256_add_epi32(lstep2[14], lstep2[ 0]); + lstep3[ 1] = _mm256_add_epi32(lstep2[15], lstep2[ 1]); + lstep3[ 2] = _mm256_add_epi32(lstep2[12], lstep2[ 2]); + lstep3[ 3] = _mm256_add_epi32(lstep2[13], lstep2[ 3]); + lstep3[ 4] = _mm256_add_epi32(lstep2[10], lstep2[ 4]); + lstep3[ 5] = _mm256_add_epi32(lstep2[11], lstep2[ 5]); + lstep3[ 6] = _mm256_add_epi32(lstep2[ 8], lstep2[ 6]); + lstep3[ 7] = _mm256_add_epi32(lstep2[ 9], lstep2[ 7]); + lstep3[ 8] = _mm256_sub_epi32(lstep2[ 6], lstep2[ 8]); + lstep3[ 9] = _mm256_sub_epi32(lstep2[ 7], lstep2[ 9]); + lstep3[10] = _mm256_sub_epi32(lstep2[ 4], lstep2[10]); + lstep3[11] = _mm256_sub_epi32(lstep2[ 5], lstep2[11]); + lstep3[12] = _mm256_sub_epi32(lstep2[ 2], lstep2[12]); + lstep3[13] = _mm256_sub_epi32(lstep2[ 3], lstep2[13]); + lstep3[14] = _mm256_sub_epi32(lstep2[ 0], lstep2[14]); + lstep3[15] = _mm256_sub_epi32(lstep2[ 1], lstep2[15]); + } + { + const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]); + const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]); + const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]); + const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]); + const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16); + const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16); + const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16); + const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16); + const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16); + const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16); + const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16); + const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16); + // dct_const_round_shift + const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); + const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); + const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); + const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); + const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); + const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); + const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); + const __m256i s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); + lstep3[20] = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS); + lstep3[21] = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS); + lstep3[22] = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS); + lstep3[23] = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS); + lstep3[24] = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS); + lstep3[25] = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS); + lstep3[26] = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS); + lstep3[27] = _mm256_srai_epi32(s3_13_5, 
DCT_CONST_BITS); + } + { + lstep2[40] = _mm256_unpacklo_epi16(step2[20], kZero); + lstep2[41] = _mm256_unpackhi_epi16(step2[20], kZero); + lstep2[42] = _mm256_unpacklo_epi16(step2[21], kZero); + lstep2[43] = _mm256_unpackhi_epi16(step2[21], kZero); + lstep2[44] = _mm256_unpacklo_epi16(step2[22], kZero); + lstep2[45] = _mm256_unpackhi_epi16(step2[22], kZero); + lstep2[46] = _mm256_unpacklo_epi16(step2[23], kZero); + lstep2[47] = _mm256_unpackhi_epi16(step2[23], kZero); + lstep2[48] = _mm256_unpacklo_epi16(step2[24], kZero); + lstep2[49] = _mm256_unpackhi_epi16(step2[24], kZero); + lstep2[50] = _mm256_unpacklo_epi16(step2[25], kZero); + lstep2[51] = _mm256_unpackhi_epi16(step2[25], kZero); + lstep2[52] = _mm256_unpacklo_epi16(step2[26], kZero); + lstep2[53] = _mm256_unpackhi_epi16(step2[26], kZero); + lstep2[54] = _mm256_unpacklo_epi16(step2[27], kZero); + lstep2[55] = _mm256_unpackhi_epi16(step2[27], kZero); + lstep2[40] = _mm256_madd_epi16(lstep2[40], kOne); + lstep2[41] = _mm256_madd_epi16(lstep2[41], kOne); + lstep2[42] = _mm256_madd_epi16(lstep2[42], kOne); + lstep2[43] = _mm256_madd_epi16(lstep2[43], kOne); + lstep2[44] = _mm256_madd_epi16(lstep2[44], kOne); + lstep2[45] = _mm256_madd_epi16(lstep2[45], kOne); + lstep2[46] = _mm256_madd_epi16(lstep2[46], kOne); + lstep2[47] = _mm256_madd_epi16(lstep2[47], kOne); + lstep2[48] = _mm256_madd_epi16(lstep2[48], kOne); + lstep2[49] = _mm256_madd_epi16(lstep2[49], kOne); + lstep2[50] = _mm256_madd_epi16(lstep2[50], kOne); + lstep2[51] = _mm256_madd_epi16(lstep2[51], kOne); + lstep2[52] = _mm256_madd_epi16(lstep2[52], kOne); + lstep2[53] = _mm256_madd_epi16(lstep2[53], kOne); + lstep2[54] = _mm256_madd_epi16(lstep2[54], kOne); + lstep2[55] = _mm256_madd_epi16(lstep2[55], kOne); + + lstep1[32] = _mm256_unpacklo_epi16(step1[16], kZero); + lstep1[33] = _mm256_unpackhi_epi16(step1[16], kZero); + lstep1[34] = _mm256_unpacklo_epi16(step1[17], kZero); + lstep1[35] = _mm256_unpackhi_epi16(step1[17], kZero); + lstep1[36] = _mm256_unpacklo_epi16(step1[18], kZero); + lstep1[37] = _mm256_unpackhi_epi16(step1[18], kZero); + lstep1[38] = _mm256_unpacklo_epi16(step1[19], kZero); + lstep1[39] = _mm256_unpackhi_epi16(step1[19], kZero); + lstep1[56] = _mm256_unpacklo_epi16(step1[28], kZero); + lstep1[57] = _mm256_unpackhi_epi16(step1[28], kZero); + lstep1[58] = _mm256_unpacklo_epi16(step1[29], kZero); + lstep1[59] = _mm256_unpackhi_epi16(step1[29], kZero); + lstep1[60] = _mm256_unpacklo_epi16(step1[30], kZero); + lstep1[61] = _mm256_unpackhi_epi16(step1[30], kZero); + lstep1[62] = _mm256_unpacklo_epi16(step1[31], kZero); + lstep1[63] = _mm256_unpackhi_epi16(step1[31], kZero); + lstep1[32] = _mm256_madd_epi16(lstep1[32], kOne); + lstep1[33] = _mm256_madd_epi16(lstep1[33], kOne); + lstep1[34] = _mm256_madd_epi16(lstep1[34], kOne); + lstep1[35] = _mm256_madd_epi16(lstep1[35], kOne); + lstep1[36] = _mm256_madd_epi16(lstep1[36], kOne); + lstep1[37] = _mm256_madd_epi16(lstep1[37], kOne); + lstep1[38] = _mm256_madd_epi16(lstep1[38], kOne); + lstep1[39] = _mm256_madd_epi16(lstep1[39], kOne); + lstep1[56] = _mm256_madd_epi16(lstep1[56], kOne); + lstep1[57] = _mm256_madd_epi16(lstep1[57], kOne); + lstep1[58] = _mm256_madd_epi16(lstep1[58], kOne); + lstep1[59] = _mm256_madd_epi16(lstep1[59], kOne); + lstep1[60] = _mm256_madd_epi16(lstep1[60], kOne); + lstep1[61] = _mm256_madd_epi16(lstep1[61], kOne); + lstep1[62] = _mm256_madd_epi16(lstep1[62], kOne); + lstep1[63] = _mm256_madd_epi16(lstep1[63], kOne); + + lstep3[32] = _mm256_add_epi32(lstep2[46], lstep1[32]); + lstep3[33] 
= _mm256_add_epi32(lstep2[47], lstep1[33]); + + lstep3[34] = _mm256_add_epi32(lstep2[44], lstep1[34]); + lstep3[35] = _mm256_add_epi32(lstep2[45], lstep1[35]); + lstep3[36] = _mm256_add_epi32(lstep2[42], lstep1[36]); + lstep3[37] = _mm256_add_epi32(lstep2[43], lstep1[37]); + lstep3[38] = _mm256_add_epi32(lstep2[40], lstep1[38]); + lstep3[39] = _mm256_add_epi32(lstep2[41], lstep1[39]); + lstep3[40] = _mm256_sub_epi32(lstep1[38], lstep2[40]); + lstep3[41] = _mm256_sub_epi32(lstep1[39], lstep2[41]); + lstep3[42] = _mm256_sub_epi32(lstep1[36], lstep2[42]); + lstep3[43] = _mm256_sub_epi32(lstep1[37], lstep2[43]); + lstep3[44] = _mm256_sub_epi32(lstep1[34], lstep2[44]); + lstep3[45] = _mm256_sub_epi32(lstep1[35], lstep2[45]); + lstep3[46] = _mm256_sub_epi32(lstep1[32], lstep2[46]); + lstep3[47] = _mm256_sub_epi32(lstep1[33], lstep2[47]); + lstep3[48] = _mm256_sub_epi32(lstep1[62], lstep2[48]); + lstep3[49] = _mm256_sub_epi32(lstep1[63], lstep2[49]); + lstep3[50] = _mm256_sub_epi32(lstep1[60], lstep2[50]); + lstep3[51] = _mm256_sub_epi32(lstep1[61], lstep2[51]); + lstep3[52] = _mm256_sub_epi32(lstep1[58], lstep2[52]); + lstep3[53] = _mm256_sub_epi32(lstep1[59], lstep2[53]); + lstep3[54] = _mm256_sub_epi32(lstep1[56], lstep2[54]); + lstep3[55] = _mm256_sub_epi32(lstep1[57], lstep2[55]); + lstep3[56] = _mm256_add_epi32(lstep2[54], lstep1[56]); + lstep3[57] = _mm256_add_epi32(lstep2[55], lstep1[57]); + lstep3[58] = _mm256_add_epi32(lstep2[52], lstep1[58]); + lstep3[59] = _mm256_add_epi32(lstep2[53], lstep1[59]); + lstep3[60] = _mm256_add_epi32(lstep2[50], lstep1[60]); + lstep3[61] = _mm256_add_epi32(lstep2[51], lstep1[61]); + lstep3[62] = _mm256_add_epi32(lstep2[48], lstep1[62]); + lstep3[63] = _mm256_add_epi32(lstep2[49], lstep1[63]); + } + + // stage 4 + { + // expanding to 32-bit length prior to the addition operations + lstep2[16] = _mm256_unpacklo_epi16(step2[ 8], kZero); + lstep2[17] = _mm256_unpackhi_epi16(step2[ 8], kZero); + lstep2[18] = _mm256_unpacklo_epi16(step2[ 9], kZero); + lstep2[19] = _mm256_unpackhi_epi16(step2[ 9], kZero); + lstep2[28] = _mm256_unpacklo_epi16(step2[14], kZero); + lstep2[29] = _mm256_unpackhi_epi16(step2[14], kZero); + lstep2[30] = _mm256_unpacklo_epi16(step2[15], kZero); + lstep2[31] = _mm256_unpackhi_epi16(step2[15], kZero); + lstep2[16] = _mm256_madd_epi16(lstep2[16], kOne); + lstep2[17] = _mm256_madd_epi16(lstep2[17], kOne); + lstep2[18] = _mm256_madd_epi16(lstep2[18], kOne); + lstep2[19] = _mm256_madd_epi16(lstep2[19], kOne); + lstep2[28] = _mm256_madd_epi16(lstep2[28], kOne); + lstep2[29] = _mm256_madd_epi16(lstep2[29], kOne); + lstep2[30] = _mm256_madd_epi16(lstep2[30], kOne); + lstep2[31] = _mm256_madd_epi16(lstep2[31], kOne); + + lstep1[ 0] = _mm256_add_epi32(lstep3[ 6], lstep3[ 0]); + lstep1[ 1] = _mm256_add_epi32(lstep3[ 7], lstep3[ 1]); + lstep1[ 2] = _mm256_add_epi32(lstep3[ 4], lstep3[ 2]); + lstep1[ 3] = _mm256_add_epi32(lstep3[ 5], lstep3[ 3]); + lstep1[ 4] = _mm256_sub_epi32(lstep3[ 2], lstep3[ 4]); + lstep1[ 5] = _mm256_sub_epi32(lstep3[ 3], lstep3[ 5]); + lstep1[ 6] = _mm256_sub_epi32(lstep3[ 0], lstep3[ 6]); + lstep1[ 7] = _mm256_sub_epi32(lstep3[ 1], lstep3[ 7]); + lstep1[16] = _mm256_add_epi32(lstep3[22], lstep2[16]); + lstep1[17] = _mm256_add_epi32(lstep3[23], lstep2[17]); + lstep1[18] = _mm256_add_epi32(lstep3[20], lstep2[18]); + lstep1[19] = _mm256_add_epi32(lstep3[21], lstep2[19]); + lstep1[20] = _mm256_sub_epi32(lstep2[18], lstep3[20]); + lstep1[21] = _mm256_sub_epi32(lstep2[19], lstep3[21]); + lstep1[22] = _mm256_sub_epi32(lstep2[16],
lstep3[22]); + lstep1[23] = _mm256_sub_epi32(lstep2[17], lstep3[23]); + lstep1[24] = _mm256_sub_epi32(lstep2[30], lstep3[24]); + lstep1[25] = _mm256_sub_epi32(lstep2[31], lstep3[25]); + lstep1[26] = _mm256_sub_epi32(lstep2[28], lstep3[26]); + lstep1[27] = _mm256_sub_epi32(lstep2[29], lstep3[27]); + lstep1[28] = _mm256_add_epi32(lstep3[26], lstep2[28]); + lstep1[29] = _mm256_add_epi32(lstep3[27], lstep2[29]); + lstep1[30] = _mm256_add_epi32(lstep3[24], lstep2[30]); + lstep1[31] = _mm256_add_epi32(lstep3[25], lstep2[31]); + } + { + // to be continued... + // + const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64); + const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64); + + u[0] = _mm256_unpacklo_epi32(lstep3[12], lstep3[10]); + u[1] = _mm256_unpackhi_epi32(lstep3[12], lstep3[10]); + u[2] = _mm256_unpacklo_epi32(lstep3[13], lstep3[11]); + u[3] = _mm256_unpackhi_epi32(lstep3[13], lstep3[11]); + + // TODO(jingning): manually inline k_madd_epi32_avx2_ to further hide + // instruction latency. + v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_m16); + v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_m16); + v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_m16); + v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_m16); + v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_p16); + v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_p16); + v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_p16); + v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_p16); + + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); + + v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + + lstep1[10] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); + lstep1[11] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); + lstep1[12] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); + lstep1[13] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); + } + { + const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64); + const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64); + const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64); + + u[ 0] = _mm256_unpacklo_epi32(lstep3[36], lstep3[58]); + u[ 1] = _mm256_unpackhi_epi32(lstep3[36], lstep3[58]); + u[ 2] = _mm256_unpacklo_epi32(lstep3[37], lstep3[59]); + u[ 3] = _mm256_unpackhi_epi32(lstep3[37], lstep3[59]); + u[ 4] = _mm256_unpacklo_epi32(lstep3[38], lstep3[56]); + u[ 5] = _mm256_unpackhi_epi32(lstep3[38], lstep3[56]); + u[ 6] = _mm256_unpacklo_epi32(lstep3[39], lstep3[57]); + u[ 7] = _mm256_unpackhi_epi32(lstep3[39], lstep3[57]); + u[ 8] = _mm256_unpacklo_epi32(lstep3[40], lstep3[54]); + u[ 9] = _mm256_unpackhi_epi32(lstep3[40], lstep3[54]); + u[10] = _mm256_unpacklo_epi32(lstep3[41], lstep3[55]); + u[11] = _mm256_unpackhi_epi32(lstep3[41], lstep3[55]); + u[12] = _mm256_unpacklo_epi32(lstep3[42], lstep3[52]); + u[13] = _mm256_unpackhi_epi32(lstep3[42], lstep3[52]); + u[14] = _mm256_unpacklo_epi32(lstep3[43], lstep3[53]); + u[15] = _mm256_unpackhi_epi32(lstep3[43], lstep3[53]); + + v[ 0] = k_madd_epi32_avx2(u[ 0], k32_m08_p24); + v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m08_p24); + v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m08_p24); + v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m08_p24); + v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m08_p24); + v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m08_p24); + v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m08_p24); + v[ 7] = 
k_madd_epi32_avx2(u[ 7], k32_m08_p24); + v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m24_m08); + v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m24_m08); + v[10] = k_madd_epi32_avx2(u[10], k32_m24_m08); + v[11] = k_madd_epi32_avx2(u[11], k32_m24_m08); + v[12] = k_madd_epi32_avx2(u[12], k32_m24_m08); + v[13] = k_madd_epi32_avx2(u[13], k32_m24_m08); + v[14] = k_madd_epi32_avx2(u[14], k32_m24_m08); + v[15] = k_madd_epi32_avx2(u[15], k32_m24_m08); + v[16] = k_madd_epi32_avx2(u[12], k32_m08_p24); + v[17] = k_madd_epi32_avx2(u[13], k32_m08_p24); + v[18] = k_madd_epi32_avx2(u[14], k32_m08_p24); + v[19] = k_madd_epi32_avx2(u[15], k32_m08_p24); + v[20] = k_madd_epi32_avx2(u[ 8], k32_m08_p24); + v[21] = k_madd_epi32_avx2(u[ 9], k32_m08_p24); + v[22] = k_madd_epi32_avx2(u[10], k32_m08_p24); + v[23] = k_madd_epi32_avx2(u[11], k32_m08_p24); + v[24] = k_madd_epi32_avx2(u[ 4], k32_p24_p08); + v[25] = k_madd_epi32_avx2(u[ 5], k32_p24_p08); + v[26] = k_madd_epi32_avx2(u[ 6], k32_p24_p08); + v[27] = k_madd_epi32_avx2(u[ 7], k32_p24_p08); + v[28] = k_madd_epi32_avx2(u[ 0], k32_p24_p08); + v[29] = k_madd_epi32_avx2(u[ 1], k32_p24_p08); + v[30] = k_madd_epi32_avx2(u[ 2], k32_p24_p08); + v[31] = k_madd_epi32_avx2(u[ 3], k32_p24_p08); + + u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); + u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]); + u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); + u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); + u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); + u[ 5] = k_packs_epi64_avx2(v[10], v[11]); + u[ 6] = k_packs_epi64_avx2(v[12], v[13]); + u[ 7] = k_packs_epi64_avx2(v[14], v[15]); + u[ 8] = k_packs_epi64_avx2(v[16], v[17]); + u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[10] = k_packs_epi64_avx2(v[20], v[21]); + u[11] = k_packs_epi64_avx2(v[22], v[23]); + u[12] = k_packs_epi64_avx2(v[24], v[25]); + u[13] = k_packs_epi64_avx2(v[26], v[27]); + u[14] = k_packs_epi64_avx2(v[28], v[29]); + u[15] = k_packs_epi64_avx2(v[30], v[31]); + + v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); + v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); + v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); + v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); + v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); + v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); + v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); + v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); + v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); + v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); + v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); + v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); + v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING); + v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); + v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); + + lstep1[36] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); + lstep1[37] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); + lstep1[38] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); + lstep1[39] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); + lstep1[40] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); + lstep1[41] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); + lstep1[42] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); + lstep1[43] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); + lstep1[52] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); + lstep1[53] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + lstep1[54] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); + lstep1[55] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); + lstep1[56] = 
_mm256_srai_epi32(v[12], DCT_CONST_BITS); + lstep1[57] = _mm256_srai_epi32(v[13], DCT_CONST_BITS); + lstep1[58] = _mm256_srai_epi32(v[14], DCT_CONST_BITS); + lstep1[59] = _mm256_srai_epi32(v[15], DCT_CONST_BITS); + } + // stage 5 + { + lstep2[ 8] = _mm256_add_epi32(lstep1[10], lstep3[ 8]); + lstep2[ 9] = _mm256_add_epi32(lstep1[11], lstep3[ 9]); + lstep2[10] = _mm256_sub_epi32(lstep3[ 8], lstep1[10]); + lstep2[11] = _mm256_sub_epi32(lstep3[ 9], lstep1[11]); + lstep2[12] = _mm256_sub_epi32(lstep3[14], lstep1[12]); + lstep2[13] = _mm256_sub_epi32(lstep3[15], lstep1[13]); + lstep2[14] = _mm256_add_epi32(lstep1[12], lstep3[14]); + lstep2[15] = _mm256_add_epi32(lstep1[13], lstep3[15]); + } + { + const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64); + const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64); + const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64); + const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64); + + u[0] = _mm256_unpacklo_epi32(lstep1[0], lstep1[2]); + u[1] = _mm256_unpackhi_epi32(lstep1[0], lstep1[2]); + u[2] = _mm256_unpacklo_epi32(lstep1[1], lstep1[3]); + u[3] = _mm256_unpackhi_epi32(lstep1[1], lstep1[3]); + u[4] = _mm256_unpacklo_epi32(lstep1[4], lstep1[6]); + u[5] = _mm256_unpackhi_epi32(lstep1[4], lstep1[6]); + u[6] = _mm256_unpacklo_epi32(lstep1[5], lstep1[7]); + u[7] = _mm256_unpackhi_epi32(lstep1[5], lstep1[7]); + + // TODO(jingning): manually inline k_madd_epi32_avx2_ to further hide + // instruction latency. + v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_p16); + v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_p16); + v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_p16); + v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_p16); + v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_m16); + v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_m16); + v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_m16); + v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_m16); + v[ 8] = k_madd_epi32_avx2(u[4], k32_p24_p08); + v[ 9] = k_madd_epi32_avx2(u[5], k32_p24_p08); + v[10] = k_madd_epi32_avx2(u[6], k32_p24_p08); + v[11] = k_madd_epi32_avx2(u[7], k32_p24_p08); + v[12] = k_madd_epi32_avx2(u[4], k32_m08_p24); + v[13] = k_madd_epi32_avx2(u[5], k32_m08_p24); + v[14] = k_madd_epi32_avx2(u[6], k32_m08_p24); + v[15] = k_madd_epi32_avx2(u[7], k32_m08_p24); + + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); + u[4] = k_packs_epi64_avx2(v[8], v[9]); + u[5] = k_packs_epi64_avx2(v[10], v[11]); + u[6] = k_packs_epi64_avx2(v[12], v[13]); + u[7] = k_packs_epi64_avx2(v[14], v[15]); + + v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING); + + u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS); + + sign[0] = _mm256_cmpgt_epi32(kZero,u[0]); + sign[1] = 
_mm256_cmpgt_epi32(kZero,u[1]); + sign[2] = _mm256_cmpgt_epi32(kZero,u[2]); + sign[3] = _mm256_cmpgt_epi32(kZero,u[3]); + sign[4] = _mm256_cmpgt_epi32(kZero,u[4]); + sign[5] = _mm256_cmpgt_epi32(kZero,u[5]); + sign[6] = _mm256_cmpgt_epi32(kZero,u[6]); + sign[7] = _mm256_cmpgt_epi32(kZero,u[7]); + + u[0] = _mm256_sub_epi32(u[0], sign[0]); + u[1] = _mm256_sub_epi32(u[1], sign[1]); + u[2] = _mm256_sub_epi32(u[2], sign[2]); + u[3] = _mm256_sub_epi32(u[3], sign[3]); + u[4] = _mm256_sub_epi32(u[4], sign[4]); + u[5] = _mm256_sub_epi32(u[5], sign[5]); + u[6] = _mm256_sub_epi32(u[6], sign[6]); + u[7] = _mm256_sub_epi32(u[7], sign[7]); + + u[0] = _mm256_add_epi32(u[0], K32One); + u[1] = _mm256_add_epi32(u[1], K32One); + u[2] = _mm256_add_epi32(u[2], K32One); + u[3] = _mm256_add_epi32(u[3], K32One); + u[4] = _mm256_add_epi32(u[4], K32One); + u[5] = _mm256_add_epi32(u[5], K32One); + u[6] = _mm256_add_epi32(u[6], K32One); + u[7] = _mm256_add_epi32(u[7], K32One); + + u[0] = _mm256_srai_epi32(u[0], 2); + u[1] = _mm256_srai_epi32(u[1], 2); + u[2] = _mm256_srai_epi32(u[2], 2); + u[3] = _mm256_srai_epi32(u[3], 2); + u[4] = _mm256_srai_epi32(u[4], 2); + u[5] = _mm256_srai_epi32(u[5], 2); + u[6] = _mm256_srai_epi32(u[6], 2); + u[7] = _mm256_srai_epi32(u[7], 2); + + // Combine + out[ 0] = _mm256_packs_epi32(u[0], u[1]); + out[16] = _mm256_packs_epi32(u[2], u[3]); + out[ 8] = _mm256_packs_epi32(u[4], u[5]); + out[24] = _mm256_packs_epi32(u[6], u[7]); + } + { + const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64); + const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64); + const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64); + + u[0] = _mm256_unpacklo_epi32(lstep1[18], lstep1[28]); + u[1] = _mm256_unpackhi_epi32(lstep1[18], lstep1[28]); + u[2] = _mm256_unpacklo_epi32(lstep1[19], lstep1[29]); + u[3] = _mm256_unpackhi_epi32(lstep1[19], lstep1[29]); + u[4] = _mm256_unpacklo_epi32(lstep1[20], lstep1[26]); + u[5] = _mm256_unpackhi_epi32(lstep1[20], lstep1[26]); + u[6] = _mm256_unpacklo_epi32(lstep1[21], lstep1[27]); + u[7] = _mm256_unpackhi_epi32(lstep1[21], lstep1[27]); + + v[0] = k_madd_epi32_avx2(u[0], k32_m08_p24); + v[1] = k_madd_epi32_avx2(u[1], k32_m08_p24); + v[2] = k_madd_epi32_avx2(u[2], k32_m08_p24); + v[3] = k_madd_epi32_avx2(u[3], k32_m08_p24); + v[4] = k_madd_epi32_avx2(u[4], k32_m24_m08); + v[5] = k_madd_epi32_avx2(u[5], k32_m24_m08); + v[6] = k_madd_epi32_avx2(u[6], k32_m24_m08); + v[7] = k_madd_epi32_avx2(u[7], k32_m24_m08); + v[ 8] = k_madd_epi32_avx2(u[4], k32_m08_p24); + v[ 9] = k_madd_epi32_avx2(u[5], k32_m08_p24); + v[10] = k_madd_epi32_avx2(u[6], k32_m08_p24); + v[11] = k_madd_epi32_avx2(u[7], k32_m08_p24); + v[12] = k_madd_epi32_avx2(u[0], k32_p24_p08); + v[13] = k_madd_epi32_avx2(u[1], k32_p24_p08); + v[14] = k_madd_epi32_avx2(u[2], k32_p24_p08); + v[15] = k_madd_epi32_avx2(u[3], k32_p24_p08); + + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); + u[4] = k_packs_epi64_avx2(v[8], v[9]); + u[5] = k_packs_epi64_avx2(v[10], v[11]); + u[6] = k_packs_epi64_avx2(v[12], v[13]); + u[7] = k_packs_epi64_avx2(v[14], v[15]); + + u[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + u[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + u[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + u[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + u[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING); + u[5] = _mm256_add_epi32(u[5], 
k__DCT_CONST_ROUNDING); + u[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING); + u[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING); + + lstep2[18] = _mm256_srai_epi32(u[0], DCT_CONST_BITS); + lstep2[19] = _mm256_srai_epi32(u[1], DCT_CONST_BITS); + lstep2[20] = _mm256_srai_epi32(u[2], DCT_CONST_BITS); + lstep2[21] = _mm256_srai_epi32(u[3], DCT_CONST_BITS); + lstep2[26] = _mm256_srai_epi32(u[4], DCT_CONST_BITS); + lstep2[27] = _mm256_srai_epi32(u[5], DCT_CONST_BITS); + lstep2[28] = _mm256_srai_epi32(u[6], DCT_CONST_BITS); + lstep2[29] = _mm256_srai_epi32(u[7], DCT_CONST_BITS); + } + { + lstep2[32] = _mm256_add_epi32(lstep1[38], lstep3[32]); + lstep2[33] = _mm256_add_epi32(lstep1[39], lstep3[33]); + lstep2[34] = _mm256_add_epi32(lstep1[36], lstep3[34]); + lstep2[35] = _mm256_add_epi32(lstep1[37], lstep3[35]); + lstep2[36] = _mm256_sub_epi32(lstep3[34], lstep1[36]); + lstep2[37] = _mm256_sub_epi32(lstep3[35], lstep1[37]); + lstep2[38] = _mm256_sub_epi32(lstep3[32], lstep1[38]); + lstep2[39] = _mm256_sub_epi32(lstep3[33], lstep1[39]); + lstep2[40] = _mm256_sub_epi32(lstep3[46], lstep1[40]); + lstep2[41] = _mm256_sub_epi32(lstep3[47], lstep1[41]); + lstep2[42] = _mm256_sub_epi32(lstep3[44], lstep1[42]); + lstep2[43] = _mm256_sub_epi32(lstep3[45], lstep1[43]); + lstep2[44] = _mm256_add_epi32(lstep1[42], lstep3[44]); + lstep2[45] = _mm256_add_epi32(lstep1[43], lstep3[45]); + lstep2[46] = _mm256_add_epi32(lstep1[40], lstep3[46]); + lstep2[47] = _mm256_add_epi32(lstep1[41], lstep3[47]); + lstep2[48] = _mm256_add_epi32(lstep1[54], lstep3[48]); + lstep2[49] = _mm256_add_epi32(lstep1[55], lstep3[49]); + lstep2[50] = _mm256_add_epi32(lstep1[52], lstep3[50]); + lstep2[51] = _mm256_add_epi32(lstep1[53], lstep3[51]); + lstep2[52] = _mm256_sub_epi32(lstep3[50], lstep1[52]); + lstep2[53] = _mm256_sub_epi32(lstep3[51], lstep1[53]); + lstep2[54] = _mm256_sub_epi32(lstep3[48], lstep1[54]); + lstep2[55] = _mm256_sub_epi32(lstep3[49], lstep1[55]); + lstep2[56] = _mm256_sub_epi32(lstep3[62], lstep1[56]); + lstep2[57] = _mm256_sub_epi32(lstep3[63], lstep1[57]); + lstep2[58] = _mm256_sub_epi32(lstep3[60], lstep1[58]); + lstep2[59] = _mm256_sub_epi32(lstep3[61], lstep1[59]); + lstep2[60] = _mm256_add_epi32(lstep1[58], lstep3[60]); + lstep2[61] = _mm256_add_epi32(lstep1[59], lstep3[61]); + lstep2[62] = _mm256_add_epi32(lstep1[56], lstep3[62]); + lstep2[63] = _mm256_add_epi32(lstep1[57], lstep3[63]); + } + // stage 6 + { + const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64); + const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64); + const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64); + const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64); + + u[0] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]); + u[1] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]); + u[2] = _mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]); + u[3] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]); + u[4] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]); + u[5] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]); + u[6] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]); + u[7] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]); + u[8] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]); + u[9] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]); + u[10] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]); + u[11] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]); + u[12] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]); + u[13] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]); + u[14] = 
_mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]); + u[15] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]); + + v[0] = k_madd_epi32_avx2(u[0], k32_p28_p04); + v[1] = k_madd_epi32_avx2(u[1], k32_p28_p04); + v[2] = k_madd_epi32_avx2(u[2], k32_p28_p04); + v[3] = k_madd_epi32_avx2(u[3], k32_p28_p04); + v[4] = k_madd_epi32_avx2(u[4], k32_p12_p20); + v[5] = k_madd_epi32_avx2(u[5], k32_p12_p20); + v[6] = k_madd_epi32_avx2(u[6], k32_p12_p20); + v[7] = k_madd_epi32_avx2(u[7], k32_p12_p20); + v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12); + v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12); + v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12); + v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12); + v[12] = k_madd_epi32_avx2(u[12], k32_m04_p28); + v[13] = k_madd_epi32_avx2(u[13], k32_m04_p28); + v[14] = k_madd_epi32_avx2(u[14], k32_m04_p28); + v[15] = k_madd_epi32_avx2(u[15], k32_m04_p28); + + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); + u[4] = k_packs_epi64_avx2(v[8], v[9]); + u[5] = k_packs_epi64_avx2(v[10], v[11]); + u[6] = k_packs_epi64_avx2(v[12], v[13]); + u[7] = k_packs_epi64_avx2(v[14], v[15]); + + v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING); + + u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS); + + sign[0] = _mm256_cmpgt_epi32(kZero,u[0]); + sign[1] = _mm256_cmpgt_epi32(kZero,u[1]); + sign[2] = _mm256_cmpgt_epi32(kZero,u[2]); + sign[3] = _mm256_cmpgt_epi32(kZero,u[3]); + sign[4] = _mm256_cmpgt_epi32(kZero,u[4]); + sign[5] = _mm256_cmpgt_epi32(kZero,u[5]); + sign[6] = _mm256_cmpgt_epi32(kZero,u[6]); + sign[7] = _mm256_cmpgt_epi32(kZero,u[7]); + + u[0] = _mm256_sub_epi32(u[0], sign[0]); + u[1] = _mm256_sub_epi32(u[1], sign[1]); + u[2] = _mm256_sub_epi32(u[2], sign[2]); + u[3] = _mm256_sub_epi32(u[3], sign[3]); + u[4] = _mm256_sub_epi32(u[4], sign[4]); + u[5] = _mm256_sub_epi32(u[5], sign[5]); + u[6] = _mm256_sub_epi32(u[6], sign[6]); + u[7] = _mm256_sub_epi32(u[7], sign[7]); + + u[0] = _mm256_add_epi32(u[0], K32One); + u[1] = _mm256_add_epi32(u[1], K32One); + u[2] = _mm256_add_epi32(u[2], K32One); + u[3] = _mm256_add_epi32(u[3], K32One); + u[4] = _mm256_add_epi32(u[4], K32One); + u[5] = _mm256_add_epi32(u[5], K32One); + u[6] = _mm256_add_epi32(u[6], K32One); + u[7] = _mm256_add_epi32(u[7], K32One); + + u[0] = _mm256_srai_epi32(u[0], 2); + u[1] = _mm256_srai_epi32(u[1], 2); + u[2] = _mm256_srai_epi32(u[2], 2); + u[3] = _mm256_srai_epi32(u[3], 2); + u[4] = _mm256_srai_epi32(u[4], 2); + u[5] = _mm256_srai_epi32(u[5], 2); + u[6] = _mm256_srai_epi32(u[6], 2); + u[7] = _mm256_srai_epi32(u[7], 2); + + out[ 4] = _mm256_packs_epi32(u[0], u[1]); + out[20] = _mm256_packs_epi32(u[2], u[3]); + out[12] = _mm256_packs_epi32(u[4], u[5]); + out[28] = _mm256_packs_epi32(u[6], 
u[7]); + } + { + lstep3[16] = _mm256_add_epi32(lstep2[18], lstep1[16]); + lstep3[17] = _mm256_add_epi32(lstep2[19], lstep1[17]); + lstep3[18] = _mm256_sub_epi32(lstep1[16], lstep2[18]); + lstep3[19] = _mm256_sub_epi32(lstep1[17], lstep2[19]); + lstep3[20] = _mm256_sub_epi32(lstep1[22], lstep2[20]); + lstep3[21] = _mm256_sub_epi32(lstep1[23], lstep2[21]); + lstep3[22] = _mm256_add_epi32(lstep2[20], lstep1[22]); + lstep3[23] = _mm256_add_epi32(lstep2[21], lstep1[23]); + lstep3[24] = _mm256_add_epi32(lstep2[26], lstep1[24]); + lstep3[25] = _mm256_add_epi32(lstep2[27], lstep1[25]); + lstep3[26] = _mm256_sub_epi32(lstep1[24], lstep2[26]); + lstep3[27] = _mm256_sub_epi32(lstep1[25], lstep2[27]); + lstep3[28] = _mm256_sub_epi32(lstep1[30], lstep2[28]); + lstep3[29] = _mm256_sub_epi32(lstep1[31], lstep2[29]); + lstep3[30] = _mm256_add_epi32(lstep2[28], lstep1[30]); + lstep3[31] = _mm256_add_epi32(lstep2[29], lstep1[31]); + } + { + const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64); + const __m256i k32_m28_m04 = pair256_set_epi32(-cospi_28_64, -cospi_4_64); + const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64); + const __m256i k32_m12_m20 = pair256_set_epi32(-cospi_12_64, + -cospi_20_64); + const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64); + const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64); + + u[ 0] = _mm256_unpacklo_epi32(lstep2[34], lstep2[60]); + u[ 1] = _mm256_unpackhi_epi32(lstep2[34], lstep2[60]); + u[ 2] = _mm256_unpacklo_epi32(lstep2[35], lstep2[61]); + u[ 3] = _mm256_unpackhi_epi32(lstep2[35], lstep2[61]); + u[ 4] = _mm256_unpacklo_epi32(lstep2[36], lstep2[58]); + u[ 5] = _mm256_unpackhi_epi32(lstep2[36], lstep2[58]); + u[ 6] = _mm256_unpacklo_epi32(lstep2[37], lstep2[59]); + u[ 7] = _mm256_unpackhi_epi32(lstep2[37], lstep2[59]); + u[ 8] = _mm256_unpacklo_epi32(lstep2[42], lstep2[52]); + u[ 9] = _mm256_unpackhi_epi32(lstep2[42], lstep2[52]); + u[10] = _mm256_unpacklo_epi32(lstep2[43], lstep2[53]); + u[11] = _mm256_unpackhi_epi32(lstep2[43], lstep2[53]); + u[12] = _mm256_unpacklo_epi32(lstep2[44], lstep2[50]); + u[13] = _mm256_unpackhi_epi32(lstep2[44], lstep2[50]); + u[14] = _mm256_unpacklo_epi32(lstep2[45], lstep2[51]); + u[15] = _mm256_unpackhi_epi32(lstep2[45], lstep2[51]); + + v[ 0] = k_madd_epi32_avx2(u[ 0], k32_m04_p28); + v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m04_p28); + v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m04_p28); + v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m04_p28); + v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m28_m04); + v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m28_m04); + v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m28_m04); + v[ 7] = k_madd_epi32_avx2(u[ 7], k32_m28_m04); + v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12); + v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12); + v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12); + v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12); + v[12] = k_madd_epi32_avx2(u[12], k32_m12_m20); + v[13] = k_madd_epi32_avx2(u[13], k32_m12_m20); + v[14] = k_madd_epi32_avx2(u[14], k32_m12_m20); + v[15] = k_madd_epi32_avx2(u[15], k32_m12_m20); + v[16] = k_madd_epi32_avx2(u[12], k32_m20_p12); + v[17] = k_madd_epi32_avx2(u[13], k32_m20_p12); + v[18] = k_madd_epi32_avx2(u[14], k32_m20_p12); + v[19] = k_madd_epi32_avx2(u[15], k32_m20_p12); + v[20] = k_madd_epi32_avx2(u[ 8], k32_p12_p20); + v[21] = k_madd_epi32_avx2(u[ 9], k32_p12_p20); + v[22] = k_madd_epi32_avx2(u[10], k32_p12_p20); + v[23] = k_madd_epi32_avx2(u[11], k32_p12_p20); + v[24] = k_madd_epi32_avx2(u[ 4], k32_m04_p28); + v[25] = 
k_madd_epi32_avx2(u[ 5], k32_m04_p28); + v[26] = k_madd_epi32_avx2(u[ 6], k32_m04_p28); + v[27] = k_madd_epi32_avx2(u[ 7], k32_m04_p28); + v[28] = k_madd_epi32_avx2(u[ 0], k32_p28_p04); + v[29] = k_madd_epi32_avx2(u[ 1], k32_p28_p04); + v[30] = k_madd_epi32_avx2(u[ 2], k32_p28_p04); + v[31] = k_madd_epi32_avx2(u[ 3], k32_p28_p04); + + u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); + u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]); + u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); + u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); + u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); + u[ 5] = k_packs_epi64_avx2(v[10], v[11]); + u[ 6] = k_packs_epi64_avx2(v[12], v[13]); + u[ 7] = k_packs_epi64_avx2(v[14], v[15]); + u[ 8] = k_packs_epi64_avx2(v[16], v[17]); + u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[10] = k_packs_epi64_avx2(v[20], v[21]); + u[11] = k_packs_epi64_avx2(v[22], v[23]); + u[12] = k_packs_epi64_avx2(v[24], v[25]); + u[13] = k_packs_epi64_avx2(v[26], v[27]); + u[14] = k_packs_epi64_avx2(v[28], v[29]); + u[15] = k_packs_epi64_avx2(v[30], v[31]); + + v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); + v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); + v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); + v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); + v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); + v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); + v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); + v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); + v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); + v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); + v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); + v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); + v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING); + v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); + v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); + + lstep3[34] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); + lstep3[35] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); + lstep3[36] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); + lstep3[37] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); + lstep3[42] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); + lstep3[43] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); + lstep3[44] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); + lstep3[45] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); + lstep3[50] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); + lstep3[51] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + lstep3[52] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); + lstep3[53] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); + lstep3[58] = _mm256_srai_epi32(v[12], DCT_CONST_BITS); + lstep3[59] = _mm256_srai_epi32(v[13], DCT_CONST_BITS); + lstep3[60] = _mm256_srai_epi32(v[14], DCT_CONST_BITS); + lstep3[61] = _mm256_srai_epi32(v[15], DCT_CONST_BITS); + } + // stage 7 + { + const __m256i k32_p30_p02 = pair256_set_epi32(cospi_30_64, cospi_2_64); + const __m256i k32_p14_p18 = pair256_set_epi32(cospi_14_64, cospi_18_64); + const __m256i k32_p22_p10 = pair256_set_epi32(cospi_22_64, cospi_10_64); + const __m256i k32_p06_p26 = pair256_set_epi32(cospi_6_64, cospi_26_64); + const __m256i k32_m26_p06 = pair256_set_epi32(-cospi_26_64, cospi_6_64); + const __m256i k32_m10_p22 = pair256_set_epi32(-cospi_10_64, cospi_22_64); + const __m256i k32_m18_p14 = pair256_set_epi32(-cospi_18_64, cospi_14_64); + const __m256i k32_m02_p30 = pair256_set_epi32(-cospi_2_64, cospi_30_64); + + u[ 0] = _mm256_unpacklo_epi32(lstep3[16], 
lstep3[30]); + u[ 1] = _mm256_unpackhi_epi32(lstep3[16], lstep3[30]); + u[ 2] = _mm256_unpacklo_epi32(lstep3[17], lstep3[31]); + u[ 3] = _mm256_unpackhi_epi32(lstep3[17], lstep3[31]); + u[ 4] = _mm256_unpacklo_epi32(lstep3[18], lstep3[28]); + u[ 5] = _mm256_unpackhi_epi32(lstep3[18], lstep3[28]); + u[ 6] = _mm256_unpacklo_epi32(lstep3[19], lstep3[29]); + u[ 7] = _mm256_unpackhi_epi32(lstep3[19], lstep3[29]); + u[ 8] = _mm256_unpacklo_epi32(lstep3[20], lstep3[26]); + u[ 9] = _mm256_unpackhi_epi32(lstep3[20], lstep3[26]); + u[10] = _mm256_unpacklo_epi32(lstep3[21], lstep3[27]); + u[11] = _mm256_unpackhi_epi32(lstep3[21], lstep3[27]); + u[12] = _mm256_unpacklo_epi32(lstep3[22], lstep3[24]); + u[13] = _mm256_unpackhi_epi32(lstep3[22], lstep3[24]); + u[14] = _mm256_unpacklo_epi32(lstep3[23], lstep3[25]); + u[15] = _mm256_unpackhi_epi32(lstep3[23], lstep3[25]); + + v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p30_p02); + v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p30_p02); + v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p30_p02); + v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p30_p02); + v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p14_p18); + v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p14_p18); + v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p14_p18); + v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p14_p18); + v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p22_p10); + v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p22_p10); + v[10] = k_madd_epi32_avx2(u[10], k32_p22_p10); + v[11] = k_madd_epi32_avx2(u[11], k32_p22_p10); + v[12] = k_madd_epi32_avx2(u[12], k32_p06_p26); + v[13] = k_madd_epi32_avx2(u[13], k32_p06_p26); + v[14] = k_madd_epi32_avx2(u[14], k32_p06_p26); + v[15] = k_madd_epi32_avx2(u[15], k32_p06_p26); + v[16] = k_madd_epi32_avx2(u[12], k32_m26_p06); + v[17] = k_madd_epi32_avx2(u[13], k32_m26_p06); + v[18] = k_madd_epi32_avx2(u[14], k32_m26_p06); + v[19] = k_madd_epi32_avx2(u[15], k32_m26_p06); + v[20] = k_madd_epi32_avx2(u[ 8], k32_m10_p22); + v[21] = k_madd_epi32_avx2(u[ 9], k32_m10_p22); + v[22] = k_madd_epi32_avx2(u[10], k32_m10_p22); + v[23] = k_madd_epi32_avx2(u[11], k32_m10_p22); + v[24] = k_madd_epi32_avx2(u[ 4], k32_m18_p14); + v[25] = k_madd_epi32_avx2(u[ 5], k32_m18_p14); + v[26] = k_madd_epi32_avx2(u[ 6], k32_m18_p14); + v[27] = k_madd_epi32_avx2(u[ 7], k32_m18_p14); + v[28] = k_madd_epi32_avx2(u[ 0], k32_m02_p30); + v[29] = k_madd_epi32_avx2(u[ 1], k32_m02_p30); + v[30] = k_madd_epi32_avx2(u[ 2], k32_m02_p30); + v[31] = k_madd_epi32_avx2(u[ 3], k32_m02_p30); + + u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); + u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]); + u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); + u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); + u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); + u[ 5] = k_packs_epi64_avx2(v[10], v[11]); + u[ 6] = k_packs_epi64_avx2(v[12], v[13]); + u[ 7] = k_packs_epi64_avx2(v[14], v[15]); + u[ 8] = k_packs_epi64_avx2(v[16], v[17]); + u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[10] = k_packs_epi64_avx2(v[20], v[21]); + u[11] = k_packs_epi64_avx2(v[22], v[23]); + u[12] = k_packs_epi64_avx2(v[24], v[25]); + u[13] = k_packs_epi64_avx2(v[26], v[27]); + u[14] = k_packs_epi64_avx2(v[28], v[29]); + u[15] = k_packs_epi64_avx2(v[30], v[31]); + + v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); + v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); + v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); + v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); + v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); + v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); + v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); + 
v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); + v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); + v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); + v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); + v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); + v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING); + v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); + v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); + + u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); + u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); + u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); + u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); + u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); + u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); + u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); + u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); + u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); + u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); + u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); + u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS); + u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS); + u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS); + u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS); + + v[ 0] = _mm256_cmpgt_epi32(kZero,u[ 0]); + v[ 1] = _mm256_cmpgt_epi32(kZero,u[ 1]); + v[ 2] = _mm256_cmpgt_epi32(kZero,u[ 2]); + v[ 3] = _mm256_cmpgt_epi32(kZero,u[ 3]); + v[ 4] = _mm256_cmpgt_epi32(kZero,u[ 4]); + v[ 5] = _mm256_cmpgt_epi32(kZero,u[ 5]); + v[ 6] = _mm256_cmpgt_epi32(kZero,u[ 6]); + v[ 7] = _mm256_cmpgt_epi32(kZero,u[ 7]); + v[ 8] = _mm256_cmpgt_epi32(kZero,u[ 8]); + v[ 9] = _mm256_cmpgt_epi32(kZero,u[ 9]); + v[10] = _mm256_cmpgt_epi32(kZero,u[10]); + v[11] = _mm256_cmpgt_epi32(kZero,u[11]); + v[12] = _mm256_cmpgt_epi32(kZero,u[12]); + v[13] = _mm256_cmpgt_epi32(kZero,u[13]); + v[14] = _mm256_cmpgt_epi32(kZero,u[14]); + v[15] = _mm256_cmpgt_epi32(kZero,u[15]); + + u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]); + u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]); + u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]); + u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]); + u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]); + u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]); + u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]); + u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]); + u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]); + u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]); + u[10] = _mm256_sub_epi32(u[10], v[10]); + u[11] = _mm256_sub_epi32(u[11], v[11]); + u[12] = _mm256_sub_epi32(u[12], v[12]); + u[13] = _mm256_sub_epi32(u[13], v[13]); + u[14] = _mm256_sub_epi32(u[14], v[14]); + u[15] = _mm256_sub_epi32(u[15], v[15]); + + v[ 0] = _mm256_add_epi32(u[ 0], K32One); + v[ 1] = _mm256_add_epi32(u[ 1], K32One); + v[ 2] = _mm256_add_epi32(u[ 2], K32One); + v[ 3] = _mm256_add_epi32(u[ 3], K32One); + v[ 4] = _mm256_add_epi32(u[ 4], K32One); + v[ 5] = _mm256_add_epi32(u[ 5], K32One); + v[ 6] = _mm256_add_epi32(u[ 6], K32One); + v[ 7] = _mm256_add_epi32(u[ 7], K32One); + v[ 8] = _mm256_add_epi32(u[ 8], K32One); + v[ 9] = _mm256_add_epi32(u[ 9], K32One); + v[10] = _mm256_add_epi32(u[10], K32One); + v[11] = _mm256_add_epi32(u[11], K32One); + v[12] = _mm256_add_epi32(u[12], K32One); + v[13] = _mm256_add_epi32(u[13], K32One); + v[14] = _mm256_add_epi32(u[14], K32One); + v[15] = _mm256_add_epi32(u[15], K32One); + + u[ 0] = _mm256_srai_epi32(v[ 0], 2); + u[ 1] = _mm256_srai_epi32(v[ 1], 2); + u[ 2] = _mm256_srai_epi32(v[ 2], 2); + u[ 3] = 
_mm256_srai_epi32(v[ 3], 2); + u[ 4] = _mm256_srai_epi32(v[ 4], 2); + u[ 5] = _mm256_srai_epi32(v[ 5], 2); + u[ 6] = _mm256_srai_epi32(v[ 6], 2); + u[ 7] = _mm256_srai_epi32(v[ 7], 2); + u[ 8] = _mm256_srai_epi32(v[ 8], 2); + u[ 9] = _mm256_srai_epi32(v[ 9], 2); + u[10] = _mm256_srai_epi32(v[10], 2); + u[11] = _mm256_srai_epi32(v[11], 2); + u[12] = _mm256_srai_epi32(v[12], 2); + u[13] = _mm256_srai_epi32(v[13], 2); + u[14] = _mm256_srai_epi32(v[14], 2); + u[15] = _mm256_srai_epi32(v[15], 2); + + out[ 2] = _mm256_packs_epi32(u[0], u[1]); + out[18] = _mm256_packs_epi32(u[2], u[3]); + out[10] = _mm256_packs_epi32(u[4], u[5]); + out[26] = _mm256_packs_epi32(u[6], u[7]); + out[ 6] = _mm256_packs_epi32(u[8], u[9]); + out[22] = _mm256_packs_epi32(u[10], u[11]); + out[14] = _mm256_packs_epi32(u[12], u[13]); + out[30] = _mm256_packs_epi32(u[14], u[15]); + } + { + lstep1[32] = _mm256_add_epi32(lstep3[34], lstep2[32]); + lstep1[33] = _mm256_add_epi32(lstep3[35], lstep2[33]); + lstep1[34] = _mm256_sub_epi32(lstep2[32], lstep3[34]); + lstep1[35] = _mm256_sub_epi32(lstep2[33], lstep3[35]); + lstep1[36] = _mm256_sub_epi32(lstep2[38], lstep3[36]); + lstep1[37] = _mm256_sub_epi32(lstep2[39], lstep3[37]); + lstep1[38] = _mm256_add_epi32(lstep3[36], lstep2[38]); + lstep1[39] = _mm256_add_epi32(lstep3[37], lstep2[39]); + lstep1[40] = _mm256_add_epi32(lstep3[42], lstep2[40]); + lstep1[41] = _mm256_add_epi32(lstep3[43], lstep2[41]); + lstep1[42] = _mm256_sub_epi32(lstep2[40], lstep3[42]); + lstep1[43] = _mm256_sub_epi32(lstep2[41], lstep3[43]); + lstep1[44] = _mm256_sub_epi32(lstep2[46], lstep3[44]); + lstep1[45] = _mm256_sub_epi32(lstep2[47], lstep3[45]); + lstep1[46] = _mm256_add_epi32(lstep3[44], lstep2[46]); + lstep1[47] = _mm256_add_epi32(lstep3[45], lstep2[47]); + lstep1[48] = _mm256_add_epi32(lstep3[50], lstep2[48]); + lstep1[49] = _mm256_add_epi32(lstep3[51], lstep2[49]); + lstep1[50] = _mm256_sub_epi32(lstep2[48], lstep3[50]); + lstep1[51] = _mm256_sub_epi32(lstep2[49], lstep3[51]); + lstep1[52] = _mm256_sub_epi32(lstep2[54], lstep3[52]); + lstep1[53] = _mm256_sub_epi32(lstep2[55], lstep3[53]); + lstep1[54] = _mm256_add_epi32(lstep3[52], lstep2[54]); + lstep1[55] = _mm256_add_epi32(lstep3[53], lstep2[55]); + lstep1[56] = _mm256_add_epi32(lstep3[58], lstep2[56]); + lstep1[57] = _mm256_add_epi32(lstep3[59], lstep2[57]); + lstep1[58] = _mm256_sub_epi32(lstep2[56], lstep3[58]); + lstep1[59] = _mm256_sub_epi32(lstep2[57], lstep3[59]); + lstep1[60] = _mm256_sub_epi32(lstep2[62], lstep3[60]); + lstep1[61] = _mm256_sub_epi32(lstep2[63], lstep3[61]); + lstep1[62] = _mm256_add_epi32(lstep3[60], lstep2[62]); + lstep1[63] = _mm256_add_epi32(lstep3[61], lstep2[63]); + } + // stage 8 + { + const __m256i k32_p31_p01 = pair256_set_epi32(cospi_31_64, cospi_1_64); + const __m256i k32_p15_p17 = pair256_set_epi32(cospi_15_64, cospi_17_64); + const __m256i k32_p23_p09 = pair256_set_epi32(cospi_23_64, cospi_9_64); + const __m256i k32_p07_p25 = pair256_set_epi32(cospi_7_64, cospi_25_64); + const __m256i k32_m25_p07 = pair256_set_epi32(-cospi_25_64, cospi_7_64); + const __m256i k32_m09_p23 = pair256_set_epi32(-cospi_9_64, cospi_23_64); + const __m256i k32_m17_p15 = pair256_set_epi32(-cospi_17_64, cospi_15_64); + const __m256i k32_m01_p31 = pair256_set_epi32(-cospi_1_64, cospi_31_64); + + u[ 0] = _mm256_unpacklo_epi32(lstep1[32], lstep1[62]); + u[ 1] = _mm256_unpackhi_epi32(lstep1[32], lstep1[62]); + u[ 2] = _mm256_unpacklo_epi32(lstep1[33], lstep1[63]); + u[ 3] = _mm256_unpackhi_epi32(lstep1[33], lstep1[63]); + u[ 4] = 
_mm256_unpacklo_epi32(lstep1[34], lstep1[60]); + u[ 5] = _mm256_unpackhi_epi32(lstep1[34], lstep1[60]); + u[ 6] = _mm256_unpacklo_epi32(lstep1[35], lstep1[61]); + u[ 7] = _mm256_unpackhi_epi32(lstep1[35], lstep1[61]); + u[ 8] = _mm256_unpacklo_epi32(lstep1[36], lstep1[58]); + u[ 9] = _mm256_unpackhi_epi32(lstep1[36], lstep1[58]); + u[10] = _mm256_unpacklo_epi32(lstep1[37], lstep1[59]); + u[11] = _mm256_unpackhi_epi32(lstep1[37], lstep1[59]); + u[12] = _mm256_unpacklo_epi32(lstep1[38], lstep1[56]); + u[13] = _mm256_unpackhi_epi32(lstep1[38], lstep1[56]); + u[14] = _mm256_unpacklo_epi32(lstep1[39], lstep1[57]); + u[15] = _mm256_unpackhi_epi32(lstep1[39], lstep1[57]); + + v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p31_p01); + v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p31_p01); + v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p31_p01); + v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p31_p01); + v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p15_p17); + v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p15_p17); + v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p15_p17); + v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p15_p17); + v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p23_p09); + v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p23_p09); + v[10] = k_madd_epi32_avx2(u[10], k32_p23_p09); + v[11] = k_madd_epi32_avx2(u[11], k32_p23_p09); + v[12] = k_madd_epi32_avx2(u[12], k32_p07_p25); + v[13] = k_madd_epi32_avx2(u[13], k32_p07_p25); + v[14] = k_madd_epi32_avx2(u[14], k32_p07_p25); + v[15] = k_madd_epi32_avx2(u[15], k32_p07_p25); + v[16] = k_madd_epi32_avx2(u[12], k32_m25_p07); + v[17] = k_madd_epi32_avx2(u[13], k32_m25_p07); + v[18] = k_madd_epi32_avx2(u[14], k32_m25_p07); + v[19] = k_madd_epi32_avx2(u[15], k32_m25_p07); + v[20] = k_madd_epi32_avx2(u[ 8], k32_m09_p23); + v[21] = k_madd_epi32_avx2(u[ 9], k32_m09_p23); + v[22] = k_madd_epi32_avx2(u[10], k32_m09_p23); + v[23] = k_madd_epi32_avx2(u[11], k32_m09_p23); + v[24] = k_madd_epi32_avx2(u[ 4], k32_m17_p15); + v[25] = k_madd_epi32_avx2(u[ 5], k32_m17_p15); + v[26] = k_madd_epi32_avx2(u[ 6], k32_m17_p15); + v[27] = k_madd_epi32_avx2(u[ 7], k32_m17_p15); + v[28] = k_madd_epi32_avx2(u[ 0], k32_m01_p31); + v[29] = k_madd_epi32_avx2(u[ 1], k32_m01_p31); + v[30] = k_madd_epi32_avx2(u[ 2], k32_m01_p31); + v[31] = k_madd_epi32_avx2(u[ 3], k32_m01_p31); + + u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); + u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]); + u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); + u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); + u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); + u[ 5] = k_packs_epi64_avx2(v[10], v[11]); + u[ 6] = k_packs_epi64_avx2(v[12], v[13]); + u[ 7] = k_packs_epi64_avx2(v[14], v[15]); + u[ 8] = k_packs_epi64_avx2(v[16], v[17]); + u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[10] = k_packs_epi64_avx2(v[20], v[21]); + u[11] = k_packs_epi64_avx2(v[22], v[23]); + u[12] = k_packs_epi64_avx2(v[24], v[25]); + u[13] = k_packs_epi64_avx2(v[26], v[27]); + u[14] = k_packs_epi64_avx2(v[28], v[29]); + u[15] = k_packs_epi64_avx2(v[30], v[31]); + + v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); + v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); + v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); + v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); + v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); + v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); + v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); + v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); + v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); + v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[10] = 
_mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); + v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); + v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); + v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING); + v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); + v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); + + u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); + u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); + u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); + u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); + u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); + u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); + u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); + u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); + u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); + u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); + u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); + u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS); + u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS); + u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS); + u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS); + + v[ 0] = _mm256_cmpgt_epi32(kZero,u[ 0]); + v[ 1] = _mm256_cmpgt_epi32(kZero,u[ 1]); + v[ 2] = _mm256_cmpgt_epi32(kZero,u[ 2]); + v[ 3] = _mm256_cmpgt_epi32(kZero,u[ 3]); + v[ 4] = _mm256_cmpgt_epi32(kZero,u[ 4]); + v[ 5] = _mm256_cmpgt_epi32(kZero,u[ 5]); + v[ 6] = _mm256_cmpgt_epi32(kZero,u[ 6]); + v[ 7] = _mm256_cmpgt_epi32(kZero,u[ 7]); + v[ 8] = _mm256_cmpgt_epi32(kZero,u[ 8]); + v[ 9] = _mm256_cmpgt_epi32(kZero,u[ 9]); + v[10] = _mm256_cmpgt_epi32(kZero,u[10]); + v[11] = _mm256_cmpgt_epi32(kZero,u[11]); + v[12] = _mm256_cmpgt_epi32(kZero,u[12]); + v[13] = _mm256_cmpgt_epi32(kZero,u[13]); + v[14] = _mm256_cmpgt_epi32(kZero,u[14]); + v[15] = _mm256_cmpgt_epi32(kZero,u[15]); + + u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]); + u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]); + u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]); + u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]); + u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]); + u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]); + u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]); + u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]); + u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]); + u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]); + u[10] = _mm256_sub_epi32(u[10], v[10]); + u[11] = _mm256_sub_epi32(u[11], v[11]); + u[12] = _mm256_sub_epi32(u[12], v[12]); + u[13] = _mm256_sub_epi32(u[13], v[13]); + u[14] = _mm256_sub_epi32(u[14], v[14]); + u[15] = _mm256_sub_epi32(u[15], v[15]); + + v[0] = _mm256_add_epi32(u[0], K32One); + v[1] = _mm256_add_epi32(u[1], K32One); + v[2] = _mm256_add_epi32(u[2], K32One); + v[3] = _mm256_add_epi32(u[3], K32One); + v[4] = _mm256_add_epi32(u[4], K32One); + v[5] = _mm256_add_epi32(u[5], K32One); + v[6] = _mm256_add_epi32(u[6], K32One); + v[7] = _mm256_add_epi32(u[7], K32One); + v[8] = _mm256_add_epi32(u[8], K32One); + v[9] = _mm256_add_epi32(u[9], K32One); + v[10] = _mm256_add_epi32(u[10], K32One); + v[11] = _mm256_add_epi32(u[11], K32One); + v[12] = _mm256_add_epi32(u[12], K32One); + v[13] = _mm256_add_epi32(u[13], K32One); + v[14] = _mm256_add_epi32(u[14], K32One); + v[15] = _mm256_add_epi32(u[15], K32One); + + u[0] = _mm256_srai_epi32(v[0], 2); + u[1] = _mm256_srai_epi32(v[1], 2); + u[2] = _mm256_srai_epi32(v[2], 2); + u[3] = _mm256_srai_epi32(v[3], 2); + u[4] = _mm256_srai_epi32(v[4], 2); + u[5] = _mm256_srai_epi32(v[5], 2); + u[6] = _mm256_srai_epi32(v[6], 2); + u[7] = _mm256_srai_epi32(v[7], 2); + u[8] = _mm256_srai_epi32(v[8], 2); + u[9] = 
_mm256_srai_epi32(v[9], 2); + u[10] = _mm256_srai_epi32(v[10], 2); + u[11] = _mm256_srai_epi32(v[11], 2); + u[12] = _mm256_srai_epi32(v[12], 2); + u[13] = _mm256_srai_epi32(v[13], 2); + u[14] = _mm256_srai_epi32(v[14], 2); + u[15] = _mm256_srai_epi32(v[15], 2); + + out[ 1] = _mm256_packs_epi32(u[0], u[1]); + out[17] = _mm256_packs_epi32(u[2], u[3]); + out[ 9] = _mm256_packs_epi32(u[4], u[5]); + out[25] = _mm256_packs_epi32(u[6], u[7]); + out[ 7] = _mm256_packs_epi32(u[8], u[9]); + out[23] = _mm256_packs_epi32(u[10], u[11]); + out[15] = _mm256_packs_epi32(u[12], u[13]); + out[31] = _mm256_packs_epi32(u[14], u[15]); + } + { + const __m256i k32_p27_p05 = pair256_set_epi32(cospi_27_64, cospi_5_64); + const __m256i k32_p11_p21 = pair256_set_epi32(cospi_11_64, cospi_21_64); + const __m256i k32_p19_p13 = pair256_set_epi32(cospi_19_64, cospi_13_64); + const __m256i k32_p03_p29 = pair256_set_epi32(cospi_3_64, cospi_29_64); + const __m256i k32_m29_p03 = pair256_set_epi32(-cospi_29_64, cospi_3_64); + const __m256i k32_m13_p19 = pair256_set_epi32(-cospi_13_64, cospi_19_64); + const __m256i k32_m21_p11 = pair256_set_epi32(-cospi_21_64, cospi_11_64); + const __m256i k32_m05_p27 = pair256_set_epi32(-cospi_5_64, cospi_27_64); + + u[ 0] = _mm256_unpacklo_epi32(lstep1[40], lstep1[54]); + u[ 1] = _mm256_unpackhi_epi32(lstep1[40], lstep1[54]); + u[ 2] = _mm256_unpacklo_epi32(lstep1[41], lstep1[55]); + u[ 3] = _mm256_unpackhi_epi32(lstep1[41], lstep1[55]); + u[ 4] = _mm256_unpacklo_epi32(lstep1[42], lstep1[52]); + u[ 5] = _mm256_unpackhi_epi32(lstep1[42], lstep1[52]); + u[ 6] = _mm256_unpacklo_epi32(lstep1[43], lstep1[53]); + u[ 7] = _mm256_unpackhi_epi32(lstep1[43], lstep1[53]); + u[ 8] = _mm256_unpacklo_epi32(lstep1[44], lstep1[50]); + u[ 9] = _mm256_unpackhi_epi32(lstep1[44], lstep1[50]); + u[10] = _mm256_unpacklo_epi32(lstep1[45], lstep1[51]); + u[11] = _mm256_unpackhi_epi32(lstep1[45], lstep1[51]); + u[12] = _mm256_unpacklo_epi32(lstep1[46], lstep1[48]); + u[13] = _mm256_unpackhi_epi32(lstep1[46], lstep1[48]); + u[14] = _mm256_unpacklo_epi32(lstep1[47], lstep1[49]); + u[15] = _mm256_unpackhi_epi32(lstep1[47], lstep1[49]); + + v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p27_p05); + v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p27_p05); + v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p27_p05); + v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p27_p05); + v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p11_p21); + v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p11_p21); + v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p11_p21); + v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p11_p21); + v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p19_p13); + v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p19_p13); + v[10] = k_madd_epi32_avx2(u[10], k32_p19_p13); + v[11] = k_madd_epi32_avx2(u[11], k32_p19_p13); + v[12] = k_madd_epi32_avx2(u[12], k32_p03_p29); + v[13] = k_madd_epi32_avx2(u[13], k32_p03_p29); + v[14] = k_madd_epi32_avx2(u[14], k32_p03_p29); + v[15] = k_madd_epi32_avx2(u[15], k32_p03_p29); + v[16] = k_madd_epi32_avx2(u[12], k32_m29_p03); + v[17] = k_madd_epi32_avx2(u[13], k32_m29_p03); + v[18] = k_madd_epi32_avx2(u[14], k32_m29_p03); + v[19] = k_madd_epi32_avx2(u[15], k32_m29_p03); + v[20] = k_madd_epi32_avx2(u[ 8], k32_m13_p19); + v[21] = k_madd_epi32_avx2(u[ 9], k32_m13_p19); + v[22] = k_madd_epi32_avx2(u[10], k32_m13_p19); + v[23] = k_madd_epi32_avx2(u[11], k32_m13_p19); + v[24] = k_madd_epi32_avx2(u[ 4], k32_m21_p11); + v[25] = k_madd_epi32_avx2(u[ 5], k32_m21_p11); + v[26] = k_madd_epi32_avx2(u[ 6], k32_m21_p11); + v[27] = k_madd_epi32_avx2(u[ 7], k32_m21_p11); + v[28] = k_madd_epi32_avx2(u[ 0], 
k32_m05_p27); + v[29] = k_madd_epi32_avx2(u[ 1], k32_m05_p27); + v[30] = k_madd_epi32_avx2(u[ 2], k32_m05_p27); + v[31] = k_madd_epi32_avx2(u[ 3], k32_m05_p27); + + u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); + u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]); + u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); + u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); + u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); + u[ 5] = k_packs_epi64_avx2(v[10], v[11]); + u[ 6] = k_packs_epi64_avx2(v[12], v[13]); + u[ 7] = k_packs_epi64_avx2(v[14], v[15]); + u[ 8] = k_packs_epi64_avx2(v[16], v[17]); + u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[10] = k_packs_epi64_avx2(v[20], v[21]); + u[11] = k_packs_epi64_avx2(v[22], v[23]); + u[12] = k_packs_epi64_avx2(v[24], v[25]); + u[13] = k_packs_epi64_avx2(v[26], v[27]); + u[14] = k_packs_epi64_avx2(v[28], v[29]); + u[15] = k_packs_epi64_avx2(v[30], v[31]); + + v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); + v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); + v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); + v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); + v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); + v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); + v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); + v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); + v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); + v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); + v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); + v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); + v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING); + v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); + v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); + + u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); + u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); + u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); + u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); + u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); + u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); + u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); + u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); + u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); + u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); + u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); + u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS); + u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS); + u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS); + u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS); + + v[ 0] = _mm256_cmpgt_epi32(kZero,u[ 0]); + v[ 1] = _mm256_cmpgt_epi32(kZero,u[ 1]); + v[ 2] = _mm256_cmpgt_epi32(kZero,u[ 2]); + v[ 3] = _mm256_cmpgt_epi32(kZero,u[ 3]); + v[ 4] = _mm256_cmpgt_epi32(kZero,u[ 4]); + v[ 5] = _mm256_cmpgt_epi32(kZero,u[ 5]); + v[ 6] = _mm256_cmpgt_epi32(kZero,u[ 6]); + v[ 7] = _mm256_cmpgt_epi32(kZero,u[ 7]); + v[ 8] = _mm256_cmpgt_epi32(kZero,u[ 8]); + v[ 9] = _mm256_cmpgt_epi32(kZero,u[ 9]); + v[10] = _mm256_cmpgt_epi32(kZero,u[10]); + v[11] = _mm256_cmpgt_epi32(kZero,u[11]); + v[12] = _mm256_cmpgt_epi32(kZero,u[12]); + v[13] = _mm256_cmpgt_epi32(kZero,u[13]); + v[14] = _mm256_cmpgt_epi32(kZero,u[14]); + v[15] = _mm256_cmpgt_epi32(kZero,u[15]); + + u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]); + u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]); + u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]); + u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]); + u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]); + u[ 5] = 
_mm256_sub_epi32(u[ 5], v[ 5]); + u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]); + u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]); + u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]); + u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]); + u[10] = _mm256_sub_epi32(u[10], v[10]); + u[11] = _mm256_sub_epi32(u[11], v[11]); + u[12] = _mm256_sub_epi32(u[12], v[12]); + u[13] = _mm256_sub_epi32(u[13], v[13]); + u[14] = _mm256_sub_epi32(u[14], v[14]); + u[15] = _mm256_sub_epi32(u[15], v[15]); + + v[0] = _mm256_add_epi32(u[0], K32One); + v[1] = _mm256_add_epi32(u[1], K32One); + v[2] = _mm256_add_epi32(u[2], K32One); + v[3] = _mm256_add_epi32(u[3], K32One); + v[4] = _mm256_add_epi32(u[4], K32One); + v[5] = _mm256_add_epi32(u[5], K32One); + v[6] = _mm256_add_epi32(u[6], K32One); + v[7] = _mm256_add_epi32(u[7], K32One); + v[8] = _mm256_add_epi32(u[8], K32One); + v[9] = _mm256_add_epi32(u[9], K32One); + v[10] = _mm256_add_epi32(u[10], K32One); + v[11] = _mm256_add_epi32(u[11], K32One); + v[12] = _mm256_add_epi32(u[12], K32One); + v[13] = _mm256_add_epi32(u[13], K32One); + v[14] = _mm256_add_epi32(u[14], K32One); + v[15] = _mm256_add_epi32(u[15], K32One); + + u[0] = _mm256_srai_epi32(v[0], 2); + u[1] = _mm256_srai_epi32(v[1], 2); + u[2] = _mm256_srai_epi32(v[2], 2); + u[3] = _mm256_srai_epi32(v[3], 2); + u[4] = _mm256_srai_epi32(v[4], 2); + u[5] = _mm256_srai_epi32(v[5], 2); + u[6] = _mm256_srai_epi32(v[6], 2); + u[7] = _mm256_srai_epi32(v[7], 2); + u[8] = _mm256_srai_epi32(v[8], 2); + u[9] = _mm256_srai_epi32(v[9], 2); + u[10] = _mm256_srai_epi32(v[10], 2); + u[11] = _mm256_srai_epi32(v[11], 2); + u[12] = _mm256_srai_epi32(v[12], 2); + u[13] = _mm256_srai_epi32(v[13], 2); + u[14] = _mm256_srai_epi32(v[14], 2); + u[15] = _mm256_srai_epi32(v[15], 2); + + out[ 5] = _mm256_packs_epi32(u[0], u[1]); + out[21] = _mm256_packs_epi32(u[2], u[3]); + out[13] = _mm256_packs_epi32(u[4], u[5]); + out[29] = _mm256_packs_epi32(u[6], u[7]); + out[ 3] = _mm256_packs_epi32(u[8], u[9]); + out[19] = _mm256_packs_epi32(u[10], u[11]); + out[11] = _mm256_packs_epi32(u[12], u[13]); + out[27] = _mm256_packs_epi32(u[14], u[15]); + } + } +#endif + // Transpose the results, do it as four 8x8 transposes. 
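+ // Within each 128-bit lane, the three unpack rounds below (epi16, then
+ // epi32, then epi64) are equivalent to a plain 8x8 transpose; a scalar
+ // sketch, illustrative only (transpose8x8_c is a hypothetical reference
+ // name, not part of this change):
+ //   static void transpose8x8_c(const int16_t in[8][8], int16_t out[8][8]) {
+ //     int r, c;
+ //     for (r = 0; r < 8; ++r)
+ //       for (c = 0; c < 8; ++c)
+ //         out[c][r] = in[r][c];
+ //   }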
+ { + int transpose_block; + int16_t *output_currStep, *output_nextStep; + if (0 == pass) { + output_currStep = &intermediate[column_start * 32]; + output_nextStep = &intermediate[(column_start + 8) * 32]; + } else { + output_currStep = &output_org[column_start * 32]; + output_nextStep = &output_org[(column_start + 8) * 32]; + } + for (transpose_block = 0; transpose_block < 4; ++transpose_block) { + __m256i *this_out = &out[8 * transpose_block]; + // 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 + // 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 + // 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 + // 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 + // 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 + // 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 + // 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 + // 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 + const __m256i tr0_0 = _mm256_unpacklo_epi16(this_out[0], this_out[1]); + const __m256i tr0_1 = _mm256_unpacklo_epi16(this_out[2], this_out[3]); + const __m256i tr0_2 = _mm256_unpackhi_epi16(this_out[0], this_out[1]); + const __m256i tr0_3 = _mm256_unpackhi_epi16(this_out[2], this_out[3]); + const __m256i tr0_4 = _mm256_unpacklo_epi16(this_out[4], this_out[5]); + const __m256i tr0_5 = _mm256_unpacklo_epi16(this_out[6], this_out[7]); + const __m256i tr0_6 = _mm256_unpackhi_epi16(this_out[4], this_out[5]); + const __m256i tr0_7 = _mm256_unpackhi_epi16(this_out[6], this_out[7]); + // 00 20 01 21 02 22 03 23 08 28 09 29 10 30 11 31 + // 40 60 41 61 42 62 43 63 48 68 49 69 50 70 51 71 + // 04 24 05 25 06 26 07 27 12 32 13 33 14 34 15 35 + // 44 64 45 65 46 66 47 67 52 72 53 73 54 74 55 75 + // 80 100 81 101 82 102 83 103 88 108 89 109 90 110 91 111 + // 120 140 121 141 122 142 123 143 128 148 129 149 130 150 131 151 + // 84 104 85 105 86 106 87 107 92 112 93 113 94 114 95 115 + // 124 144 125 145 126 146 127 147 132 152 133 153 134 154 135 155 + + const __m256i tr1_0 = _mm256_unpacklo_epi32(tr0_0, tr0_1); + const __m256i tr1_1 = _mm256_unpacklo_epi32(tr0_2, tr0_3); + const __m256i tr1_2 = _mm256_unpackhi_epi32(tr0_0, tr0_1); + const __m256i tr1_3 = _mm256_unpackhi_epi32(tr0_2, tr0_3); + const __m256i tr1_4 = _mm256_unpacklo_epi32(tr0_4, tr0_5); + const __m256i tr1_5 = _mm256_unpacklo_epi32(tr0_6, tr0_7); + const __m256i tr1_6 = _mm256_unpackhi_epi32(tr0_4, tr0_5); + const __m256i tr1_7 = _mm256_unpackhi_epi32(tr0_6, tr0_7); + // 00 20 40 60 01 21 41 61 08 28 48 68 09 29 49 69 + // 04 24 44 64 05 25 45 65 12 32 52 72 13 33 53 73 + // 02 22 42 62 03 23 43 63 10 30 50 70 11 31 51 71 + // 06 26 46 66 07 27 47 67 14 34 54 74 15 35 55 75 + // 80 100 120 140 81 101 121 141 88 108 128 148 89 109 129 149 + // 84 104 124 144 85 105 125 145 92 112 132 152 93 113 133 153 + // 82 102 122 142 83 103 123 143 90 110 130 150 91 111 131 151 + // 86 106 126 146 87 107 127 147 94 114 134 154 95 115 135 155 + __m256i tr2_0 = _mm256_unpacklo_epi64(tr1_0, tr1_4); + __m256i tr2_1 = _mm256_unpackhi_epi64(tr1_0, tr1_4); + __m256i tr2_2 = _mm256_unpacklo_epi64(tr1_2, tr1_6); + __m256i tr2_3 = _mm256_unpackhi_epi64(tr1_2, tr1_6); + __m256i tr2_4 = _mm256_unpacklo_epi64(tr1_1, tr1_5); + __m256i tr2_5 = _mm256_unpackhi_epi64(tr1_1, tr1_5); + __m256i tr2_6 = _mm256_unpacklo_epi64(tr1_3, tr1_7); + __m256i tr2_7 = _mm256_unpackhi_epi64(tr1_3, tr1_7); + // 00 20 40 60 80 100 120 140 08 28 48 68 88 108 128 148 + // 01 21 41 61 81 101 121 141 09 29 49 69 89 109 129 149 + // 02 22 42 62 82 102 122 142 10 30 50 70 90 110 130 150
+ // 03 23 43 63 83 103 123 143 11 31 51 71 91 111 131 151 + // 04 24 44 64 84 104 124 144 12 32 52 72 92 112 132 152 + // 05 25 45 65 85 105 125 145 13 33 53 73 93 113 133 153 + // 06 26 46 66 86 106 126 146 14 34 54 74 94 114 134 154 + // 07 27 47 67 87 107 127 147 15 35 55 75 95 115 135 155 + if (0 == pass) { + // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2; + // TODO(cd): see quality impact of only doing + // output[j] = (output[j] + 1) >> 2; + // which would remove the code between here ... + __m256i tr2_0_0 = _mm256_cmpgt_epi16(tr2_0, kZero); + __m256i tr2_1_0 = _mm256_cmpgt_epi16(tr2_1, kZero); + __m256i tr2_2_0 = _mm256_cmpgt_epi16(tr2_2, kZero); + __m256i tr2_3_0 = _mm256_cmpgt_epi16(tr2_3, kZero); + __m256i tr2_4_0 = _mm256_cmpgt_epi16(tr2_4, kZero); + __m256i tr2_5_0 = _mm256_cmpgt_epi16(tr2_5, kZero); + __m256i tr2_6_0 = _mm256_cmpgt_epi16(tr2_6, kZero); + __m256i tr2_7_0 = _mm256_cmpgt_epi16(tr2_7, kZero); + tr2_0 = _mm256_sub_epi16(tr2_0, tr2_0_0); + tr2_1 = _mm256_sub_epi16(tr2_1, tr2_1_0); + tr2_2 = _mm256_sub_epi16(tr2_2, tr2_2_0); + tr2_3 = _mm256_sub_epi16(tr2_3, tr2_3_0); + tr2_4 = _mm256_sub_epi16(tr2_4, tr2_4_0); + tr2_5 = _mm256_sub_epi16(tr2_5, tr2_5_0); + tr2_6 = _mm256_sub_epi16(tr2_6, tr2_6_0); + tr2_7 = _mm256_sub_epi16(tr2_7, tr2_7_0); + // ... and here. + // PS: also change code in vp9/encoder/vp9_dct.c + tr2_0 = _mm256_add_epi16(tr2_0, kOne); + tr2_1 = _mm256_add_epi16(tr2_1, kOne); + tr2_2 = _mm256_add_epi16(tr2_2, kOne); + tr2_3 = _mm256_add_epi16(tr2_3, kOne); + tr2_4 = _mm256_add_epi16(tr2_4, kOne); + tr2_5 = _mm256_add_epi16(tr2_5, kOne); + tr2_6 = _mm256_add_epi16(tr2_6, kOne); + tr2_7 = _mm256_add_epi16(tr2_7, kOne); + tr2_0 = _mm256_srai_epi16(tr2_0, 2); + tr2_1 = _mm256_srai_epi16(tr2_1, 2); + tr2_2 = _mm256_srai_epi16(tr2_2, 2); + tr2_3 = _mm256_srai_epi16(tr2_3, 2); + tr2_4 = _mm256_srai_epi16(tr2_4, 2); + tr2_5 = _mm256_srai_epi16(tr2_5, 2); + tr2_6 = _mm256_srai_epi16(tr2_6, 2); + tr2_7 = _mm256_srai_epi16(tr2_7, 2); + } + // Note: even though all these stores are aligned, using the aligned + // intrinsic makes the code slightly slower.
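+ // The low 128 bits of each transposed row belong to the current + // 8-column group and the high 128 bits to the group eight columns + // later, hence the castsi256_si128 / extractf128_si256(..., 1) pair + // used by the two store blocks below.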
+ _mm_storeu_si128((__m128i *)(output_currStep + 0 * 32), _mm256_castsi256_si128(tr2_0)); + _mm_storeu_si128((__m128i *)(output_currStep + 1 * 32), _mm256_castsi256_si128(tr2_1)); + _mm_storeu_si128((__m128i *)(output_currStep + 2 * 32), _mm256_castsi256_si128(tr2_2)); + _mm_storeu_si128((__m128i *)(output_currStep + 3 * 32), _mm256_castsi256_si128(tr2_3)); + _mm_storeu_si128((__m128i *)(output_currStep + 4 * 32), _mm256_castsi256_si128(tr2_4)); + _mm_storeu_si128((__m128i *)(output_currStep + 5 * 32), _mm256_castsi256_si128(tr2_5)); + _mm_storeu_si128((__m128i *)(output_currStep + 6 * 32), _mm256_castsi256_si128(tr2_6)); + _mm_storeu_si128((__m128i *)(output_currStep + 7 * 32), _mm256_castsi256_si128(tr2_7)); + + _mm_storeu_si128((__m128i *)(output_nextStep + 0 * 32), _mm256_extractf128_si256(tr2_0,1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 1 * 32), _mm256_extractf128_si256(tr2_1,1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 2 * 32), _mm256_extractf128_si256(tr2_2,1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 3 * 32), _mm256_extractf128_si256(tr2_3,1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 4 * 32), _mm256_extractf128_si256(tr2_4,1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 5 * 32), _mm256_extractf128_si256(tr2_5,1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 6 * 32), _mm256_extractf128_si256(tr2_6,1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 7 * 32), _mm256_extractf128_si256(tr2_7,1)); + // Process next 8x8 + output_currStep += 8; + output_nextStep += 8; + } + } + } + } +} // NOLINT diff --git a/libvpx/vp9/encoder/x86/vp9_dct_avx2.c b/libvpx/vp9/encoder/x86/vp9_dct_avx2.c new file mode 100644 index 0000000..b5269ed --- /dev/null +++ b/libvpx/vp9/encoder/x86/vp9_dct_avx2.c @@ -0,0 +1,2592 @@ +/* + * Copyright (c) 2012 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <immintrin.h> // AVX2 +#include "vp9/common/vp9_idct.h" // for cospi constants +#include "vpx_ports/mem.h" + +void vp9_fdct4x4_avx2(const int16_t *input, int16_t *output, int stride) { + // The 2D transform is done with two passes which are actually pretty + // similar. In the first one, we transform the columns and transpose + // the results. In the second one, we transform the rows. To achieve that, + // as the first pass results are transposed, we transpose the columns (that + // is the transposed rows) and transpose the results (so that it goes back + // in normal/row positions). + int pass; + // Constants + // When we use them, in one case, they are all the same. In all others + // it's a pair of them that we need to repeat four times. This is done + // by constructing the 32 bit constant corresponding to that pair. 
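+ // Roughly, assuming vpx's pair_set_epi16() layout: the pair (a, b) is + // replicated into every 32-bit lane, so _mm_madd_epi16 then computes + // a * x[2i] + b * x[2i + 1] for each interleaved input pair, i.e. one + // butterfly multiply-accumulate per lane pair.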
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); + const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); + const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); + const __m128i kOne = _mm_set1_epi16(1); + __m128i in0, in1, in2, in3; + // Load inputs. + { + in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); + in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); + in2 = _mm_loadl_epi64((const __m128i *)(input + 2 * stride)); + in3 = _mm_loadl_epi64((const __m128i *)(input + 3 * stride)); + // x = x << 4 + in0 = _mm_slli_epi16(in0, 4); + in1 = _mm_slli_epi16(in1, 4); + in2 = _mm_slli_epi16(in2, 4); + in3 = _mm_slli_epi16(in3, 4); + // if (i == 0 && input[0]) input[0] += 1; + { + // The mask will only contain whether the first value is zero, all + // other comparisons will fail as something shifted by 4 (above << 4) + // can never be equal to one. To increment in the non-zero case, we + // add the mask and one for the first element: + // - if zero, mask = -1, v = v - 1 + 1 = v + // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1 + __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a); + in0 = _mm_add_epi16(in0, mask); + in0 = _mm_add_epi16(in0, k__nonzero_bias_b); + } + } + // Do the two transform/transpose passes + for (pass = 0; pass < 2; ++pass) { + // Transform 1/2: Add/subtract + const __m128i r0 = _mm_add_epi16(in0, in3); + const __m128i r1 = _mm_add_epi16(in1, in2); + const __m128i r2 = _mm_sub_epi16(in1, in2); + const __m128i r3 = _mm_sub_epi16(in0, in3); + // Transform 1/2: Interleave to do the multiply by constants which gets us + // into 32 bits. + const __m128i t0 = _mm_unpacklo_epi16(r0, r1); + const __m128i t2 = _mm_unpacklo_epi16(r2, r3); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); + const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); + const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08); + const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24); + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); + const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + // Combine and transpose + const __m128i res0 = _mm_packs_epi32(w0, w2); + const __m128i res1 = _mm_packs_epi32(w4, w6); + // 00 01 02 03 20 21 22 23 + // 10 11 12 13 30 31 32 33 + const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1); + const __m128i tr0_1 = _mm_unpackhi_epi16(res0, res1); + // 00 10 01 11 02 12 03 13 + // 20 30 21 31 22 32 23 33 + in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); + in2 = _mm_unpackhi_epi32(tr0_0, tr0_1); + // 00 10 20 30 01 11 21 31 in0 contains 0 followed by 1 + // 02 12 22 32 03 13 23 33 in2 contains 2 followed by 3 + if (0 == pass) { + // Extract values in the high part for second pass as transform code + // only uses the first four values.
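+ // After the transpose, in0 holds rows 0 and 1 and in2 rows 2 and 3; + // unpacking a register's high 64 bits with itself peels rows 1 and 3 + // into their own registers.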
+ in1 = _mm_unpackhi_epi64(in0, in0); + in3 = _mm_unpackhi_epi64(in2, in2); + } else { + // Post-condition output and store it (v + 1) >> 2, taking advantage + // of the fact 1/3 are stored just after 0/2. + __m128i out01 = _mm_add_epi16(in0, kOne); + __m128i out23 = _mm_add_epi16(in2, kOne); + out01 = _mm_srai_epi16(out01, 2); + out23 = _mm_srai_epi16(out23, 2); + _mm_storeu_si128((__m128i *)(output + 0 * 4), out01); + _mm_storeu_si128((__m128i *)(output + 2 * 4), out23); + } + } +} + +static INLINE void load_buffer_4x4_avx2(const int16_t *input, __m128i *in, + int stride) { + const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); + const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); + __m128i mask; + + in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); + in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); + in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride)); + in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride)); + + in[0] = _mm_slli_epi16(in[0], 4); + in[1] = _mm_slli_epi16(in[1], 4); + in[2] = _mm_slli_epi16(in[2], 4); + in[3] = _mm_slli_epi16(in[3], 4); + + mask = _mm_cmpeq_epi16(in[0], k__nonzero_bias_a); + in[0] = _mm_add_epi16(in[0], mask); + in[0] = _mm_add_epi16(in[0], k__nonzero_bias_b); +} + +static INLINE void write_buffer_4x4_avx2(int16_t *output, __m128i *res) { + const __m128i kOne = _mm_set1_epi16(1); + __m128i in01 = _mm_unpacklo_epi64(res[0], res[1]); + __m128i in23 = _mm_unpacklo_epi64(res[2], res[3]); + __m128i out01 = _mm_add_epi16(in01, kOne); + __m128i out23 = _mm_add_epi16(in23, kOne); + out01 = _mm_srai_epi16(out01, 2); + out23 = _mm_srai_epi16(out23, 2); + _mm_store_si128((__m128i *)(output + 0 * 8), out01); + _mm_store_si128((__m128i *)(output + 1 * 8), out23); +} + +static INLINE void transpose_4x4_avx2(__m128i *res) { + // Combine and transpose + // 00 01 02 03 20 21 22 23 + // 10 11 12 13 30 31 32 33 + const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]); + const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]); + + // 00 10 01 11 02 12 03 13 + // 20 30 21 31 22 32 23 33 + res[0] = _mm_unpacklo_epi32(tr0_0, tr0_1); + res[2] = _mm_unpackhi_epi32(tr0_0, tr0_1); + + // 00 10 20 30 01 11 21 31 + // 02 12 22 32 03 13 23 33 + // only use the first 4 16-bit integers + res[1] = _mm_unpackhi_epi64(res[0], res[0]); + res[3] = _mm_unpackhi_epi64(res[2], res[2]); +} + +void fdct4_avx2(__m128i *in) { + const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); + const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + + __m128i u[4], v[4]; + u[0]=_mm_unpacklo_epi16(in[0], in[1]); + u[1]=_mm_unpacklo_epi16(in[3], in[2]); + + v[0] = _mm_add_epi16(u[0], u[1]); + v[1] = _mm_sub_epi16(u[0], u[1]); + + u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16); // 0 + u[1] = _mm_madd_epi16(v[0], k__cospi_p16_m16); // 2 + u[2] = _mm_madd_epi16(v[1], k__cospi_p08_p24); // 1 + u[3] = _mm_madd_epi16(v[1], k__cospi_p24_m08); // 3 + + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = 
_mm_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + + in[0] = _mm_packs_epi32(u[0], u[1]); + in[1] = _mm_packs_epi32(u[2], u[3]); + transpose_4x4_avx2(in); +} + +void fadst4_avx2(__m128i *in) { + const __m128i k__sinpi_p01_p02 = pair_set_epi16(sinpi_1_9, sinpi_2_9); + const __m128i k__sinpi_p04_m01 = pair_set_epi16(sinpi_4_9, -sinpi_1_9); + const __m128i k__sinpi_p03_p04 = pair_set_epi16(sinpi_3_9, sinpi_4_9); + const __m128i k__sinpi_m03_p02 = pair_set_epi16(-sinpi_3_9, sinpi_2_9); + const __m128i k__sinpi_p03_p03 = _mm_set1_epi16(sinpi_3_9); + const __m128i kZero = _mm_set1_epi16(0); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + __m128i u[8], v[8]; + __m128i in7 = _mm_add_epi16(in[0], in[1]); + + u[0] = _mm_unpacklo_epi16(in[0], in[1]); + u[1] = _mm_unpacklo_epi16(in[2], in[3]); + u[2] = _mm_unpacklo_epi16(in7, kZero); + u[3] = _mm_unpacklo_epi16(in[2], kZero); + u[4] = _mm_unpacklo_epi16(in[3], kZero); + + v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p02); // s0 + s2 + v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p04); // s4 + s5 + v[2] = _mm_madd_epi16(u[2], k__sinpi_p03_p03); // x1 + v[3] = _mm_madd_epi16(u[0], k__sinpi_p04_m01); // s1 - s3 + v[4] = _mm_madd_epi16(u[1], k__sinpi_m03_p02); // -s4 + s6 + v[5] = _mm_madd_epi16(u[3], k__sinpi_p03_p03); // s4 + v[6] = _mm_madd_epi16(u[4], k__sinpi_p03_p03); + + u[0] = _mm_add_epi32(v[0], v[1]); + u[1] = _mm_sub_epi32(v[2], v[6]); + u[2] = _mm_add_epi32(v[3], v[4]); + u[3] = _mm_sub_epi32(u[2], u[0]); + u[4] = _mm_slli_epi32(v[5], 2); + u[5] = _mm_sub_epi32(u[4], v[5]); + u[6] = _mm_add_epi32(u[3], u[5]); + + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + + u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + + in[0] = _mm_packs_epi32(u[0], u[2]); + in[1] = _mm_packs_epi32(u[1], u[3]); + transpose_4x4_avx2(in); +} + +void vp9_fht4x4_avx2(const int16_t *input, int16_t *output, + int stride, int tx_type) { + __m128i in[4]; + + switch (tx_type) { + case DCT_DCT: + vp9_fdct4x4_avx2(input, output, stride); + break; + case ADST_DCT: + load_buffer_4x4_avx2(input, in, stride); + fadst4_avx2(in); + fdct4_avx2(in); + write_buffer_4x4_avx2(output, in); + break; + case DCT_ADST: + load_buffer_4x4_avx2(input, in, stride); + fdct4_avx2(in); + fadst4_avx2(in); + write_buffer_4x4_avx2(output, in); + break; + case ADST_ADST: + load_buffer_4x4_avx2(input, in, stride); + fadst4_avx2(in); + fadst4_avx2(in); + write_buffer_4x4_avx2(output, in); + break; + default: + assert(0); + break; + } +} + +void vp9_fdct8x8_avx2(const int16_t *input, int16_t *output, int stride) { + int pass; + // Constants + // When we use them, in one case, they are all the same. In all others + // it's a pair of them that we need to repeat four times. This is done + // by constructing the 32 bit constant corresponding to that pair. 
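+ // (Same pair-replication trick as described before the constants of + // vp9_fdct4x4_avx2 above.)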
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); + const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); + const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); + const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); + const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); + const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + // Load input + __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); + __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); + __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); + __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); + __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); + __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); + __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); + __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); + // Pre-condition input (shift by two) + in0 = _mm_slli_epi16(in0, 2); + in1 = _mm_slli_epi16(in1, 2); + in2 = _mm_slli_epi16(in2, 2); + in3 = _mm_slli_epi16(in3, 2); + in4 = _mm_slli_epi16(in4, 2); + in5 = _mm_slli_epi16(in5, 2); + in6 = _mm_slli_epi16(in6, 2); + in7 = _mm_slli_epi16(in7, 2); + + // We do two passes, first the columns, then the rows. The results of the + // first pass are transposed so that the same column code can be reused. The + // results of the second pass are also transposed so that the rows (processed + // as columns) are put back in row positions. + for (pass = 0; pass < 2; pass++) { + // To store results of each pass before the transpose. 
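+ // All eight rows fit in xmm registers for both passes, so unlike the + // 16x16 transform later in this file no intermediate memory buffer is + // needed.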
+ __m128i res0, res1, res2, res3, res4, res5, res6, res7; + // Add/subtract + const __m128i q0 = _mm_add_epi16(in0, in7); + const __m128i q1 = _mm_add_epi16(in1, in6); + const __m128i q2 = _mm_add_epi16(in2, in5); + const __m128i q3 = _mm_add_epi16(in3, in4); + const __m128i q4 = _mm_sub_epi16(in3, in4); + const __m128i q5 = _mm_sub_epi16(in2, in5); + const __m128i q6 = _mm_sub_epi16(in1, in6); + const __m128i q7 = _mm_sub_epi16(in0, in7); + // Work on first four results + { + // Add/subtract + const __m128i r0 = _mm_add_epi16(q0, q3); + const __m128i r1 = _mm_add_epi16(q1, q2); + const __m128i r2 = _mm_sub_epi16(q1, q2); + const __m128i r3 = _mm_sub_epi16(q0, q3); + // Interleave to do the multiply by constants which gets us into 32bits + const __m128i t0 = _mm_unpacklo_epi16(r0, r1); + const __m128i t1 = _mm_unpackhi_epi16(r0, r1); + const __m128i t2 = _mm_unpacklo_epi16(r2, r3); + const __m128i t3 = _mm_unpackhi_epi16(r2, r3); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16); + const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); + const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16); + const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08); + const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08); + const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24); + const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); + const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); + const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); + const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + // Combine + res0 = _mm_packs_epi32(w0, w1); + res4 = _mm_packs_epi32(w2, w3); + res2 = _mm_packs_epi32(w4, w5); + res6 = _mm_packs_epi32(w6, w7); + } + // Work on next four results + { + // Interleave to do the multiply by constants which gets us into 32bits + const __m128i d0 = _mm_unpacklo_epi16(q6, q5); + const __m128i d1 = _mm_unpackhi_epi16(q6, q5); + const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16); + const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16); + const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16); + const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16); + // dct_const_round_shift + const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING); + const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING); + const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING); + const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING); + const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS); + const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS); + const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS); + const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS); + // Combine + const __m128i r0 = 
_mm_packs_epi32(s0, s1); + const __m128i r1 = _mm_packs_epi32(s2, s3); + // Add/subtract + const __m128i x0 = _mm_add_epi16(q4, r0); + const __m128i x1 = _mm_sub_epi16(q4, r0); + const __m128i x2 = _mm_sub_epi16(q7, r1); + const __m128i x3 = _mm_add_epi16(q7, r1); + // Interleave to do the multiply by constants which gets us into 32bits + const __m128i t0 = _mm_unpacklo_epi16(x0, x3); + const __m128i t1 = _mm_unpackhi_epi16(x0, x3); + const __m128i t2 = _mm_unpacklo_epi16(x1, x2); + const __m128i t3 = _mm_unpackhi_epi16(x1, x2); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04); + const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28); + const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28); + const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20); + const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20); + const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12); + const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); + const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); + const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); + const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + // Combine + res1 = _mm_packs_epi32(w0, w1); + res7 = _mm_packs_epi32(w2, w3); + res5 = _mm_packs_epi32(w4, w5); + res3 = _mm_packs_epi32(w6, w7); + } + // Transpose the 8x8. 
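+ // Standard 8x8 16-bit transpose: three rounds of eight unpacks at 16-, + // 32- and 64-bit granularity; the index comments track where each + // coefficient lands after every round.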
+ { + // 00 01 02 03 04 05 06 07 + // 10 11 12 13 14 15 16 17 + // 20 21 22 23 24 25 26 27 + // 30 31 32 33 34 35 36 37 + // 40 41 42 43 44 45 46 47 + // 50 51 52 53 54 55 56 57 + // 60 61 62 63 64 65 66 67 + // 70 71 72 73 74 75 76 77 + const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1); + const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3); + const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1); + const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3); + const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5); + const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7); + const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5); + const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7); + // 00 10 01 11 02 12 03 13 + // 20 30 21 31 22 32 23 33 + // 04 14 05 15 06 16 07 17 + // 24 34 25 35 26 36 27 37 + // 40 50 41 51 42 52 43 53 + // 60 70 61 71 62 72 63 73 + // 44 54 45 55 46 56 47 57 + // 64 74 65 75 66 76 67 77 + const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); + const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); + const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); + const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); + const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); + const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); + const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); + const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); + // 00 10 20 30 01 11 21 31 + // 40 50 60 70 41 51 61 71 + // 02 12 22 32 03 13 23 33 + // 42 52 62 72 43 53 63 73 + // 04 14 24 34 05 15 25 35 + // 44 54 64 74 45 55 65 75 + // 06 16 26 36 07 17 27 37 + // 46 56 66 76 47 57 67 77 + in0 = _mm_unpacklo_epi64(tr1_0, tr1_4); + in1 = _mm_unpackhi_epi64(tr1_0, tr1_4); + in2 = _mm_unpacklo_epi64(tr1_2, tr1_6); + in3 = _mm_unpackhi_epi64(tr1_2, tr1_6); + in4 = _mm_unpacklo_epi64(tr1_1, tr1_5); + in5 = _mm_unpackhi_epi64(tr1_1, tr1_5); + in6 = _mm_unpacklo_epi64(tr1_3, tr1_7); + in7 = _mm_unpackhi_epi64(tr1_3, tr1_7); + // 00 10 20 30 40 50 60 70 + // 01 11 21 31 41 51 61 71 + // 02 12 22 32 42 52 62 72 + // 03 13 23 33 43 53 63 73 + // 04 14 24 34 44 54 64 74 + // 05 15 25 35 45 55 65 75 + // 06 16 26 36 46 56 66 76 + // 07 17 27 37 47 57 67 77 + } + } + // Post-condition output and store it + { + // Post-condition (division by two) + // division by two of 16-bit signed numbers using shifts + // n / 2 = (n - (n >> 15)) >> 1 + const __m128i sign_in0 = _mm_srai_epi16(in0, 15); + const __m128i sign_in1 = _mm_srai_epi16(in1, 15); + const __m128i sign_in2 = _mm_srai_epi16(in2, 15); + const __m128i sign_in3 = _mm_srai_epi16(in3, 15); + const __m128i sign_in4 = _mm_srai_epi16(in4, 15); + const __m128i sign_in5 = _mm_srai_epi16(in5, 15); + const __m128i sign_in6 = _mm_srai_epi16(in6, 15); + const __m128i sign_in7 = _mm_srai_epi16(in7, 15); + in0 = _mm_sub_epi16(in0, sign_in0); + in1 = _mm_sub_epi16(in1, sign_in1); + in2 = _mm_sub_epi16(in2, sign_in2); + in3 = _mm_sub_epi16(in3, sign_in3); + in4 = _mm_sub_epi16(in4, sign_in4); + in5 = _mm_sub_epi16(in5, sign_in5); + in6 = _mm_sub_epi16(in6, sign_in6); + in7 = _mm_sub_epi16(in7, sign_in7); + in0 = _mm_srai_epi16(in0, 1); + in1 = _mm_srai_epi16(in1, 1); + in2 = _mm_srai_epi16(in2, 1); + in3 = _mm_srai_epi16(in3, 1); + in4 = _mm_srai_epi16(in4, 1); + in5 = _mm_srai_epi16(in5, 1); + in6 = _mm_srai_epi16(in6, 1); + in7 = _mm_srai_epi16(in7, 1); + // store results + _mm_store_si128((__m128i *)(output + 0 * 8), in0); + _mm_store_si128((__m128i *)(output + 1 * 8), in1); + _mm_store_si128((__m128i *)(output + 2 * 8), in2); + _mm_store_si128((__m128i *)(output +
3 * 8), in3); + _mm_store_si128((__m128i *)(output + 4 * 8), in4); + _mm_store_si128((__m128i *)(output + 5 * 8), in5); + _mm_store_si128((__m128i *)(output + 6 * 8), in6); + _mm_store_si128((__m128i *)(output + 7 * 8), in7); + } +} + +// load 8x8 array +static INLINE void load_buffer_8x8_avx2(const int16_t *input, __m128i *in, + int stride) { + in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride)); + in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride)); + in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride)); + in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride)); + in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride)); + in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride)); + in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride)); + in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride)); + + in[0] = _mm_slli_epi16(in[0], 2); + in[1] = _mm_slli_epi16(in[1], 2); + in[2] = _mm_slli_epi16(in[2], 2); + in[3] = _mm_slli_epi16(in[3], 2); + in[4] = _mm_slli_epi16(in[4], 2); + in[5] = _mm_slli_epi16(in[5], 2); + in[6] = _mm_slli_epi16(in[6], 2); + in[7] = _mm_slli_epi16(in[7], 2); +} + +// right shift and rounding +static INLINE void right_shift_8x8_avx2(__m128i *res, int const bit) { + const __m128i kOne = _mm_set1_epi16(1); + const int bit_m02 = bit - 2; + __m128i sign0 = _mm_srai_epi16(res[0], 15); + __m128i sign1 = _mm_srai_epi16(res[1], 15); + __m128i sign2 = _mm_srai_epi16(res[2], 15); + __m128i sign3 = _mm_srai_epi16(res[3], 15); + __m128i sign4 = _mm_srai_epi16(res[4], 15); + __m128i sign5 = _mm_srai_epi16(res[5], 15); + __m128i sign6 = _mm_srai_epi16(res[6], 15); + __m128i sign7 = _mm_srai_epi16(res[7], 15); + + if (bit_m02 >= 0) { + __m128i k_const_rounding = _mm_slli_epi16(kOne, bit_m02); + res[0] = _mm_add_epi16(res[0], k_const_rounding); + res[1] = _mm_add_epi16(res[1], k_const_rounding); + res[2] = _mm_add_epi16(res[2], k_const_rounding); + res[3] = _mm_add_epi16(res[3], k_const_rounding); + res[4] = _mm_add_epi16(res[4], k_const_rounding); + res[5] = _mm_add_epi16(res[5], k_const_rounding); + res[6] = _mm_add_epi16(res[6], k_const_rounding); + res[7] = _mm_add_epi16(res[7], k_const_rounding); + } + + res[0] = _mm_sub_epi16(res[0], sign0); + res[1] = _mm_sub_epi16(res[1], sign1); + res[2] = _mm_sub_epi16(res[2], sign2); + res[3] = _mm_sub_epi16(res[3], sign3); + res[4] = _mm_sub_epi16(res[4], sign4); + res[5] = _mm_sub_epi16(res[5], sign5); + res[6] = _mm_sub_epi16(res[6], sign6); + res[7] = _mm_sub_epi16(res[7], sign7); + + res[0] = _mm_srai_epi16(res[0], bit); + res[1] = _mm_srai_epi16(res[1], bit); + res[2] = _mm_srai_epi16(res[2], bit); + res[3] = _mm_srai_epi16(res[3], bit); + res[4] = _mm_srai_epi16(res[4], bit); + res[5] = _mm_srai_epi16(res[5], bit); + res[6] = _mm_srai_epi16(res[6], bit); + res[7] = _mm_srai_epi16(res[7], bit); +} + +// write 8x8 array +static INLINE void write_buffer_8x8_avx2(int16_t *output, __m128i *res, int stride) { + _mm_store_si128((__m128i *)(output + 0 * stride), res[0]); + _mm_store_si128((__m128i *)(output + 1 * stride), res[1]); + _mm_store_si128((__m128i *)(output + 2 * stride), res[2]); + _mm_store_si128((__m128i *)(output + 3 * stride), res[3]); + _mm_store_si128((__m128i *)(output + 4 * stride), res[4]); + _mm_store_si128((__m128i *)(output + 5 * stride), res[5]); + _mm_store_si128((__m128i *)(output + 6 * stride), res[6]); + _mm_store_si128((__m128i *)(output + 7 * stride), res[7]); +} + +// perform in-place transpose +static INLINE void array_transpose_8x8_avx2(__m128i *in, 
__m128i *res) { + const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]); + const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]); + const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]); + const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]); + const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]); + const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]); + const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]); + const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]); + // 00 10 01 11 02 12 03 13 + // 20 30 21 31 22 32 23 33 + // 04 14 05 15 06 16 07 17 + // 24 34 25 35 26 36 27 37 + // 40 50 41 51 42 52 43 53 + // 60 70 61 71 62 72 63 73 + // 44 54 45 55 46 56 47 57 + // 64 74 65 75 66 76 67 77 + const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); + const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5); + const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); + const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5); + const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3); + const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); + const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3); + const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); + // 00 10 20 30 01 11 21 31 + // 40 50 60 70 41 51 61 71 + // 02 12 22 32 03 13 23 33 + // 42 52 62 72 43 53 63 73 + // 04 14 24 34 05 15 25 35 + // 44 54 64 74 45 55 65 75 + // 06 16 26 36 07 17 27 37 + // 46 56 66 76 47 57 67 77 + res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1); + res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1); + res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3); + res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3); + res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5); + res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5); + res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7); + res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7); + // 00 10 20 30 40 50 60 70 + // 01 11 21 31 41 51 61 71 + // 02 12 22 32 42 52 62 72 + // 03 13 23 33 43 53 63 73 + // 04 14 24 34 44 54 64 74 + // 05 15 25 35 45 55 65 75 + // 06 16 26 36 46 56 66 76 + // 07 17 27 37 47 57 67 77 +} + +void fdct8_avx2(__m128i *in) { + // constants + const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); + const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); + const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); + const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); + const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); + const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + __m128i u0, u1, u2, u3, u4, u5, u6, u7; + __m128i v0, v1, v2, v3, v4, v5, v6, v7; + __m128i s0, s1, s2, s3, s4, s5, s6, s7; + + // stage 1 + s0 = _mm_add_epi16(in[0], in[7]); + s1 = _mm_add_epi16(in[1], in[6]); + s2 = _mm_add_epi16(in[2], in[5]); + s3 = _mm_add_epi16(in[3], in[4]); + s4 = _mm_sub_epi16(in[3], in[4]); + s5 = _mm_sub_epi16(in[2], in[5]); + s6 = _mm_sub_epi16(in[1], in[6]); + s7 = _mm_sub_epi16(in[0], in[7]); + + u0 = _mm_add_epi16(s0, s3); + u1 = _mm_add_epi16(s1, s2); + u2 = _mm_sub_epi16(s1, s2); + u3 = _mm_sub_epi16(s0, s3); + // interleave and perform butterfly multiplication/addition + v0 = _mm_unpacklo_epi16(u0, u1); + v1 = _mm_unpackhi_epi16(u0, u1); + v2 = _mm_unpacklo_epi16(u2, u3); + v3 = _mm_unpackhi_epi16(u2, u3); + + u0 = _mm_madd_epi16(v0, k__cospi_p16_p16); + u1 = 
_mm_madd_epi16(v1, k__cospi_p16_p16); + u2 = _mm_madd_epi16(v0, k__cospi_p16_m16); + u3 = _mm_madd_epi16(v1, k__cospi_p16_m16); + u4 = _mm_madd_epi16(v2, k__cospi_p24_p08); + u5 = _mm_madd_epi16(v3, k__cospi_p24_p08); + u6 = _mm_madd_epi16(v2, k__cospi_m08_p24); + u7 = _mm_madd_epi16(v3, k__cospi_m08_p24); + + // shift and rounding + v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); + v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); + v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); + v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); + + u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + + in[0] = _mm_packs_epi32(u0, u1); + in[2] = _mm_packs_epi32(u4, u5); + in[4] = _mm_packs_epi32(u2, u3); + in[6] = _mm_packs_epi32(u6, u7); + + // stage 2 + // interleave and perform butterfly multiplication/addition + u0 = _mm_unpacklo_epi16(s6, s5); + u1 = _mm_unpackhi_epi16(s6, s5); + v0 = _mm_madd_epi16(u0, k__cospi_p16_m16); + v1 = _mm_madd_epi16(u1, k__cospi_p16_m16); + v2 = _mm_madd_epi16(u0, k__cospi_p16_p16); + v3 = _mm_madd_epi16(u1, k__cospi_p16_p16); + + // shift and rounding + u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); + u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); + u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); + u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); + + v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); + v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); + v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); + v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); + + u0 = _mm_packs_epi32(v0, v1); + u1 = _mm_packs_epi32(v2, v3); + + // stage 3 + s0 = _mm_add_epi16(s4, u0); + s1 = _mm_sub_epi16(s4, u0); + s2 = _mm_sub_epi16(s7, u1); + s3 = _mm_add_epi16(s7, u1); + + // stage 4 + u0 = _mm_unpacklo_epi16(s0, s3); + u1 = _mm_unpackhi_epi16(s0, s3); + u2 = _mm_unpacklo_epi16(s1, s2); + u3 = _mm_unpackhi_epi16(s1, s2); + + v0 = _mm_madd_epi16(u0, k__cospi_p28_p04); + v1 = _mm_madd_epi16(u1, k__cospi_p28_p04); + v2 = _mm_madd_epi16(u2, k__cospi_p12_p20); + v3 = _mm_madd_epi16(u3, k__cospi_p12_p20); + v4 = _mm_madd_epi16(u2, k__cospi_m20_p12); + v5 = _mm_madd_epi16(u3, k__cospi_m20_p12); + v6 = _mm_madd_epi16(u0, k__cospi_m04_p28); + v7 = _mm_madd_epi16(u1, k__cospi_m04_p28); + + // shift and rounding + u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); + u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); + u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); + u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); + u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING); + u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); + u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING); + u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING); + + v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); + v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); + v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); + v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); + v4 = _mm_srai_epi32(u4, DCT_CONST_BITS); + v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); + v6 = _mm_srai_epi32(u6, DCT_CONST_BITS); + v7 = _mm_srai_epi32(u7, DCT_CONST_BITS); + + in[1] = _mm_packs_epi32(v0, v1); + in[3] = _mm_packs_epi32(v4, v5); + in[5] = _mm_packs_epi32(v2, v3); + in[7] = 
_mm_packs_epi32(v6, v7); + + // transpose + array_transpose_8x8_avx2(in, in); +} + +void fadst8_avx2(__m128i *in) { + // Constants + const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64); + const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64); + const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64); + const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64); + const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64); + const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64); + const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64); + const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64); + const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); + const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); + const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__const_0 = _mm_set1_epi16(0); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + + __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15; + __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15; + __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; + __m128i s0, s1, s2, s3, s4, s5, s6, s7; + __m128i in0, in1, in2, in3, in4, in5, in6, in7; + + // properly aligned for butterfly input + in0 = in[7]; + in1 = in[0]; + in2 = in[5]; + in3 = in[2]; + in4 = in[3]; + in5 = in[4]; + in6 = in[1]; + in7 = in[6]; + + // column transformation + // stage 1 + // interleave and multiply/add into 32-bit integer + s0 = _mm_unpacklo_epi16(in0, in1); + s1 = _mm_unpackhi_epi16(in0, in1); + s2 = _mm_unpacklo_epi16(in2, in3); + s3 = _mm_unpackhi_epi16(in2, in3); + s4 = _mm_unpacklo_epi16(in4, in5); + s5 = _mm_unpackhi_epi16(in4, in5); + s6 = _mm_unpacklo_epi16(in6, in7); + s7 = _mm_unpackhi_epi16(in6, in7); + + u0 = _mm_madd_epi16(s0, k__cospi_p02_p30); + u1 = _mm_madd_epi16(s1, k__cospi_p02_p30); + u2 = _mm_madd_epi16(s0, k__cospi_p30_m02); + u3 = _mm_madd_epi16(s1, k__cospi_p30_m02); + u4 = _mm_madd_epi16(s2, k__cospi_p10_p22); + u5 = _mm_madd_epi16(s3, k__cospi_p10_p22); + u6 = _mm_madd_epi16(s2, k__cospi_p22_m10); + u7 = _mm_madd_epi16(s3, k__cospi_p22_m10); + u8 = _mm_madd_epi16(s4, k__cospi_p18_p14); + u9 = _mm_madd_epi16(s5, k__cospi_p18_p14); + u10 = _mm_madd_epi16(s4, k__cospi_p14_m18); + u11 = _mm_madd_epi16(s5, k__cospi_p14_m18); + u12 = _mm_madd_epi16(s6, k__cospi_p26_p06); + u13 = _mm_madd_epi16(s7, k__cospi_p26_p06); + u14 = _mm_madd_epi16(s6, k__cospi_p06_m26); + u15 = _mm_madd_epi16(s7, k__cospi_p06_m26); + + // addition + w0 = _mm_add_epi32(u0, u8); + w1 = _mm_add_epi32(u1, u9); + w2 = _mm_add_epi32(u2, u10); + w3 = _mm_add_epi32(u3, u11); + w4 = _mm_add_epi32(u4, u12); + w5 = _mm_add_epi32(u5, u13); + w6 = _mm_add_epi32(u6, u14); + w7 = _mm_add_epi32(u7, u15); + w8 = _mm_sub_epi32(u0, u8); + w9 = _mm_sub_epi32(u1, u9); + w10 = _mm_sub_epi32(u2, u10); + w11 = _mm_sub_epi32(u3, u11); + w12 = _mm_sub_epi32(u4, u12); + w13 = _mm_sub_epi32(u5, u13); + w14 = _mm_sub_epi32(u6, u14); + w15 = _mm_sub_epi32(u7, u15); + + // shift and rounding + v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); + v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); + v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING); + v3 = 
_mm_add_epi32(w3, k__DCT_CONST_ROUNDING); + v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); + v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); + v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); + v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); + v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING); + v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING); + v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING); + v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING); + v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING); + v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING); + v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING); + v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING); + + u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + u8 = _mm_srai_epi32(v8, DCT_CONST_BITS); + u9 = _mm_srai_epi32(v9, DCT_CONST_BITS); + u10 = _mm_srai_epi32(v10, DCT_CONST_BITS); + u11 = _mm_srai_epi32(v11, DCT_CONST_BITS); + u12 = _mm_srai_epi32(v12, DCT_CONST_BITS); + u13 = _mm_srai_epi32(v13, DCT_CONST_BITS); + u14 = _mm_srai_epi32(v14, DCT_CONST_BITS); + u15 = _mm_srai_epi32(v15, DCT_CONST_BITS); + + // back to 16-bit and pack 8 integers into __m128i + in[0] = _mm_packs_epi32(u0, u1); + in[1] = _mm_packs_epi32(u2, u3); + in[2] = _mm_packs_epi32(u4, u5); + in[3] = _mm_packs_epi32(u6, u7); + in[4] = _mm_packs_epi32(u8, u9); + in[5] = _mm_packs_epi32(u10, u11); + in[6] = _mm_packs_epi32(u12, u13); + in[7] = _mm_packs_epi32(u14, u15); + + // stage 2 + s0 = _mm_add_epi16(in[0], in[2]); + s1 = _mm_add_epi16(in[1], in[3]); + s2 = _mm_sub_epi16(in[0], in[2]); + s3 = _mm_sub_epi16(in[1], in[3]); + u0 = _mm_unpacklo_epi16(in[4], in[5]); + u1 = _mm_unpackhi_epi16(in[4], in[5]); + u2 = _mm_unpacklo_epi16(in[6], in[7]); + u3 = _mm_unpackhi_epi16(in[6], in[7]); + + v0 = _mm_madd_epi16(u0, k__cospi_p08_p24); + v1 = _mm_madd_epi16(u1, k__cospi_p08_p24); + v2 = _mm_madd_epi16(u0, k__cospi_p24_m08); + v3 = _mm_madd_epi16(u1, k__cospi_p24_m08); + v4 = _mm_madd_epi16(u2, k__cospi_m24_p08); + v5 = _mm_madd_epi16(u3, k__cospi_m24_p08); + v6 = _mm_madd_epi16(u2, k__cospi_p08_p24); + v7 = _mm_madd_epi16(u3, k__cospi_p08_p24); + + w0 = _mm_add_epi32(v0, v4); + w1 = _mm_add_epi32(v1, v5); + w2 = _mm_add_epi32(v2, v6); + w3 = _mm_add_epi32(v3, v7); + w4 = _mm_sub_epi32(v0, v4); + w5 = _mm_sub_epi32(v1, v5); + w6 = _mm_sub_epi32(v2, v6); + w7 = _mm_sub_epi32(v3, v7); + + v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); + v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); + v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING); + v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING); + v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); + v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); + v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); + v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); + + u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + + // back to 16-bit integers + s4 = _mm_packs_epi32(u0, u1); + s5 = _mm_packs_epi32(u2, u3); + s6 = _mm_packs_epi32(u4, u5); + s7 = _mm_packs_epi32(u6, u7); + + // stage 3 + u0 =
_mm_unpacklo_epi16(s2, s3); + u1 = _mm_unpackhi_epi16(s2, s3); + u2 = _mm_unpacklo_epi16(s6, s7); + u3 = _mm_unpackhi_epi16(s6, s7); + + v0 = _mm_madd_epi16(u0, k__cospi_p16_p16); + v1 = _mm_madd_epi16(u1, k__cospi_p16_p16); + v2 = _mm_madd_epi16(u0, k__cospi_p16_m16); + v3 = _mm_madd_epi16(u1, k__cospi_p16_m16); + v4 = _mm_madd_epi16(u2, k__cospi_p16_p16); + v5 = _mm_madd_epi16(u3, k__cospi_p16_p16); + v6 = _mm_madd_epi16(u2, k__cospi_p16_m16); + v7 = _mm_madd_epi16(u3, k__cospi_p16_m16); + + u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); + u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); + u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); + u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); + u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING); + u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); + u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING); + u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING); + + v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); + v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); + v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); + v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); + v4 = _mm_srai_epi32(u4, DCT_CONST_BITS); + v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); + v6 = _mm_srai_epi32(u6, DCT_CONST_BITS); + v7 = _mm_srai_epi32(u7, DCT_CONST_BITS); + + s2 = _mm_packs_epi32(v0, v1); + s3 = _mm_packs_epi32(v2, v3); + s6 = _mm_packs_epi32(v4, v5); + s7 = _mm_packs_epi32(v6, v7); + + // FIXME(jingning): do subtract using bit inversion? + in[0] = s0; + in[1] = _mm_sub_epi16(k__const_0, s4); + in[2] = s6; + in[3] = _mm_sub_epi16(k__const_0, s2); + in[4] = s3; + in[5] = _mm_sub_epi16(k__const_0, s7); + in[6] = s5; + in[7] = _mm_sub_epi16(k__const_0, s1); + + // transpose + array_transpose_8x8_avx2(in, in); +} + +void vp9_fht8x8_avx2(const int16_t *input, int16_t *output, + int stride, int tx_type) { + __m128i in[8]; + + switch (tx_type) { + case DCT_DCT: + vp9_fdct8x8_avx2(input, output, stride); + break; + case ADST_DCT: + load_buffer_8x8_avx2(input, in, stride); + fadst8_avx2(in); + fdct8_avx2(in); + right_shift_8x8_avx2(in, 1); + write_buffer_8x8_avx2(output, in, 8); + break; + case DCT_ADST: + load_buffer_8x8_avx2(input, in, stride); + fdct8_avx2(in); + fadst8_avx2(in); + right_shift_8x8_avx2(in, 1); + write_buffer_8x8_avx2(output, in, 8); + break; + case ADST_ADST: + load_buffer_8x8_avx2(input, in, stride); + fadst8_avx2(in); + fadst8_avx2(in); + right_shift_8x8_avx2(in, 1); + write_buffer_8x8_avx2(output, in, 8); + break; + default: + assert(0); + break; + } +} + +void vp9_fdct16x16_avx2(const int16_t *input, int16_t *output, int stride) { + // The 2D transform is done with two passes which are actually pretty + // similar. In the first one, we transform the columns and transpose + // the results. In the second one, we transform the rows. To achieve that, + // as the first pass results are transposed, we transpose the columns (that + // is the transposed rows) and transpose the results (so that it goes back + // in normal/row positions). + int pass; + // We need an intermediate buffer between passes. + DECLARE_ALIGNED_ARRAY(16, int16_t, intermediate, 256); + const int16_t *in = input; + int16_t *out = intermediate; + // Constants + // When we use them, in one case, they are all the same. In all others + // it's a pair of them that we need to repeat four times. This is done + // by constructing the 32 bit constant corresponding to that pair. 
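+ // (Same pair-replication trick as in the 4x4 and 8x8 transforms above.)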
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); + const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64); + const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); + const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); + const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); + const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); + const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); + const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64); + const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64); + const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); + const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); + const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64); + const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64); + const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); + const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + const __m128i kOne = _mm_set1_epi16(1); + // Do the two transform/transpose passes + for (pass = 0; pass < 2; ++pass) { + // We process eight columns (transposed rows in second pass) at a time. + int column_start; + for (column_start = 0; column_start < 16; column_start += 8) { + __m128i in00, in01, in02, in03, in04, in05, in06, in07; + __m128i in08, in09, in10, in11, in12, in13, in14, in15; + __m128i input0, input1, input2, input3, input4, input5, input6, input7; + __m128i step1_0, step1_1, step1_2, step1_3; + __m128i step1_4, step1_5, step1_6, step1_7; + __m128i step2_1, step2_2, step2_3, step2_4, step2_5, step2_6; + __m128i step3_0, step3_1, step3_2, step3_3; + __m128i step3_4, step3_5, step3_6, step3_7; + __m128i res00, res01, res02, res03, res04, res05, res06, res07; + __m128i res08, res09, res10, res11, res12, res13, res14, res15; + // Load and pre-condition input. 
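+ // Pass 0 reads source rows at 'stride' and scales by 4 (x << 2) for + // intermediate precision; pass 1 reads the 16-wide intermediate buffer + // and applies the matching (x + 1) >> 2 rounding instead.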
+ if (0 == pass) { + in00 = _mm_load_si128((const __m128i *)(in + 0 * stride)); + in01 = _mm_load_si128((const __m128i *)(in + 1 * stride)); + in02 = _mm_load_si128((const __m128i *)(in + 2 * stride)); + in03 = _mm_load_si128((const __m128i *)(in + 3 * stride)); + in04 = _mm_load_si128((const __m128i *)(in + 4 * stride)); + in05 = _mm_load_si128((const __m128i *)(in + 5 * stride)); + in06 = _mm_load_si128((const __m128i *)(in + 6 * stride)); + in07 = _mm_load_si128((const __m128i *)(in + 7 * stride)); + in08 = _mm_load_si128((const __m128i *)(in + 8 * stride)); + in09 = _mm_load_si128((const __m128i *)(in + 9 * stride)); + in10 = _mm_load_si128((const __m128i *)(in + 10 * stride)); + in11 = _mm_load_si128((const __m128i *)(in + 11 * stride)); + in12 = _mm_load_si128((const __m128i *)(in + 12 * stride)); + in13 = _mm_load_si128((const __m128i *)(in + 13 * stride)); + in14 = _mm_load_si128((const __m128i *)(in + 14 * stride)); + in15 = _mm_load_si128((const __m128i *)(in + 15 * stride)); + // x = x << 2 + in00 = _mm_slli_epi16(in00, 2); + in01 = _mm_slli_epi16(in01, 2); + in02 = _mm_slli_epi16(in02, 2); + in03 = _mm_slli_epi16(in03, 2); + in04 = _mm_slli_epi16(in04, 2); + in05 = _mm_slli_epi16(in05, 2); + in06 = _mm_slli_epi16(in06, 2); + in07 = _mm_slli_epi16(in07, 2); + in08 = _mm_slli_epi16(in08, 2); + in09 = _mm_slli_epi16(in09, 2); + in10 = _mm_slli_epi16(in10, 2); + in11 = _mm_slli_epi16(in11, 2); + in12 = _mm_slli_epi16(in12, 2); + in13 = _mm_slli_epi16(in13, 2); + in14 = _mm_slli_epi16(in14, 2); + in15 = _mm_slli_epi16(in15, 2); + } else { + in00 = _mm_load_si128((const __m128i *)(in + 0 * 16)); + in01 = _mm_load_si128((const __m128i *)(in + 1 * 16)); + in02 = _mm_load_si128((const __m128i *)(in + 2 * 16)); + in03 = _mm_load_si128((const __m128i *)(in + 3 * 16)); + in04 = _mm_load_si128((const __m128i *)(in + 4 * 16)); + in05 = _mm_load_si128((const __m128i *)(in + 5 * 16)); + in06 = _mm_load_si128((const __m128i *)(in + 6 * 16)); + in07 = _mm_load_si128((const __m128i *)(in + 7 * 16)); + in08 = _mm_load_si128((const __m128i *)(in + 8 * 16)); + in09 = _mm_load_si128((const __m128i *)(in + 9 * 16)); + in10 = _mm_load_si128((const __m128i *)(in + 10 * 16)); + in11 = _mm_load_si128((const __m128i *)(in + 11 * 16)); + in12 = _mm_load_si128((const __m128i *)(in + 12 * 16)); + in13 = _mm_load_si128((const __m128i *)(in + 13 * 16)); + in14 = _mm_load_si128((const __m128i *)(in + 14 * 16)); + in15 = _mm_load_si128((const __m128i *)(in + 15 * 16)); + // x = (x + 1) >> 2 + in00 = _mm_add_epi16(in00, kOne); + in01 = _mm_add_epi16(in01, kOne); + in02 = _mm_add_epi16(in02, kOne); + in03 = _mm_add_epi16(in03, kOne); + in04 = _mm_add_epi16(in04, kOne); + in05 = _mm_add_epi16(in05, kOne); + in06 = _mm_add_epi16(in06, kOne); + in07 = _mm_add_epi16(in07, kOne); + in08 = _mm_add_epi16(in08, kOne); + in09 = _mm_add_epi16(in09, kOne); + in10 = _mm_add_epi16(in10, kOne); + in11 = _mm_add_epi16(in11, kOne); + in12 = _mm_add_epi16(in12, kOne); + in13 = _mm_add_epi16(in13, kOne); + in14 = _mm_add_epi16(in14, kOne); + in15 = _mm_add_epi16(in15, kOne); + in00 = _mm_srai_epi16(in00, 2); + in01 = _mm_srai_epi16(in01, 2); + in02 = _mm_srai_epi16(in02, 2); + in03 = _mm_srai_epi16(in03, 2); + in04 = _mm_srai_epi16(in04, 2); + in05 = _mm_srai_epi16(in05, 2); + in06 = _mm_srai_epi16(in06, 2); + in07 = _mm_srai_epi16(in07, 2); + in08 = _mm_srai_epi16(in08, 2); + in09 = _mm_srai_epi16(in09, 2); + in10 = _mm_srai_epi16(in10, 2); + in11 = _mm_srai_epi16(in11, 2); + in12 = _mm_srai_epi16(in12, 2); + in13 = 
_mm_srai_epi16(in13, 2); + in14 = _mm_srai_epi16(in14, 2); + in15 = _mm_srai_epi16(in15, 2); + } + in += 8; + // Calculate input for the first 8 results. + { + input0 = _mm_add_epi16(in00, in15); + input1 = _mm_add_epi16(in01, in14); + input2 = _mm_add_epi16(in02, in13); + input3 = _mm_add_epi16(in03, in12); + input4 = _mm_add_epi16(in04, in11); + input5 = _mm_add_epi16(in05, in10); + input6 = _mm_add_epi16(in06, in09); + input7 = _mm_add_epi16(in07, in08); + } + // Calculate input for the next 8 results. + { + step1_0 = _mm_sub_epi16(in07, in08); + step1_1 = _mm_sub_epi16(in06, in09); + step1_2 = _mm_sub_epi16(in05, in10); + step1_3 = _mm_sub_epi16(in04, in11); + step1_4 = _mm_sub_epi16(in03, in12); + step1_5 = _mm_sub_epi16(in02, in13); + step1_6 = _mm_sub_epi16(in01, in14); + step1_7 = _mm_sub_epi16(in00, in15); + } + // Work on the first eight values; fdct8(input, even_results); + { + // Add/subtract + const __m128i q0 = _mm_add_epi16(input0, input7); + const __m128i q1 = _mm_add_epi16(input1, input6); + const __m128i q2 = _mm_add_epi16(input2, input5); + const __m128i q3 = _mm_add_epi16(input3, input4); + const __m128i q4 = _mm_sub_epi16(input3, input4); + const __m128i q5 = _mm_sub_epi16(input2, input5); + const __m128i q6 = _mm_sub_epi16(input1, input6); + const __m128i q7 = _mm_sub_epi16(input0, input7); + // Work on first four results + { + // Add/subtract + const __m128i r0 = _mm_add_epi16(q0, q3); + const __m128i r1 = _mm_add_epi16(q1, q2); + const __m128i r2 = _mm_sub_epi16(q1, q2); + const __m128i r3 = _mm_sub_epi16(q0, q3); + // Interleave to do the multiply by constants which gets us + // into 32 bits. + const __m128i t0 = _mm_unpacklo_epi16(r0, r1); + const __m128i t1 = _mm_unpackhi_epi16(r0, r1); + const __m128i t2 = _mm_unpacklo_epi16(r2, r3); + const __m128i t3 = _mm_unpackhi_epi16(r2, r3); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16); + const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); + const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16); + const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08); + const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08); + const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24); + const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); + const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); + const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); + const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + // Combine + res00 = _mm_packs_epi32(w0, w1); + res08 = _mm_packs_epi32(w2, w3); + res04 = _mm_packs_epi32(w4, w5); + res12 = _mm_packs_epi32(w6, w7); + } + // Work on next four results + { + // Interleave to do the multiply by constants which gets us 
+ // into 32 bits. + const __m128i d0 = _mm_unpacklo_epi16(q6, q5); + const __m128i d1 = _mm_unpackhi_epi16(q6, q5); + const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16); + const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16); + const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16); + const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16); + // dct_const_round_shift + const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING); + const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING); + const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING); + const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING); + const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS); + const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS); + const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS); + const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS); + // Combine + const __m128i r0 = _mm_packs_epi32(s0, s1); + const __m128i r1 = _mm_packs_epi32(s2, s3); + // Add/subtract + const __m128i x0 = _mm_add_epi16(q4, r0); + const __m128i x1 = _mm_sub_epi16(q4, r0); + const __m128i x2 = _mm_sub_epi16(q7, r1); + const __m128i x3 = _mm_add_epi16(q7, r1); + // Interleave to do the multiply by constants which gets us + // into 32 bits. + const __m128i t0 = _mm_unpacklo_epi16(x0, x3); + const __m128i t1 = _mm_unpackhi_epi16(x0, x3); + const __m128i t2 = _mm_unpacklo_epi16(x1, x2); + const __m128i t3 = _mm_unpackhi_epi16(x1, x2); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04); + const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28); + const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28); + const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20); + const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20); + const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12); + const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); + const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); + const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); + const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + // Combine + res02 = _mm_packs_epi32(w0, w1); + res14 = _mm_packs_epi32(w2, w3); + res10 = _mm_packs_epi32(w4, w5); + res06 = _mm_packs_epi32(w6, w7); + } + } + // Work on the next eight values; step1 -> odd_results + { + // step 2 + { + const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2); + const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2); + const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3); + const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_m16); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_m16); + const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_m16); + const 
__m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_m16); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + // Combine + step2_2 = _mm_packs_epi32(w0, w1); + step2_3 = _mm_packs_epi32(w2, w3); + } + { + const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2); + const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2); + const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3); + const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16); + const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_p16); + const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_p16); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + // Combine + step2_5 = _mm_packs_epi32(w0, w1); + step2_4 = _mm_packs_epi32(w2, w3); + } + // step 3 + { + step3_0 = _mm_add_epi16(step1_0, step2_3); + step3_1 = _mm_add_epi16(step1_1, step2_2); + step3_2 = _mm_sub_epi16(step1_1, step2_2); + step3_3 = _mm_sub_epi16(step1_0, step2_3); + step3_4 = _mm_sub_epi16(step1_7, step2_4); + step3_5 = _mm_sub_epi16(step1_6, step2_5); + step3_6 = _mm_add_epi16(step1_6, step2_5); + step3_7 = _mm_add_epi16(step1_7, step2_4); + } + // step 4 + { + const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6); + const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6); + const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5); + const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m08_p24); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m08_p24); + const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m24_m08); + const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m24_m08); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + // Combine + step2_1 = _mm_packs_epi32(w0, w1); + step2_2 = _mm_packs_epi32(w2, w3); + } + { + const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6); + const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6); + const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5); + const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p24_p08); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p24_p08); + const 
__m128i u2 = _mm_madd_epi16(t2, k__cospi_m08_p24); + const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m08_p24); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + // Combine + step2_6 = _mm_packs_epi32(w0, w1); + step2_5 = _mm_packs_epi32(w2, w3); + } + // step 5 + { + step1_0 = _mm_add_epi16(step3_0, step2_1); + step1_1 = _mm_sub_epi16(step3_0, step2_1); + step1_2 = _mm_sub_epi16(step3_3, step2_2); + step1_3 = _mm_add_epi16(step3_3, step2_2); + step1_4 = _mm_add_epi16(step3_4, step2_5); + step1_5 = _mm_sub_epi16(step3_4, step2_5); + step1_6 = _mm_sub_epi16(step3_7, step2_6); + step1_7 = _mm_add_epi16(step3_7, step2_6); + } + // step 6 + { + const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7); + const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7); + const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6); + const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p30_p02); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p30_p02); + const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p14_p18); + const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p14_p18); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + // Combine + res01 = _mm_packs_epi32(w0, w1); + res09 = _mm_packs_epi32(w2, w3); + } + { + const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5); + const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5); + const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4); + const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p22_p10); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p22_p10); + const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p06_p26); + const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p06_p26); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + // Combine + res05 = _mm_packs_epi32(w0, w1); + res13 = _mm_packs_epi32(w2, w3); + } + { + const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5); + const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5); + const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4); + const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m10_p22); + const __m128i 
u1 = _mm_madd_epi16(t1, k__cospi_m10_p22); + const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m26_p06); + const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m26_p06); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + // Combine + res11 = _mm_packs_epi32(w0, w1); + res03 = _mm_packs_epi32(w2, w3); + } + { + const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7); + const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7); + const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6); + const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6); + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m02_p30); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m02_p30); + const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m18_p14); + const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m18_p14); + // dct_const_round_shift + const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + // Combine + res15 = _mm_packs_epi32(w0, w1); + res07 = _mm_packs_epi32(w2, w3); + } + } + // Transpose the results, do it as two 8x8 transposes. 
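Each of the two blocks that follow is the standard three-level unpack ladder for transposing an 8x8 tile of 16-bit lanes. Pulled out as a stand-alone sketch (hypothetical helper name; assumes <emmintrin.h> and operates in place on eight row registers):

// One 8x8 transpose of 16-bit lanes; the blocks below inline this same
// ladder, once for res00..res07 and once for res08..res15.
static void transpose_8x8_16bit(__m128i r[8]) {
  // Level 1: interleave 16-bit lanes of vertically adjacent rows.
  const __m128i a0 = _mm_unpacklo_epi16(r[0], r[1]);
  const __m128i a1 = _mm_unpacklo_epi16(r[2], r[3]);
  const __m128i a2 = _mm_unpackhi_epi16(r[0], r[1]);
  const __m128i a3 = _mm_unpackhi_epi16(r[2], r[3]);
  const __m128i a4 = _mm_unpacklo_epi16(r[4], r[5]);
  const __m128i a5 = _mm_unpacklo_epi16(r[6], r[7]);
  const __m128i a6 = _mm_unpackhi_epi16(r[4], r[5]);
  const __m128i a7 = _mm_unpackhi_epi16(r[6], r[7]);
  // Level 2: interleave the resulting 32-bit pairs.
  const __m128i b0 = _mm_unpacklo_epi32(a0, a1);
  const __m128i b1 = _mm_unpackhi_epi32(a0, a1);
  const __m128i b2 = _mm_unpacklo_epi32(a2, a3);
  const __m128i b3 = _mm_unpackhi_epi32(a2, a3);
  const __m128i b4 = _mm_unpacklo_epi32(a4, a5);
  const __m128i b5 = _mm_unpackhi_epi32(a4, a5);
  const __m128i b6 = _mm_unpacklo_epi32(a6, a7);
  const __m128i b7 = _mm_unpackhi_epi32(a6, a7);
  // Level 3: interleave 64-bit halves to finish the transpose.
  r[0] = _mm_unpacklo_epi64(b0, b4);
  r[1] = _mm_unpackhi_epi64(b0, b4);
  r[2] = _mm_unpacklo_epi64(b1, b5);
  r[3] = _mm_unpackhi_epi64(b1, b5);
  r[4] = _mm_unpacklo_epi64(b2, b6);
  r[5] = _mm_unpackhi_epi64(b2, b6);
  r[6] = _mm_unpacklo_epi64(b3, b7);
  r[7] = _mm_unpackhi_epi64(b3, b7);
}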
+ { + // 00 01 02 03 04 05 06 07 + // 10 11 12 13 14 15 16 17 + // 20 21 22 23 24 25 26 27 + // 30 31 32 33 34 35 36 37 + // 40 41 42 43 44 45 46 47 + // 50 51 52 53 54 55 56 57 + // 60 61 62 63 64 65 66 67 + // 70 71 72 73 74 75 76 77 + const __m128i tr0_0 = _mm_unpacklo_epi16(res00, res01); + const __m128i tr0_1 = _mm_unpacklo_epi16(res02, res03); + const __m128i tr0_2 = _mm_unpackhi_epi16(res00, res01); + const __m128i tr0_3 = _mm_unpackhi_epi16(res02, res03); + const __m128i tr0_4 = _mm_unpacklo_epi16(res04, res05); + const __m128i tr0_5 = _mm_unpacklo_epi16(res06, res07); + const __m128i tr0_6 = _mm_unpackhi_epi16(res04, res05); + const __m128i tr0_7 = _mm_unpackhi_epi16(res06, res07); + // 00 10 01 11 02 12 03 13 + // 20 30 21 31 22 32 23 33 + // 04 14 05 15 06 16 07 17 + // 24 34 25 35 26 36 27 37 + // 40 50 41 51 42 52 43 53 + // 60 70 61 71 62 72 63 73 + // 44 54 45 55 46 56 47 57 + // 64 74 65 75 66 76 67 77 + const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); + const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); + const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); + const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); + const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); + const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); + const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); + const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); + // 00 10 20 30 01 11 21 31 + // 40 50 60 70 41 51 61 71 + // 02 12 22 32 03 13 23 33 + // 42 52 62 72 43 53 63 73 + // 04 14 24 34 05 15 25 35 + // 44 54 64 74 45 55 65 75 + // 06 16 26 36 07 17 27 37 + // 46 56 66 76 47 57 67 77 + const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4); + const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4); + const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6); + const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6); + const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5); + const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5); + const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7); + const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7); + // 00 10 20 30 40 50 60 70 + // 01 11 21 31 41 51 61 71 + // 02 12 22 32 42 52 62 72 + // 03 13 23 33 43 53 63 73 + // 04 14 24 34 44 54 64 74 + // 05 15 25 35 45 55 65 75 + // 06 16 26 36 46 56 66 76 + // 07 17 27 37 47 57 67 77 + _mm_storeu_si128((__m128i *)(out + 0 * 16), tr2_0); + _mm_storeu_si128((__m128i *)(out + 1 * 16), tr2_1); + _mm_storeu_si128((__m128i *)(out + 2 * 16), tr2_2); + _mm_storeu_si128((__m128i *)(out + 3 * 16), tr2_3); + _mm_storeu_si128((__m128i *)(out + 4 * 16), tr2_4); + _mm_storeu_si128((__m128i *)(out + 5 * 16), tr2_5); + _mm_storeu_si128((__m128i *)(out + 6 * 16), tr2_6); + _mm_storeu_si128((__m128i *)(out + 7 * 16), tr2_7); + } + { + // 00 01 02 03 04 05 06 07 + // 10 11 12 13 14 15 16 17 + // 20 21 22 23 24 25 26 27 + // 30 31 32 33 34 35 36 37 + // 40 41 42 43 44 45 46 47 + // 50 51 52 53 54 55 56 57 + // 60 61 62 63 64 65 66 67 + // 70 71 72 73 74 75 76 77 + const __m128i tr0_0 = _mm_unpacklo_epi16(res08, res09); + const __m128i tr0_1 = _mm_unpacklo_epi16(res10, res11); + const __m128i tr0_2 = _mm_unpackhi_epi16(res08, res09); + const __m128i tr0_3 = _mm_unpackhi_epi16(res10, res11); + const __m128i tr0_4 = _mm_unpacklo_epi16(res12, res13); + const __m128i tr0_5 = _mm_unpacklo_epi16(res14, res15); + const __m128i tr0_6 = _mm_unpackhi_epi16(res12, res13); + const __m128i tr0_7 = _mm_unpackhi_epi16(res14, res15); + // 00 10 01 11 02 12 03 13 + // 20 30 21 31 22 32 23 33 + // 04 14 05 15 06 16 07 17 + // 24 34 25 35 26 36 27 37
+ // 40 50 41 51 42 52 43 53 + // 60 70 61 71 62 72 63 73 + // 44 54 45 55 46 56 47 57 + // 64 74 65 75 66 76 67 77 + const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); + const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); + const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); + const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); + const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); + const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); + const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); + const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); + // 00 10 20 30 01 11 21 31 + // 40 50 60 70 41 51 61 71 + // 02 12 22 32 03 13 23 33 + // 42 52 62 72 43 53 63 73 + // 04 14 24 34 05 15 25 35 + // 44 54 64 74 45 55 65 75 + // 06 16 26 36 07 17 27 37 + // 46 56 66 76 47 57 67 77 + const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4); + const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4); + const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6); + const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6); + const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5); + const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5); + const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7); + const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7); + // 00 10 20 30 40 50 60 70 + // 01 11 21 31 41 51 61 71 + // 02 12 22 32 42 52 62 72 + // 03 13 23 33 43 53 63 73 + // 04 14 24 34 44 54 64 74 + // 05 15 25 35 45 55 65 75 + // 06 16 26 36 46 56 66 76 + // 07 17 27 37 47 57 67 77 + // Store results + _mm_store_si128((__m128i *)(out + 8 + 0 * 16), tr2_0); + _mm_store_si128((__m128i *)(out + 8 + 1 * 16), tr2_1); + _mm_store_si128((__m128i *)(out + 8 + 2 * 16), tr2_2); + _mm_store_si128((__m128i *)(out + 8 + 3 * 16), tr2_3); + _mm_store_si128((__m128i *)(out + 8 + 4 * 16), tr2_4); + _mm_store_si128((__m128i *)(out + 8 + 5 * 16), tr2_5); + _mm_store_si128((__m128i *)(out + 8 + 6 * 16), tr2_6); + _mm_store_si128((__m128i *)(out + 8 + 7 * 16), tr2_7); + } + out += 8*16; + } + // Setup in/out for next pass.
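A note on the arithmetic shared by both passes before the buffers are swapped: every butterfly above ends with the same add-round-shift pair, _mm_add_epi32 with k__DCT_CONST_ROUNDING followed by _mm_srai_epi32 by DCT_CONST_BITS. A scalar model of that pair, matching dct_const_round_shift in the C reference (the helper name here is hypothetical):

// Round a Q14 fixed-point product to the nearest integer.
// DCT_CONST_BITS is 14 and DCT_CONST_ROUNDING is 1 << (DCT_CONST_BITS - 1).
static int round_shift_q14(int x) {
  return (x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
}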
+ in = intermediate; + out = output; + } +} + +static INLINE void load_buffer_16x16_avx2(const int16_t* input, __m128i *in0, + __m128i *in1, int stride) { + // load first 8 columns + load_buffer_8x8_avx2(input, in0, stride); + load_buffer_8x8_avx2(input + 8 * stride, in0 + 8, stride); + + input += 8; + // load second 8 columns + load_buffer_8x8_avx2(input, in1, stride); + load_buffer_8x8_avx2(input + 8 * stride, in1 + 8, stride); +} + +static INLINE void write_buffer_16x16_avx2(int16_t *output, __m128i *in0, + __m128i *in1, int stride) { + // write first 8 columns + write_buffer_8x8_avx2(output, in0, stride); + write_buffer_8x8_avx2(output + 8 * stride, in0 + 8, stride); + // write second 8 columns + output += 8; + write_buffer_8x8_avx2(output, in1, stride); + write_buffer_8x8_avx2(output + 8 * stride, in1 + 8, stride); +} + +static INLINE void array_transpose_16x16_avx2(__m128i *res0, __m128i *res1) { + __m128i tbuf[8]; + array_transpose_8x8_avx2(res0, res0); + array_transpose_8x8_avx2(res1, tbuf); + array_transpose_8x8_avx2(res0 + 8, res1); + array_transpose_8x8_avx2(res1 + 8, res1 + 8); + + res0[8] = tbuf[0]; + res0[9] = tbuf[1]; + res0[10] = tbuf[2]; + res0[11] = tbuf[3]; + res0[12] = tbuf[4]; + res0[13] = tbuf[5]; + res0[14] = tbuf[6]; + res0[15] = tbuf[7]; +} + +static INLINE void right_shift_16x16_avx2(__m128i *res0, __m128i *res1) { + // perform rounding operations + right_shift_8x8_avx2(res0, 2); + right_shift_8x8_avx2(res0 + 8, 2); + right_shift_8x8_avx2(res1, 2); + right_shift_8x8_avx2(res1 + 8, 2); +} + +void fdct16_8col_avx2(__m128i *in) { + // perform 16x16 1-D DCT for 8 columns + __m128i i[8], s[8], p[8], t[8], u[16], v[16]; + const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64); + const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); + const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64); + const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); + const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); + const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); + const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); + const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); + const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64); + const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64); + const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); + const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); + const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64); + const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64); + const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); + const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + + // stage 1 + i[0] = _mm_add_epi16(in[0], in[15]); + i[1] = _mm_add_epi16(in[1], in[14]); + i[2] = _mm_add_epi16(in[2], in[13]); + i[3] = _mm_add_epi16(in[3], in[12]); + i[4] = _mm_add_epi16(in[4], in[11]); + i[5] = _mm_add_epi16(in[5], in[10]); + i[6] = _mm_add_epi16(in[6], in[9]); + i[7] = _mm_add_epi16(in[7], in[8]); + + s[0] = _mm_sub_epi16(in[7], in[8]); + s[1] = _mm_sub_epi16(in[6], in[9]); + s[2] = _mm_sub_epi16(in[5], 
in[10]); + s[3] = _mm_sub_epi16(in[4], in[11]); + s[4] = _mm_sub_epi16(in[3], in[12]); + s[5] = _mm_sub_epi16(in[2], in[13]); + s[6] = _mm_sub_epi16(in[1], in[14]); + s[7] = _mm_sub_epi16(in[0], in[15]); + + p[0] = _mm_add_epi16(i[0], i[7]); + p[1] = _mm_add_epi16(i[1], i[6]); + p[2] = _mm_add_epi16(i[2], i[5]); + p[3] = _mm_add_epi16(i[3], i[4]); + p[4] = _mm_sub_epi16(i[3], i[4]); + p[5] = _mm_sub_epi16(i[2], i[5]); + p[6] = _mm_sub_epi16(i[1], i[6]); + p[7] = _mm_sub_epi16(i[0], i[7]); + + u[0] = _mm_add_epi16(p[0], p[3]); + u[1] = _mm_add_epi16(p[1], p[2]); + u[2] = _mm_sub_epi16(p[1], p[2]); + u[3] = _mm_sub_epi16(p[0], p[3]); + + v[0] = _mm_unpacklo_epi16(u[0], u[1]); + v[1] = _mm_unpackhi_epi16(u[0], u[1]); + v[2] = _mm_unpacklo_epi16(u[2], u[3]); + v[3] = _mm_unpackhi_epi16(u[2], u[3]); + + u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16); + u[1] = _mm_madd_epi16(v[1], k__cospi_p16_p16); + u[2] = _mm_madd_epi16(v[0], k__cospi_p16_m16); + u[3] = _mm_madd_epi16(v[1], k__cospi_p16_m16); + u[4] = _mm_madd_epi16(v[2], k__cospi_p24_p08); + u[5] = _mm_madd_epi16(v[3], k__cospi_p24_p08); + u[6] = _mm_madd_epi16(v[2], k__cospi_m08_p24); + u[7] = _mm_madd_epi16(v[3], k__cospi_m08_p24); + + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); + + u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); + + in[0] = _mm_packs_epi32(u[0], u[1]); + in[4] = _mm_packs_epi32(u[4], u[5]); + in[8] = _mm_packs_epi32(u[2], u[3]); + in[12] = _mm_packs_epi32(u[6], u[7]); + + u[0] = _mm_unpacklo_epi16(p[5], p[6]); + u[1] = _mm_unpackhi_epi16(p[5], p[6]); + v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16); + v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16); + v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16); + v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16); + + u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); + u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); + u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); + u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); + + v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); + v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); + v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); + v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); + + u[0] = _mm_packs_epi32(v[0], v[1]); + u[1] = _mm_packs_epi32(v[2], v[3]); + + t[0] = _mm_add_epi16(p[4], u[0]); + t[1] = _mm_sub_epi16(p[4], u[0]); + t[2] = _mm_sub_epi16(p[7], u[1]); + t[3] = _mm_add_epi16(p[7], u[1]); + + u[0] = _mm_unpacklo_epi16(t[0], t[3]); + u[1] = _mm_unpackhi_epi16(t[0], t[3]); + u[2] = _mm_unpacklo_epi16(t[1], t[2]); + u[3] = _mm_unpackhi_epi16(t[1], t[2]); + + v[0] = _mm_madd_epi16(u[0], k__cospi_p28_p04); + v[1] = _mm_madd_epi16(u[1], k__cospi_p28_p04); + v[2] = _mm_madd_epi16(u[2], k__cospi_p12_p20); + v[3] = _mm_madd_epi16(u[3], k__cospi_p12_p20); + v[4] = _mm_madd_epi16(u[2], k__cospi_m20_p12); + v[5] = _mm_madd_epi16(u[3], k__cospi_m20_p12); + v[6] = 
_mm_madd_epi16(u[0], k__cospi_m04_p28); + v[7] = _mm_madd_epi16(u[1], k__cospi_m04_p28); + + u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); + u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); + u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); + u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); + u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); + u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); + u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); + u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); + + v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); + v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); + v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); + v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); + v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); + v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); + v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); + v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); + + in[2] = _mm_packs_epi32(v[0], v[1]); + in[6] = _mm_packs_epi32(v[4], v[5]); + in[10] = _mm_packs_epi32(v[2], v[3]); + in[14] = _mm_packs_epi32(v[6], v[7]); + + // stage 2 + u[0] = _mm_unpacklo_epi16(s[2], s[5]); + u[1] = _mm_unpackhi_epi16(s[2], s[5]); + u[2] = _mm_unpacklo_epi16(s[3], s[4]); + u[3] = _mm_unpackhi_epi16(s[3], s[4]); + + v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16); + v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16); + v[2] = _mm_madd_epi16(u[2], k__cospi_m16_p16); + v[3] = _mm_madd_epi16(u[3], k__cospi_m16_p16); + v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16); + v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16); + v[6] = _mm_madd_epi16(u[0], k__cospi_p16_p16); + v[7] = _mm_madd_epi16(u[1], k__cospi_p16_p16); + + u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); + u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); + u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); + u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); + u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); + u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); + u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); + u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); + + v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); + v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); + v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); + v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); + v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); + v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); + v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); + v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); + + t[2] = _mm_packs_epi32(v[0], v[1]); + t[3] = _mm_packs_epi32(v[2], v[3]); + t[4] = _mm_packs_epi32(v[4], v[5]); + t[5] = _mm_packs_epi32(v[6], v[7]); + + // stage 3 + p[0] = _mm_add_epi16(s[0], t[3]); + p[1] = _mm_add_epi16(s[1], t[2]); + p[2] = _mm_sub_epi16(s[1], t[2]); + p[3] = _mm_sub_epi16(s[0], t[3]); + p[4] = _mm_sub_epi16(s[7], t[4]); + p[5] = _mm_sub_epi16(s[6], t[5]); + p[6] = _mm_add_epi16(s[6], t[5]); + p[7] = _mm_add_epi16(s[7], t[4]); + + // stage 4 + u[0] = _mm_unpacklo_epi16(p[1], p[6]); + u[1] = _mm_unpackhi_epi16(p[1], p[6]); + u[2] = _mm_unpacklo_epi16(p[2], p[5]); + u[3] = _mm_unpackhi_epi16(p[2], p[5]); + + v[0] = _mm_madd_epi16(u[0], k__cospi_m08_p24); + v[1] = _mm_madd_epi16(u[1], k__cospi_m08_p24); + v[2] = _mm_madd_epi16(u[2], k__cospi_m24_m08); + v[3] = _mm_madd_epi16(u[3], k__cospi_m24_m08); + v[4] = _mm_madd_epi16(u[2], k__cospi_m08_p24); + v[5] = _mm_madd_epi16(u[3], k__cospi_m08_p24); + v[6] = _mm_madd_epi16(u[0], k__cospi_p24_p08); + v[7] = _mm_madd_epi16(u[1], k__cospi_p24_p08); + + u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); + u[1] = _mm_add_epi32(v[1], 
k__DCT_CONST_ROUNDING); + u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); + u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); + u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); + u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); + u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); + u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); + + v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); + v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); + v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); + v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); + v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); + v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); + v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); + v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); + + t[1] = _mm_packs_epi32(v[0], v[1]); + t[2] = _mm_packs_epi32(v[2], v[3]); + t[5] = _mm_packs_epi32(v[4], v[5]); + t[6] = _mm_packs_epi32(v[6], v[7]); + + // stage 5 + s[0] = _mm_add_epi16(p[0], t[1]); + s[1] = _mm_sub_epi16(p[0], t[1]); + s[2] = _mm_sub_epi16(p[3], t[2]); + s[3] = _mm_add_epi16(p[3], t[2]); + s[4] = _mm_add_epi16(p[4], t[5]); + s[5] = _mm_sub_epi16(p[4], t[5]); + s[6] = _mm_sub_epi16(p[7], t[6]); + s[7] = _mm_add_epi16(p[7], t[6]); + + // stage 6 + u[0] = _mm_unpacklo_epi16(s[0], s[7]); + u[1] = _mm_unpackhi_epi16(s[0], s[7]); + u[2] = _mm_unpacklo_epi16(s[1], s[6]); + u[3] = _mm_unpackhi_epi16(s[1], s[6]); + u[4] = _mm_unpacklo_epi16(s[2], s[5]); + u[5] = _mm_unpackhi_epi16(s[2], s[5]); + u[6] = _mm_unpacklo_epi16(s[3], s[4]); + u[7] = _mm_unpackhi_epi16(s[3], s[4]); + + v[0] = _mm_madd_epi16(u[0], k__cospi_p30_p02); + v[1] = _mm_madd_epi16(u[1], k__cospi_p30_p02); + v[2] = _mm_madd_epi16(u[2], k__cospi_p14_p18); + v[3] = _mm_madd_epi16(u[3], k__cospi_p14_p18); + v[4] = _mm_madd_epi16(u[4], k__cospi_p22_p10); + v[5] = _mm_madd_epi16(u[5], k__cospi_p22_p10); + v[6] = _mm_madd_epi16(u[6], k__cospi_p06_p26); + v[7] = _mm_madd_epi16(u[7], k__cospi_p06_p26); + v[8] = _mm_madd_epi16(u[6], k__cospi_m26_p06); + v[9] = _mm_madd_epi16(u[7], k__cospi_m26_p06); + v[10] = _mm_madd_epi16(u[4], k__cospi_m10_p22); + v[11] = _mm_madd_epi16(u[5], k__cospi_m10_p22); + v[12] = _mm_madd_epi16(u[2], k__cospi_m18_p14); + v[13] = _mm_madd_epi16(u[3], k__cospi_m18_p14); + v[14] = _mm_madd_epi16(u[0], k__cospi_m02_p30); + v[15] = _mm_madd_epi16(u[1], k__cospi_m02_p30); + + u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); + u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); + u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); + u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); + u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); + u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); + u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); + u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); + u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING); + u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING); + u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING); + u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING); + u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING); + u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING); + u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING); + u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING); + + v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); + v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); + v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); + v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); + v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); + v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); + v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); + v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); + 
v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); + v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); + v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); + v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); + v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); + v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); + v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); + v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); + + in[1] = _mm_packs_epi32(v[0], v[1]); + in[9] = _mm_packs_epi32(v[2], v[3]); + in[5] = _mm_packs_epi32(v[4], v[5]); + in[13] = _mm_packs_epi32(v[6], v[7]); + in[3] = _mm_packs_epi32(v[8], v[9]); + in[11] = _mm_packs_epi32(v[10], v[11]); + in[7] = _mm_packs_epi32(v[12], v[13]); + in[15] = _mm_packs_epi32(v[14], v[15]); +} + +void fadst16_8col_avx2(__m128i *in) { + // perform 16x16 1-D ADST for 8 columns + __m128i s[16], x[16], u[32], v[32]; + const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64); + const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64); + const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64); + const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64); + const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64); + const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64); + const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64); + const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64); + const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64); + const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64); + const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64); + const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64); + const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64); + const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64); + const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64); + const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64); + const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64); + const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64); + const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64); + const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64); + const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64); + const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64); + const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); + const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); + const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); + const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64); + const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + const __m128i kZero = _mm_set1_epi16(0); + + u[0] = _mm_unpacklo_epi16(in[15], in[0]); + u[1] = _mm_unpackhi_epi16(in[15], in[0]); + u[2] = _mm_unpacklo_epi16(in[13], in[2]); + u[3] = _mm_unpackhi_epi16(in[13], in[2]); + u[4] = _mm_unpacklo_epi16(in[11], in[4]); + u[5] = _mm_unpackhi_epi16(in[11], in[4]); + u[6] = _mm_unpacklo_epi16(in[9], in[6]); + u[7] = _mm_unpackhi_epi16(in[9], in[6]); + u[8] = _mm_unpacklo_epi16(in[7], in[8]); 
+ u[9] = _mm_unpackhi_epi16(in[7], in[8]); + u[10] = _mm_unpacklo_epi16(in[5], in[10]); + u[11] = _mm_unpackhi_epi16(in[5], in[10]); + u[12] = _mm_unpacklo_epi16(in[3], in[12]); + u[13] = _mm_unpackhi_epi16(in[3], in[12]); + u[14] = _mm_unpacklo_epi16(in[1], in[14]); + u[15] = _mm_unpackhi_epi16(in[1], in[14]); + + v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31); + v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31); + v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01); + v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01); + v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27); + v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27); + v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05); + v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05); + v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23); + v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23); + v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09); + v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09); + v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19); + v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19); + v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13); + v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13); + v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15); + v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15); + v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17); + v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17); + v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11); + v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11); + v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21); + v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21); + v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07); + v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07); + v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25); + v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25); + v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03); + v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03); + v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29); + v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29); + + u[0] = _mm_add_epi32(v[0], v[16]); + u[1] = _mm_add_epi32(v[1], v[17]); + u[2] = _mm_add_epi32(v[2], v[18]); + u[3] = _mm_add_epi32(v[3], v[19]); + u[4] = _mm_add_epi32(v[4], v[20]); + u[5] = _mm_add_epi32(v[5], v[21]); + u[6] = _mm_add_epi32(v[6], v[22]); + u[7] = _mm_add_epi32(v[7], v[23]); + u[8] = _mm_add_epi32(v[8], v[24]); + u[9] = _mm_add_epi32(v[9], v[25]); + u[10] = _mm_add_epi32(v[10], v[26]); + u[11] = _mm_add_epi32(v[11], v[27]); + u[12] = _mm_add_epi32(v[12], v[28]); + u[13] = _mm_add_epi32(v[13], v[29]); + u[14] = _mm_add_epi32(v[14], v[30]); + u[15] = _mm_add_epi32(v[15], v[31]); + u[16] = _mm_sub_epi32(v[0], v[16]); + u[17] = _mm_sub_epi32(v[1], v[17]); + u[18] = _mm_sub_epi32(v[2], v[18]); + u[19] = _mm_sub_epi32(v[3], v[19]); + u[20] = _mm_sub_epi32(v[4], v[20]); + u[21] = _mm_sub_epi32(v[5], v[21]); + u[22] = _mm_sub_epi32(v[6], v[22]); + u[23] = _mm_sub_epi32(v[7], v[23]); + u[24] = _mm_sub_epi32(v[8], v[24]); + u[25] = _mm_sub_epi32(v[9], v[25]); + u[26] = _mm_sub_epi32(v[10], v[26]); + u[27] = _mm_sub_epi32(v[11], v[27]); + u[28] = _mm_sub_epi32(v[12], v[28]); + u[29] = _mm_sub_epi32(v[13], v[29]); + u[30] = _mm_sub_epi32(v[14], v[30]); + u[31] = _mm_sub_epi32(v[15], v[31]); + + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm_add_epi32(u[7], 
k__DCT_CONST_ROUNDING); + v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); + v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); + v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); + v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); + v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); + v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); + v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); + v[16] = _mm_add_epi32(u[16], k__DCT_CONST_ROUNDING); + v[17] = _mm_add_epi32(u[17], k__DCT_CONST_ROUNDING); + v[18] = _mm_add_epi32(u[18], k__DCT_CONST_ROUNDING); + v[19] = _mm_add_epi32(u[19], k__DCT_CONST_ROUNDING); + v[20] = _mm_add_epi32(u[20], k__DCT_CONST_ROUNDING); + v[21] = _mm_add_epi32(u[21], k__DCT_CONST_ROUNDING); + v[22] = _mm_add_epi32(u[22], k__DCT_CONST_ROUNDING); + v[23] = _mm_add_epi32(u[23], k__DCT_CONST_ROUNDING); + v[24] = _mm_add_epi32(u[24], k__DCT_CONST_ROUNDING); + v[25] = _mm_add_epi32(u[25], k__DCT_CONST_ROUNDING); + v[26] = _mm_add_epi32(u[26], k__DCT_CONST_ROUNDING); + v[27] = _mm_add_epi32(u[27], k__DCT_CONST_ROUNDING); + v[28] = _mm_add_epi32(u[28], k__DCT_CONST_ROUNDING); + v[29] = _mm_add_epi32(u[29], k__DCT_CONST_ROUNDING); + v[30] = _mm_add_epi32(u[30], k__DCT_CONST_ROUNDING); + v[31] = _mm_add_epi32(u[31], k__DCT_CONST_ROUNDING); + + u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); + u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); + u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); + u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); + u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); + u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); + u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); + u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); + u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); + u[16] = _mm_srai_epi32(v[16], DCT_CONST_BITS); + u[17] = _mm_srai_epi32(v[17], DCT_CONST_BITS); + u[18] = _mm_srai_epi32(v[18], DCT_CONST_BITS); + u[19] = _mm_srai_epi32(v[19], DCT_CONST_BITS); + u[20] = _mm_srai_epi32(v[20], DCT_CONST_BITS); + u[21] = _mm_srai_epi32(v[21], DCT_CONST_BITS); + u[22] = _mm_srai_epi32(v[22], DCT_CONST_BITS); + u[23] = _mm_srai_epi32(v[23], DCT_CONST_BITS); + u[24] = _mm_srai_epi32(v[24], DCT_CONST_BITS); + u[25] = _mm_srai_epi32(v[25], DCT_CONST_BITS); + u[26] = _mm_srai_epi32(v[26], DCT_CONST_BITS); + u[27] = _mm_srai_epi32(v[27], DCT_CONST_BITS); + u[28] = _mm_srai_epi32(v[28], DCT_CONST_BITS); + u[29] = _mm_srai_epi32(v[29], DCT_CONST_BITS); + u[30] = _mm_srai_epi32(v[30], DCT_CONST_BITS); + u[31] = _mm_srai_epi32(v[31], DCT_CONST_BITS); + + s[0] = _mm_packs_epi32(u[0], u[1]); + s[1] = _mm_packs_epi32(u[2], u[3]); + s[2] = _mm_packs_epi32(u[4], u[5]); + s[3] = _mm_packs_epi32(u[6], u[7]); + s[4] = _mm_packs_epi32(u[8], u[9]); + s[5] = _mm_packs_epi32(u[10], u[11]); + s[6] = _mm_packs_epi32(u[12], u[13]); + s[7] = _mm_packs_epi32(u[14], u[15]); + s[8] = _mm_packs_epi32(u[16], u[17]); + s[9] = _mm_packs_epi32(u[18], u[19]); + s[10] = _mm_packs_epi32(u[20], u[21]); + s[11] = _mm_packs_epi32(u[22], u[23]); + s[12] = _mm_packs_epi32(u[24], u[25]); + s[13] = _mm_packs_epi32(u[26], u[27]); + s[14] = _mm_packs_epi32(u[28], u[29]); + s[15] = _mm_packs_epi32(u[30], u[31]); + + // stage 2 + u[0] = 
_mm_unpacklo_epi16(s[8], s[9]); + u[1] = _mm_unpackhi_epi16(s[8], s[9]); + u[2] = _mm_unpacklo_epi16(s[10], s[11]); + u[3] = _mm_unpackhi_epi16(s[10], s[11]); + u[4] = _mm_unpacklo_epi16(s[12], s[13]); + u[5] = _mm_unpackhi_epi16(s[12], s[13]); + u[6] = _mm_unpacklo_epi16(s[14], s[15]); + u[7] = _mm_unpackhi_epi16(s[14], s[15]); + + v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28); + v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28); + v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04); + v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04); + v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12); + v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12); + v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20); + v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20); + v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04); + v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04); + v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28); + v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28); + v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20); + v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20); + v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12); + v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12); + + u[0] = _mm_add_epi32(v[0], v[8]); + u[1] = _mm_add_epi32(v[1], v[9]); + u[2] = _mm_add_epi32(v[2], v[10]); + u[3] = _mm_add_epi32(v[3], v[11]); + u[4] = _mm_add_epi32(v[4], v[12]); + u[5] = _mm_add_epi32(v[5], v[13]); + u[6] = _mm_add_epi32(v[6], v[14]); + u[7] = _mm_add_epi32(v[7], v[15]); + u[8] = _mm_sub_epi32(v[0], v[8]); + u[9] = _mm_sub_epi32(v[1], v[9]); + u[10] = _mm_sub_epi32(v[2], v[10]); + u[11] = _mm_sub_epi32(v[3], v[11]); + u[12] = _mm_sub_epi32(v[4], v[12]); + u[13] = _mm_sub_epi32(v[5], v[13]); + u[14] = _mm_sub_epi32(v[6], v[14]); + u[15] = _mm_sub_epi32(v[7], v[15]); + + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); + v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); + v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); + v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); + v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); + v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); + v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); + + u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); + u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); + u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); + u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); + u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); + u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); + u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); + u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); + u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); + + x[0] = _mm_add_epi16(s[0], s[4]); + x[1] = _mm_add_epi16(s[1], s[5]); + x[2] = _mm_add_epi16(s[2], s[6]); + x[3] = _mm_add_epi16(s[3], s[7]); + x[4] = _mm_sub_epi16(s[0], s[4]); + x[5] = 
_mm_sub_epi16(s[1], s[5]); + x[6] = _mm_sub_epi16(s[2], s[6]); + x[7] = _mm_sub_epi16(s[3], s[7]); + x[8] = _mm_packs_epi32(u[0], u[1]); + x[9] = _mm_packs_epi32(u[2], u[3]); + x[10] = _mm_packs_epi32(u[4], u[5]); + x[11] = _mm_packs_epi32(u[6], u[7]); + x[12] = _mm_packs_epi32(u[8], u[9]); + x[13] = _mm_packs_epi32(u[10], u[11]); + x[14] = _mm_packs_epi32(u[12], u[13]); + x[15] = _mm_packs_epi32(u[14], u[15]); + + // stage 3 + u[0] = _mm_unpacklo_epi16(x[4], x[5]); + u[1] = _mm_unpackhi_epi16(x[4], x[5]); + u[2] = _mm_unpacklo_epi16(x[6], x[7]); + u[3] = _mm_unpackhi_epi16(x[6], x[7]); + u[4] = _mm_unpacklo_epi16(x[12], x[13]); + u[5] = _mm_unpackhi_epi16(x[12], x[13]); + u[6] = _mm_unpacklo_epi16(x[14], x[15]); + u[7] = _mm_unpackhi_epi16(x[14], x[15]); + + v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24); + v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24); + v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08); + v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08); + v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08); + v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08); + v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24); + v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24); + v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24); + v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24); + v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08); + v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08); + v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08); + v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08); + v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24); + v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24); + + u[0] = _mm_add_epi32(v[0], v[4]); + u[1] = _mm_add_epi32(v[1], v[5]); + u[2] = _mm_add_epi32(v[2], v[6]); + u[3] = _mm_add_epi32(v[3], v[7]); + u[4] = _mm_sub_epi32(v[0], v[4]); + u[5] = _mm_sub_epi32(v[1], v[5]); + u[6] = _mm_sub_epi32(v[2], v[6]); + u[7] = _mm_sub_epi32(v[3], v[7]); + u[8] = _mm_add_epi32(v[8], v[12]); + u[9] = _mm_add_epi32(v[9], v[13]); + u[10] = _mm_add_epi32(v[10], v[14]); + u[11] = _mm_add_epi32(v[11], v[15]); + u[12] = _mm_sub_epi32(v[8], v[12]); + u[13] = _mm_sub_epi32(v[9], v[13]); + u[14] = _mm_sub_epi32(v[10], v[14]); + u[15] = _mm_sub_epi32(v[11], v[15]); + + u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); + u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); + u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); + u[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); + u[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); + u[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); + u[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); + u[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); + u[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); + u[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); + u[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); + + v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); + v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); + v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); + v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); + v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); + v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); + v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); + v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); + v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); + v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); + v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); + v[11] = 
_mm_srai_epi32(u[11], DCT_CONST_BITS); + v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); + v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); + v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); + v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); + + s[0] = _mm_add_epi16(x[0], x[2]); + s[1] = _mm_add_epi16(x[1], x[3]); + s[2] = _mm_sub_epi16(x[0], x[2]); + s[3] = _mm_sub_epi16(x[1], x[3]); + s[4] = _mm_packs_epi32(v[0], v[1]); + s[5] = _mm_packs_epi32(v[2], v[3]); + s[6] = _mm_packs_epi32(v[4], v[5]); + s[7] = _mm_packs_epi32(v[6], v[7]); + s[8] = _mm_add_epi16(x[8], x[10]); + s[9] = _mm_add_epi16(x[9], x[11]); + s[10] = _mm_sub_epi16(x[8], x[10]); + s[11] = _mm_sub_epi16(x[9], x[11]); + s[12] = _mm_packs_epi32(v[8], v[9]); + s[13] = _mm_packs_epi32(v[10], v[11]); + s[14] = _mm_packs_epi32(v[12], v[13]); + s[15] = _mm_packs_epi32(v[14], v[15]); + + // stage 4 + u[0] = _mm_unpacklo_epi16(s[2], s[3]); + u[1] = _mm_unpackhi_epi16(s[2], s[3]); + u[2] = _mm_unpacklo_epi16(s[6], s[7]); + u[3] = _mm_unpackhi_epi16(s[6], s[7]); + u[4] = _mm_unpacklo_epi16(s[10], s[11]); + u[5] = _mm_unpackhi_epi16(s[10], s[11]); + u[6] = _mm_unpacklo_epi16(s[14], s[15]); + u[7] = _mm_unpackhi_epi16(s[14], s[15]); + + v[0] = _mm_madd_epi16(u[0], k__cospi_m16_m16); + v[1] = _mm_madd_epi16(u[1], k__cospi_m16_m16); + v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16); + v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16); + v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16); + v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16); + v[6] = _mm_madd_epi16(u[2], k__cospi_m16_p16); + v[7] = _mm_madd_epi16(u[3], k__cospi_m16_p16); + v[8] = _mm_madd_epi16(u[4], k__cospi_p16_p16); + v[9] = _mm_madd_epi16(u[5], k__cospi_p16_p16); + v[10] = _mm_madd_epi16(u[4], k__cospi_m16_p16); + v[11] = _mm_madd_epi16(u[5], k__cospi_m16_p16); + v[12] = _mm_madd_epi16(u[6], k__cospi_m16_m16); + v[13] = _mm_madd_epi16(u[7], k__cospi_m16_m16); + v[14] = _mm_madd_epi16(u[6], k__cospi_p16_m16); + v[15] = _mm_madd_epi16(u[7], k__cospi_p16_m16); + + u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); + u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); + u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); + u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); + u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); + u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); + u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); + u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); + u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING); + u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING); + u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING); + u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING); + u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING); + u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING); + u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING); + u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING); + + v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); + v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); + v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); + v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); + v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); + v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); + v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); + v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); + v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); + v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); + v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); + v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); + v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); + v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); + v[14] = _mm_srai_epi32(u[14], 
DCT_CONST_BITS); + v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); + + in[0] = s[0]; + in[1] = _mm_sub_epi16(kZero, s[8]); + in[2] = s[12]; + in[3] = _mm_sub_epi16(kZero, s[4]); + in[4] = _mm_packs_epi32(v[4], v[5]); + in[5] = _mm_packs_epi32(v[12], v[13]); + in[6] = _mm_packs_epi32(v[8], v[9]); + in[7] = _mm_packs_epi32(v[0], v[1]); + in[8] = _mm_packs_epi32(v[2], v[3]); + in[9] = _mm_packs_epi32(v[10], v[11]); + in[10] = _mm_packs_epi32(v[14], v[15]); + in[11] = _mm_packs_epi32(v[6], v[7]); + in[12] = s[5]; + in[13] = _mm_sub_epi16(kZero, s[13]); + in[14] = s[9]; + in[15] = _mm_sub_epi16(kZero, s[1]); +} + +void fdct16_avx2(__m128i *in0, __m128i *in1) { + fdct16_8col_avx2(in0); + fdct16_8col_avx2(in1); + array_transpose_16x16_avx2(in0, in1); +} + +void fadst16_avx2(__m128i *in0, __m128i *in1) { + fadst16_8col_avx2(in0); + fadst16_8col_avx2(in1); + array_transpose_16x16_avx2(in0, in1); +} + +void vp9_fht16x16_avx2(const int16_t *input, int16_t *output, + int stride, int tx_type) { + __m128i in0[16], in1[16]; + + switch (tx_type) { + case DCT_DCT: + vp9_fdct16x16_avx2(input, output, stride); + break; + case ADST_DCT: + load_buffer_16x16_avx2(input, in0, in1, stride); + fadst16_avx2(in0, in1); + right_shift_16x16_avx2(in0, in1); + fdct16_avx2(in0, in1); + write_buffer_16x16_avx2(output, in0, in1, 16); + break; + case DCT_ADST: + load_buffer_16x16_avx2(input, in0, in1, stride); + fdct16_avx2(in0, in1); + right_shift_16x16_avx2(in0, in1); + fadst16_avx2(in0, in1); + write_buffer_16x16_avx2(output, in0, in1, 16); + break; + case ADST_ADST: + load_buffer_16x16_avx2(input, in0, in1, stride); + fadst16_avx2(in0, in1); + right_shift_16x16_avx2(in0, in1); + fadst16_avx2(in0, in1); + write_buffer_16x16_avx2(output, in0, in1, 16); + break; + default: + assert(0); + break; + } +} + +#define FDCT32x32_2D_AVX2 vp9_fdct32x32_rd_avx2 +#define FDCT32x32_HIGH_PRECISION 0 +#include "vp9/encoder/x86/vp9_dct32x32_avx2.c" +#undef FDCT32x32_2D_AVX2 +#undef FDCT32x32_HIGH_PRECISION + +#define FDCT32x32_2D_AVX2 vp9_fdct32x32_avx2 +#define FDCT32x32_HIGH_PRECISION 1 +#include "vp9/encoder/x86/vp9_dct32x32_avx2.c" // NOLINT +#undef FDCT32x32_2D_AVX2 +#undef FDCT32x32_HIGH_PRECISION diff --git a/libvpx/vp9/encoder/x86/vp9_dct_sse2.c b/libvpx/vp9/encoder/x86/vp9_dct_sse2.c index fefca66..6865822 100644 --- a/libvpx/vp9/encoder/x86/vp9_dct_sse2.c +++ b/libvpx/vp9/encoder/x86/vp9_dct_sse2.c @@ -13,40 +13,82 @@ #include "vpx_ports/mem.h" void vp9_fdct4x4_sse2(const int16_t *input, int16_t *output, int stride) { - // The 2D transform is done with two passes which are actually pretty - // similar. In the first one, we transform the columns and transpose - // the results. In the second one, we transform the rows. To achieve that, - // as the first pass results are transposed, we tranpose the columns (that - // is the transposed rows) and transpose the results (so that it goes back - // in normal/row positions). - int pass; + // This 2D transform implements 4 vertical 1D transforms followed + // by 4 horizontal 1D transforms. The multiplies and adds are as given + // by Chen, Smith and Fralick ('77). The commands for moving the data + // around have been minimized by hand. + // For the purposes of the comments, the 16 inputs are referred to at i0 + // through iF (in raster order), intermediate variables are a0, b0, c0 + // through f, and correspond to the in-place computations mapped to input + // locations. The outputs, o0 through oF are labeled according to the + // output locations. 
+ // Constants - // When we use them, in one case, they are all the same. In all others - // it's a pair of them that we need to repeat four times. This is done - // by constructing the 32 bit constant corresponding to that pair. - const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); + // These are the coefficients used for the multiplies. + // In the comments, pN means cos(N pi /64) and mN is -cos(N pi /64), + // where cospi_N_64 = cos(N pi /64) + const __m128i k__cospi_A = _mm_setr_epi16(cospi_16_64, cospi_16_64, + cospi_16_64, cospi_16_64, + cospi_16_64, -cospi_16_64, + cospi_16_64, -cospi_16_64); + const __m128i k__cospi_B = _mm_setr_epi16(cospi_16_64, -cospi_16_64, + cospi_16_64, -cospi_16_64, + cospi_16_64, cospi_16_64, + cospi_16_64, cospi_16_64); + const __m128i k__cospi_C = _mm_setr_epi16(cospi_8_64, cospi_24_64, + cospi_8_64, cospi_24_64, + cospi_24_64, -cospi_8_64, + cospi_24_64, -cospi_8_64); + const __m128i k__cospi_D = _mm_setr_epi16(cospi_24_64, -cospi_8_64, + cospi_24_64, -cospi_8_64, + cospi_8_64, cospi_24_64, + cospi_8_64, cospi_24_64); + const __m128i k__cospi_E = _mm_setr_epi16(cospi_16_64, cospi_16_64, + cospi_16_64, cospi_16_64, + cospi_16_64, cospi_16_64, + cospi_16_64, cospi_16_64); + const __m128i k__cospi_F = _mm_setr_epi16(cospi_16_64, -cospi_16_64, + cospi_16_64, -cospi_16_64, + cospi_16_64, -cospi_16_64, + cospi_16_64, -cospi_16_64); + const __m128i k__cospi_G = _mm_setr_epi16(cospi_8_64, cospi_24_64, + cospi_8_64, cospi_24_64, + -cospi_8_64, -cospi_24_64, + -cospi_8_64, -cospi_24_64); + const __m128i k__cospi_H = _mm_setr_epi16(cospi_24_64, -cospi_8_64, + cospi_24_64, -cospi_8_64, + -cospi_24_64, cospi_8_64, + -cospi_24_64, cospi_8_64); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + // This second rounding constant saves doing some extra adds at the end + const __m128i k__DCT_CONST_ROUNDING2 = _mm_set1_epi32(DCT_CONST_ROUNDING + +(DCT_CONST_ROUNDING << 1)); + const int DCT_CONST_BITS2 = DCT_CONST_BITS+2; const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); - const __m128i kOne = _mm_set1_epi16(1); - __m128i in0, in1, in2, in3; + __m128i in0, in1; + // Load inputs. 
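+  // (The loads below pack rows 0 and 3 of the 4x4 block into in0 and rows
+  // 1 and 2 into in1; pairing row r with row 3 - r this way lets stage 1
+  // form all of the vertical butterflies with one add and one subtract.)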
  {
    in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
    in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
-   in2 = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
-   in3 = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
-   // x = x << 4
+   in1 = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *)
+         (input + 2 * stride)));
+   in0 = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *)
+         (input + 3 * stride)));
+   // in0 = [i0 i1 i2 i3 iC iD iE iF]
+   // in1 = [i4 i5 i6 i7 i8 i9 iA iB]
+
+
+   // multiply by 16 to give some extra precision
    in0 = _mm_slli_epi16(in0, 4);
    in1 = _mm_slli_epi16(in1, 4);
-   in2 = _mm_slli_epi16(in2, 4);
-   in3 = _mm_slli_epi16(in3, 4);
    // if (i == 0 && input[0]) input[0] += 1;
+   // add 1 to the upper left pixel if it is non-zero, which helps reduce
+   // the round-trip error
    {
-     // The mask will only contain wether the first value is zero, all
+     // The mask will only contain whether the first value is zero, all
      // other comparison will fail as something shifted by 4 (above << 4)
      // can never be equal to one. To increment in the non-zero case, we
      // add the mask and one for the first element:
@@ -57,60 +99,119 @@ void vp9_fdct4x4_sse2(const int16_t *input, int16_t *output, int stride) {
      in0 = _mm_add_epi16(in0, k__nonzero_bias_b);
    }
  }
- // Do the two transform/transpose passes
- for (pass = 0; pass < 2; ++pass) {
-   // Transform 1/2: Add/subtract
-   const __m128i r0 = _mm_add_epi16(in0, in3);
-   const __m128i r1 = _mm_add_epi16(in1, in2);
-   const __m128i r2 = _mm_sub_epi16(in1, in2);
-   const __m128i r3 = _mm_sub_epi16(in0, in3);
-   // Transform 1/2: Interleave to do the multiply by constants which gets us
-   // into 32 bits.
-   const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
-   const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
-   const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
-   const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
-   const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
-   const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+ // There are 4 total stages, alternating between an add/subtract stage
+ // and a multiply-and-add stage.
+ {
+   // Stage 1: Add/subtract
+
+   // in0 = [i0 i1 i2 i3 iC iD iE iF]
+   // in1 = [i4 i5 i6 i7 i8 i9 iA iB]
+   const __m128i r0 = _mm_unpacklo_epi16(in0, in1);
+   const __m128i r1 = _mm_unpackhi_epi16(in0, in1);
+   // r0 = [i0 i4 i1 i5 i2 i6 i3 i7]
+   // r1 = [iC i8 iD i9 iE iA iF iB]
+   const __m128i r2 = _mm_shuffle_epi32(r0, 0xB4);
+   const __m128i r3 = _mm_shuffle_epi32(r1, 0xB4);
+   // r2 = [i0 i4 i1 i5 i3 i7 i2 i6]
+   // r3 = [iC i8 iD i9 iF iB iE iA]
+
+   const __m128i t0 = _mm_add_epi16(r2, r3);
+   const __m128i t1 = _mm_sub_epi16(r2, r3);
+   // t0 = [a0 a4 a1 a5 a3 a7 a2 a6]
+   // t1 = [aC a8 aD a9 aF aB aE aA]
+
+   // Stage 2: multiply by constants (which gets us into 32 bits).
+ // The constants needed here are: + // k__cospi_A = [p16 p16 p16 p16 p16 m16 p16 m16] + // k__cospi_B = [p16 m16 p16 m16 p16 p16 p16 p16] + // k__cospi_C = [p08 p24 p08 p24 p24 m08 p24 m08] + // k__cospi_D = [p24 m08 p24 m08 p08 p24 p08 p24] + const __m128i u0 = _mm_madd_epi16(t0, k__cospi_A); + const __m128i u2 = _mm_madd_epi16(t0, k__cospi_B); + const __m128i u1 = _mm_madd_epi16(t1, k__cospi_C); + const __m128i u3 = _mm_madd_epi16(t1, k__cospi_D); + // Then add and right-shift to get back to 16-bit range const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); - const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); - const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); + const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - // Combine and transpose - const __m128i res0 = _mm_packs_epi32(w0, w2); - const __m128i res1 = _mm_packs_epi32(w4, w6); - // 00 01 02 03 20 21 22 23 - // 10 11 12 13 30 31 32 33 - const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1); - const __m128i tr0_1 = _mm_unpackhi_epi16(res0, res1); - // 00 10 01 11 02 12 03 13 - // 20 30 21 31 22 32 23 33 - in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); - in2 = _mm_unpackhi_epi32(tr0_0, tr0_1); - // 00 10 20 30 01 11 21 31 in0 contains 0 followed by 1 - // 02 12 22 32 03 13 23 33 in2 contains 2 followed by 3 - if (0 == pass) { - // Extract values in the high part for second pass as transform code - // only uses the first four values. - in1 = _mm_unpackhi_epi64(in0, in0); - in3 = _mm_unpackhi_epi64(in2, in2); - } else { - // Post-condition output and store it (v + 1) >> 2, taking advantage - // of the fact 1/3 are stored just after 0/2. - __m128i out01 = _mm_add_epi16(in0, kOne); - __m128i out23 = _mm_add_epi16(in2, kOne); - out01 = _mm_srai_epi16(out01, 2); - out23 = _mm_srai_epi16(out23, 2); - _mm_storeu_si128((__m128i *)(output + 0 * 4), out01); - _mm_storeu_si128((__m128i *)(output + 2 * 4), out23); - } + const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + // w0 = [b0 b1 b7 b6] + // w1 = [b8 b9 bF bE] + // w2 = [b4 b5 b3 b2] + // w3 = [bC bD bB bA] + const __m128i x0 = _mm_packs_epi32(w0, w1); + const __m128i x1 = _mm_packs_epi32(w2, w3); + // x0 = [b0 b1 b7 b6 b8 b9 bF bE] + // x1 = [b4 b5 b3 b2 bC bD bB bA] + in0 = _mm_shuffle_epi32(x0, 0xD8); + in1 = _mm_shuffle_epi32(x1, 0x8D); + // in0 = [b0 b1 b8 b9 b7 b6 bF bE] + // in1 = [b3 b2 bB bA b4 b5 bC bD] + } + { + // vertical DCTs finished. Now we do the horizontal DCTs. + // Stage 3: Add/subtract + + const __m128i t0 = _mm_add_epi16(in0, in1); + const __m128i t1 = _mm_sub_epi16(in0, in1); + // t0 = [c0 c1 c8 c9 c4 c5 cC cD] + // t1 = [c3 c2 cB cA -c7 -c6 -cF -cE] + + // Stage 4: multiply by constants (which gets us into 32 bits). 
+   // The constants needed here are:
+   // k__cospi_E = [p16 p16 p16 p16 p16 p16 p16 p16]
+   // k__cospi_F = [p16 m16 p16 m16 p16 m16 p16 m16]
+   // k__cospi_G = [p08 p24 p08 p24 m08 m24 m08 m24]
+   // k__cospi_H = [p24 m08 p24 m08 m24 p08 m24 p08]
+   const __m128i u0 = _mm_madd_epi16(t0, k__cospi_E);
+   const __m128i u1 = _mm_madd_epi16(t0, k__cospi_F);
+   const __m128i u2 = _mm_madd_epi16(t1, k__cospi_G);
+   const __m128i u3 = _mm_madd_epi16(t1, k__cospi_H);
+   // Then add and right-shift to get back into 16-bit range, folding in
+   // the final right-shift as well to save operations. This unusual
+   // rounding operation is needed to maintain bit-accurate compatibility
+   // with the C version of this function, which has two rounding steps
+   // in a row.
+   const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING2);
+   const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING2);
+   const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING2);
+   const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING2);
+   const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS2);
+   const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS2);
+   const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS2);
+   const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS2);
+   // w0 = [o0 o4 o8 oC]
+   // w1 = [o2 o6 oA oE]
+   // w2 = [o1 o5 o9 oD]
+   // w3 = [o3 o7 oB oF]
+   // remember the o's are numbered according to the correct output location
+   const __m128i x0 = _mm_packs_epi32(w0, w1);
+   const __m128i x1 = _mm_packs_epi32(w2, w3);
+   // x0 = [o0 o4 o8 oC o2 o6 oA oE]
+   // x1 = [o1 o5 o9 oD o3 o7 oB oF]
+   const __m128i y0 = _mm_unpacklo_epi16(x0, x1);
+   const __m128i y1 = _mm_unpackhi_epi16(x0, x1);
+   // y0 = [o0 o1 o4 o5 o8 o9 oC oD]
+   // y1 = [o2 o3 o6 o7 oA oB oE oF]
+   in0 = _mm_unpacklo_epi32(y0, y1);
+   // in0 = [o0 o1 o2 o3 o4 o5 o6 o7]
+   in1 = _mm_unpackhi_epi32(y0, y1);
+   // in1 = [o8 o9 oA oB oC oD oE oF]
+ }
+ // The post-condition (v + 1) >> 2 is now incorporated into the previous
+ // add and right-shift commands. Only 2 store instructions are needed
+ // because we use the fact that rows 1/3 are stored just after rows 0/2.
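+ // Why the folded rounding is bit-exact (a sketch): let B = DCT_CONST_BITS
+ // and R = DCT_CONST_ROUNDING = 1 << (B - 1). The C version computes
+ // t = (u + R) >> B and then (t + 1) >> 2. Because arithmetic shifts
+ // floor-divide, floor((floor(u / m) + k) / n) == floor((u + k * m) / (m * n)),
+ // so
+ //   (((u + R) >> B) + 1) >> 2 == (u + R + (1 << B)) >> (B + 2)
+ //                             == (u + 3 * R) >> (B + 2)
+ // which is exactly k__DCT_CONST_ROUNDING2 (R + 2 * R) and DCT_CONST_BITS2.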
+ { + _mm_storeu_si128((__m128i *)(output + 0 * 4), in0); + _mm_storeu_si128((__m128i *)(output + 2 * 4), in1); } } + static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in, int stride) { const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); @@ -163,7 +264,7 @@ static INLINE void transpose_4x4(__m128i *res) { res[3] = _mm_unpackhi_epi64(res[2], res[2]); } -void fdct4_1d_sse2(__m128i *in) { +void fdct4_sse2(__m128i *in) { const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); @@ -196,7 +297,7 @@ void fdct4_1d_sse2(__m128i *in) { transpose_4x4(in); } -void fadst4_1d_sse2(__m128i *in) { +void fadst4_sse2(__m128i *in) { const __m128i k__sinpi_p01_p02 = pair_set_epi16(sinpi_1_9, sinpi_2_9); const __m128i k__sinpi_p04_m01 = pair_set_epi16(sinpi_4_9, -sinpi_1_9); const __m128i k__sinpi_p03_p04 = pair_set_epi16(sinpi_3_9, sinpi_4_9); @@ -244,32 +345,36 @@ void fadst4_1d_sse2(__m128i *in) { transpose_4x4(in); } -void vp9_short_fht4x4_sse2(const int16_t *input, int16_t *output, - int stride, int tx_type) { +void vp9_fht4x4_sse2(const int16_t *input, int16_t *output, + int stride, int tx_type) { __m128i in[4]; - load_buffer_4x4(input, in, stride); + switch (tx_type) { - case 0: // DCT_DCT - fdct4_1d_sse2(in); - fdct4_1d_sse2(in); + case DCT_DCT: + vp9_fdct4x4_sse2(input, output, stride); break; - case 1: // ADST_DCT - fadst4_1d_sse2(in); - fdct4_1d_sse2(in); + case ADST_DCT: + load_buffer_4x4(input, in, stride); + fadst4_sse2(in); + fdct4_sse2(in); + write_buffer_4x4(output, in); break; - case 2: // DCT_ADST - fdct4_1d_sse2(in); - fadst4_1d_sse2(in); + case DCT_ADST: + load_buffer_4x4(input, in, stride); + fdct4_sse2(in); + fadst4_sse2(in); + write_buffer_4x4(output, in); break; - case 3: // ADST_ADST - fadst4_1d_sse2(in); - fadst4_1d_sse2(in); - break; - default: - assert(0); + case ADST_ADST: + load_buffer_4x4(input, in, stride); + fadst4_sse2(in); + fadst4_sse2(in); + write_buffer_4x4(output, in); break; + default: + assert(0); + break; } - write_buffer_4x4(output, in); } void vp9_fdct8x8_sse2(const int16_t *input, int16_t *output, int stride) { @@ -313,7 +418,7 @@ void vp9_fdct8x8_sse2(const int16_t *input, int16_t *output, int stride) { for (pass = 0; pass < 2; pass++) { // To store results of each pass before the transpose. 
__m128i res0, res1, res2, res3, res4, res5, res6, res7; - // Add/substract + // Add/subtract const __m128i q0 = _mm_add_epi16(in0, in7); const __m128i q1 = _mm_add_epi16(in1, in6); const __m128i q2 = _mm_add_epi16(in2, in5); @@ -324,7 +429,7 @@ void vp9_fdct8x8_sse2(const int16_t *input, int16_t *output, int stride) { const __m128i q7 = _mm_sub_epi16(in0, in7); // Work on first four results { - // Add/substract + // Add/subtract const __m128i r0 = _mm_add_epi16(q0, q3); const __m128i r1 = _mm_add_epi16(q1, q2); const __m128i r2 = _mm_sub_epi16(q1, q2); @@ -386,7 +491,7 @@ void vp9_fdct8x8_sse2(const int16_t *input, int16_t *output, int stride) { // Combine const __m128i r0 = _mm_packs_epi32(s0, s1); const __m128i r1 = _mm_packs_epi32(s2, s3); - // Add/substract + // Add/subtract const __m128i x0 = _mm_add_epi16(q4, r0); const __m128i x1 = _mm_sub_epi16(q4, r0); const __m128i x2 = _mm_sub_epi16(q7, r1); @@ -658,7 +763,7 @@ static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) { // 07 17 27 37 47 57 67 77 } -void fdct8_1d_sse2(__m128i *in) { +void fdct8_sse2(__m128i *in) { // constants const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); @@ -798,7 +903,7 @@ void fdct8_1d_sse2(__m128i *in) { array_transpose_8x8(in, in); } -void fadst8_1d_sse2(__m128i *in) { +void fadst8_sse2(__m128i *in) { // Constants const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64); const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64); @@ -1028,40 +1133,46 @@ void fadst8_1d_sse2(__m128i *in) { array_transpose_8x8(in, in); } -void vp9_short_fht8x8_sse2(const int16_t *input, int16_t *output, - int stride, int tx_type) { +void vp9_fht8x8_sse2(const int16_t *input, int16_t *output, + int stride, int tx_type) { __m128i in[8]; - load_buffer_8x8(input, in, stride); + switch (tx_type) { - case 0: // DCT_DCT - fdct8_1d_sse2(in); - fdct8_1d_sse2(in); + case DCT_DCT: + vp9_fdct8x8_sse2(input, output, stride); break; - case 1: // ADST_DCT - fadst8_1d_sse2(in); - fdct8_1d_sse2(in); + case ADST_DCT: + load_buffer_8x8(input, in, stride); + fadst8_sse2(in); + fdct8_sse2(in); + right_shift_8x8(in, 1); + write_buffer_8x8(output, in, 8); break; - case 2: // DCT_ADST - fdct8_1d_sse2(in); - fadst8_1d_sse2(in); + case DCT_ADST: + load_buffer_8x8(input, in, stride); + fdct8_sse2(in); + fadst8_sse2(in); + right_shift_8x8(in, 1); + write_buffer_8x8(output, in, 8); break; - case 3: // ADST_ADST - fadst8_1d_sse2(in); - fadst8_1d_sse2(in); + case ADST_ADST: + load_buffer_8x8(input, in, stride); + fadst8_sse2(in); + fadst8_sse2(in); + right_shift_8x8(in, 1); + write_buffer_8x8(output, in, 8); break; default: assert(0); break; } - right_shift_8x8(in, 1); - write_buffer_8x8(output, in, 8); } void vp9_fdct16x16_sse2(const int16_t *input, int16_t *output, int stride) { // The 2D transform is done with two passes which are actually pretty // similar. In the first one, we transform the columns and transpose // the results. In the second one, we transform the rows. To achieve that, - // as the first pass results are transposed, we tranpose the columns (that + // as the first pass results are transposed, we transpose the columns (that // is the transposed rows) and transpose the results (so that it goes back // in normal/row positions). 
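+  // In outline, each of the two passes is (a structural sketch, not code
+  // from this file):
+  //   transform_columns(block);  // 1-D 16-point transform on each column
+  //   transpose(block);          // rows become columns for the next pass
+  // so after two passes both directions are transformed and the data is
+  // back in normal/row order.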
int pass; @@ -1216,9 +1327,9 @@ void vp9_fdct16x16_sse2(const int16_t *input, int16_t *output, int stride) { step1_6 = _mm_sub_epi16(in01, in14); step1_7 = _mm_sub_epi16(in00, in15); } - // Work on the first eight values; fdct8_1d(input, even_results); + // Work on the first eight values; fdct8(input, even_results); { - // Add/substract + // Add/subtract const __m128i q0 = _mm_add_epi16(input0, input7); const __m128i q1 = _mm_add_epi16(input1, input6); const __m128i q2 = _mm_add_epi16(input2, input5); @@ -1229,7 +1340,7 @@ void vp9_fdct16x16_sse2(const int16_t *input, int16_t *output, int stride) { const __m128i q7 = _mm_sub_epi16(input0, input7); // Work on first four results { - // Add/substract + // Add/subtract const __m128i r0 = _mm_add_epi16(q0, q3); const __m128i r1 = _mm_add_epi16(q1, q2); const __m128i r2 = _mm_sub_epi16(q1, q2); @@ -1293,7 +1404,7 @@ void vp9_fdct16x16_sse2(const int16_t *input, int16_t *output, int stride) { // Combine const __m128i r0 = _mm_packs_epi32(s0, s1); const __m128i r1 = _mm_packs_epi32(s2, s3); - // Add/substract + // Add/subtract const __m128i x0 = _mm_add_epi16(q4, r0); const __m128i x1 = _mm_sub_epi16(q4, r0); const __m128i x2 = _mm_sub_epi16(q7, r1); @@ -1730,7 +1841,7 @@ static INLINE void right_shift_16x16(__m128i *res0, __m128i *res1) { right_shift_8x8(res1 + 8, 2); } -void fdct16_1d_8col(__m128i *in) { +void fdct16_8col(__m128i *in) { // perform 16x16 1-D DCT for 8 columns __m128i i[8], s[8], p[8], t[8], u[16], v[16]; const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); @@ -2052,7 +2163,7 @@ void fdct16_1d_8col(__m128i *in) { in[15] = _mm_packs_epi32(v[14], v[15]); } -void fadst16_1d_8col(__m128i *in) { +void fadst16_8col(__m128i *in) { // perform 16x16 1-D ADST for 8 columns __m128i s[16], x[16], u[32], v[32]; const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64); @@ -2522,48 +2633,51 @@ void fadst16_1d_8col(__m128i *in) { in[15] = _mm_sub_epi16(kZero, s[1]); } -void fdct16_1d_sse2(__m128i *in0, __m128i *in1) { - fdct16_1d_8col(in0); - fdct16_1d_8col(in1); +void fdct16_sse2(__m128i *in0, __m128i *in1) { + fdct16_8col(in0); + fdct16_8col(in1); array_transpose_16x16(in0, in1); } -void fadst16_1d_sse2(__m128i *in0, __m128i *in1) { - fadst16_1d_8col(in0); - fadst16_1d_8col(in1); +void fadst16_sse2(__m128i *in0, __m128i *in1) { + fadst16_8col(in0); + fadst16_8col(in1); array_transpose_16x16(in0, in1); } -void vp9_short_fht16x16_sse2(const int16_t *input, int16_t *output, - int stride, int tx_type) { +void vp9_fht16x16_sse2(const int16_t *input, int16_t *output, + int stride, int tx_type) { __m128i in0[16], in1[16]; - load_buffer_16x16(input, in0, in1, stride); + switch (tx_type) { - case 0: // DCT_DCT - fdct16_1d_sse2(in0, in1); - right_shift_16x16(in0, in1); - fdct16_1d_sse2(in0, in1); + case DCT_DCT: + vp9_fdct16x16_sse2(input, output, stride); break; - case 1: // ADST_DCT - fadst16_1d_sse2(in0, in1); + case ADST_DCT: + load_buffer_16x16(input, in0, in1, stride); + fadst16_sse2(in0, in1); right_shift_16x16(in0, in1); - fdct16_1d_sse2(in0, in1); + fdct16_sse2(in0, in1); + write_buffer_16x16(output, in0, in1, 16); break; - case 2: // DCT_ADST - fdct16_1d_sse2(in0, in1); + case DCT_ADST: + load_buffer_16x16(input, in0, in1, stride); + fdct16_sse2(in0, in1); right_shift_16x16(in0, in1); - fadst16_1d_sse2(in0, in1); + fadst16_sse2(in0, in1); + write_buffer_16x16(output, in0, in1, 16); break; - case 3: // ADST_ADST - fadst16_1d_sse2(in0, in1); + case ADST_ADST: + load_buffer_16x16(input, in0, in1, stride); + 
fadst16_sse2(in0, in1); right_shift_16x16(in0, in1); - fadst16_1d_sse2(in0, in1); + fadst16_sse2(in0, in1); + write_buffer_16x16(output, in0, in1, 16); break; default: assert(0); break; } - write_buffer_16x16(output, in0, in1, 16); } #define FDCT32x32_2D vp9_fdct32x32_rd_sse2 diff --git a/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h b/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h index ca80b8b..c15039a 100644 --- a/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h +++ b/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h @@ -12,6 +12,10 @@ #ifndef VP9_ENCODER_X86_VP9_MCOMP_X86_H_ #define VP9_ENCODER_X86_VP9_MCOMP_X86_H_ +#ifdef __cplusplus +extern "C" { +#endif + #if HAVE_SSE3 #if !CONFIG_RUNTIME_CPU_DETECT @@ -36,5 +40,9 @@ #endif #endif +#ifdef __cplusplus +} // extern "C" #endif +#endif // VP9_ENCODER_X86_VP9_MCOMP_X86_H_ + diff --git a/libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm b/libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm index db30660..48ccef8 100644 --- a/libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm +++ b/libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm @@ -188,7 +188,8 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \ pmaxsw m8, m7 pshuflw m7, m8, 0x1 pmaxsw m8, m7 - pextrw [r2], m8, 0 + pextrw r6, m8, 0 + mov [r2], r6 RET ; skip-block, i.e. just write all zeroes @@ -214,5 +215,5 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \ %endmacro INIT_XMM ssse3 -QUANTIZE_FN b, 6 +QUANTIZE_FN b, 7 QUANTIZE_FN b_32x32, 7 diff --git a/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c b/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c new file mode 100644 index 0000000..34ed186 --- /dev/null +++ b/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c @@ -0,0 +1,539 @@ +/* + * Copyright (c) 2012 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include <immintrin.h>  // AVX2
+#include "vpx_ports/mem.h"
+#include "vp9/encoder/vp9_variance.h"
+
+DECLARE_ALIGNED(32, static const uint8_t, bilinear_filters_avx2[512]) = {
+  16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0,
+  16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0,
+  15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1,
+  15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1,
+  14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2,
+  14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2,
+  13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3,
+  13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3,
+  12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4,
+  12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4,
+  11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5,
+  11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5,
+  10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6,
+  10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6,
+  9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7,
+  9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9,
+  7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9, 7, 9,
+  6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10,
+  6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10,
+  5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11,
+  5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11, 5, 11,
+  4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12,
+  4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12,
+  3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13,
+  3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13,
+  2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14,
+  2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14,
+  1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15,
+  1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15
+};
+
+#define FILTER_SRC(filter) \
+  /* filter the source */ \
+  exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter); \
+  exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter); \
+  \
+  /* add 8 to source */ \
+  exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8); \
+  exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8); \
+  \
+  /* divide source by 16 */ \
+  exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4); \
+  exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
+
+#define MERGE_WITH_SRC(src_reg, reg) \
+  exp_src_lo = _mm256_unpacklo_epi8(src_reg, reg); \
+  exp_src_hi = _mm256_unpackhi_epi8(src_reg, reg);
+
+#define LOAD_SRC_DST \
+  /* load source and destination */ \
+  src_reg = _mm256_loadu_si256((__m256i const *) (src)); \
+  dst_reg = _mm256_load_si256((__m256i const *) (dst));
+
+#define AVG_NEXT_SRC(src_reg, size_stride) \
+  src_next_reg = _mm256_loadu_si256((__m256i const *) \
+                 (src + size_stride)); \
+  /* average between current and next stride source */ \
+  src_reg = _mm256_avg_epu8(src_reg, src_next_reg);
+
+#define MERGE_NEXT_SRC(src_reg, size_stride) \
+  src_next_reg = _mm256_loadu_si256((__m256i const *) \
+                 (src + size_stride)); \
+  MERGE_WITH_SRC(src_reg, src_next_reg)
+
+#define CALC_SUM_SSE_INSIDE_LOOP \
+  /* expand each byte to 2 bytes */ \
+  exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg); \
+  exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg); \
+  /* source - dest */ \
+  exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo); \
+  exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi); \
+  /* calculate sum */ \
+  sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo); \
+  exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo); \
+  sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi); \
+  exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi); \
+  /* calculate sse */ \
+  sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo); \
+  sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
+
+// final calculation of the sum and sse
+#define CALC_SUM_AND_SSE \
+  res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg); \
+  sse_reg_hi = _mm256_srli_si256(sse_reg, 8); \
+  sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp); \
+  sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp); \
+  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
+  sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi); \
+  \
+  sse_reg_hi = _mm256_srli_si256(sse_reg, 4); \
+  sum_reg_hi = _mm256_srli_si256(sum_reg, 8); \
+  \
+  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
+  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
+  *((int*)sse)= _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) + \
+                _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1)); \
+  sum_reg_hi = _mm256_srli_si256(sum_reg, 4); \
+  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
+  sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) + \
+        _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));
+
+
+unsigned int vp9_sub_pixel_variance32xh_avx2(const uint8_t *src,
+                                             int src_stride,
+                                             int x_offset,
+                                             int y_offset,
+                                             const uint8_t *dst,
+                                             int dst_stride,
+                                             int height,
+                                             unsigned int *sse) {
+  __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
+  __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
+  __m256i zero_reg;
+  int i, sum;
+  sum_reg = _mm256_set1_epi16(0);
+  sse_reg = _mm256_set1_epi16(0);
+  zero_reg = _mm256_set1_epi16(0);
+
+  // x_offset = 0 and y_offset = 0
+  if (x_offset == 0) {
+    if (y_offset == 0) {
+      for (i = 0; i < height ; i++) {
+        LOAD_SRC_DST
+        // expand each byte to 2 bytes
+        MERGE_WITH_SRC(src_reg, zero_reg)
+        CALC_SUM_SSE_INSIDE_LOOP
+        src+= src_stride;
+        dst+= dst_stride;
+      }
+    // x_offset = 0 and y_offset = 8
+    } else if (y_offset == 8) {
+      __m256i src_next_reg;
+      for (i = 0; i < height ; i++) {
+        LOAD_SRC_DST
+        AVG_NEXT_SRC(src_reg, src_stride)
+        // expand each byte to 2 bytes
+        MERGE_WITH_SRC(src_reg, zero_reg)
+        CALC_SUM_SSE_INSIDE_LOOP
+        src+= src_stride;
+        dst+= dst_stride;
+      }
+    // x_offset = 0 and y_offset = bilin interpolation
+    } else {
+      __m256i filter, pw8, src_next_reg;
+
+      y_offset <<= 5;
+      filter = _mm256_load_si256((__m256i const *)
+               (bilinear_filters_avx2 + y_offset));
+      pw8 = _mm256_set1_epi16(8);
+      for (i = 0; i < height ; i++) {
+        LOAD_SRC_DST
+        MERGE_NEXT_SRC(src_reg, src_stride)
+        FILTER_SRC(filter)
+        CALC_SUM_SSE_INSIDE_LOOP
+        src+= src_stride;
+        dst+= dst_stride;
+      }
+    }
+  // x_offset = 8 and y_offset = 0
+  } else if (x_offset == 8) {
+    if (y_offset == 0) {
+      __m256i src_next_reg;
+      for (i = 0; i < height ; i++) {
+        LOAD_SRC_DST
+        AVG_NEXT_SRC(src_reg, 1)
+        // expand each byte to 2 bytes
+        MERGE_WITH_SRC(src_reg, zero_reg)
+        CALC_SUM_SSE_INSIDE_LOOP
+        src+= src_stride;
+        dst+= dst_stride;
+      }
+    // x_offset = 8 and y_offset = 8
+    } else if (y_offset == 8) {
+      __m256i src_next_reg, src_avg;
+      // load the source and a second source starting one byte to the right
+      src_reg = _mm256_loadu_si256((__m256i const *) (src));
+      AVG_NEXT_SRC(src_reg, 1)
+      for (i = 0; i < height ; i++) {
+        src_avg = src_reg;
+        src+= src_stride;
+        LOAD_SRC_DST
+        AVG_NEXT_SRC(src_reg, 1)
+        // average the previous row's average with the current one
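+        // (this two-level averaging is the half-pel case of the bilinear
+        // filter: an offset of 8 selects the (8, 8) filter taps, i.e. a
+        // plain average - horizontal in AVG_NEXT_SRC, vertical here)
+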
src_avg = _mm256_avg_epu8(src_avg, src_reg); + // expand each byte to 2 bytes + MERGE_WITH_SRC(src_avg, zero_reg) + // save current source average + CALC_SUM_SSE_INSIDE_LOOP + dst+= dst_stride; + } + // x_offset = 8 and y_offset = bilin interpolation + } else { + __m256i filter, pw8, src_next_reg, src_avg; + y_offset <<= 5; + filter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + y_offset)); + pw8 = _mm256_set1_epi16(8); + // load source and another source starting from the next + // following byte + src_reg = _mm256_loadu_si256((__m256i const *) (src)); + AVG_NEXT_SRC(src_reg, 1) + for (i = 0; i < height ; i++) { + // save current source average + src_avg = src_reg; + src+= src_stride; + LOAD_SRC_DST + AVG_NEXT_SRC(src_reg, 1) + MERGE_WITH_SRC(src_avg, src_reg) + FILTER_SRC(filter) + CALC_SUM_SSE_INSIDE_LOOP + dst+= dst_stride; + } + } + // x_offset = bilin interpolation and y_offset = 0 + } else { + if (y_offset == 0) { + __m256i filter, pw8, src_next_reg; + x_offset <<= 5; + filter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + x_offset)); + pw8 = _mm256_set1_epi16(8); + for (i = 0; i < height ; i++) { + LOAD_SRC_DST + MERGE_NEXT_SRC(src_reg, 1) + FILTER_SRC(filter) + CALC_SUM_SSE_INSIDE_LOOP + src+= src_stride; + dst+= dst_stride; + } + // x_offset = bilin interpolation and y_offset = 8 + } else if (y_offset == 8) { + __m256i filter, pw8, src_next_reg, src_pack; + x_offset <<= 5; + filter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + x_offset)); + pw8 = _mm256_set1_epi16(8); + src_reg = _mm256_loadu_si256((__m256i const *) (src)); + MERGE_NEXT_SRC(src_reg, 1) + FILTER_SRC(filter) + // convert each 16 bit to 8 bit to each low and high lane source + src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + for (i = 0; i < height ; i++) { + src+= src_stride; + LOAD_SRC_DST + MERGE_NEXT_SRC(src_reg, 1) + FILTER_SRC(filter) + src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + // average between previous pack to the current + src_pack = _mm256_avg_epu8(src_pack, src_reg); + MERGE_WITH_SRC(src_pack, zero_reg) + CALC_SUM_SSE_INSIDE_LOOP + src_pack = src_reg; + dst+= dst_stride; + } + // x_offset = bilin interpolation and y_offset = bilin interpolation + } else { + __m256i xfilter, yfilter, pw8, src_next_reg, src_pack; + x_offset <<= 5; + xfilter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + x_offset)); + y_offset <<= 5; + yfilter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + y_offset)); + pw8 = _mm256_set1_epi16(8); + // load source and another source starting from the next + // following byte + src_reg = _mm256_loadu_si256((__m256i const *) (src)); + MERGE_NEXT_SRC(src_reg, 1) + + FILTER_SRC(xfilter) + // convert each 16 bit to 8 bit to each low and high lane source + src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + for (i = 0; i < height ; i++) { + src+= src_stride; + LOAD_SRC_DST + MERGE_NEXT_SRC(src_reg, 1) + FILTER_SRC(xfilter) + src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + // merge previous pack to current pack source + MERGE_WITH_SRC(src_pack, src_reg) + // filter the source + FILTER_SRC(yfilter) + src_pack = src_reg; + CALC_SUM_SSE_INSIDE_LOOP + dst+= dst_stride; + } + } + } + CALC_SUM_AND_SSE + return sum; +} + +unsigned int vp9_sub_pixel_avg_variance32xh_avx2(const uint8_t *src, + int src_stride, + int x_offset, + int y_offset, + const uint8_t *dst, + int dst_stride, + const uint8_t *sec, + int sec_stride, + int height, + unsigned int *sse) { + __m256i sec_reg; + 
__m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi; + __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi; + __m256i zero_reg; + int i, sum; + sum_reg = _mm256_set1_epi16(0); + sse_reg = _mm256_set1_epi16(0); + zero_reg = _mm256_set1_epi16(0); + + // x_offset = 0 and y_offset = 0 + if (x_offset == 0) { + if (y_offset == 0) { + for (i = 0; i < height ; i++) { + LOAD_SRC_DST + sec_reg = _mm256_load_si256((__m256i const *) (sec)); + src_reg = _mm256_avg_epu8(src_reg, sec_reg); + sec+= sec_stride; + // expend each byte to 2 bytes + MERGE_WITH_SRC(src_reg, zero_reg) + CALC_SUM_SSE_INSIDE_LOOP + src+= src_stride; + dst+= dst_stride; + } + } else if (y_offset == 8) { + __m256i src_next_reg; + for (i = 0; i < height ; i++) { + LOAD_SRC_DST + AVG_NEXT_SRC(src_reg, src_stride) + sec_reg = _mm256_load_si256((__m256i const *) (sec)); + src_reg = _mm256_avg_epu8(src_reg, sec_reg); + sec+= sec_stride; + // expend each byte to 2 bytes + MERGE_WITH_SRC(src_reg, zero_reg) + CALC_SUM_SSE_INSIDE_LOOP + src+= src_stride; + dst+= dst_stride; + } + // x_offset = 0 and y_offset = bilin interpolation + } else { + __m256i filter, pw8, src_next_reg; + + y_offset <<= 5; + filter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + y_offset)); + pw8 = _mm256_set1_epi16(8); + for (i = 0; i < height ; i++) { + LOAD_SRC_DST + MERGE_NEXT_SRC(src_reg, src_stride) + FILTER_SRC(filter) + src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + sec_reg = _mm256_load_si256((__m256i const *) (sec)); + src_reg = _mm256_avg_epu8(src_reg, sec_reg); + sec+= sec_stride; + MERGE_WITH_SRC(src_reg, zero_reg) + CALC_SUM_SSE_INSIDE_LOOP + src+= src_stride; + dst+= dst_stride; + } + } + // x_offset = 8 and y_offset = 0 + } else if (x_offset == 8) { + if (y_offset == 0) { + __m256i src_next_reg; + for (i = 0; i < height ; i++) { + LOAD_SRC_DST + AVG_NEXT_SRC(src_reg, 1) + sec_reg = _mm256_load_si256((__m256i const *) (sec)); + src_reg = _mm256_avg_epu8(src_reg, sec_reg); + sec+= sec_stride; + // expand each byte to 2 bytes + MERGE_WITH_SRC(src_reg, zero_reg) + CALC_SUM_SSE_INSIDE_LOOP + src+= src_stride; + dst+= dst_stride; + } + // x_offset = 8 and y_offset = 8 + } else if (y_offset == 8) { + __m256i src_next_reg, src_avg; + // load source and another source starting from the next + // following byte + src_reg = _mm256_loadu_si256((__m256i const *) (src)); + AVG_NEXT_SRC(src_reg, 1) + for (i = 0; i < height ; i++) { + // save current source average + src_avg = src_reg; + src+= src_stride; + LOAD_SRC_DST + AVG_NEXT_SRC(src_reg, 1) + // average between previous average to current average + src_avg = _mm256_avg_epu8(src_avg, src_reg); + sec_reg = _mm256_load_si256((__m256i const *) (sec)); + src_avg = _mm256_avg_epu8(src_avg, sec_reg); + sec+= sec_stride; + // expand each byte to 2 bytes + MERGE_WITH_SRC(src_avg, zero_reg) + CALC_SUM_SSE_INSIDE_LOOP + dst+= dst_stride; + } + // x_offset = 8 and y_offset = bilin interpolation + } else { + __m256i filter, pw8, src_next_reg, src_avg; + y_offset <<= 5; + filter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + y_offset)); + pw8 = _mm256_set1_epi16(8); + // load source and another source starting from the next + // following byte + src_reg = _mm256_loadu_si256((__m256i const *) (src)); + AVG_NEXT_SRC(src_reg, 1) + for (i = 0; i < height ; i++) { + // save current source average + src_avg = src_reg; + src+= src_stride; + LOAD_SRC_DST + AVG_NEXT_SRC(src_reg, 1) + MERGE_WITH_SRC(src_avg, src_reg) + FILTER_SRC(filter) + src_avg = 
_mm256_packus_epi16(exp_src_lo, exp_src_hi); + sec_reg = _mm256_load_si256((__m256i const *) (sec)); + src_avg = _mm256_avg_epu8(src_avg, sec_reg); + // expand each byte to 2 bytes + MERGE_WITH_SRC(src_avg, zero_reg) + sec+= sec_stride; + CALC_SUM_SSE_INSIDE_LOOP + dst+= dst_stride; + } + } + // x_offset = bilin interpolation and y_offset = 0 + } else { + if (y_offset == 0) { + __m256i filter, pw8, src_next_reg; + x_offset <<= 5; + filter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + x_offset)); + pw8 = _mm256_set1_epi16(8); + for (i = 0; i < height ; i++) { + LOAD_SRC_DST + MERGE_NEXT_SRC(src_reg, 1) + FILTER_SRC(filter) + src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + sec_reg = _mm256_load_si256((__m256i const *) (sec)); + src_reg = _mm256_avg_epu8(src_reg, sec_reg); + MERGE_WITH_SRC(src_reg, zero_reg) + sec+= sec_stride; + CALC_SUM_SSE_INSIDE_LOOP + src+= src_stride; + dst+= dst_stride; + } + // x_offset = bilin interpolation and y_offset = 8 + } else if (y_offset == 8) { + __m256i filter, pw8, src_next_reg, src_pack; + x_offset <<= 5; + filter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + x_offset)); + pw8 = _mm256_set1_epi16(8); + src_reg = _mm256_loadu_si256((__m256i const *) (src)); + MERGE_NEXT_SRC(src_reg, 1) + FILTER_SRC(filter) + // convert each 16 bit to 8 bit to each low and high lane source + src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + for (i = 0; i < height ; i++) { + src+= src_stride; + LOAD_SRC_DST + MERGE_NEXT_SRC(src_reg, 1) + FILTER_SRC(filter) + src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + // average between previous pack to the current + src_pack = _mm256_avg_epu8(src_pack, src_reg); + sec_reg = _mm256_load_si256((__m256i const *) (sec)); + src_pack = _mm256_avg_epu8(src_pack, sec_reg); + sec+= sec_stride; + MERGE_WITH_SRC(src_pack, zero_reg) + src_pack = src_reg; + CALC_SUM_SSE_INSIDE_LOOP + dst+= dst_stride; + } + // x_offset = bilin interpolation and y_offset = bilin interpolation + } else { + __m256i xfilter, yfilter, pw8, src_next_reg, src_pack; + x_offset <<= 5; + xfilter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + x_offset)); + y_offset <<= 5; + yfilter = _mm256_load_si256((__m256i const *) + (bilinear_filters_avx2 + y_offset)); + pw8 = _mm256_set1_epi16(8); + // load source and another source starting from the next + // following byte + src_reg = _mm256_loadu_si256((__m256i const *) (src)); + MERGE_NEXT_SRC(src_reg, 1) + + FILTER_SRC(xfilter) + // convert each 16 bit to 8 bit to each low and high lane source + src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + for (i = 0; i < height ; i++) { + src+= src_stride; + LOAD_SRC_DST + MERGE_NEXT_SRC(src_reg, 1) + FILTER_SRC(xfilter) + src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + // merge previous pack to current pack source + MERGE_WITH_SRC(src_pack, src_reg) + // filter the source + FILTER_SRC(yfilter) + src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + sec_reg = _mm256_load_si256((__m256i const *) (sec)); + src_pack = _mm256_avg_epu8(src_pack, sec_reg); + MERGE_WITH_SRC(src_pack, zero_reg) + src_pack = src_reg; + sec+= sec_stride; + CALC_SUM_SSE_INSIDE_LOOP + dst+= dst_stride; + } + } + } + CALC_SUM_AND_SSE + return sum; +} diff --git a/libvpx/vp9/encoder/x86/vp9_variance_avx2.c b/libvpx/vp9/encoder/x86/vp9_variance_avx2.c new file mode 100644 index 0000000..835c519 --- /dev/null +++ b/libvpx/vp9/encoder/x86/vp9_variance_avx2.c @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2012 The WebM project 
authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "./vpx_config.h" + +#include "vp9/encoder/vp9_variance.h" +#include "vp9/common/vp9_pragmas.h" +#include "vpx_ports/mem.h" + +typedef void (*get_var_avx2) ( + const unsigned char *src_ptr, + int source_stride, + const unsigned char *ref_ptr, + int recon_stride, + unsigned int *SSE, + int *Sum +); + +void vp9_get16x16var_avx2 +( + const unsigned char *src_ptr, + int source_stride, + const unsigned char *ref_ptr, + int recon_stride, + unsigned int *SSE, + int *Sum +); + +void vp9_get32x32var_avx2 +( + const unsigned char *src_ptr, + int source_stride, + const unsigned char *ref_ptr, + int recon_stride, + unsigned int *SSE, + int *Sum +); + +unsigned int vp9_sub_pixel_variance32xh_avx2 +( + const uint8_t *src, + int src_stride, + int x_offset, + int y_offset, + const uint8_t *dst, + int dst_stride, + int height, + unsigned int *sse +); + +unsigned int vp9_sub_pixel_avg_variance32xh_avx2 +( + const uint8_t *src, + int src_stride, + int x_offset, + int y_offset, + const uint8_t *dst, + int dst_stride, + const uint8_t *sec, + int sec_stride, + int height, + unsigned int *sseptr +); + +static void variance_avx2(const unsigned char *src_ptr, int source_stride, + const unsigned char *ref_ptr, int recon_stride, + int w, int h, unsigned int *sse, int *sum, + get_var_avx2 var_fn, int block_size) { + unsigned int sse0; + int sum0; + int i, j; + + *sse = 0; + *sum = 0; + + for (i = 0; i < h; i += 16) { + for (j = 0; j < w; j += block_size) { + // processing 16 rows horizontally each call + var_fn(src_ptr + source_stride * i + j, source_stride, + ref_ptr + recon_stride * i + j, recon_stride, &sse0, &sum0); + *sse += sse0; + *sum += sum0; + } + } +} + +unsigned int vp9_variance16x16_avx2 +( + const unsigned char *src_ptr, + int source_stride, + const unsigned char *ref_ptr, + int recon_stride, + unsigned int *sse) { + unsigned int var; + int avg; + + variance_avx2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, + &var, &avg, vp9_get16x16var_avx2, 16); + *sse = var; + return (var - (((unsigned int)avg * avg) >> 8)); +} + +unsigned int vp9_mse16x16_avx2( + const unsigned char *src_ptr, + int source_stride, + const unsigned char *ref_ptr, + int recon_stride, + unsigned int *sse) { + unsigned int sse0; + int sum0; + vp9_get16x16var_avx2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, + &sum0); + *sse = sse0; + return sse0; +} + +unsigned int vp9_variance32x32_avx2(const uint8_t *src_ptr, + int source_stride, + const uint8_t *ref_ptr, + int recon_stride, + unsigned int *sse) { + unsigned int var; + int avg; + + // processing 32 elements vertically in parallel + variance_avx2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 32, + &var, &avg, vp9_get32x32var_avx2, 32); + *sse = var; + return (var - (((int64_t)avg * avg) >> 10)); +} + +unsigned int vp9_variance32x16_avx2(const uint8_t *src_ptr, + int source_stride, + const uint8_t *ref_ptr, + int recon_stride, + unsigned int *sse) { + unsigned int var; + int avg; + + // processing 32 elements vertically in parallel + variance_avx2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 16, + &var, &avg, vp9_get32x32var_avx2, 32); + *sse = var; + return (var - (((int64_t)avg * avg) 
>> 9)); +} + + +unsigned int vp9_variance64x64_avx2(const uint8_t *src_ptr, + int source_stride, + const uint8_t *ref_ptr, + int recon_stride, + unsigned int *sse) { + unsigned int var; + int avg; + + // processing 32 elements vertically in parallel + variance_avx2(src_ptr, source_stride, ref_ptr, recon_stride, 64, 64, + &var, &avg, vp9_get32x32var_avx2, 32); + *sse = var; + return (var - (((int64_t)avg * avg) >> 12)); +} + +unsigned int vp9_variance64x32_avx2(const uint8_t *src_ptr, + int source_stride, + const uint8_t *ref_ptr, + int recon_stride, + unsigned int *sse) { + unsigned int var; + int avg; + + // processing 32 elements vertically in parallel + variance_avx2(src_ptr, source_stride, ref_ptr, recon_stride, 64, 32, + &var, &avg, vp9_get32x32var_avx2, 32); + + *sse = var; + return (var - (((int64_t)avg * avg) >> 11)); +} + +unsigned int vp9_sub_pixel_variance64x64_avx2(const uint8_t *src, + int src_stride, + int x_offset, + int y_offset, + const uint8_t *dst, + int dst_stride, + unsigned int *sse_ptr) { + // processing 32 elements in parallel + unsigned int sse; + int se = vp9_sub_pixel_variance32xh_avx2(src, src_stride, x_offset, + y_offset, dst, dst_stride, + 64, &sse); + // processing the next 32 elements in parallel + unsigned int sse2; + int se2 = vp9_sub_pixel_variance32xh_avx2(src + 32, src_stride, + x_offset, y_offset, + dst + 32, dst_stride, + 64, &sse2); + se += se2; + sse += sse2; + *sse_ptr = sse; + return sse - (((int64_t)se * se) >> 12); +} + +unsigned int vp9_sub_pixel_variance32x32_avx2(const uint8_t *src, + int src_stride, + int x_offset, + int y_offset, + const uint8_t *dst, + int dst_stride, + unsigned int *sse_ptr) { + // processing 32 element in parallel + unsigned int sse; + int se = vp9_sub_pixel_variance32xh_avx2(src, src_stride, x_offset, + y_offset, dst, dst_stride, + 32, &sse); + *sse_ptr = sse; + return sse - (((int64_t)se * se) >> 10); +} + +unsigned int vp9_sub_pixel_avg_variance64x64_avx2(const uint8_t *src, + int src_stride, + int x_offset, + int y_offset, + const uint8_t *dst, + int dst_stride, + unsigned int *sseptr, + const uint8_t *sec) { + // processing 32 elements in parallel + unsigned int sse; + + int se = vp9_sub_pixel_avg_variance32xh_avx2(src, src_stride, x_offset, + y_offset, dst, dst_stride, + sec, 64, 64, &sse); + unsigned int sse2; + // processing the next 32 elements in parallel + int se2 = vp9_sub_pixel_avg_variance32xh_avx2(src + 32, src_stride, x_offset, + y_offset, dst + 32, dst_stride, + sec + 32, 64, 64, &sse2); + se += se2; + sse += sse2; + *sseptr = sse; + + return sse - (((int64_t)se * se) >> 12); +} + +unsigned int vp9_sub_pixel_avg_variance32x32_avx2(const uint8_t *src, + int src_stride, + int x_offset, + int y_offset, + const uint8_t *dst, + int dst_stride, + unsigned int *sseptr, + const uint8_t *sec) { + // processing 32 element in parallel + unsigned int sse; + int se = vp9_sub_pixel_avg_variance32xh_avx2(src, src_stride, x_offset, + y_offset, dst, dst_stride, + sec, 32, 32, &sse); + *sseptr = sse; + return sse - (((int64_t)se * se) >> 10); +} + + diff --git a/libvpx/vp9/encoder/x86/vp9_variance_impl_intrin_avx2.c b/libvpx/vp9/encoder/x86/vp9_variance_impl_intrin_avx2.c new file mode 100644 index 0000000..f992328 --- /dev/null +++ b/libvpx/vp9/encoder/x86/vp9_variance_impl_intrin_avx2.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2012 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <immintrin.h>  // AVX2
+
+void vp9_get16x16var_avx2(const unsigned char *src_ptr,
+                          int source_stride,
+                          const unsigned char *ref_ptr,
+                          int recon_stride,
+                          unsigned int *SSE,
+                          int *Sum) {
+  __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
+  __m256i ref_expand_high, madd_low, madd_high;
+  unsigned int i, src_2strides, ref_2strides;
+  __m256i zero_reg = _mm256_set1_epi16(0);
+  __m256i sum_ref_src = _mm256_set1_epi16(0);
+  __m256i madd_ref_src = _mm256_set1_epi16(0);
+
+  // processing two strides in a 256 bit register, halving the number
+  // of loop iterations (compared to the sse2 code)
+  src_2strides = source_stride << 1;
+  ref_2strides = recon_stride << 1;
+  for (i = 0; i < 8; i++) {
+    src = _mm256_castsi128_si256(
+          _mm_loadu_si128((__m128i const *) (src_ptr)));
+    src = _mm256_inserti128_si256(src,
+          _mm_loadu_si128((__m128i const *)(src_ptr+source_stride)), 1);
+
+    ref = _mm256_castsi128_si256(
+          _mm_loadu_si128((__m128i const *) (ref_ptr)));
+    ref = _mm256_inserti128_si256(ref,
+          _mm_loadu_si128((__m128i const *)(ref_ptr+recon_stride)), 1);
+
+    // expanding to 16 bit each lane
+    src_expand_low = _mm256_unpacklo_epi8(src, zero_reg);
+    src_expand_high = _mm256_unpackhi_epi8(src, zero_reg);
+
+    ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg);
+    ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg);
+
+    // src-ref
+    src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low);
+    src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high);
+
+    // madd low (src - ref)
+    madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low);
+
+    // add high to low
+    src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high);
+
+    // madd high (src - ref)
+    madd_high = _mm256_madd_epi16(src_expand_high, src_expand_high);
+
+    sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low);
+
+    // add high to low
+    madd_ref_src = _mm256_add_epi32(madd_ref_src,
+                   _mm256_add_epi32(madd_low, madd_high));
+
+    src_ptr+= src_2strides;
+    ref_ptr+= ref_2strides;
+  }
+
+  {
+    __m128i sum_res, madd_res;
+    __m128i expand_sum_low, expand_sum_high, expand_sum;
+    __m128i expand_madd_low, expand_madd_high, expand_madd;
+    __m128i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum;
+
+    // extract the low lane and add it to the high lane
+    sum_res = _mm_add_epi16(_mm256_castsi256_si128(sum_ref_src),
+              _mm256_extractf128_si256(sum_ref_src, 1));
+
+    madd_res = _mm_add_epi32(_mm256_castsi256_si128(madd_ref_src),
+               _mm256_extractf128_si256(madd_ref_src, 1));
+
+    // padding each 2 bytes with another 2 zeroed bytes
+    expand_sum_low = _mm_unpacklo_epi16(_mm256_castsi256_si128(zero_reg),
+                     sum_res);
+    expand_sum_high = _mm_unpackhi_epi16(_mm256_castsi256_si128(zero_reg),
+                      sum_res);
+
+    // shifting right by 16 bits, keeping the sign (sign extension)
+    expand_sum_low = _mm_srai_epi32(expand_sum_low, 16);
+    expand_sum_high = _mm_srai_epi32(expand_sum_high, 16);
+
+    expand_sum = _mm_add_epi32(expand_sum_low, expand_sum_high);
+
+    // expand each 32 bits of the madd result to 64 bits
+    expand_madd_low = _mm_unpacklo_epi32(madd_res,
+                      _mm256_castsi256_si128(zero_reg));
+    expand_madd_high = _mm_unpackhi_epi32(madd_res,
+                       _mm256_castsi256_si128(zero_reg));
+
+    expand_madd = _mm_add_epi32(expand_madd_low, expand_madd_high);
+
+    ex_expand_sum_low = _mm_unpacklo_epi32(expand_sum,
+                        _mm256_castsi256_si128(zero_reg));
+    ex_expand_sum_high = _mm_unpackhi_epi32(expand_sum,
+                         _mm256_castsi256_si128(zero_reg));
+
+    ex_expand_sum = _mm_add_epi32(ex_expand_sum_low, ex_expand_sum_high);
+
+    // shift right by 8 bytes
+    madd_res = _mm_srli_si128(expand_madd, 8);
+    sum_res = _mm_srli_si128(ex_expand_sum, 8);
+
+    madd_res = _mm_add_epi32(madd_res, expand_madd);
+    sum_res = _mm_add_epi32(sum_res, ex_expand_sum);
+
+    *((int*)SSE)= _mm_cvtsi128_si32(madd_res);
+
+    *((int*)Sum)= _mm_cvtsi128_si32(sum_res);
+  }
+}
+
+void vp9_get32x32var_avx2(const unsigned char *src_ptr,
+                          int source_stride,
+                          const unsigned char *ref_ptr,
+                          int recon_stride,
+                          unsigned int *SSE,
+                          int *Sum) {
+  __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low;
+  __m256i ref_expand_high, madd_low, madd_high;
+  unsigned int i;
+  __m256i zero_reg = _mm256_set1_epi16(0);
+  __m256i sum_ref_src = _mm256_set1_epi16(0);
+  __m256i madd_ref_src = _mm256_set1_epi16(0);
+
+  // processing 32 elements in parallel
+  for (i = 0; i < 16; i++) {
+    src = _mm256_loadu_si256((__m256i const *) (src_ptr));
+
+    ref = _mm256_loadu_si256((__m256i const *) (ref_ptr));
+
+    // expanding to 16 bit each lane
+    src_expand_low = _mm256_unpacklo_epi8(src, zero_reg);
+    src_expand_high = _mm256_unpackhi_epi8(src, zero_reg);
+
+    ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg);
+    ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg);
+
+    // src-ref
+    src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low);
+    src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high);
+
+    // madd low (src - ref)
+    madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low);
+
+    // add high to low
+    src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high);
+
+    // madd high (src - ref)
+    madd_high = _mm256_madd_epi16(src_expand_high, src_expand_high);
+
+    sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low);
+
+    // add high to low
+    madd_ref_src = _mm256_add_epi32(madd_ref_src,
+                   _mm256_add_epi32(madd_low, madd_high));
+
+    src_ptr+= source_stride;
+    ref_ptr+= recon_stride;
+  }
+
+  {
+    __m256i expand_sum_low, expand_sum_high, expand_sum;
+    __m256i expand_madd_low, expand_madd_high, expand_madd;
+    __m256i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum;
+
+    // padding each 2 bytes with another 2 zeroed bytes
+    expand_sum_low = _mm256_unpacklo_epi16(zero_reg, sum_ref_src);
+    expand_sum_high = _mm256_unpackhi_epi16(zero_reg, sum_ref_src);
+
+    // shifting right by 16 bits, keeping the sign (sign extension)
+    expand_sum_low = _mm256_srai_epi32(expand_sum_low, 16);
+    expand_sum_high = _mm256_srai_epi32(expand_sum_high, 16);
+
+    expand_sum = _mm256_add_epi32(expand_sum_low, expand_sum_high);
+
+    // expand each 32 bits of the madd result to 64 bits
+    expand_madd_low = _mm256_unpacklo_epi32(madd_ref_src, zero_reg);
+    expand_madd_high = _mm256_unpackhi_epi32(madd_ref_src, zero_reg);
+
+    expand_madd = _mm256_add_epi32(expand_madd_low, expand_madd_high);
+
+    ex_expand_sum_low = _mm256_unpacklo_epi32(expand_sum, zero_reg);
+    ex_expand_sum_high = _mm256_unpackhi_epi32(expand_sum, zero_reg);
+
+    ex_expand_sum = _mm256_add_epi32(ex_expand_sum_low, ex_expand_sum_high);
+
+    // shift right by 8 bytes
+    madd_ref_src = _mm256_srli_si256(expand_madd, 8);
+    sum_ref_src = _mm256_srli_si256(ex_expand_sum, 8);
+
+    madd_ref_src = _mm256_add_epi32(madd_ref_src, expand_madd);
+    sum_ref_src = _mm256_add_epi32(sum_ref_src, ex_expand_sum);
+
+    // extract the low lane and the high lane and add the results
+    *((int*)SSE)= _mm_cvtsi128_si32(_mm256_castsi256_si128(madd_ref_src)) +
+
_mm_cvtsi128_si32(_mm256_extractf128_si256(madd_ref_src, 1)); + + *((int*)Sum)= _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_ref_src)) + + _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_ref_src, 1)); + } +} diff --git a/libvpx/vp9/exports_enc b/libvpx/vp9/exports_enc index 25156e8..2a0fef3 100644 --- a/libvpx/vp9/exports_enc +++ b/libvpx/vp9/exports_enc @@ -1,4 +1,2 @@ data vpx_codec_vp9_cx_algo text vpx_codec_vp9_cx -data vpx_codec_vp9x_cx_algo -text vpx_codec_vp9x_cx diff --git a/libvpx/vp9/vp9_common.mk b/libvpx/vp9/vp9_common.mk index 2dd2bf0..b1ba0b1 100644 --- a/libvpx/vp9/vp9_common.mk +++ b/libvpx/vp9/vp9_common.mk @@ -12,20 +12,18 @@ VP9_COMMON_SRCS-yes += vp9_common.mk VP9_COMMON_SRCS-yes += vp9_iface_common.h VP9_COMMON_SRCS-yes += common/vp9_pragmas.h VP9_COMMON_SRCS-yes += common/vp9_ppflags.h -VP9_COMMON_SRCS-yes += common/vp9_onyx.h VP9_COMMON_SRCS-yes += common/vp9_alloccommon.c +VP9_COMMON_SRCS-yes += common/vp9_blockd.c VP9_COMMON_SRCS-yes += common/vp9_convolve.c VP9_COMMON_SRCS-yes += common/vp9_convolve.h VP9_COMMON_SRCS-yes += common/vp9_debugmodes.c -VP9_COMMON_SRCS-yes += common/vp9_default_coef_probs.h VP9_COMMON_SRCS-yes += common/vp9_entropy.c VP9_COMMON_SRCS-yes += common/vp9_entropymode.c VP9_COMMON_SRCS-yes += common/vp9_entropymv.c -VP9_COMMON_SRCS-yes += common/vp9_extend.c VP9_COMMON_SRCS-yes += common/vp9_filter.c VP9_COMMON_SRCS-yes += common/vp9_filter.h -VP9_COMMON_SRCS-yes += common/vp9_findnearmv.c -VP9_COMMON_SRCS-yes += common/generic/vp9_systemdependent.c +VP9_COMMON_SRCS-yes += common/vp9_frame_buffers.c +VP9_COMMON_SRCS-yes += common/vp9_frame_buffers.h VP9_COMMON_SRCS-yes += common/vp9_idct.c VP9_COMMON_SRCS-yes += common/vp9_alloccommon.h VP9_COMMON_SRCS-yes += common/vp9_blockd.h @@ -34,20 +32,19 @@ VP9_COMMON_SRCS-yes += common/vp9_entropy.h VP9_COMMON_SRCS-yes += common/vp9_entropymode.h VP9_COMMON_SRCS-yes += common/vp9_entropymv.h VP9_COMMON_SRCS-yes += common/vp9_enums.h -VP9_COMMON_SRCS-yes += common/vp9_extend.h -VP9_COMMON_SRCS-yes += common/vp9_findnearmv.h VP9_COMMON_SRCS-yes += common/vp9_idct.h VP9_COMMON_SRCS-yes += common/vp9_loopfilter.h VP9_COMMON_SRCS-yes += common/vp9_mv.h VP9_COMMON_SRCS-yes += common/vp9_onyxc_int.h VP9_COMMON_SRCS-yes += common/vp9_pred_common.h VP9_COMMON_SRCS-yes += common/vp9_pred_common.c +VP9_COMMON_SRCS-yes += common/vp9_prob.h +VP9_COMMON_SRCS-yes += common/vp9_prob.c VP9_COMMON_SRCS-yes += common/vp9_quant_common.h VP9_COMMON_SRCS-yes += common/vp9_reconinter.h VP9_COMMON_SRCS-yes += common/vp9_reconintra.h VP9_COMMON_SRCS-yes += common/vp9_rtcd.c -VP9_COMMON_SRCS-yes += common/vp9_rtcd_defs.sh -VP9_COMMON_SRCS-yes += common/vp9_sadmxn.h +VP9_COMMON_SRCS-yes += common/vp9_rtcd_defs.pl VP9_COMMON_SRCS-yes += common/vp9_scale.h VP9_COMMON_SRCS-yes += common/vp9_scale.c VP9_COMMON_SRCS-yes += common/vp9_seg_common.h @@ -56,7 +53,6 @@ VP9_COMMON_SRCS-yes += common/vp9_systemdependent.h VP9_COMMON_SRCS-yes += common/vp9_textblit.h VP9_COMMON_SRCS-yes += common/vp9_tile_common.h VP9_COMMON_SRCS-yes += common/vp9_tile_common.c -VP9_COMMON_SRCS-yes += common/vp9_treecoder.h VP9_COMMON_SRCS-yes += common/vp9_loopfilter.c VP9_COMMON_SRCS-yes += common/vp9_loopfilter_filters.c VP9_COMMON_SRCS-yes += common/vp9_mvref_common.c @@ -65,7 +61,6 @@ VP9_COMMON_SRCS-yes += common/vp9_quant_common.c VP9_COMMON_SRCS-yes += common/vp9_reconinter.c VP9_COMMON_SRCS-yes += common/vp9_reconintra.c VP9_COMMON_SRCS-$(CONFIG_POSTPROC_VISUALIZER) += common/vp9_textblit.c -VP9_COMMON_SRCS-yes += common/vp9_treecoder.c 
VP9_COMMON_SRCS-yes += common/vp9_common_data.c VP9_COMMON_SRCS-yes += common/vp9_common_data.h VP9_COMMON_SRCS-yes += common/vp9_scan.c @@ -79,13 +74,17 @@ VP9_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/vp9_postproc.h VP9_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/vp9_postproc.c VP9_COMMON_SRCS-$(HAVE_MMX) += common/x86/vp9_loopfilter_mmx.asm VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_subpixel_8t_sse2.asm +VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_subpixel_bilinear_sse2.asm VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_subpixel_8t_ssse3.asm +VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_subpixel_bilinear_ssse3.asm +VP9_COMMON_SRCS-$(HAVE_AVX2) += common/x86/vp9_subpixel_8t_intrin_avx2.c +VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_subpixel_8t_intrin_ssse3.c ifeq ($(CONFIG_VP9_POSTPROC),yes) VP9_COMMON_SRCS-$(HAVE_MMX) += common/x86/vp9_postproc_mmx.asm VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_postproc_sse2.asm endif -ifeq ($(USE_X86INC),yes) +ifeq ($(CONFIG_USE_X86INC),yes) VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_copy_sse2.asm VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_intrapred_sse2.asm VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_intrapred_ssse3.asm @@ -123,23 +122,26 @@ VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_idct_intrin_sse2.c VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_convolve_neon.c VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct16x16_neon.c +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_loopfilter_16_neon.c VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_convolve8_neon$(ASM) VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_convolve8_avg_neon$(ASM) VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_loopfilter_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_loopfilter_16_neon$(ASM) VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_dc_only_idct_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct4x4_1_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct4x4_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct8x8_1_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct8x8_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct16x16_1_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct16x16_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct32x32_1_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct32x32_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_iht4x4_add_neon$(ASM) -VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_iht8x8_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct4x4_1_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct4x4_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct8x8_1_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct8x8_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct16x16_1_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct16x16_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct32x32_1_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct32x32_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht4x4_add_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht8x8_add_neon$(ASM) VP9_COMMON_SRCS-$(HAVE_NEON) += 
common/arm/neon/vp9_mb_lpf_neon$(ASM) VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_copy_neon$(ASM) VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_avg_neon$(ASM) VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_save_reg_neon$(ASM) +VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_reconintra_neon$(ASM) -$(eval $(call rtcd_h_template,vp9_rtcd,vp9/common/vp9_rtcd_defs.sh)) +$(eval $(call rtcd_h_template,vp9_rtcd,vp9/common/vp9_rtcd_defs.pl)) diff --git a/libvpx/vp9/vp9_cx_iface.c b/libvpx/vp9/vp9_cx_iface.c index 1942039..220fed9 100644 --- a/libvpx/vp9/vp9_cx_iface.c +++ b/libvpx/vp9/vp9_cx_iface.c @@ -17,7 +17,6 @@ #include "vp9/encoder/vp9_onyx_int.h" #include "vpx/vp8cx.h" #include "vp9/encoder/vp9_firstpass.h" -#include "vp9/common/vp9_onyx.h" #include "vp9/vp9_iface_common.h" struct vp9_extracfg { @@ -25,19 +24,19 @@ struct vp9_extracfg { int cpu_used; /* available cpu percentage in 1/16 */ unsigned int enable_auto_alt_ref; unsigned int noise_sensitivity; - unsigned int Sharpness; + unsigned int sharpness; unsigned int static_thresh; unsigned int tile_columns; unsigned int tile_rows; unsigned int arnr_max_frames; unsigned int arnr_strength; unsigned int arnr_type; - unsigned int experimental; vp8e_tuning tuning; unsigned int cq_level; /* constrained quality level */ unsigned int rc_max_intra_bitrate_pct; unsigned int lossless; unsigned int frame_parallel_decoding_mode; + unsigned int aq_mode; }; struct extraconfig_map { @@ -53,19 +52,19 @@ static const struct extraconfig_map extracfg_map[] = { 0, /* cpu_used */ 1, /* enable_auto_alt_ref */ 0, /* noise_sensitivity */ - 0, /* Sharpness */ + 0, /* sharpness */ 0, /* static_thresh */ 0, /* tile_columns */ 0, /* tile_rows */ 7, /* arnr_max_frames */ 5, /* arnr_strength */ 3, /* arnr_type*/ - 0, /* experimental mode */ 0, /* tuning*/ 10, /* cq_level */ 0, /* rc_max_intra_bitrate_pct */ 0, /* lossless */ 0, /* frame_parallel_decoding_mode */ + 0, /* aq_mode */ } } }; @@ -75,14 +74,14 @@ struct vpx_codec_alg_priv { vpx_codec_enc_cfg_t cfg; struct vp9_extracfg vp8_cfg; VP9_CONFIG oxcf; - VP9_PTR cpi; + VP9_COMP *cpi; unsigned char *cx_data; - unsigned int cx_data_sz; + size_t cx_data_sz; unsigned char *pending_cx_data; - unsigned int pending_cx_data_sz; + size_t pending_cx_data_sz; int pending_frame_count; - uint32_t pending_frame_sizes[8]; - uint32_t pending_frame_magnitude; + size_t pending_frame_sizes[8]; + size_t pending_frame_magnitude; vpx_image_t preview_img; vp8_postproc_cfg_t preview_ppcfg; vpx_codec_pkt_list_decl(64) pkt_list; @@ -98,7 +97,7 @@ static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) { case VP8_ALTR_FRAME: return VP9_ALT_FLAG; } - assert(!"Invalid Reference Frame"); + assert(0 && "Invalid Reference Frame"); return VP9_LAST_FLAG; } @@ -157,6 +156,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, RANGE_CHECK_HI(cfg, rc_max_quantizer, 0); RANGE_CHECK_HI(cfg, rc_min_quantizer, 0); } + RANGE_CHECK(vp8_cfg, aq_mode, 0, AQ_MODE_COUNT - 1); RANGE_CHECK_HI(cfg, g_threads, 64); RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS); @@ -174,6 +174,23 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, RANGE_CHECK(cfg, ss_number_layers, 1, VPX_SS_MAX_LAYERS); /*Spatial layers max */ + + RANGE_CHECK(cfg, ts_number_layers, 1, VPX_TS_MAX_LAYERS); + if (cfg->ts_number_layers > 1) { + unsigned int i; + for (i = 1; i < cfg->ts_number_layers; ++i) { + if (cfg->ts_target_bitrate[i] < cfg->ts_target_bitrate[i-1]) { + ERROR("ts_target_bitrate entries are not 
increasing"); + } + } + RANGE_CHECK(cfg, ts_rate_decimator[cfg->ts_number_layers-1], 1, 1); + for (i = cfg->ts_number_layers-2; i > 0; --i) { + if (cfg->ts_rate_decimator[i-1] != 2*cfg->ts_rate_decimator[i]) { + ERROR("ts_rate_decimator factors are not powers of 2"); + } + } + } + /* VP8 does not support a lower bound on the keyframe interval in * automatic keyframe placement mode. */ @@ -189,18 +206,22 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, RANGE_CHECK(vp8_cfg, tile_columns, 0, 6); RANGE_CHECK(vp8_cfg, tile_rows, 0, 2); - RANGE_CHECK_HI(vp8_cfg, Sharpness, 7); + RANGE_CHECK_HI(vp8_cfg, sharpness, 7); RANGE_CHECK(vp8_cfg, arnr_max_frames, 0, 15); RANGE_CHECK_HI(vp8_cfg, arnr_strength, 6); RANGE_CHECK(vp8_cfg, arnr_type, 1, 3); RANGE_CHECK(vp8_cfg, cq_level, 0, 63); + // TODO(yaowu): remove this when ssim tuning is implemented for vp9 + if (vp8_cfg->tuning == VP8_TUNE_SSIM) + ERROR("Option --tune=ssim is not currently supported in VP9."); + if (cfg->g_pass == VPX_RC_LAST_PASS) { size_t packet_sz = sizeof(FIRSTPASS_STATS); int n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz); FIRSTPASS_STATS *stats; - if (!cfg->rc_twopass_stats_in.buf) + if (cfg->rc_twopass_stats_in.buf == NULL) ERROR("rc_twopass_stats_in.buf not set."); if (cfg->rc_twopass_stats_in.sz % packet_sz) @@ -242,8 +263,8 @@ static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx, static vpx_codec_err_t set_vp9e_config(VP9_CONFIG *oxcf, vpx_codec_enc_cfg_t cfg, - struct vp9_extracfg vp8_cfg) { - oxcf->version = cfg.g_profile | (vp8_cfg.experimental ? 0x4 : 0); + struct vp9_extracfg vp9_cfg) { + oxcf->version = cfg.g_profile; oxcf->width = cfg.g_w; oxcf->height = cfg.g_h; /* guess a frame rate if out of whack, use 30 */ @@ -256,41 +277,36 @@ static vpx_codec_err_t set_vp9e_config(VP9_CONFIG *oxcf, switch (cfg.g_pass) { case VPX_RC_ONE_PASS: - oxcf->Mode = MODE_GOODQUALITY; + oxcf->mode = MODE_GOODQUALITY; break; case VPX_RC_FIRST_PASS: - oxcf->Mode = MODE_FIRSTPASS; + oxcf->mode = MODE_FIRSTPASS; break; case VPX_RC_LAST_PASS: - oxcf->Mode = MODE_SECONDPASS_BEST; + oxcf->mode = MODE_SECONDPASS_BEST; break; } if (cfg.g_pass == VPX_RC_FIRST_PASS) { - oxcf->allow_lag = 0; oxcf->lag_in_frames = 0; } else { - oxcf->allow_lag = (cfg.g_lag_in_frames) > 0; oxcf->lag_in_frames = cfg.g_lag_in_frames; } - // VBR only supported for now. - // CBR code has been deprectated for experimental phase. 
- // CQ mode not yet tested - oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK; + oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK; if (cfg.rc_end_usage == VPX_CQ) - oxcf->end_usage = USAGE_CONSTRAINED_QUALITY; + oxcf->end_usage = USAGE_CONSTRAINED_QUALITY; else if (cfg.rc_end_usage == VPX_Q) - oxcf->end_usage = USAGE_CONSTANT_QUALITY; + oxcf->end_usage = USAGE_CONSTANT_QUALITY; else if (cfg.rc_end_usage == VPX_CBR) oxcf->end_usage = USAGE_STREAM_FROM_SERVER; oxcf->target_bandwidth = cfg.rc_target_bitrate; - oxcf->rc_max_intra_bitrate_pct = vp8_cfg.rc_max_intra_bitrate_pct; + oxcf->rc_max_intra_bitrate_pct = vp9_cfg.rc_max_intra_bitrate_pct; oxcf->best_allowed_q = cfg.rc_min_quantizer; oxcf->worst_allowed_q = cfg.rc_max_quantizer; - oxcf->cq_level = vp8_cfg.cq_level; + oxcf->cq_level = vp9_cfg.cq_level; oxcf->fixed_q = -1; oxcf->under_shoot_pct = cfg.rc_undershoot_pct; @@ -300,6 +316,8 @@ static vpx_codec_err_t set_vp9e_config(VP9_CONFIG *oxcf, oxcf->starting_buffer_level = cfg.rc_buf_initial_sz; oxcf->optimal_buffer_level = cfg.rc_buf_optimal_sz; + oxcf->drop_frames_water_mark = cfg.rc_dropframe_thresh; + oxcf->two_pass_vbrbias = cfg.rc_2pass_vbr_bias_pct; oxcf->two_pass_vbrmin_section = cfg.rc_2pass_vbr_minsection_pct; oxcf->two_pass_vbrmax_section = cfg.rc_2pass_vbr_maxsection_pct; @@ -309,41 +327,59 @@ static vpx_codec_err_t set_vp9e_config(VP9_CONFIG *oxcf, // oxcf->kf_min_dist = cfg.kf_min_dis; oxcf->key_freq = cfg.kf_max_dist; - // oxcf->delete_first_pass_file = cfg.g_delete_firstpassfile; - // strcpy(oxcf->first_pass_file, cfg.g_firstpass_file); - - oxcf->cpu_used = vp8_cfg.cpu_used; - oxcf->encode_breakout = vp8_cfg.static_thresh; - oxcf->play_alternate = vp8_cfg.enable_auto_alt_ref; - oxcf->noise_sensitivity = vp8_cfg.noise_sensitivity; - oxcf->Sharpness = vp8_cfg.Sharpness; + oxcf->cpu_used = vp9_cfg.cpu_used; + oxcf->encode_breakout = vp9_cfg.static_thresh; + oxcf->play_alternate = vp9_cfg.enable_auto_alt_ref; + oxcf->noise_sensitivity = vp9_cfg.noise_sensitivity; + oxcf->sharpness = vp9_cfg.sharpness; oxcf->two_pass_stats_in = cfg.rc_twopass_stats_in; - oxcf->output_pkt_list = vp8_cfg.pkt_list; + oxcf->output_pkt_list = vp9_cfg.pkt_list; - oxcf->arnr_max_frames = vp8_cfg.arnr_max_frames; - oxcf->arnr_strength = vp8_cfg.arnr_strength; - oxcf->arnr_type = vp8_cfg.arnr_type; + oxcf->arnr_max_frames = vp9_cfg.arnr_max_frames; + oxcf->arnr_strength = vp9_cfg.arnr_strength; + oxcf->arnr_type = vp9_cfg.arnr_type; - oxcf->tuning = vp8_cfg.tuning; + oxcf->tuning = vp9_cfg.tuning; - oxcf->tile_columns = vp8_cfg.tile_columns; - oxcf->tile_rows = vp8_cfg.tile_rows; + oxcf->tile_columns = vp9_cfg.tile_columns; + oxcf->tile_rows = vp9_cfg.tile_rows; - oxcf->lossless = vp8_cfg.lossless; + oxcf->lossless = vp9_cfg.lossless; oxcf->error_resilient_mode = cfg.g_error_resilient; - oxcf->frame_parallel_decoding_mode = vp8_cfg.frame_parallel_decoding_mode; + oxcf->frame_parallel_decoding_mode = vp9_cfg.frame_parallel_decoding_mode; + + oxcf->aq_mode = vp9_cfg.aq_mode; oxcf->ss_number_layers = cfg.ss_number_layers; + + if (oxcf->ss_number_layers > 1) { + memcpy(oxcf->ss_target_bitrate, cfg.ss_target_bitrate, + sizeof(cfg.ss_target_bitrate)); + } else if (oxcf->ss_number_layers == 1) { + oxcf->ss_target_bitrate[0] = (int)oxcf->target_bandwidth; + } + + oxcf->ts_number_layers = cfg.ts_number_layers; + + if (oxcf->ts_number_layers > 1) { + memcpy(oxcf->ts_target_bitrate, cfg.ts_target_bitrate, + sizeof(cfg.ts_target_bitrate)); + memcpy(oxcf->ts_rate_decimator, cfg.ts_rate_decimator, + 
sizeof(cfg.ts_rate_decimator)); + } else if (oxcf->ts_number_layers == 1) { + oxcf->ts_target_bitrate[0] = (int)oxcf->target_bandwidth; + oxcf->ts_rate_decimator[0] = 1; + } + /* printf("Current VP9 Settings: \n"); printf("target_bandwidth: %d\n", oxcf->target_bandwidth); printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity); - printf("Sharpness: %d\n", oxcf->Sharpness); + printf("sharpness: %d\n", oxcf->sharpness); printf("cpu_used: %d\n", oxcf->cpu_used); - printf("Mode: %d\n", oxcf->Mode); - // printf("delete_first_pass_file: %d\n", oxcf->delete_first_pass_file); + printf("Mode: %d\n", oxcf->mode); printf("auto_key: %d\n", oxcf->auto_key); printf("key_freq: %d\n", oxcf->key_freq); printf("end_usage: %d\n", oxcf->end_usage); @@ -358,7 +394,6 @@ static vpx_codec_err_t set_vp9e_config(VP9_CONFIG *oxcf, printf("two_pass_vbrbias: %d\n", oxcf->two_pass_vbrbias); printf("two_pass_vbrmin_section: %d\n", oxcf->two_pass_vbrmin_section); printf("two_pass_vbrmax_section: %d\n", oxcf->two_pass_vbrmax_section); - printf("allow_lag: %d\n", oxcf->allow_lag); printf("lag_in_frames: %d\n", oxcf->lag_in_frames); printf("play_alternate: %d\n", oxcf->play_alternate); printf("Version: %d\n", oxcf->Version); @@ -387,7 +422,7 @@ static vpx_codec_err_t vp9e_set_config(vpx_codec_alg_priv_t *ctx, res = validate_config(ctx, cfg, &ctx->vp8_cfg); - if (!res) { + if (res == VPX_CODEC_OK) { ctx->cfg = *cfg; set_vp9e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg); vp9_change_config(ctx->cpi, &ctx->oxcf); @@ -407,8 +442,7 @@ static vpx_codec_err_t get_param(vpx_codec_alg_priv_t *ctx, #define MAP(id, var) case id: *(RECAST(id, arg)) = var; break - if (!arg) - return VPX_CODEC_INVALID_PARAM; + if (arg == NULL) return VPX_CODEC_INVALID_PARAM; switch (ctrl_id) { MAP(VP8E_GET_LAST_QUANTIZER, vp9_get_quantizer(ctx->cpi)); @@ -433,7 +467,7 @@ static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx, MAP(VP8E_SET_CPUUSED, xcfg.cpu_used); MAP(VP8E_SET_ENABLEAUTOALTREF, xcfg.enable_auto_alt_ref); MAP(VP8E_SET_NOISE_SENSITIVITY, xcfg.noise_sensitivity); - MAP(VP8E_SET_SHARPNESS, xcfg.Sharpness); + MAP(VP8E_SET_SHARPNESS, xcfg.sharpness); MAP(VP8E_SET_STATIC_THRESHOLD, xcfg.static_thresh); MAP(VP9E_SET_TILE_COLUMNS, xcfg.tile_columns); MAP(VP9E_SET_TILE_ROWS, xcfg.tile_rows); @@ -445,11 +479,12 @@ static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx, MAP(VP8E_SET_MAX_INTRA_BITRATE_PCT, xcfg.rc_max_intra_bitrate_pct); MAP(VP9E_SET_LOSSLESS, xcfg.lossless); MAP(VP9E_SET_FRAME_PARALLEL_DECODING, xcfg.frame_parallel_decoding_mode); + MAP(VP9E_SET_AQ_MODE, xcfg.aq_mode); } res = validate_config(ctx, &ctx->cfg, &xcfg); - if (!res) { + if (res == VPX_CODEC_OK) { ctx->vp8_cfg = xcfg; set_vp9e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg); vp9_change_config(ctx->cpi, &ctx->oxcf); @@ -460,21 +495,16 @@ static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx, } -static vpx_codec_err_t vp9e_common_init(vpx_codec_ctx_t *ctx, - int experimental) { +static vpx_codec_err_t vp9e_common_init(vpx_codec_ctx_t *ctx) { vpx_codec_err_t res = VPX_CODEC_OK; struct vpx_codec_alg_priv *priv; vpx_codec_enc_cfg_t *cfg; unsigned int i; - VP9_PTR optr; - - if (!ctx->priv) { + if (ctx->priv == NULL) { priv = calloc(1, sizeof(struct vpx_codec_alg_priv)); - if (!priv) { - return VPX_CODEC_MEM_ERROR; - } + if (priv == NULL) return VPX_CODEC_MEM_ERROR; ctx->priv = &priv->base; ctx->priv->sz = sizeof(*ctx->priv); @@ -503,35 +533,30 @@ static vpx_codec_err_t vp9e_common_init(vpx_codec_ctx_t *ctx, priv->vp8_cfg = extracfg_map[i].cfg; priv->vp8_cfg.pkt_list = 
&priv->pkt_list.head; - priv->vp8_cfg.experimental = experimental; - // TODO(agrange) Check the limits set on this buffer, or the check that is - // applied in vp9e_encode. + // Maximum buffer size approximated based on having multiple ARF. priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 8; -// priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2; if (priv->cx_data_sz < 4096) priv->cx_data_sz = 4096; priv->cx_data = malloc(priv->cx_data_sz); - if (!priv->cx_data) { - return VPX_CODEC_MEM_ERROR; - } + if (priv->cx_data == NULL) return VPX_CODEC_MEM_ERROR; vp9_initialize_enc(); res = validate_config(priv, &priv->cfg, &priv->vp8_cfg); - if (!res) { + if (res == VPX_CODEC_OK) { + VP9_COMP *cpi; set_vp9e_config(&ctx->priv->alg_priv->oxcf, ctx->priv->alg_priv->cfg, ctx->priv->alg_priv->vp8_cfg); - optr = vp9_create_compressor(&ctx->priv->alg_priv->oxcf); - - if (!optr) + cpi = vp9_create_compressor(&ctx->priv->alg_priv->oxcf); + if (cpi == NULL) res = VPX_CODEC_MEM_ERROR; else - ctx->priv->alg_priv->cpi = optr; + ctx->priv->alg_priv->cpi = cpi; } } @@ -541,45 +566,41 @@ static vpx_codec_err_t vp9e_init(vpx_codec_ctx_t *ctx, vpx_codec_priv_enc_mr_cfg_t *data) { - return vp9e_common_init(ctx, 0); + return vp9e_common_init(ctx); } - -#if CONFIG_EXPERIMENTAL -static vpx_codec_err_t vp9e_exp_init(vpx_codec_ctx_t *ctx, - vpx_codec_priv_enc_mr_cfg_t *data) { - return vp9e_common_init(ctx, 1); -} -#endif - - static vpx_codec_err_t vp9e_destroy(vpx_codec_alg_priv_t *ctx) { free(ctx->cx_data); - vp9_remove_compressor(&ctx->cpi); + vp9_remove_compressor(ctx->cpi); free(ctx); return VPX_CODEC_OK; } static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx, - unsigned long duration, - unsigned long deadline) { - unsigned int new_qc; - - /* Use best quality mode if no deadline is given. */ - if (deadline) - new_qc = MODE_GOODQUALITY; - else - new_qc = MODE_BESTQUALITY; + unsigned long duration, + unsigned long deadline) { + // Use best quality mode if no deadline is given. + MODE new_qc = MODE_BESTQUALITY; + + if (deadline) { + // Convert duration parameter from stream timebase to microseconds + const uint64_t duration_us = (uint64_t)duration * 1000000 * + (uint64_t)ctx->cfg.g_timebase.num / + (uint64_t)ctx->cfg.g_timebase.den; + + // If the deadline is more than the duration this frame is to be shown, + // use good quality mode. Otherwise use realtime mode. + new_qc = (deadline > duration_us) ? MODE_GOODQUALITY : MODE_REALTIME; + } if (ctx->cfg.g_pass == VPX_RC_FIRST_PASS) new_qc = MODE_FIRSTPASS; else if (ctx->cfg.g_pass == VPX_RC_LAST_PASS) new_qc = (new_qc == MODE_BESTQUALITY) ? MODE_SECONDPASS_BEST : MODE_SECONDPASS; - if (ctx->oxcf.Mode != new_qc) { - ctx->oxcf.Mode = new_qc; + if (ctx->oxcf.mode != new_qc) { + ctx->oxcf.mode = new_qc; vp9_change_config(ctx->cpi, &ctx->oxcf); } } @@ -613,7 +634,7 @@ static int write_superframe_index(vpx_codec_alg_priv_t *ctx) { *x++ = marker; for (i = 0; i < ctx->pending_frame_count; i++) { - int this_sz = ctx->pending_frame_sizes[i]; + unsigned int this_sz = (unsigned int)ctx->pending_frame_sizes[i]; for (j = 0; j <= mag; j++) { *x++ = this_sz & 0xff; @@ -693,21 +714,18 @@ static vpx_codec_err_t vp9e_encode(vpx_codec_alg_priv_t *ctx, } } - /* Initialize the encoder instance on the first frame*/ - if (!res && ctx->cpi) { + /* Initialize the encoder instance on the first frame.
*/ + if (res == VPX_CODEC_OK && ctx->cpi != NULL) { unsigned int lib_flags; YV12_BUFFER_CONFIG sd; int64_t dst_time_stamp, dst_end_time_stamp; - unsigned long size, cx_data_sz; + size_t size, cx_data_sz; unsigned char *cx_data; /* Set up internal flags */ if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) ((VP9_COMP *)ctx->cpi)->b_calculate_psnr = 1; - // if (ctx->base.init_flags & VPX_CODEC_USE_OUTPUT_PARTITION) - // ((VP9_COMP *)ctx->cpi)->output_partition = 1; - /* Convert API flags to internal codec lib flags */ lib_flags = (flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0; @@ -757,8 +775,8 @@ static vpx_codec_err_t vp9e_encode(vpx_codec_alg_priv_t *ctx, VP9_COMP *cpi = (VP9_COMP *)ctx->cpi; /* Pack invisible frames with the next visible frame */ - if (!cpi->common.show_frame) { - if (!ctx->pending_cx_data) + if (cpi->common.show_frame == 0) { + if (ctx->pending_cx_data == 0) ctx->pending_cx_data = cx_data; ctx->pending_cx_data_sz += size; ctx->pending_frame_sizes[ctx->pending_frame_count++] = size; @@ -783,7 +801,7 @@ static vpx_codec_err_t vp9e_encode(vpx_codec_alg_priv_t *ctx, if (lib_flags & FRAMEFLAGS_KEY) pkt.data.frame.flags |= VPX_FRAME_IS_KEY; - if (!cpi->common.show_frame) { + if (cpi->common.show_frame == 0) { pkt.data.frame.flags |= VPX_FRAME_IS_INVISIBLE; // This timestamp should be as close as possible to the @@ -857,10 +875,9 @@ static const vpx_codec_cx_pkt_t *vp9e_get_cxdata(vpx_codec_alg_priv_t *ctx, static vpx_codec_err_t vp9e_set_reference(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { - vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); + vpx_ref_frame_t *frame = va_arg(args, vpx_ref_frame_t *); - if (data) { - vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; + if (frame != NULL) { YV12_BUFFER_CONFIG sd; image2yuvconfig(&frame->img, &sd); @@ -875,10 +892,9 @@ static vpx_codec_err_t vp9e_set_reference(vpx_codec_alg_priv_t *ctx, static vpx_codec_err_t vp9e_copy_reference(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { - vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); + vpx_ref_frame_t *frame = va_arg(args, vpx_ref_frame_t *); - if (data) { - vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; + if (frame != NULL) { YV12_BUFFER_CONFIG sd; image2yuvconfig(&frame->img, &sd); @@ -893,13 +909,13 @@ static vpx_codec_err_t vp9e_copy_reference(vpx_codec_alg_priv_t *ctx, static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { - vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *); + vp9_ref_frame_t *frame = va_arg(args, vp9_ref_frame_t *); - if (data) { + if (frame != NULL) { YV12_BUFFER_CONFIG* fb; - vp9_get_reference_enc(ctx->cpi, data->idx, &fb); - yuvconfig2image(&data->img, fb, NULL); + vp9_get_reference_enc(ctx->cpi, frame->idx, &fb); + yuvconfig2image(&frame->img, fb, NULL); return VPX_CODEC_OK; } else { return VPX_CODEC_INVALID_PARAM; @@ -910,11 +926,11 @@ static vpx_codec_err_t vp9e_set_previewpp(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { #if CONFIG_VP9_POSTPROC - vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *); + vp8_postproc_cfg_t *config = va_arg(args, vp8_postproc_cfg_t *); (void)ctr_id; - if (data) { - ctx->preview_ppcfg = *((vp8_postproc_cfg_t *)data); + if (config != NULL) { + ctx->preview_ppcfg = *config; return VPX_CODEC_OK; } else { return VPX_CODEC_INVALID_PARAM; @@ -988,20 +1004,14 @@ static vpx_codec_err_t vp9e_set_activemap(vpx_codec_alg_priv_t *ctx, static vpx_codec_err_t vp9e_set_scalemode(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { - 
vpx_scaling_mode_t *data = va_arg(args, vpx_scaling_mode_t *); + vpx_scaling_mode_t *scalemode = va_arg(args, vpx_scaling_mode_t *); - if (data) { + if (scalemode != NULL) { int res; - vpx_scaling_mode_t scalemode = *(vpx_scaling_mode_t *)data; res = vp9_set_internal_size(ctx->cpi, - (VPX_SCALING)scalemode.h_scaling_mode, - (VPX_SCALING)scalemode.v_scaling_mode); - - if (!res) { - return VPX_CODEC_OK; - } else { - return VPX_CODEC_INVALID_PARAM; - } + (VPX_SCALING)scalemode->h_scaling_mode, + (VPX_SCALING)scalemode->v_scaling_mode); + return (res == 0) ? VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM; } else { return VPX_CODEC_INVALID_PARAM; } @@ -1011,32 +1021,54 @@ static vpx_codec_err_t vp9e_set_svc(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { int data = va_arg(args, int); vp9_set_svc(ctx->cpi, data); + // CBR mode for SVC with both temporal and spatial layers not yet supported. + if (data == 1 && + ctx->cfg.rc_end_usage == VPX_CBR && + ctx->cfg.ss_number_layers > 1 && + ctx->cfg.ts_number_layers > 1) { + return VPX_CODEC_INVALID_PARAM; + } + return VPX_CODEC_OK; +} + +static vpx_codec_err_t vp9e_set_svc_layer_id(vpx_codec_alg_priv_t *ctx, + int ctr_id, + va_list args) { + vpx_svc_layer_id_t *data = va_arg(args, vpx_svc_layer_id_t *); + VP9_COMP *cpi = (VP9_COMP *)ctx->cpi; + cpi->svc.spatial_layer_id = data->spatial_layer_id; + cpi->svc.temporal_layer_id = data->temporal_layer_id; + // Checks on valid layer_id input. + if (cpi->svc.temporal_layer_id < 0 || + cpi->svc.temporal_layer_id >= (int)ctx->cfg.ts_number_layers) { + return VPX_CODEC_INVALID_PARAM; + } + if (cpi->svc.spatial_layer_id < 0 || + cpi->svc.spatial_layer_id >= (int)ctx->cfg.ss_number_layers) { + return VPX_CODEC_INVALID_PARAM; + } return VPX_CODEC_OK; } static vpx_codec_err_t vp9e_set_svc_parameters(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { - vpx_svc_parameters_t *data = va_arg(args, vpx_svc_parameters_t *); VP9_COMP *cpi = (VP9_COMP *)ctx->cpi; - vpx_svc_parameters_t params; + vpx_svc_parameters_t *params = va_arg(args, vpx_svc_parameters_t *); - if (data == NULL) { - return VPX_CODEC_INVALID_PARAM; - } + if (params == NULL) return VPX_CODEC_INVALID_PARAM; - params = *(vpx_svc_parameters_t *)data; + cpi->svc.spatial_layer_id = params->spatial_layer; + cpi->svc.temporal_layer_id = params->temporal_layer; - cpi->current_layer = params.layer; - cpi->lst_fb_idx = params.lst_fb_idx; - cpi->gld_fb_idx = params.gld_fb_idx; - cpi->alt_fb_idx = params.alt_fb_idx; + cpi->lst_fb_idx = params->lst_fb_idx; + cpi->gld_fb_idx = params->gld_fb_idx; + cpi->alt_fb_idx = params->alt_fb_idx; - if (vp9_set_size_literal(ctx->cpi, params.width, params.height) != 0) { + if (vp9_set_size_literal(ctx->cpi, params->width, params->height) != 0) return VPX_CODEC_INVALID_PARAM; - } - ctx->cfg.rc_max_quantizer = params.max_quantizer; - ctx->cfg.rc_min_quantizer = params.min_quantizer; + ctx->cfg.rc_max_quantizer = params->max_quantizer; + ctx->cfg.rc_min_quantizer = params->min_quantizer; set_vp9e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg); vp9_change_config(ctx->cpi, &ctx->oxcf); @@ -1071,9 +1103,11 @@ static vpx_codec_ctrl_fn_map_t vp9e_ctf_maps[] = { {VP8E_SET_MAX_INTRA_BITRATE_PCT, set_param}, {VP9E_SET_LOSSLESS, set_param}, {VP9E_SET_FRAME_PARALLEL_DECODING, set_param}, + {VP9E_SET_AQ_MODE, set_param}, {VP9_GET_REFERENCE, get_reference}, {VP9E_SET_SVC, vp9e_set_svc}, {VP9E_SET_SVC_PARAMETERS, vp9e_set_svc_parameters}, + {VP9E_SET_SVC_LAYER_ID, vp9e_set_svc_layer_id}, { -1, NULL}, }; @@ -1124,9 +1158,13 @@ static 
vpx_codec_enc_cfg_map_t vp9e_usage_cfg_map[] = { 9999, /* kf_max_dist */ VPX_SS_DEFAULT_LAYERS, /* ss_number_layers */ - + {0}, /* ss_target_bitrate */ + 1, /* ts_number_layers */ + {0}, /* ts_target_bitrate */ + {0}, /* ts_rate_decimator */ + 0, /* ts_periodicity */ + {0}, /* ts_layer_id */ #if VPX_ENCODER_ABI_VERSION == (1 + VPX_CODEC_ABI_VERSION) - 1, /* g_delete_first_pass_file */ "vp8.fpf" /* first pass filename */ #endif } @@ -1164,33 +1202,3 @@ CODEC_INTERFACE(vpx_codec_vp9_cx) = { vp9e_get_preview, } /* encoder functions */ }; - - -#if CONFIG_EXPERIMENTAL - -CODEC_INTERFACE(vpx_codec_vp9x_cx) = { - "VP8 Experimental Encoder" VERSION_STRING, - VPX_CODEC_INTERNAL_ABI_VERSION, - VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, - /* vpx_codec_caps_t caps; */ - vp9e_exp_init, /* vpx_codec_init_fn_t init; */ - vp9e_destroy, /* vpx_codec_destroy_fn_t destroy; */ - vp9e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */ - NOT_IMPLEMENTED, /* vpx_codec_get_mmap_fn_t get_mmap; */ - NOT_IMPLEMENTED, /* vpx_codec_set_mmap_fn_t set_mmap; */ - { // NOLINT - NOT_IMPLEMENTED, /* vpx_codec_peek_si_fn_t peek_si; */ - NOT_IMPLEMENTED, /* vpx_codec_get_si_fn_t get_si; */ - NOT_IMPLEMENTED, /* vpx_codec_decode_fn_t decode; */ - NOT_IMPLEMENTED, /* vpx_codec_frame_get_fn_t frame_get; */ - }, - { // NOLINT - vp9e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t peek_si; */ - vp9e_encode, /* vpx_codec_encode_fn_t encode; */ - vp9e_get_cxdata, /* vpx_codec_get_cx_data_fn_t frame_get; */ - vp9e_set_config, - NOT_IMPLEMENTED, - vp9e_get_preview, - } /* encoder functions */ -}; -#endif diff --git a/libvpx/vp9/vp9_dx_iface.c b/libvpx/vp9/vp9_dx_iface.c index 5dacab4..ae6ccff 100644 --- a/libvpx/vp9/vp9_dx_iface.c +++ b/libvpx/vp9/vp9_dx_iface.c @@ -15,8 +15,8 @@ #include "vpx/vp8dx.h" #include "vpx/internal/vpx_codec_internal.h" #include "./vpx_version.h" -#include "vp9/decoder/vp9_onyxd.h" -#include "vp9/decoder/vp9_onyxd_int.h" +#include "vp9/common/vp9_frame_buffers.h" +#include "vp9/decoder/vp9_decoder.h" #include "vp9/decoder/vp9_read_bit_buffer.h" #include "vp9/vp9_iface_common.h" @@ -25,7 +25,7 @@ typedef vpx_codec_stream_info_t vp9_stream_info_t; /* Structures for handling memory allocations */ typedef enum { - VP9_SEG_ALG_PRIV = 256, + VP9_SEG_ALG_PRIV = 256, VP9_SEG_MAX } mem_seg_id_t; #define NELEMENTS(x) ((int)(sizeof(x)/sizeof(x[0]))) @@ -45,7 +45,7 @@ struct vpx_codec_alg_priv { vp9_stream_info_t si; int defer_alloc; int decoder_init; - VP9D_PTR pbi; + struct VP9Decompressor *pbi; int postproc_cfg_set; vp8_postproc_cfg_t postproc_cfg; #if CONFIG_POSTPROC_VISUALIZER @@ -59,6 +59,11 @@ struct vpx_codec_alg_priv { int img_setup; int img_avail; int invert_tile_order; + + // External frame buffer info to save for VP9 common. + void *ext_priv; // Private data associated with the external frame buffers. + vpx_get_frame_buffer_cb_fn_t get_ext_fb_cb; + vpx_release_frame_buffer_cb_fn_t release_ext_fb_cb; }; static unsigned long priv_sz(const vpx_codec_dec_cfg_t *si, @@ -100,12 +105,11 @@ static void vp9_finalize_mmaps(vpx_codec_alg_priv_t *ctx) { static vpx_codec_err_t vp9_init(vpx_codec_ctx_t *ctx, vpx_codec_priv_enc_mr_cfg_t *data) { - vpx_codec_err_t res = VPX_CODEC_OK; + vpx_codec_err_t res = VPX_CODEC_OK; - /* This function only allocates space for the vpx_codec_alg_priv_t - * structure. More memory may be required at the time the stream - * information becomes known. - */ + // This function only allocates space for the vpx_codec_alg_priv_t + // structure. 
More memory may be required at the time the stream + // information becomes known. if (!ctx->priv) { vpx_codec_mmap_t mmap; @@ -115,12 +119,10 @@ static vpx_codec_err_t vp9_init(vpx_codec_ctx_t *ctx, mmap.flags = vp9_mem_req_segs[0].flags; res = vpx_mmap_alloc(&mmap); - if (!res) { vp9_init_ctx(ctx, &mmap); ctx->priv->alg_priv->defer_alloc = 1; - /*post processing level initialized to do nothing */ } } @@ -140,8 +142,7 @@ static vpx_codec_err_t vp9_destroy(vpx_codec_alg_priv_t *ctx) { return VPX_CODEC_OK; } -static vpx_codec_err_t vp9_peek_si(const uint8_t *data, - unsigned int data_sz, +static vpx_codec_err_t vp9_peek_si(const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si) { if (data_sz <= 8) return VPX_CODEC_UNSUP_BITSTREAM; if (data + data_sz <= data) return VPX_CODEC_INVALID_PARAM; @@ -152,13 +153,12 @@ static vpx_codec_err_t vp9_peek_si(const uint8_t *data, { struct vp9_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL }; const int frame_marker = vp9_rb_read_literal(&rb, 2); - const int version = vp9_rb_read_bit(&rb) | (vp9_rb_read_bit(&rb) << 1); - if (frame_marker != 0x2) return VPX_CODEC_UNSUP_BITSTREAM; -#if CONFIG_NON420 + const int version = vp9_rb_read_bit(&rb); + (void) vp9_rb_read_bit(&rb); // unused version bit + + if (frame_marker != VP9_FRAME_MARKER) + return VPX_CODEC_UNSUP_BITSTREAM; if (version > 1) return VPX_CODEC_UNSUP_BITSTREAM; -#else - if (version != 0) return VPX_CODEC_UNSUP_BITSTREAM; -#endif if (vp9_rb_read_bit(&rb)) { // show an existing frame return VPX_CODEC_OK; @@ -205,38 +205,27 @@ static vpx_codec_err_t vp9_peek_si(const uint8_t *data, static vpx_codec_err_t vp9_get_si(vpx_codec_alg_priv_t *ctx, vpx_codec_stream_info_t *si) { - unsigned int sz; - - if (si->sz >= sizeof(vp9_stream_info_t)) - sz = sizeof(vp9_stream_info_t); - else - sz = sizeof(vpx_codec_stream_info_t); - + const size_t sz = (si->sz >= sizeof(vp9_stream_info_t)) + ? sizeof(vp9_stream_info_t) + : sizeof(vpx_codec_stream_info_t); memcpy(si, &ctx->si, sz); - si->sz = sz; + si->sz = (unsigned int)sz; return VPX_CODEC_OK; } -static vpx_codec_err_t -update_error_state(vpx_codec_alg_priv_t *ctx, - const struct vpx_internal_error_info *error) { - vpx_codec_err_t res; - - if ((res = error->error_code)) - ctx->base.err_detail = error->has_detail - ? error->detail - : NULL; +static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx, + const struct vpx_internal_error_info *error) { + if (error->error_code) + ctx->base.err_detail = error->has_detail ? error->detail : NULL; - return res; + return error->error_code; } -static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx, - const uint8_t **data, - unsigned int data_sz, - void *user_priv, - long deadline) { +static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx, + const uint8_t **data, unsigned int data_sz, + void *user_priv, int64_t deadline) { vpx_codec_err_t res = VPX_CODEC_OK; ctx->img_avail = 0; @@ -284,7 +273,7 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx, if (!res) { VP9D_CONFIG oxcf; - VP9D_PTR optr; + struct VP9Decompressor *optr; vp9_initialize_dec(); @@ -296,21 +285,40 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx, oxcf.inv_tile_order = ctx->invert_tile_order; optr = vp9_create_decompressor(&oxcf); - /* If postprocessing was enabled by the application and a - * configuration has not been provided, default it. 
- */ - if (!ctx->postproc_cfg_set - && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) { - ctx->postproc_cfg.post_proc_flag = - VP8_DEBLOCK | VP8_DEMACROBLOCK; + // If postprocessing was enabled by the application and a + // configuration has not been provided, default it. + if (!ctx->postproc_cfg_set && + (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) { + ctx->postproc_cfg.post_proc_flag = VP8_DEBLOCK | VP8_DEMACROBLOCK; ctx->postproc_cfg.deblocking_level = 4; ctx->postproc_cfg.noise_level = 0; } - if (!optr) + if (!optr) { res = VPX_CODEC_ERROR; - else + } else { + VP9D_COMP *const pbi = (VP9D_COMP*)optr; + VP9_COMMON *const cm = &pbi->common; + + // Set index to not initialized. + cm->new_fb_idx = -1; + + if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) { + cm->get_fb_cb = ctx->get_ext_fb_cb; + cm->release_fb_cb = ctx->release_ext_fb_cb; + cm->cb_priv = ctx->ext_priv; + } else { + cm->get_fb_cb = vp9_get_frame_buffer; + cm->release_fb_cb = vp9_release_frame_buffer; + + if (vp9_alloc_internal_frame_buffers(&cm->int_frame_buffers)) + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to initialize internal frame buffers"); + cm->cb_priv = &cm->int_frame_buffers; + } + ctx->pbi = optr; + } } ctx->decoder_init = 1; @@ -324,36 +332,35 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx, if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC) { flags.post_proc_flag = #if CONFIG_POSTPROC_VISUALIZER - ((ctx->dbg_color_ref_frame_flag != 0) ? - VP9D_DEBUG_CLR_FRM_REF_BLKS : 0) - | ((ctx->dbg_color_mb_modes_flag != 0) ? - VP9D_DEBUG_CLR_BLK_MODES : 0) - | ((ctx->dbg_color_b_modes_flag != 0) ? - VP9D_DEBUG_CLR_BLK_MODES : 0) - | ((ctx->dbg_display_mv_flag != 0) ? - VP9D_DEBUG_DRAW_MV : 0) - | + (ctx->dbg_color_ref_frame_flag ? VP9D_DEBUG_CLR_FRM_REF_BLKS : 0) | + (ctx->dbg_color_mb_modes_flag ? VP9D_DEBUG_CLR_BLK_MODES : 0) | + (ctx->dbg_color_b_modes_flag ? VP9D_DEBUG_CLR_BLK_MODES : 0) | + (ctx->dbg_display_mv_flag ? 
VP9D_DEBUG_DRAW_MV : 0) | #endif ctx->postproc_cfg.post_proc_flag; - flags.deblocking_level = ctx->postproc_cfg.deblocking_level; - flags.noise_level = ctx->postproc_cfg.noise_level; + flags.deblocking_level = ctx->postproc_cfg.deblocking_level; + flags.noise_level = ctx->postproc_cfg.noise_level; #if CONFIG_POSTPROC_VISUALIZER flags.display_ref_frame_flag = ctx->dbg_color_ref_frame_flag; flags.display_mb_modes_flag = ctx->dbg_color_mb_modes_flag; - flags.display_b_modes_flag = ctx->dbg_color_b_modes_flag; - flags.display_mv_flag = ctx->dbg_display_mv_flag; + flags.display_b_modes_flag = ctx->dbg_color_b_modes_flag; + flags.display_mv_flag = ctx->dbg_display_mv_flag; #endif } if (vp9_receive_compressed_data(ctx->pbi, data_sz, data, deadline)) { - VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi; + VP9D_COMP *pbi = (VP9D_COMP*)ctx->pbi; res = update_error_state(ctx, &pbi->common.error); } if (!res && 0 == vp9_get_raw_frame(ctx->pbi, &sd, &time_stamp, &time_end_stamp, &flags)) { + VP9D_COMP *const pbi = (VP9D_COMP*)ctx->pbi; + VP9_COMMON *const cm = &pbi->common; yuvconfig2image(&ctx->img, &sd, user_priv); + + ctx->img.fb_priv = cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv; ctx->img_avail = 1; } } @@ -361,10 +368,8 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx, return res; } -static void parse_superframe_index(const uint8_t *data, - size_t data_sz, - uint32_t sizes[8], - int *count) { +static void parse_superframe_index(const uint8_t *data, size_t data_sz, + uint32_t sizes[8], int *count) { uint8_t marker; assert(data_sz); @@ -452,7 +457,7 @@ static vpx_codec_err_t vp9_decode(vpx_codec_alg_priv_t *ctx, while (data_start < data_end && *data_start == 0) data_start++; - data_sz = data_end - data_start; + data_sz = (unsigned int)(data_end - data_start); } while (data_start < data_end); return res; } @@ -475,11 +480,29 @@ static vpx_image_t *vp9_get_frame(vpx_codec_alg_priv_t *ctx, return img; } -static vpx_codec_err_t vp9_xma_get_mmap(const vpx_codec_ctx_t *ctx, - vpx_codec_mmap_t *mmap, - vpx_codec_iter_t *iter) { - vpx_codec_err_t res; - const mem_req_t *seg_iter = *iter; +static vpx_codec_err_t vp9_set_fb_fn( + vpx_codec_alg_priv_t *ctx, + vpx_get_frame_buffer_cb_fn_t cb_get, + vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) { + if (cb_get == NULL || cb_release == NULL) { + return VPX_CODEC_INVALID_PARAM; + } else if (ctx->pbi == NULL) { + // If the decoder has already been initialized, do not accept changes to + // the frame buffer functions. 
+ ctx->get_ext_fb_cb = cb_get; + ctx->release_ext_fb_cb = cb_release; + ctx->ext_priv = cb_priv; + return VPX_CODEC_OK; + } + + return VPX_CODEC_ERROR; +} + +static vpx_codec_err_t vp9_xma_get_mmap(const vpx_codec_ctx_t *ctx, + vpx_codec_mmap_t *mmap, + vpx_codec_iter_t *iter) { + vpx_codec_err_t res; + const mem_req_t *seg_iter = *iter; /* Get address of next segment request */ do { @@ -508,7 +531,7 @@ static vpx_codec_err_t vp9_xma_get_mmap(const vpx_codec_ctx_t *ctx, return res; } -static vpx_codec_err_t vp9_xma_set_mmap(vpx_codec_ctx_t *ctx, +static vpx_codec_err_t vp9_xma_set_mmap(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap) { vpx_codec_err_t res = VPX_CODEC_MEM_ERROR; int i, done; @@ -544,8 +567,7 @@ static vpx_codec_err_t vp9_xma_set_mmap(vpx_codec_ctx_t *ctx, return res; } -static vpx_codec_err_t set_reference(vpx_codec_alg_priv_t *ctx, - int ctr_id, +static vpx_codec_err_t set_reference(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); @@ -554,7 +576,6 @@ static vpx_codec_err_t set_reference(vpx_codec_alg_priv_t *ctx, YV12_BUFFER_CONFIG sd; image2yuvconfig(&frame->img, &sd); - return vp9_set_reference_dec(ctx->pbi, (VP9_REFFRAME)frame->frame_type, &sd); } else { @@ -562,8 +583,7 @@ static vpx_codec_err_t set_reference(vpx_codec_alg_priv_t *ctx, } } -static vpx_codec_err_t copy_reference(vpx_codec_alg_priv_t *ctx, - int ctr_id, +static vpx_codec_err_t copy_reference(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); @@ -580,8 +600,7 @@ static vpx_codec_err_t copy_reference(vpx_codec_alg_priv_t *ctx, } } -static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx, - int ctr_id, +static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *); @@ -596,8 +615,7 @@ static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx, } } -static vpx_codec_err_t set_postproc(vpx_codec_alg_priv_t *ctx, - int ctr_id, +static vpx_codec_err_t set_postproc(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { #if CONFIG_VP9_POSTPROC vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *); @@ -614,8 +632,7 @@ static vpx_codec_err_t set_postproc(vpx_codec_alg_priv_t *ctx, #endif } -static vpx_codec_err_t set_dbg_options(vpx_codec_alg_priv_t *ctx, - int ctrl_id, +static vpx_codec_err_t set_dbg_options(vpx_codec_alg_priv_t *ctx, int ctrl_id, va_list args) { #if CONFIG_POSTPROC_VISUALIZER && CONFIG_POSTPROC int data = va_arg(args, int); @@ -636,10 +653,9 @@ static vpx_codec_err_t set_dbg_options(vpx_codec_alg_priv_t *ctx, } static vpx_codec_err_t get_last_ref_updates(vpx_codec_alg_priv_t *ctx, - int ctrl_id, - va_list args) { + int ctrl_id, va_list args) { int *update_info = va_arg(args, int *); - VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi; + VP9D_COMP *pbi = (VP9D_COMP*)ctx->pbi; if (update_info) { *update_info = pbi->refresh_frame_flags; @@ -652,12 +668,11 @@ static vpx_codec_err_t get_last_ref_updates(vpx_codec_alg_priv_t *ctx, static vpx_codec_err_t get_frame_corrupted(vpx_codec_alg_priv_t *ctx, - int ctrl_id, - va_list args) { + int ctrl_id, va_list args) { int *corrupted = va_arg(args, int *); if (corrupted) { - VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi; + VP9D_COMP *pbi = (VP9D_COMP*)ctx->pbi; if (pbi) *corrupted = pbi->common.frame_to_show->corrupted; else @@ -668,6 +683,24 @@ static vpx_codec_err_t get_frame_corrupted(vpx_codec_alg_priv_t *ctx, } } +static vpx_codec_err_t 
get_display_size(vpx_codec_alg_priv_t *ctx, + int ctrl_id, va_list args) { + int *const display_size = va_arg(args, int *); + + if (display_size) { + const VP9D_COMP *const pbi = (VP9D_COMP*)ctx->pbi; + if (pbi) { + display_size[0] = pbi->common.display_width; + display_size[1] = pbi->common.display_height; + } else { + return VPX_CODEC_ERROR; + } + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; + } +} + static vpx_codec_err_t set_invert_tile_order(vpx_codec_alg_priv_t *ctx, int ctr_id, va_list args) { @@ -686,6 +719,7 @@ static vpx_codec_ctrl_fn_map_t ctf_maps[] = { {VP8D_GET_LAST_REF_UPDATES, get_last_ref_updates}, {VP8D_GET_FRAME_CORRUPTED, get_frame_corrupted}, {VP9_GET_REFERENCE, get_reference}, + {VP9D_GET_DISPLAY_SIZE, get_display_size}, {VP9_INVERT_TILE_DECODE_ORDER, set_invert_tile_order}, { -1, NULL}, }; @@ -697,7 +731,8 @@ static vpx_codec_ctrl_fn_map_t ctf_maps[] = { CODEC_INTERFACE(vpx_codec_vp9_dx) = { "WebM Project VP9 Decoder" VERSION_STRING, VPX_CODEC_INTERNAL_ABI_VERSION, - VPX_CODEC_CAP_DECODER | VP9_CAP_POSTPROC, + VPX_CODEC_CAP_DECODER | VP9_CAP_POSTPROC | + VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER, /* vpx_codec_caps_t caps; */ vp9_init, /* vpx_codec_init_fn_t init; */ vp9_destroy, /* vpx_codec_destroy_fn_t destroy; */ @@ -709,6 +744,7 @@ CODEC_INTERFACE(vpx_codec_vp9_dx) = { vp9_get_si, /* vpx_codec_get_si_fn_t get_si; */ vp9_decode, /* vpx_codec_decode_fn_t decode; */ vp9_get_frame, /* vpx_codec_frame_get_fn_t frame_get; */ + vp9_set_fb_fn, /* vpx_codec_set_fb_fn_t set_fb_fn; */ }, { // NOLINT /* encoder functions */ diff --git a/libvpx/vp9/vp9_iface_common.h b/libvpx/vp9/vp9_iface_common.h index ed0122c..58256b2 100644 --- a/libvpx/vp9/vp9_iface_common.h +++ b/libvpx/vp9/vp9_iface_common.h @@ -29,7 +29,7 @@ static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12, img->fmt = VPX_IMG_FMT_I420; } img->w = yv12->y_stride; - img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * VP9BORDERINPIXELS, 3); + img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * VP9_ENC_BORDER_IN_PIXELS, 3); img->d_w = yv12->y_crop_width; img->d_h = yv12->y_crop_height; img->x_chroma_shift = yv12->uv_width < yv12->y_width; @@ -75,7 +75,7 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img, yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2; #if CONFIG_ALPHA - // For development purposes, force alpha to hold the same data a Y for now. + // For development purposes, force alpha to hold the same data as Y for now. 
yv12->alpha_buffer = yv12->y_buffer; yv12->alpha_width = yv12->y_width; yv12->alpha_height = yv12->y_height; diff --git a/libvpx/vp9/vp9cx.mk b/libvpx/vp9/vp9cx.mk index 0993c6c..ff20f05 100644 --- a/libvpx/vp9/vp9cx.mk +++ b/libvpx/vp9/vp9cx.mk @@ -18,53 +18,54 @@ VP9_CX_SRCS_REMOVE-no += $(VP9_COMMON_SRCS_REMOVE-no) VP9_CX_SRCS-yes += vp9_cx_iface.c VP9_CX_SRCS-yes += encoder/vp9_bitstream.c -VP9_CX_SRCS-yes += encoder/vp9_boolhuff.c +VP9_CX_SRCS-yes += encoder/vp9_cost.h +VP9_CX_SRCS-yes += encoder/vp9_cost.c VP9_CX_SRCS-yes += encoder/vp9_dct.c -VP9_CX_SRCS-yes += encoder/vp9_dct.h VP9_CX_SRCS-yes += encoder/vp9_encodeframe.c VP9_CX_SRCS-yes += encoder/vp9_encodeframe.h -VP9_CX_SRCS-yes += encoder/vp9_encodeintra.c VP9_CX_SRCS-yes += encoder/vp9_encodemb.c VP9_CX_SRCS-yes += encoder/vp9_encodemv.c +VP9_CX_SRCS-yes += encoder/vp9_extend.c VP9_CX_SRCS-yes += encoder/vp9_firstpass.c VP9_CX_SRCS-yes += encoder/vp9_block.h -VP9_CX_SRCS-yes += encoder/vp9_boolhuff.h +VP9_CX_SRCS-yes += encoder/vp9_writer.h +VP9_CX_SRCS-yes += encoder/vp9_writer.c VP9_CX_SRCS-yes += encoder/vp9_write_bit_buffer.h VP9_CX_SRCS-yes += encoder/vp9_bitstream.h -VP9_CX_SRCS-yes += encoder/vp9_encodeintra.h VP9_CX_SRCS-yes += encoder/vp9_encodemb.h VP9_CX_SRCS-yes += encoder/vp9_encodemv.h +VP9_CX_SRCS-yes += encoder/vp9_extend.h VP9_CX_SRCS-yes += encoder/vp9_firstpass.h VP9_CX_SRCS-yes += encoder/vp9_lookahead.c VP9_CX_SRCS-yes += encoder/vp9_lookahead.h VP9_CX_SRCS-yes += encoder/vp9_mcomp.h -VP9_CX_SRCS-yes += encoder/vp9_modecosts.h VP9_CX_SRCS-yes += encoder/vp9_onyx_int.h -VP9_CX_SRCS-yes += encoder/vp9_psnr.h VP9_CX_SRCS-yes += encoder/vp9_quantize.h VP9_CX_SRCS-yes += encoder/vp9_ratectrl.h VP9_CX_SRCS-yes += encoder/vp9_rdopt.h +VP9_CX_SRCS-yes += encoder/vp9_pickmode.h VP9_CX_SRCS-yes += encoder/vp9_tokenize.h VP9_CX_SRCS-yes += encoder/vp9_treewriter.h VP9_CX_SRCS-yes += encoder/vp9_variance.h VP9_CX_SRCS-yes += encoder/vp9_mcomp.c -VP9_CX_SRCS-yes += encoder/vp9_modecosts.c VP9_CX_SRCS-yes += encoder/vp9_onyx_if.c VP9_CX_SRCS-yes += encoder/vp9_picklpf.c VP9_CX_SRCS-yes += encoder/vp9_picklpf.h -VP9_CX_SRCS-yes += encoder/vp9_psnr.c VP9_CX_SRCS-yes += encoder/vp9_quantize.c VP9_CX_SRCS-yes += encoder/vp9_ratectrl.c VP9_CX_SRCS-yes += encoder/vp9_rdopt.c -VP9_CX_SRCS-yes += encoder/vp9_sad_c.c +VP9_CX_SRCS-yes += encoder/vp9_pickmode.c +VP9_CX_SRCS-yes += encoder/vp9_sad.c VP9_CX_SRCS-yes += encoder/vp9_segmentation.c VP9_CX_SRCS-yes += encoder/vp9_segmentation.h VP9_CX_SRCS-yes += encoder/vp9_subexp.c VP9_CX_SRCS-yes += encoder/vp9_subexp.h +VP9_CX_SRCS-yes += encoder/vp9_resize.c +VP9_CX_SRCS-yes += encoder/vp9_resize.h VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/vp9_ssim.c VP9_CX_SRCS-yes += encoder/vp9_tokenize.c VP9_CX_SRCS-yes += encoder/vp9_treewriter.c -VP9_CX_SRCS-yes += encoder/vp9_variance_c.c +VP9_CX_SRCS-yes += encoder/vp9_variance.c VP9_CX_SRCS-yes += encoder/vp9_vaq.c VP9_CX_SRCS-yes += encoder/vp9_vaq.h ifeq ($(CONFIG_VP9_POSTPROC),yes) @@ -82,16 +83,19 @@ VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_variance_mmx.c VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_variance_impl_mmx.asm VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_sad_mmx.asm VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_variance_impl_sse2.asm +VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_variance_impl_intrin_avx2.c VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad4d_sse2.asm VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subpel_variance_impl_sse2.asm +VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c 
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_temporal_filter_apply_sse2.asm VP9_CX_SRCS-$(HAVE_SSE3) += encoder/x86/vp9_sad_sse3.asm -ifeq ($(USE_X86INC),yes) +ifeq ($(CONFIG_USE_X86INC),yes) VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_error_sse2.asm VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad_sse2.asm VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subtract_sse2.asm VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_variance_sse2.c +VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_variance_avx2.c VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subpel_variance.asm endif @@ -105,4 +109,7 @@ VP9_CX_SRCS-$(ARCH_X86_64) += encoder/x86/vp9_ssim_opt.asm VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct_sse2.c VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct32x32_sse2.c +VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_dct_avx2.c +VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_dct32x32_avx2.c + VP9_CX_SRCS-yes := $(filter-out $(VP9_CX_SRCS_REMOVE-yes),$(VP9_CX_SRCS-yes)) diff --git a/libvpx/vp9/vp9dx.mk b/libvpx/vp9/vp9dx.mk index 3a27cdd..92ec6fd 100644 --- a/libvpx/vp9/vp9dx.mk +++ b/libvpx/vp9/vp9dx.mk @@ -17,21 +17,22 @@ VP9_DX_SRCS_REMOVE-no += $(VP9_COMMON_SRCS_REMOVE-no) VP9_DX_SRCS-yes += vp9_dx_iface.c -VP9_DX_SRCS-yes += decoder/vp9_dboolhuff.c VP9_DX_SRCS-yes += decoder/vp9_decodemv.c -VP9_DX_SRCS-yes += decoder/vp9_decodframe.c -VP9_DX_SRCS-yes += decoder/vp9_decodframe.h +VP9_DX_SRCS-yes += decoder/vp9_decodeframe.c +VP9_DX_SRCS-yes += decoder/vp9_decodeframe.h VP9_DX_SRCS-yes += decoder/vp9_detokenize.c -VP9_DX_SRCS-yes += decoder/vp9_dboolhuff.h +VP9_DX_SRCS-yes += decoder/vp9_dthread.c +VP9_DX_SRCS-yes += decoder/vp9_dthread.h +VP9_DX_SRCS-yes += decoder/vp9_reader.h +VP9_DX_SRCS-yes += decoder/vp9_reader.c +VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.c VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.h VP9_DX_SRCS-yes += decoder/vp9_decodemv.h VP9_DX_SRCS-yes += decoder/vp9_detokenize.h -VP9_DX_SRCS-yes += decoder/vp9_onyxd.h -VP9_DX_SRCS-yes += decoder/vp9_onyxd_int.h +VP9_DX_SRCS-yes += decoder/vp9_decoder.c +VP9_DX_SRCS-yes += decoder/vp9_decoder.h VP9_DX_SRCS-yes += decoder/vp9_thread.c VP9_DX_SRCS-yes += decoder/vp9_thread.h -VP9_DX_SRCS-yes += decoder/vp9_treereader.h -VP9_DX_SRCS-yes += decoder/vp9_onyxd_if.c VP9_DX_SRCS-yes += decoder/vp9_dsubexp.c VP9_DX_SRCS-yes += decoder/vp9_dsubexp.h
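Two short sketches for context on how the code added in this change is consumed; every name flagged as illustrative below is an assumption, not part of the patch.

The AVX2 helpers vp9_get16x16var_avx2() and vp9_get32x32var_avx2() return a raw sum of squared differences (*SSE) and a signed sum of differences (*Sum) rather than a finished variance; the variance wrappers that call them combine the two with the identity var = sse - sum^2 / N, where N is the pixel count. A minimal sketch of that final step, assuming only standard C (variance_from_sums is an illustrative name, not a libvpx symbol):

  #include <stdint.h>

  // Fold an (SSE, Sum) pair from a vp9_getNxNvar-style helper into a
  // block variance: var = sse - sum^2 / count, with count a power of two.
  // log2_count is 8 for a 16x16 block (256 pixels) and 10 for 32x32.
  static unsigned int variance_from_sums(unsigned int sse, int sum,
                                         int log2_count) {
    return sse - (unsigned int)(((int64_t)sum * sum) >> log2_count);
  }

On the decoder side, vp9_set_fb_fn() and the new VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER capability let an application supply its own frame buffers. A hedged usage sketch, assuming the vpx_codec_set_frame_buffer_functions() entry point and the vpx_codec_frame_buffer_t layout from vpx/vpx_frame_buffer.h of the same vintage (the app_* callbacks are illustrative):

  #include <stdint.h>
  #include <stdlib.h>
  #include "vpx/vpx_decoder.h"
  #include "vpx/vpx_frame_buffer.h"

  // Minimal malloc-backed callbacks; a real application would hand out
  // buffers from its own pool (display memory, GPU surfaces, ...).
  static int app_get_fb(void *priv, size_t min_size,
                        vpx_codec_frame_buffer_t *fb) {
    (void)priv;
    // calloc keeps the buffer zero-initialized, matching the internal
    // allocator added in common/vp9_frame_buffers.c.
    fb->data = (uint8_t *)calloc(1, min_size);
    if (fb->data == NULL) return -1;
    fb->size = min_size;
    fb->priv = NULL;  // app bookkeeping slot, surfaced later as img.fb_priv
    return 0;
  }

  static int app_release_fb(void *priv, vpx_codec_frame_buffer_t *fb) {
    (void)priv;
    free(fb->data);
    fb->data = NULL;
    return 0;
  }

Per the guard in vp9_set_fb_fn() above (callbacks are only accepted while ctx->pbi is still NULL), vpx_codec_set_frame_buffer_functions(&codec, app_get_fb, app_release_fb, NULL) has to be called after vpx_codec_dec_init() and before the first vpx_codec_decode() call.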