author    Ricardo Cerqueira <cyanogenmod@cerqueira.org>  2013-11-01 16:04:38 +0000
committer Ricardo Cerqueira <cyanogenmod@cerqueira.org>  2013-11-01 16:04:38 +0000
commit    0358fb7dd6d2402a03fa2e3972d3ce686b14d413 (patch)
tree      63828ad2107f2c97fcb0598127f0fb71d1eb2c9c
parent    9c0c9b59319f4528414b74e28b0309e7349ba0f7 (diff)
parent    e6eeaaa14ccef4c0938fcce21c54979204041a30 (diff)
Merge tag 'android-4.4_r1' into cm-11.0
Android 4.4 Release 1.0
-rw-r--r-- UPDATING | 6
-rw-r--r-- armv7a-neon/libvpx_srcs.txt | 116
-rw-r--r-- armv7a-neon/vp8_rtcd.h (renamed from armv7a-neon/vpx_rtcd.h) | 95
-rw-r--r-- armv7a-neon/vp9_rtcd.h | 345
-rw-r--r-- armv7a-neon/vpx_config.c | 2
-rw-r--r-- armv7a-neon/vpx_config.h | 14
-rw-r--r-- armv7a-neon/vpx_scale_rtcd.h | 65
-rw-r--r-- armv7a-neon/vpx_version.h | 6
-rw-r--r-- armv7a/libvpx_srcs.txt | 96
-rw-r--r-- armv7a/vp8_rtcd.h (renamed from armv7a/vpx_rtcd.h) | 92
-rw-r--r-- armv7a/vp9_rtcd.h | 317
-rw-r--r-- armv7a/vpx_config.c | 2
-rw-r--r-- armv7a/vpx_config.h | 14
-rw-r--r-- armv7a/vpx_scale_rtcd.h | 62
-rw-r--r-- armv7a/vpx_version.h | 6
-rw-r--r-- generic/libvpx_srcs.txt | 96
-rw-r--r-- generic/vp8_rtcd.h (renamed from generic/vpx_rtcd.h) | 90
-rw-r--r-- generic/vp9_rtcd.h | 312
-rw-r--r-- generic/vpx_config.c | 2
-rw-r--r-- generic/vpx_config.h | 14
-rw-r--r-- generic/vpx_scale_rtcd.h | 57
-rw-r--r-- generic/vpx_version.h | 6
-rw-r--r-- libvpx.mk | 11
-rw-r--r-- libvpx/CHANGELOG | 29
-rw-r--r-- libvpx/README | 36
-rw-r--r-- libvpx/args.c | 335
-rw-r--r-- libvpx/args.h | 33
-rw-r--r-- libvpx/build/arm-msvs/obj_int_extract.bat | 14
-rw-r--r-- libvpx/build/make/Android.mk | 47
-rw-r--r-- libvpx/build/make/Makefile | 40
-rwxr-xr-x libvpx/build/make/ads2armasm_ms.pl | 38
-rwxr-xr-x libvpx/build/make/ads2gas.pl | 28
-rwxr-xr-x libvpx/build/make/ads2gas_apple.pl | 4
-rwxr-xr-x libvpx/build/make/armlink_adapter.sh | 14
-rwxr-xr-x libvpx/build/make/configure.sh | 193
-rwxr-xr-x libvpx/build/make/gen_asm_deps.sh | 2
-rwxr-xr-x libvpx/build/make/gen_msvs_proj.sh | 55
-rwxr-xr-x libvpx/build/make/gen_msvs_sln.sh | 86
-rwxr-xr-x libvpx/build/make/gen_msvs_vcxproj.sh | 530
-rw-r--r-- libvpx/build/make/obj_int_extract.c | 1461
-rwxr-xr-x libvpx/build/make/rtcd.sh | 59
-rw-r--r-- libvpx/build/make/thumb.pm | 70
-rwxr-xr-x libvpx/build/make/version.sh | 2
-rw-r--r-- libvpx/build/x86-msvs/obj_int_extract.bat | 9
-rw-r--r-- libvpx/build/x86-msvs/yasm.rules | 115
-rwxr-xr-x libvpx/configure | 179
-rw-r--r-- libvpx/example_xma.c | 317
-rw-r--r-- libvpx/examples.mk | 60
-rw-r--r-- libvpx/examples/decoder_tmpl.c | 1
-rw-r--r-- libvpx/examples/decoder_tmpl.txt | 2
-rw-r--r-- libvpx/examples/encoder_tmpl.txt | 2
-rw-r--r-- libvpx/examples/postproc.txt | 2
-rw-r--r-- libvpx/libmkv/EbmlBufferWriter.c | 74
-rw-r--r-- libvpx/libmkv/EbmlBufferWriter.h | 14
-rw-r--r-- libvpx/libmkv/EbmlIDs.h | 200
-rw-r--r-- libvpx/libmkv/EbmlWriter.c | 206
-rw-r--r-- libvpx/libmkv/WebMElement.c | 182
-rw-r--r-- libvpx/libmkv/WebMElement.h | 4
-rw-r--r-- libvpx/libmkv/testlibmkv.c | 87
-rw-r--r-- libvpx/libs.mk | 283
-rw-r--r-- libvpx/md5_utils.c | 320
-rw-r--r-- libvpx/md5_utils.h | 9
-rw-r--r-- libvpx/nestegg/halloc/src/macros.h | 2
-rw-r--r-- libvpx/nestegg/include/nestegg/nestegg.h | 1
-rw-r--r-- libvpx/nestegg/src/nestegg.c | 4
-rw-r--r-- libvpx/solution.mk | 8
-rw-r--r-- libvpx/test/acm_random.h | 35
-rw-r--r-- libvpx/test/altref_test.cc | 20
-rw-r--r-- libvpx/test/borders_test.cc | 82
-rw-r--r-- libvpx/test/clear_system_state.h | 31
-rw-r--r-- libvpx/test/codec_factory.h | 232
-rw-r--r-- libvpx/test/config_test.cc | 18
-rw-r--r-- libvpx/test/convolve_test.cc | 645
-rw-r--r-- libvpx/test/cpu_speed_test.cc | 112
-rw-r--r-- libvpx/test/cq_test.cc | 25
-rw-r--r-- libvpx/test/datarate_test.cc | 21
-rw-r--r-- libvpx/test/dct16x16_test.cc | 521
-rw-r--r-- libvpx/test/dct32x32_test.cc | 262
-rw-r--r-- libvpx/test/decode_test_driver.cc | 31
-rw-r--r-- libvpx/test/decode_test_driver.h | 45
-rw-r--r-- libvpx/test/encode_test_driver.cc | 99
-rw-r--r-- libvpx/test/encode_test_driver.h | 56
-rw-r--r-- libvpx/test/error_resilience_test.cc | 169
-rw-r--r-- libvpx/test/fdct4x4_test.cc | 203
-rw-r--r-- libvpx/test/fdct8x8_test.cc | 254
-rw-r--r-- libvpx/test/i420_video_source.h | 11
-rw-r--r-- libvpx/test/idct8x8_test.cc | 140
-rw-r--r-- libvpx/test/idct_test.cc | 118
-rw-r--r-- libvpx/test/idctllm_test.cc | 125
-rw-r--r-- libvpx/test/intrapred_test.cc | 23
-rw-r--r-- libvpx/test/ivf_video_source.h | 8
-rw-r--r-- libvpx/test/keyframe_test.cc | 21
-rw-r--r-- libvpx/test/md5_helper.h | 70
-rw-r--r-- libvpx/test/pp_filter_test.cc | 20
-rw-r--r-- libvpx/test/register_state_check.h | 95
-rw-r--r-- libvpx/test/resize_test.cc | 84
-rw-r--r-- libvpx/test/sad_test.cc | 384
-rw-r--r-- libvpx/test/set_roi.cc | 12
-rw-r--r-- libvpx/test/sixtap_predict_test.cc | 23
-rw-r--r-- libvpx/test/subtract_test.cc | 19
-rw-r--r-- libvpx/test/superframe_test.cc | 96
-rw-r--r-- libvpx/test/test-data.sha1 | 405
-rw-r--r-- libvpx/test/test.mk | 489
-rw-r--r-- libvpx/test/test_libvpx.cc | 36
-rw-r--r-- libvpx/test/test_vector_test.cc | 177
-rw-r--r-- libvpx/test/tile_independence_test.cc | 109
-rw-r--r-- libvpx/test/util.h | 30
-rw-r--r-- libvpx/test/variance_test.cc | 694
-rw-r--r-- libvpx/test/video_source.h | 2
-rw-r--r-- libvpx/test/vp8_boolcoder_test.cc (renamed from libvpx/test/boolcoder_test.cc) | 60
-rw-r--r-- libvpx/test/vp8_decrypt_test.cc | 73
-rw-r--r-- libvpx/test/vp8_fdct4x4_test.cc | 169
-rw-r--r-- libvpx/test/vp9_boolcoder_test.cc | 91
-rw-r--r-- libvpx/test/vp9_lossless_test.cc | 75
-rw-r--r-- libvpx/test/vp9_subtract_test.cc | 100
-rw-r--r-- libvpx/test/vp9_thread_test.cc | 109
-rw-r--r-- libvpx/test/webm_video_source.h | 185
-rw-r--r-- libvpx/third_party/libyuv/source/scale.c | 14
-rw-r--r-- libvpx/third_party/x86inc/LICENSE | 18
-rw-r--r-- libvpx/third_party/x86inc/README.webm | 11
-rw-r--r-- libvpx/third_party/x86inc/x86inc.asm | 1125
-rwxr-xr-x libvpx/tools/all_builds.py | 72
-rwxr-xr-x libvpx/tools/cpplint.py | 4020
-rw-r--r-- libvpx/tools/diff.py | 127
-rwxr-xr-x libvpx/tools/ftfy.sh | 16
-rwxr-xr-x libvpx/tools/intersect-diffs.py | 118
-rwxr-xr-x libvpx/tools/lint-hunks.py | 144
-rwxr-xr-x libvpx/tools/vpx-astyle.sh | 27
-rw-r--r-- libvpx/tools_common.c | 9
-rw-r--r-- libvpx/tools_common.h | 2
-rw-r--r-- libvpx/vp8/common/alloccommon.c | 1
-rw-r--r-- libvpx/vp8/common/arm/armv6/filter_v6.asm | 2
-rw-r--r-- libvpx/vp8/common/arm/armv6/idct_blk_v6.c | 2
-rw-r--r-- libvpx/vp8/common/arm/bilinearfilter_arm.c | 2
-rw-r--r-- libvpx/vp8/common/arm/filter_arm.c | 2
-rw-r--r-- libvpx/vp8/common/arm/loopfilter_arm.c | 2
-rw-r--r-- libvpx/vp8/common/arm/neon/idct_blk_neon.c | 2
-rw-r--r-- libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm | 6
-rw-r--r-- libvpx/vp8/common/arm/reconintra_arm.c | 2
-rw-r--r-- libvpx/vp8/common/arm/variance_arm.c | 2
-rw-r--r-- libvpx/vp8/common/dequantize.c | 2
-rw-r--r-- libvpx/vp8/common/generic/systemdependent.c | 3
-rw-r--r-- libvpx/vp8/common/idct_blk.c | 2
-rw-r--r-- libvpx/vp8/common/invtrans.h | 2
-rw-r--r-- libvpx/vp8/common/loopfilter.c | 65
-rw-r--r-- libvpx/vp8/common/loopfilter.h | 2
-rw-r--r-- libvpx/vp8/common/loopfilter_filters.c | 52
-rw-r--r-- libvpx/vp8/common/mfqe.c | 4
-rw-r--r-- libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c | 2
-rw-r--r-- libvpx/vp8/common/mips/dspr2/filter_dspr2.c | 2
-rw-r--r-- libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c | 2
-rw-r--r-- libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c | 2
-rw-r--r-- libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c | 2
-rw-r--r-- libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c | 2
-rw-r--r-- libvpx/vp8/common/onyx.h | 3
-rw-r--r-- libvpx/vp8/common/onyxc_int.h | 12
-rw-r--r-- libvpx/vp8/common/onyxd.h | 5
-rw-r--r-- libvpx/vp8/common/postproc.c | 86
-rw-r--r-- libvpx/vp8/common/ppc/systemdependent.c | 5
-rw-r--r-- libvpx/vp8/common/reconinter.c | 2
-rw-r--r-- libvpx/vp8/common/reconintra.c | 10
-rw-r--r-- libvpx/vp8/common/reconintra4x4.c | 6
-rw-r--r-- libvpx/vp8/common/rtcd.c | 94
-rw-r--r-- libvpx/vp8/common/rtcd_defs.sh | 58
-rw-r--r-- libvpx/vp8/common/systemdependent.h | 6
-rw-r--r-- libvpx/vp8/common/variance_c.c | 10
-rw-r--r-- libvpx/vp8/common/x86/idct_blk_mmx.c | 2
-rw-r--r-- libvpx/vp8/common/x86/idct_blk_sse2.c | 2
-rw-r--r-- libvpx/vp8/common/x86/iwalsh_mmx.asm | 2
-rw-r--r-- libvpx/vp8/common/x86/loopfilter_block_sse2.asm | 10
-rw-r--r-- libvpx/vp8/common/x86/mfqe_sse2.asm | 6
-rw-r--r-- libvpx/vp8/common/x86/postproc_mmx.asm | 3
-rw-r--r-- libvpx/vp8/common/x86/recon_sse2.asm | 2
-rw-r--r-- libvpx/vp8/common/x86/recon_wrapper_sse2.c | 2
-rw-r--r-- libvpx/vp8/common/x86/sad_sse3.asm | 8
-rw-r--r-- libvpx/vp8/common/x86/subpixel_ssse3.asm | 1
-rw-r--r-- libvpx/vp8/common/x86/variance_mmx.c | 20
-rw-r--r-- libvpx/vp8/common/x86/variance_sse2.c | 26
-rw-r--r-- libvpx/vp8/common/x86/variance_ssse3.c | 4
-rw-r--r-- libvpx/vp8/common/x86/vp8_asm_stubs.c | 2
-rw-r--r-- libvpx/vp8/decoder/asm_dec_offsets.c | 26
-rw-r--r-- libvpx/vp8/decoder/dboolhuff.c | 55
-rw-r--r-- libvpx/vp8/decoder/dboolhuff.h | 56
-rw-r--r-- libvpx/vp8/decoder/decodemv.h | 4
-rw-r--r-- libvpx/vp8/decoder/decoderthreading.h | 20
-rw-r--r-- libvpx/vp8/decoder/decodframe.c | 110
-rw-r--r-- libvpx/vp8/decoder/detokenize.h | 7
-rw-r--r-- libvpx/vp8/decoder/ec_types.h | 3
-rw-r--r-- libvpx/vp8/decoder/error_concealment.c | 4
-rw-r--r-- libvpx/vp8/decoder/error_concealment.h | 6
-rw-r--r-- libvpx/vp8/decoder/onyxd_if.c | 183
-rw-r--r-- libvpx/vp8/decoder/onyxd_int.h | 43
-rw-r--r-- libvpx/vp8/decoder/threading.c | 5
-rw-r--r-- libvpx/vp8/decoder/treereader.h | 9
-rw-r--r-- libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm | 2
-rw-r--r-- libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm | 2
-rw-r--r-- libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm | 2
-rw-r--r-- libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm | 2
-rw-r--r-- libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm | 2
-rw-r--r-- libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm | 2
-rw-r--r-- libvpx/vp8/encoder/arm/dct_arm.c | 2
-rw-r--r-- libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm | 2
-rw-r--r-- libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm | 4
-rw-r--r-- libvpx/vp8/encoder/arm/neon/subtract_neon.asm | 2
-rw-r--r-- libvpx/vp8/encoder/arm/quantize_arm.c | 2
-rw-r--r-- libvpx/vp8/encoder/bitstream.c | 66
-rw-r--r-- libvpx/vp8/encoder/block.h | 19
-rw-r--r-- libvpx/vp8/encoder/boolhuff.c | 2
-rw-r--r-- libvpx/vp8/encoder/boolhuff.h | 2
-rw-r--r-- libvpx/vp8/encoder/denoising.c | 16
-rw-r--r-- libvpx/vp8/encoder/encodeframe.c | 83
-rw-r--r-- libvpx/vp8/encoder/encodeintra.c | 2
-rw-r--r-- libvpx/vp8/encoder/encodemb.c | 2
-rw-r--r-- libvpx/vp8/encoder/encodemv.c | 6
-rw-r--r-- libvpx/vp8/encoder/ethreading.c | 19
-rw-r--r-- libvpx/vp8/encoder/firstpass.c | 41
-rw-r--r-- libvpx/vp8/encoder/mcomp.c | 25
-rw-r--r-- libvpx/vp8/encoder/mcomp.h | 2
-rw-r--r-- libvpx/vp8/encoder/onyx_if.c | 571
-rw-r--r-- libvpx/vp8/encoder/onyx_int.h | 51
-rw-r--r-- libvpx/vp8/encoder/pickinter.c | 65
-rw-r--r-- libvpx/vp8/encoder/picklpf.c | 9
-rw-r--r-- libvpx/vp8/encoder/psnr.c | 2
-rw-r--r-- libvpx/vp8/encoder/quantize.c | 52
-rw-r--r-- libvpx/vp8/encoder/ratectrl.c | 61
-rw-r--r-- libvpx/vp8/encoder/rdopt.c | 123
-rw-r--r-- libvpx/vp8/encoder/rdopt.h | 2
-rw-r--r-- libvpx/vp8/encoder/temporal_filter.c | 2
-rw-r--r-- libvpx/vp8/encoder/tokenize.c | 4
-rw-r--r-- libvpx/vp8/encoder/tokenize.h | 2
-rw-r--r-- libvpx/vp8/encoder/vp8_asm_enc_offsets.c (renamed from libvpx/vp8/encoder/asm_enc_offsets.c) | 0
-rw-r--r-- libvpx/vp8/encoder/x86/dct_sse2.asm | 4
-rw-r--r-- libvpx/vp8/encoder/x86/denoising_sse2.c | 3
-rw-r--r-- libvpx/vp8/encoder/x86/quantize_sse2.asm | 386
-rw-r--r-- libvpx/vp8/encoder/x86/quantize_sse2.c | 229
-rw-r--r-- libvpx/vp8/encoder/x86/quantize_sse4.asm | 8
-rw-r--r-- libvpx/vp8/encoder/x86/quantize_ssse3.asm | 8
-rw-r--r-- libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm | 2
-rw-r--r-- libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c | 2
-rw-r--r-- libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c | 2
-rw-r--r-- libvpx/vp8/vp8_common.mk | 3
-rw-r--r-- libvpx/vp8/vp8_cx_iface.c | 42
-rw-r--r-- libvpx/vp8/vp8_dx_iface.c | 301
-rw-r--r-- libvpx/vp8/vp8cx.mk | 13
-rw-r--r-- libvpx/vp8/vp8dx.mk | 27
-rw-r--r-- libvpx/vp8_multi_resolution_encoder.c | 23
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_avg_neon.asm | 116
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_convolve8_avg_neon.asm | 302
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_convolve8_neon.asm | 280
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_convolve_neon.c | 78
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_copy_neon.asm | 84
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_dc_only_idct_add_neon.asm | 69
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_idct16x16_neon.c | 169
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_idct32x32_neon.c | 47
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.asm | 708
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_mb_lpf_neon.asm | 603
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_short_idct16x16_1_add_neon.asm | 198
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_short_idct16x16_add_neon.asm | 1191
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_short_idct32x32_add_neon.asm | 1013
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_short_idct4x4_1_add_neon.asm | 68
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_short_idct4x4_add_neon.asm | 190
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_short_idct8x8_1_add_neon.asm | 88
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_short_idct8x8_add_neon.asm | 519
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_short_iht4x4_add_neon.asm | 237
-rw-r--r-- libvpx/vp9/common/arm/neon/vp9_short_iht8x8_add_neon.asm | 696
-rw-r--r-- libvpx/vp9/common/generic/vp9_systemdependent.c | 19
-rw-r--r-- libvpx/vp9/common/vp9_alloccommon.c | 208
-rw-r--r-- libvpx/vp9/common/vp9_alloccommon.h | 30
-rw-r--r-- libvpx/vp9/common/vp9_blockd.h | 595
-rw-r--r-- libvpx/vp9/common/vp9_common.h | 92
-rw-r--r-- libvpx/vp9/common/vp9_common_data.c | 135
-rw-r--r-- libvpx/vp9/common/vp9_common_data.h | 32
-rw-r--r-- libvpx/vp9/common/vp9_convolve.c | 305
-rw-r--r-- libvpx/vp9/common/vp9_convolve.h | 29
-rw-r--r-- libvpx/vp9/common/vp9_debugmodes.c | 80
-rw-r--r-- libvpx/vp9/common/vp9_default_coef_probs.h | 696
-rw-r--r-- libvpx/vp9/common/vp9_entropy.c | 660
-rw-r--r-- libvpx/vp9/common/vp9_entropy.h | 380
-rw-r--r-- libvpx/vp9/common/vp9_entropymode.c | 518
-rw-r--r-- libvpx/vp9/common/vp9_entropymode.h | 72
-rw-r--r-- libvpx/vp9/common/vp9_entropymv.c | 262
-rw-r--r-- libvpx/vp9/common/vp9_entropymv.h | 129
-rw-r--r-- libvpx/vp9/common/vp9_enums.h | 79
-rw-r--r-- libvpx/vp9/common/vp9_extend.c | 143
-rw-r--r-- libvpx/vp9/common/vp9_extend.h | 25
-rw-r--r-- libvpx/vp9/common/vp9_filter.c | 96
-rw-r--r-- libvpx/vp9/common/vp9_filter.h | 33
-rw-r--r-- libvpx/vp9/common/vp9_findnearmv.c | 86
-rw-r--r-- libvpx/vp9/common/vp9_findnearmv.h | 92
-rw-r--r-- libvpx/vp9/common/vp9_idct.c | 1276
-rw-r--r-- libvpx/vp9/common/vp9_idct.h | 88
-rw-r--r-- libvpx/vp9/common/vp9_loopfilter.c | 1061
-rw-r--r-- libvpx/vp9/common/vp9_loopfilter.h | 93
-rw-r--r-- libvpx/vp9/common/vp9_loopfilter_filters.c | 309
-rw-r--r-- libvpx/vp9/common/vp9_mv.h | 39
-rw-r--r-- libvpx/vp9/common/vp9_mvref_common.c | 286
-rw-r--r-- libvpx/vp9/common/vp9_mvref_common.h | 33
-rw-r--r-- libvpx/vp9/common/vp9_onyx.h | 229
-rw-r--r-- libvpx/vp9/common/vp9_onyxc_int.h | 309
-rw-r--r-- libvpx/vp9/common/vp9_postproc.c | 1018
-rw-r--r-- libvpx/vp9/common/vp9_postproc.h | 36
-rw-r--r-- libvpx/vp9/common/vp9_ppflags.h | 38
-rw-r--r-- libvpx/vp9/common/vp9_pragmas.h (renamed from libvpx/vpx_scale/scale_mode.h) | 24
-rw-r--r-- libvpx/vp9/common/vp9_pred_common.c | 416
-rw-r--r-- libvpx/vp9/common/vp9_pred_common.h | 133
-rw-r--r-- libvpx/vp9/common/vp9_quant_common.c | 143
-rw-r--r-- libvpx/vp9/common/vp9_quant_common.h | 28
-rw-r--r-- libvpx/vp9/common/vp9_reconinter.c | 264
-rw-r--r-- libvpx/vp9/common/vp9_reconinter.h | 101
-rw-r--r-- libvpx/vp9/common/vp9_reconintra.c | 385
-rw-r--r-- libvpx/vp9/common/vp9_reconintra.h | 21
-rw-r--r-- libvpx/vp9/common/vp9_rtcd.c | 20
-rw-r--r-- libvpx/vp9/common/vp9_rtcd_defs.sh | 775
-rw-r--r-- libvpx/vp9/common/vp9_sadmxn.h | 38
-rw-r--r-- libvpx/vp9/common/vp9_scale.c | 146
-rw-r--r-- libvpx/vp9/common/vp9_scale.h | 51
-rw-r--r-- libvpx/vp9/common/vp9_seg_common.c | 85
-rw-r--r-- libvpx/vp9/common/vp9_seg_common.h | 82
-rw-r--r-- libvpx/vp9/common/vp9_subpelvar.h | 145
-rw-r--r-- libvpx/vp9/common/vp9_systemdependent.h | 39
-rw-r--r-- libvpx/vp9/common/vp9_tapify.py | 106
-rw-r--r-- libvpx/vp9/common/vp9_textblit.c | 120
-rw-r--r-- libvpx/vp9/common/vp9_textblit.h | 19
-rw-r--r-- libvpx/vp9/common/vp9_tile_common.c | 61
-rw-r--r-- libvpx/vp9/common/vp9_tile_common.h | 23
-rw-r--r-- libvpx/vp9/common/vp9_treecoder.c | 75
-rw-r--r-- libvpx/vp9/common/vp9_treecoder.h | 100
-rw-r--r-- libvpx/vp9/common/x86/vp9_asm_stubs.c | 319
-rw-r--r-- libvpx/vp9/common/x86/vp9_copy_sse2.asm | 152
-rw-r--r-- libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c | 3551
-rw-r--r-- libvpx/vp9/common/x86/vp9_intrapred_sse2.asm | 341
-rw-r--r-- libvpx/vp9/common/x86/vp9_intrapred_ssse3.asm | 291
-rw-r--r-- libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c | 1282
-rw-r--r-- libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm | 626
-rw-r--r-- libvpx/vp9/common/x86/vp9_postproc_mmx.asm | 534
-rw-r--r-- libvpx/vp9/common/x86/vp9_postproc_sse2.asm | 695
-rw-r--r-- libvpx/vp9/common/x86/vp9_postproc_x86.h | 64
-rw-r--r-- libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm | 1011
-rw-r--r-- libvpx/vp9/decoder/arm/neon/vp9_add_constant_residual_neon.asm | 230
-rw-r--r-- libvpx/vp9/decoder/vp9_dboolhuff.c | 92
-rw-r--r-- libvpx/vp9/decoder/vp9_dboolhuff.h | 93
-rw-r--r-- libvpx/vp9/decoder/vp9_decodemv.c | 690
-rw-r--r-- libvpx/vp9/decoder/vp9_decodemv.h | 21
-rw-r--r-- libvpx/vp9/decoder/vp9_decodframe.c | 1019
-rw-r--r-- libvpx/vp9/decoder/vp9_decodframe.h | 21
-rw-r--r-- libvpx/vp9/decoder/vp9_detokenize.c | 249
-rw-r--r-- libvpx/vp9/decoder/vp9_detokenize.h | 20
-rw-r--r-- libvpx/vp9/decoder/vp9_dsubexp.c | 106
-rw-r--r-- libvpx/vp9/decoder/vp9_dsubexp.h | 19
-rw-r--r-- libvpx/vp9/decoder/vp9_idct_blk.c | 152
-rw-r--r-- libvpx/vp9/decoder/vp9_idct_blk.h | 30
-rw-r--r-- libvpx/vp9/decoder/vp9_onyxd.h | 69
-rw-r--r-- libvpx/vp9/decoder/vp9_onyxd_if.c | 448
-rw-r--r-- libvpx/vp9/decoder/vp9_onyxd_int.h | 44
-rw-r--r-- libvpx/vp9/decoder/vp9_read_bit_buffer.h | 60
-rw-r--r-- libvpx/vp9/decoder/vp9_thread.c | 248
-rw-r--r-- libvpx/vp9/decoder/vp9_thread.h | 93
-rw-r--r-- libvpx/vp9/decoder/vp9_treereader.h | 31
-rw-r--r-- libvpx/vp9/decoder/x86/vp9_dequantize_sse2.c | 220
-rw-r--r-- libvpx/vp9/encoder/vp9_bitstream.c | 1621
-rw-r--r-- libvpx/vp9/encoder/vp9_bitstream.h | 17
-rw-r--r-- libvpx/vp9/encoder/vp9_block.h | 190
-rw-r--r-- libvpx/vp9/encoder/vp9_boolhuff.c | 63
-rw-r--r-- libvpx/vp9/encoder/vp9_boolhuff.h | 115
-rw-r--r-- libvpx/vp9/encoder/vp9_dct.c | 1385
-rw-r--r-- libvpx/vp9/encoder/vp9_encodeframe.c | 2799
-rw-r--r-- libvpx/vp9/encoder/vp9_encodeframe.h | 22
-rw-r--r-- libvpx/vp9/encoder/vp9_encodeintra.c | 28
-rw-r--r-- libvpx/vp9/encoder/vp9_encodeintra.h | 20
-rw-r--r-- libvpx/vp9/encoder/vp9_encodemb.c | 731
-rw-r--r-- libvpx/vp9/encoder/vp9_encodemb.h | 69
-rw-r--r-- libvpx/vp9/encoder/vp9_encodemv.c | 357
-rw-r--r-- libvpx/vp9/encoder/vp9_encodemv.h | 31
-rw-r--r-- libvpx/vp9/encoder/vp9_firstpass.c | 2654
-rw-r--r-- libvpx/vp9/encoder/vp9_firstpass.h | 22
-rw-r--r-- libvpx/vp9/encoder/vp9_lookahead.c | 188
-rw-r--r-- libvpx/vp9/encoder/vp9_lookahead.h | 97
-rw-r--r-- libvpx/vp9/encoder/vp9_mbgraph.c | 429
-rw-r--r-- libvpx/vp9/encoder/vp9_mbgraph.h | 16
-rw-r--r-- libvpx/vp9/encoder/vp9_mcomp.c | 2160
-rw-r--r-- libvpx/vp9/encoder/vp9_mcomp.h | 126
-rw-r--r-- libvpx/vp9/encoder/vp9_modecosts.c | 43
-rw-r--r-- libvpx/vp9/encoder/vp9_modecosts.h | 17
-rw-r--r-- libvpx/vp9/encoder/vp9_onyx_if.c | 4201
-rw-r--r-- libvpx/vp9/encoder/vp9_onyx_int.h | 722
-rw-r--r-- libvpx/vp9/encoder/vp9_picklpf.c | 218
-rw-r--r-- libvpx/vp9/encoder/vp9_picklpf.h | 22
-rw-r--r-- libvpx/vp9/encoder/vp9_psnr.c | 29
-rw-r--r-- libvpx/vp9/encoder/vp9_psnr.h | 17
-rw-r--r-- libvpx/vp9/encoder/vp9_quantize.c | 355
-rw-r--r-- libvpx/vp9/encoder/vp9_quantize.h | 43
-rw-r--r-- libvpx/vp9/encoder/vp9_ratectrl.c | 503
-rw-r--r-- libvpx/vp9/encoder/vp9_ratectrl.h | 39
-rw-r--r-- libvpx/vp9/encoder/vp9_rdopt.c | 4050
-rw-r--r-- libvpx/vp9/encoder/vp9_rdopt.h | 36
-rw-r--r-- libvpx/vp9/encoder/vp9_sad_c.c | 615
-rw-r--r-- libvpx/vp9/encoder/vp9_segmentation.c | 282
-rw-r--r-- libvpx/vp9/encoder/vp9_segmentation.h | 40
-rw-r--r-- libvpx/vp9/encoder/vp9_ssim.c | 148
-rw-r--r-- libvpx/vp9/encoder/vp9_subexp.c | 236
-rw-r--r-- libvpx/vp9/encoder/vp9_subexp.h | 35
-rw-r--r-- libvpx/vp9/encoder/vp9_temporal_filter.c | 527
-rw-r--r-- libvpx/vp9/encoder/vp9_temporal_filter.h | 18
-rw-r--r-- libvpx/vp9/encoder/vp9_tokenize.c | 384
-rw-r--r-- libvpx/vp9/encoder/vp9_tokenize.h | 56
-rw-r--r-- libvpx/vp9/encoder/vp9_treewriter.c | 38
-rw-r--r-- libvpx/vp9/encoder/vp9_treewriter.h | 87
-rw-r--r-- libvpx/vp9/encoder/vp9_variance.h | 112
-rw-r--r-- libvpx/vp9/encoder/vp9_variance_c.c | 957
-rw-r--r-- libvpx/vp9/encoder/vp9_write_bit_buffer.h | 48
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_dct32x32_sse2.c | 2650
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_dct_sse2.c | 2585
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_error_sse2.asm | 74
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_mcomp_x86.h | 40
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm | 218
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_sad4d_sse2.asm | 231
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_sad_mmx.asm | 427
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_sad_sse2.asm | 267
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_sad_sse3.asm | 378
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_sad_sse4.asm | 359
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_sad_ssse3.asm | 370
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_ssim_opt.asm | 216
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_subpel_variance.asm | 1300
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm | 337
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_subtract_sse2.asm | 127
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm | 207
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm | 510
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm | 734
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_variance_mmx.c | 147
-rw-r--r-- libvpx/vp9/encoder/x86/vp9_variance_sse2.c | 556
-rw-r--r-- libvpx/vp9/exports_dec | 2
-rw-r--r-- libvpx/vp9/exports_enc | 4
-rw-r--r-- libvpx/vp9/vp9_common.mk | 113
-rw-r--r-- libvpx/vp9/vp9_cx_iface.c | 1245
-rw-r--r-- libvpx/vp9/vp9_dx_iface.c | 717
-rw-r--r-- libvpx/vp9/vp9_iface_common.h | 87
-rw-r--r-- libvpx/vp9/vp9cx.mk | 105
-rw-r--r-- libvpx/vp9/vp9dx.mk | 43
-rw-r--r-- libvpx/vp9_spatial_scalable_encoder.c | 487
-rw-r--r-- libvpx/vpx/internal/vpx_codec_internal.h | 305
-rw-r--r-- libvpx/vpx/src/vpx_codec.c | 202
-rw-r--r-- libvpx/vpx/src/vpx_decoder.c | 341
-rw-r--r-- libvpx/vpx/src/vpx_encoder.c | 628
-rw-r--r-- libvpx/vpx/src/vpx_image.c | 374
-rw-r--r-- libvpx/vpx/vp8.h | 78
-rw-r--r-- libvpx/vpx/vp8cx.h | 211
-rw-r--r-- libvpx/vpx/vp8dx.h | 58
-rw-r--r-- libvpx/vpx/vpx_codec.h | 862
-rw-r--r-- libvpx/vpx/vpx_decoder.h | 567
-rw-r--r-- libvpx/vpx/vpx_encoder.h | 1574
-rw-r--r-- libvpx/vpx/vpx_image.h | 318
-rw-r--r-- libvpx/vpx/vpx_integer.h | 3
-rw-r--r-- libvpx/vpx_mem/include/vpx_mem_intrnl.h | 28
-rw-r--r-- libvpx/vpx_mem/include/vpx_mem_tracker.h | 287
-rw-r--r-- libvpx/vpx_mem/memory_manager/hmm_alloc.c | 64
-rw-r--r-- libvpx/vpx_mem/memory_manager/hmm_base.c | 501
-rw-r--r-- libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c | 33
-rw-r--r-- libvpx/vpx_mem/memory_manager/hmm_grow.c | 41
-rw-r--r-- libvpx/vpx_mem/memory_manager/hmm_largest.c | 61
-rw-r--r-- libvpx/vpx_mem/memory_manager/hmm_resize.c | 131
-rw-r--r-- libvpx/vpx_mem/memory_manager/hmm_shrink.c | 136
-rw-r--r-- libvpx/vpx_mem/memory_manager/hmm_true.c | 19
-rw-r--r-- libvpx/vpx_mem/memory_manager/include/cavl_if.h | 57
-rw-r--r-- libvpx/vpx_mem/memory_manager/include/cavl_impl.h | 1579
-rw-r--r-- libvpx/vpx_mem/memory_manager/include/heapmm.h | 77
-rw-r--r-- libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h | 10
-rw-r--r-- libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h | 64
-rw-r--r-- libvpx/vpx_mem/vpx_mem.c | 807
-rw-r--r-- libvpx/vpx_mem/vpx_mem.h | 150
-rw-r--r-- libvpx/vpx_mem/vpx_mem_tracker.c | 712
-rw-r--r-- libvpx/vpx_ports/arm_cpudetect.c | 261
-rw-r--r-- libvpx/vpx_ports/asm_offsets.h | 4
-rw-r--r-- libvpx/vpx_ports/config.h | 10
-rw-r--r-- libvpx/vpx_ports/emmintrin_compat.h | 55
-rw-r--r-- libvpx/vpx_ports/emms.asm | 2
-rw-r--r-- libvpx/vpx_ports/mem.h | 5
-rw-r--r-- libvpx/vpx_ports/mem_ops.h | 182
-rw-r--r-- libvpx/vpx_ports/mem_ops_aligned.h | 80
-rw-r--r-- libvpx/vpx_ports/vpx_once.h | 97
-rw-r--r-- libvpx/vpx_ports/vpx_timer.h | 63
-rw-r--r-- libvpx/vpx_ports/vpxtypes.h | 167
-rw-r--r-- libvpx/vpx_ports/x86.h | 238
-rw-r--r-- libvpx/vpx_ports/x86_abi_support.asm | 27
-rw-r--r-- libvpx/vpx_ports/x86_cpuid.c | 60
-rw-r--r-- libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm | 9
-rw-r--r-- libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm | 4
-rw-r--r-- libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm | 11
-rw-r--r-- libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm | 2
-rw-r--r-- libvpx/vpx_scale/arm/neon/yv12extend_arm.c | 10
-rw-r--r-- libvpx/vpx_scale/generic/bicubic_scaler.c | 569
-rw-r--r-- libvpx/vpx_scale/generic/gen_scalers.c | 684
-rw-r--r-- libvpx/vpx_scale/generic/vpx_scale.c (renamed from libvpx/vpx_scale/generic/vpxscale.c) | 502
-rw-r--r-- libvpx/vpx_scale/generic/yv12config.c | 176
-rw-r--r-- libvpx/vpx_scale/generic/yv12extend.c | 357
-rw-r--r-- libvpx/vpx_scale/generic/yv12extend_generic.h | 25
-rw-r--r-- libvpx/vpx_scale/include/generic/vpxscale_arbitrary.h | 55
-rw-r--r-- libvpx/vpx_scale/include/generic/vpxscale_depricated.h | 34
-rw-r--r-- libvpx/vpx_scale/vpx_scale.h (renamed from libvpx/vpx_scale/vpxscale.h) | 12
-rw-r--r-- libvpx/vpx_scale/vpx_scale.mk | 14
-rw-r--r-- libvpx/vpx_scale/vpx_scale_asm_offsets.c (renamed from libvpx/vp8/common/asm_com_offsets.c) | 33
-rw-r--r-- libvpx/vpx_scale/vpx_scale_rtcd.c | 18
-rw-r--r-- libvpx/vpx_scale/vpx_scale_rtcd.sh | 34
-rw-r--r-- libvpx/vpx_scale/win32/scaleopt.c | 1195
-rw-r--r-- libvpx/vpx_scale/win32/scalesystemdependent.c | 87
-rw-r--r-- libvpx/vpx_scale/yv12config.h | 63
-rw-r--r-- libvpx/vpxdec.c | 1803
-rw-r--r-- libvpx/vpxenc.c | 4144
-rw-r--r-- libvpx/y4minput.c | 784
-rw-r--r-- libvpx/y4minput.h | 11
-rw-r--r-- libwebm/mkvparser.cpp | 6
-rw-r--r-- mips-dspr2/libvpx_srcs.txt | 96
-rw-r--r-- mips-dspr2/vp8_rtcd.h (renamed from mips-dspr2/vpx_rtcd.h) | 90
-rw-r--r-- mips-dspr2/vp9_rtcd.h | 316
-rw-r--r-- mips-dspr2/vpx_config.c | 2
-rw-r--r-- mips-dspr2/vpx_config.h | 14
-rw-r--r-- mips-dspr2/vpx_scale_rtcd.h | 61
-rw-r--r-- mips-dspr2/vpx_version.h | 6
-rw-r--r-- mips/.bins | 0
-rw-r--r-- mips/.docs | 0
-rw-r--r-- mips/.libs | 0
-rw-r--r-- mips/libvpx_srcs.txt | 96
-rw-r--r-- mips/vp8_rtcd.h (renamed from mips/vpx_rtcd.h) | 90
-rw-r--r-- mips/vp9_rtcd.h | 316
-rw-r--r-- mips/vpx_config.c | 2
-rw-r--r-- mips/vpx_config.h | 14
-rw-r--r-- mips/vpx_scale_rtcd.h | 61
-rw-r--r-- mips/vpx_version.h | 6
527 files changed, 102599 insertions, 17775 deletions
diff --git a/UPDATING b/UPDATING
index e71403a..1c9f863 100644
--- a/UPDATING
+++ b/UPDATING
@@ -32,7 +32,7 @@ Aesthetic:
Example:
$ cd external/libvpx/armv7a
$ ../libvpx/configure --target=armv7-android-gcc --disable-runtime-cpu-detect \
- --disable-neon --sdk-path=$ANDROID_NDK_ROOT --disable-vp8-encoder \
+ --disable-neon --sdk-path=$ANDROID_NDK_ROOT --disable-vp9-encoder \
--disable-examples --disable-docs
Run 'make libvpx_srcs.txt'
@@ -43,5 +43,7 @@ Remove the unused files leaving only:
libvpx_srcs.txt
vpx_config.c
vpx_config.h
-vpx_rtcd.h
+vpx_scale_rtcd.h
+vp8_rtcd.h
+vp9_rtcd.h
vpx_version.h
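
One practical consequence of this merge for client code: the single combined vpx_rtcd() initializer is gone (see the vp8_rtcd.h diff further below), and each generated header now carries its own one-shot setup function — vp8_rtcd(), vp9_rtcd() and vpx_scale_rtcd(). A minimal sketch of the new initialization order, assuming a fixed-target build like armv7a-neon with include paths pointing at the per-target config directory; the surrounding main() is illustrative only, not part of the library:

    #include "vpx_config.h"
    #include "vp8_rtcd.h"
    #include "vp9_rtcd.h"
    #include "vpx_scale_rtcd.h"

    int main(void) {
      /* One call per component replaces the old single vpx_rtcd(). */
      vp8_rtcd();        /* bind the VP8 encode/decode kernels */
      vp9_rtcd();        /* bind the VP9 kernels (new in this merge) */
      vpx_scale_rtcd();  /* bind the yv12 copy/extend/scale helpers */

      /* ... codec usage would follow ... */
      return 0;
    }

In normal use the vpx_codec_* entry points run these internally (the newly imported vpx_ports/vpx_once.h exists to make that a one-time call), so explicit calls only matter when the dispatch tables are used directly.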
diff --git a/armv7a-neon/libvpx_srcs.txt b/armv7a-neon/libvpx_srcs.txt
index 494fe37..8f8b655 100644
--- a/armv7a-neon/libvpx_srcs.txt
+++ b/armv7a-neon/libvpx_srcs.txt
@@ -63,7 +63,6 @@ vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm.s
vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm.s
vp8/common/arm/reconintra_arm.c
vp8/common/arm/variance_arm.c
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -121,7 +120,6 @@ vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -153,7 +151,6 @@ vp8/encoder/arm/neon/vp8_memcpy_neon.asm.s
vp8/encoder/arm/neon/vp8_mse16x16_neon.asm.s
vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm.s
vp8/encoder/arm/quantize_arm.c
-vp8/encoder/asm_enc_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -199,12 +196,114 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8cx_arm.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/arm/neon/vp9_avg_neon.asm.s
+vp9/common/arm/neon/vp9_convolve8_avg_neon.asm.s
+vp9/common/arm/neon/vp9_convolve8_neon.asm.s
+vp9/common/arm/neon/vp9_convolve_neon.c
+vp9/common/arm/neon/vp9_copy_neon.asm.s
+vp9/common/arm/neon/vp9_dc_only_idct_add_neon.asm.s
+vp9/common/arm/neon/vp9_idct16x16_neon.c
+vp9/common/arm/neon/vp9_idct32x32_neon.c
+vp9/common/arm/neon/vp9_loopfilter_neon.asm.s
+vp9/common/arm/neon/vp9_mb_lpf_neon.asm.s
+vp9/common/arm/neon/vp9_short_idct16x16_1_add_neon.asm.s
+vp9/common/arm/neon/vp9_short_idct16x16_add_neon.asm.s
+vp9/common/arm/neon/vp9_short_idct32x32_add_neon.asm.s
+vp9/common/arm/neon/vp9_short_idct4x4_1_add_neon.asm.s
+vp9/common/arm/neon/vp9_short_idct4x4_add_neon.asm.s
+vp9/common/arm/neon/vp9_short_idct8x8_1_add_neon.asm.s
+vp9/common/arm/neon/vp9_short_idct8x8_add_neon.asm.s
+vp9/common/arm/neon/vp9_short_iht4x4_add_neon.asm.s
+vp9/common/arm/neon/vp9_short_iht8x8_add_neon.asm.s
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common_data.c
+vp9/common/vp9_common_data.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_scale.c
+vp9/common/vp9_scale.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/arm/neon/vp9_add_constant_residual_neon.asm.s
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_dsubexp.c
+vp9/decoder/vp9_dsubexp.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_thread.c
+vp9/decoder/vp9_thread.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -214,7 +313,9 @@ vpx_mem/vpx_mem.mk
vpx_ports/arm_cpudetect.c
vpx_ports/arm.h
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm.s
@@ -223,13 +324,14 @@ vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm.s
vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm.s
vpx_scale/arm/neon/yv12extend_arm.c
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/armv7a-neon/vpx_rtcd.h b/armv7a-neon/vp8_rtcd.h
index 914b08d..9cad64a 100644
--- a/armv7a-neon/vpx_rtcd.h
+++ b/armv7a-neon/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c();
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
void vp8_dequantize_b_v6(struct blockd*, short *dqc);
void vp8_dequantize_b_neon(struct blockd*, short *dqc);
@@ -119,8 +124,8 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
-void vp8_intra4x4_predict_armv6(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_armv6(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_armv6
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -381,85 +386,7 @@ void vp8_yv12_copy_partial_frame_neon(struct yv12_buffer_config *src_ybc, struct
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-void vp8_yv12_extend_frame_borders_neon(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_neon
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-void vp8_yv12_copy_frame_neon(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_neon
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-void vp8_yv12_copy_y_neon(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_neon
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
diff --git a/armv7a-neon/vp9_rtcd.h b/armv7a-neon/vp9_rtcd.h
new file mode 100644
index 0000000..fdca309
--- /dev/null
+++ b/armv7a-neon/vp9_rtcd.h
@@ -0,0 +1,345 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_4x4 vp9_d207_predictor_4x4_c
+
+void vp9_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_4x4 vp9_d45_predictor_4x4_c
+
+void vp9_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_4x4 vp9_d63_predictor_4x4_c
+
+void vp9_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_4x4 vp9_h_predictor_4x4_c
+
+void vp9_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_4x4 vp9_d117_predictor_4x4_c
+
+void vp9_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_4x4 vp9_d135_predictor_4x4_c
+
+void vp9_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_4x4 vp9_d153_predictor_4x4_c
+
+void vp9_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_4x4 vp9_v_predictor_4x4_c
+
+void vp9_tm_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_4x4 vp9_tm_predictor_4x4_c
+
+void vp9_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_4x4 vp9_dc_predictor_4x4_c
+
+void vp9_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_4x4 vp9_dc_top_predictor_4x4_c
+
+void vp9_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_4x4 vp9_dc_left_predictor_4x4_c
+
+void vp9_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_4x4 vp9_dc_128_predictor_4x4_c
+
+void vp9_d207_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_8x8 vp9_d207_predictor_8x8_c
+
+void vp9_d45_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_8x8 vp9_d45_predictor_8x8_c
+
+void vp9_d63_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_8x8 vp9_d63_predictor_8x8_c
+
+void vp9_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_8x8 vp9_h_predictor_8x8_c
+
+void vp9_d117_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_8x8 vp9_d117_predictor_8x8_c
+
+void vp9_d135_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_8x8 vp9_d135_predictor_8x8_c
+
+void vp9_d153_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_8x8 vp9_d153_predictor_8x8_c
+
+void vp9_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_8x8 vp9_v_predictor_8x8_c
+
+void vp9_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_8x8 vp9_tm_predictor_8x8_c
+
+void vp9_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_8x8 vp9_dc_predictor_8x8_c
+
+void vp9_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_8x8 vp9_dc_top_predictor_8x8_c
+
+void vp9_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_8x8 vp9_dc_left_predictor_8x8_c
+
+void vp9_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_8x8 vp9_dc_128_predictor_8x8_c
+
+void vp9_d207_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_16x16 vp9_d207_predictor_16x16_c
+
+void vp9_d45_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_16x16 vp9_d45_predictor_16x16_c
+
+void vp9_d63_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_16x16 vp9_d63_predictor_16x16_c
+
+void vp9_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_16x16 vp9_h_predictor_16x16_c
+
+void vp9_d117_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_16x16 vp9_d117_predictor_16x16_c
+
+void vp9_d135_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_16x16 vp9_d135_predictor_16x16_c
+
+void vp9_d153_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_16x16 vp9_d153_predictor_16x16_c
+
+void vp9_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_16x16 vp9_v_predictor_16x16_c
+
+void vp9_tm_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_16x16 vp9_tm_predictor_16x16_c
+
+void vp9_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_16x16 vp9_dc_predictor_16x16_c
+
+void vp9_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_16x16 vp9_dc_top_predictor_16x16_c
+
+void vp9_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_16x16 vp9_dc_left_predictor_16x16_c
+
+void vp9_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_16x16 vp9_dc_128_predictor_16x16_c
+
+void vp9_d207_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_32x32 vp9_d207_predictor_32x32_c
+
+void vp9_d45_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_32x32 vp9_d45_predictor_32x32_c
+
+void vp9_d63_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_32x32 vp9_d63_predictor_32x32_c
+
+void vp9_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_32x32 vp9_h_predictor_32x32_c
+
+void vp9_d117_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_32x32 vp9_d117_predictor_32x32_c
+
+void vp9_d135_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_32x32 vp9_d135_predictor_32x32_c
+
+void vp9_d153_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_32x32 vp9_d153_predictor_32x32_c
+
+void vp9_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_32x32 vp9_v_predictor_32x32_c
+
+void vp9_tm_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_32x32 vp9_tm_predictor_32x32_c
+
+void vp9_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_32x32 vp9_dc_predictor_32x32_c
+
+void vp9_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_32x32 vp9_dc_top_predictor_32x32_c
+
+void vp9_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_32x32 vp9_dc_left_predictor_32x32_c
+
+void vp9_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_32x32 vp9_dc_128_predictor_32x32_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+void vp9_add_constant_residual_8x8_neon(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_neon
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+void vp9_add_constant_residual_16x16_neon(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_neon
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+void vp9_add_constant_residual_32x32_neon(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_neon
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+void vp9_mb_lpf_vertical_edge_w_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_neon
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+void vp9_mbloop_filter_vertical_edge_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_neon
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+void vp9_loop_filter_vertical_edge_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_neon
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+void vp9_mb_lpf_horizontal_edge_w_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_neon
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+void vp9_mbloop_filter_horizontal_edge_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_neon
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+void vp9_loop_filter_horizontal_edge_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_neon
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+void vp9_convolve_copy_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_copy vp9_convolve_copy_neon
+
+void vp9_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+void vp9_convolve_avg_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_avg vp9_convolve_avg_neon
+
+void vp9_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+void vp9_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_neon
+
+void vp9_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+void vp9_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_neon
+
+void vp9_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+void vp9_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_neon
+
+void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+void vp9_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_neon
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+void vp9_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_neon
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+void vp9_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_neon
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+void vp9_short_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_neon
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+void vp9_short_idct4x4_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_neon
+
+void vp9_short_idct8x8_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+void vp9_short_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_1_add vp9_short_idct8x8_1_add_neon
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+void vp9_short_idct8x8_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_neon
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+void vp9_short_idct10_8x8_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_neon
+
+void vp9_short_idct16x16_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+void vp9_short_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_1_add vp9_short_idct16x16_1_add_neon
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+void vp9_short_idct16x16_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_neon
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+void vp9_short_idct10_16x16_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_neon
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+void vp9_short_idct32x32_add_neon(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_neon
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+void vp9_short_iht4x4_add_neon(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_neon
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+void vp9_short_iht8x8_add_neon(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_neon
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+#include "vpx_ports/arm.h"
+static void setup_rtcd_internal(void)
+{
+ int flags = arm_cpu_caps();
+
+ (void)flags;
+
+
+}
+#endif
+#endif
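
Note on the header above: with CONFIG_RUNTIME_CPU_DETECT 0, each RTCD entry declares the C and NEON variants and then binds the public name to one of them with a #define, so dispatch costs nothing at run time and setup_rtcd_internal is an empty stub. A minimal distilled sketch of the same pattern (my_add_* is hypothetical, not part of libvpx):

    #include <stdio.h>

    /* Both variants share one signature, as with vp9_convolve8_c/_neon. */
    static int my_add_c(int a, int b)    { return a + b; }
    static int my_add_neon(int a, int b) { return a + b; /* stand-in for asm */ }

    /* Static binding: the public name is a macro, so callers compile
     * straight to the chosen variant, no function-pointer indirection. */
    #define my_add my_add_neon

    int main(void) {
        printf("%d\n", my_add(2, 3)); /* resolves to my_add_neon at compile time */
        return 0;
    }
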
diff --git a/armv7a-neon/vpx_config.c b/armv7a-neon/vpx_config.c
index 863c84c..dc64ce3 100644
--- a/armv7a-neon/vpx_config.c
+++ b/armv7a-neon/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=armv7-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-examples --disable-docs --enable-realtime-only";
+static const char* const cfg = "--target=armv7-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/hkuang/Downloads/android-ndk-r8e --disable-vp9-encoder --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
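
The cfg string above is exactly what vpx_codec_build_config() returns, which is useful for confirming at run time which flags a prebuilt libvpx was configured with (here: NDK r8e, VP9 encoder disabled, realtime only). A sketch, assuming libvpx is linked; the real prototype lives in vpx/vpx_codec.h:

    #include <stdio.h>

    /* Normally obtained via #include "vpx/vpx_codec.h";
     * declared here to keep the sketch self-contained. */
    extern const char *vpx_codec_build_config(void);

    int main(void) {
        printf("libvpx configure flags: %s\n", vpx_codec_build_config());
        return 0;
    }
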
diff --git a/armv7a-neon/vpx_config.h b/armv7a-neon/vpx_config.h
index b3179e5..452bc91 100644
--- a/armv7a-neon/vpx_config.h
+++ b/armv7a-neon/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 1
#define ARCH_MIPS 0
#define ARCH_X86 0
@@ -34,10 +35,11 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
+#define CONFIG_USE_X86INC 1
#define CONFIG_DEBUG 0
#define CONFIG_GPROF 0
#define CONFIG_GCOV 0
@@ -57,11 +59,15 @@
#define CONFIG_DC_RECON 0
#define CONFIG_RUNTIME_CPU_DETECT 0
#define CONFIG_POSTPROC 0
+#define CONFIG_VP9_POSTPROC 0
#define CONFIG_MULTITHREAD 1
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +83,10 @@
#define CONFIG_UNIT_TESTS 0
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
#endif /* VPX_CONFIG_H */
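
These CONFIG_* macros are plain 0/1 defines, so dependent code gates features with #if rather than #ifdef; note that this target gains CONFIG_VP9_DECODER 1 while CONFIG_VP9_ENCODER stays 0. A hypothetical sketch of how code keys off them (register_codecs is illustrative, not a libvpx function):

    #include "vpx_config.h"

    void register_codecs(void) {
    #if CONFIG_VP8_DECODER
        /* register the VP8 decoder interface */
    #endif
    #if CONFIG_VP9_DECODER
        /* register the VP9 decoder interface; the encoder path is
         * compiled out here because CONFIG_VP9_ENCODER is 0 */
    #endif
    }
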
diff --git a/armv7a-neon/vpx_scale_rtcd.h b/armv7a-neon/vpx_scale_rtcd.h
new file mode 100644
index 0000000..8c2ab2f
--- /dev/null
+++ b/armv7a-neon/vpx_scale_rtcd.h
@@ -0,0 +1,65 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+void vp8_yv12_extend_frame_borders_neon(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_neon
+
+void vp8_yv12_copy_frame_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+void vp8_yv12_copy_frame_neon(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_neon
+
+void vpx_yv12_copy_y_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+void vpx_yv12_copy_y_neon(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vpx_yv12_copy_y vpx_yv12_copy_y_neon
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vp9_extend_frame_inner_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_inner_borders vp9_extend_frame_inner_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+#include "vpx_ports/arm.h"
+static void setup_rtcd_internal(void)
+{
+ int flags = arm_cpu_caps();
+
+ (void)flags;
+
+
+}
+#endif
+#endif
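
The RTCD_C/RTCD_EXTERN arrangement means exactly one translation unit defines RTCD_C before including this header: in that file RTCD_EXTERN expands to nothing (so any function-pointer entries become definitions) and setup_rtcd_internal gets compiled, while every other includer sees extern declarations only. The definer added in this change, vpx_scale/vpx_scale_rtcd.c, looks approximately like this (reconstructed from the pattern, not quoted from the commit):

    #include "vpx_config.h"
    #define RTCD_C               /* mark this TU as the single definer */
    #include "vpx_scale_rtcd.h"
    #include "vpx_ports/vpx_once.h"

    void vpx_scale_rtcd(void) {
        /* run setup_rtcd_internal exactly once, even when called
         * concurrently from several threads */
        once(setup_rtcd_internal);
    }
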
diff --git a/armv7a-neon/vpx_version.h b/armv7a-neon/vpx_version.h
index 663dd49..512851c 100644
--- a/armv7a-neon/vpx_version.h
+++ b/armv7a-neon/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"
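
VERSION_PACKED stores major/minor/patch one byte apart, so v1.2.0 packs to (1<<16)|(2<<8)|0 = 0x010200 = 66048. A small check, unpacking by shift-and-mask:

    #include <assert.h>
    #include <stdio.h>

    #define VERSION_PACKED ((1 << 16) | (2 << 8) | 0) /* v1.2.0 */

    int main(void) {
        int major = (VERSION_PACKED >> 16) & 0xff;
        int minor = (VERSION_PACKED >> 8) & 0xff;
        int patch = VERSION_PACKED & 0xff;
        assert(VERSION_PACKED == 0x010200);
        printf("v%d.%d.%d\n", major, minor, patch); /* prints v1.2.0 */
        return 0;
    }
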
diff --git a/armv7a/libvpx_srcs.txt b/armv7a/libvpx_srcs.txt
index e63834d..8f41f9f 100644
--- a/armv7a/libvpx_srcs.txt
+++ b/armv7a/libvpx_srcs.txt
@@ -32,7 +32,6 @@ vp8/common/arm/filter_arm.c
vp8/common/arm/loopfilter_arm.c
vp8/common/arm/reconintra_arm.c
vp8/common/arm/variance_arm.c
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -90,7 +89,6 @@ vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -115,7 +113,6 @@ vp8/encoder/arm/armv6/walsh_v6.asm.s
vp8/encoder/arm/boolhuff_arm.c
vp8/encoder/arm/dct_arm.c
vp8/encoder/arm/quantize_arm.c
-vp8/encoder/asm_enc_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -161,12 +158,94 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8cx_arm.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common_data.c
+vp9/common/vp9_common_data.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_scale.c
+vp9/common/vp9_scale.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_dsubexp.c
+vp9/decoder/vp9_dsubexp.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_thread.c
+vp9/decoder/vp9_thread.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -176,17 +255,20 @@ vpx_mem/vpx_mem.mk
vpx_ports/arm_cpudetect.c
vpx_ports/arm.h
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/armv7a/vpx_rtcd.h b/armv7a/vp8_rtcd.h
index 6553876..fa79b13 100644
--- a/armv7a/vpx_rtcd.h
+++ b/armv7a/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c();
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
void vp8_dequantize_b_v6(struct blockd*, short *dqc);
#define vp8_dequantize_b vp8_dequantize_b_v6
@@ -101,8 +106,8 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
-void vp8_intra4x4_predict_armv6(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_armv6(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_armv6
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -327,82 +332,7 @@ void vp8_yv12_copy_partial_frame_c(struct yv12_buffer_config *src_ybc, struct yv
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_c
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
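
With the old shared vpx_rtcd() split per codec, startup code now initializes each dispatch table it uses; on these targets the setup bodies are empty (runtime CPU detection is disabled) but calling them remains harmless. A sketch covering the decoder-side tables present in this tree (codec_global_init is a hypothetical helper):

    /* Normally pulled in via the generated vp8_rtcd.h / vp9_rtcd.h /
     * vpx_scale_rtcd.h; declared here to keep the sketch self-contained. */
    extern void vp8_rtcd(void);
    extern void vp9_rtcd(void);
    extern void vpx_scale_rtcd(void);

    void codec_global_init(void) {
        vp8_rtcd();
        vp9_rtcd();
        vpx_scale_rtcd();
    }
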
diff --git a/armv7a/vp9_rtcd.h b/armv7a/vp9_rtcd.h
new file mode 100644
index 0000000..36202d2
--- /dev/null
+++ b/armv7a/vp9_rtcd.h
@@ -0,0 +1,317 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_4x4 vp9_d207_predictor_4x4_c
+
+void vp9_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_4x4 vp9_d45_predictor_4x4_c
+
+void vp9_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_4x4 vp9_d63_predictor_4x4_c
+
+void vp9_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_4x4 vp9_h_predictor_4x4_c
+
+void vp9_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_4x4 vp9_d117_predictor_4x4_c
+
+void vp9_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_4x4 vp9_d135_predictor_4x4_c
+
+void vp9_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_4x4 vp9_d153_predictor_4x4_c
+
+void vp9_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_4x4 vp9_v_predictor_4x4_c
+
+void vp9_tm_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_4x4 vp9_tm_predictor_4x4_c
+
+void vp9_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_4x4 vp9_dc_predictor_4x4_c
+
+void vp9_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_4x4 vp9_dc_top_predictor_4x4_c
+
+void vp9_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_4x4 vp9_dc_left_predictor_4x4_c
+
+void vp9_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_4x4 vp9_dc_128_predictor_4x4_c
+
+void vp9_d207_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_8x8 vp9_d207_predictor_8x8_c
+
+void vp9_d45_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_8x8 vp9_d45_predictor_8x8_c
+
+void vp9_d63_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_8x8 vp9_d63_predictor_8x8_c
+
+void vp9_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_8x8 vp9_h_predictor_8x8_c
+
+void vp9_d117_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_8x8 vp9_d117_predictor_8x8_c
+
+void vp9_d135_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_8x8 vp9_d135_predictor_8x8_c
+
+void vp9_d153_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_8x8 vp9_d153_predictor_8x8_c
+
+void vp9_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_8x8 vp9_v_predictor_8x8_c
+
+void vp9_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_8x8 vp9_tm_predictor_8x8_c
+
+void vp9_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_8x8 vp9_dc_predictor_8x8_c
+
+void vp9_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_8x8 vp9_dc_top_predictor_8x8_c
+
+void vp9_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_8x8 vp9_dc_left_predictor_8x8_c
+
+void vp9_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_8x8 vp9_dc_128_predictor_8x8_c
+
+void vp9_d207_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_16x16 vp9_d207_predictor_16x16_c
+
+void vp9_d45_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_16x16 vp9_d45_predictor_16x16_c
+
+void vp9_d63_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_16x16 vp9_d63_predictor_16x16_c
+
+void vp9_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_16x16 vp9_h_predictor_16x16_c
+
+void vp9_d117_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_16x16 vp9_d117_predictor_16x16_c
+
+void vp9_d135_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_16x16 vp9_d135_predictor_16x16_c
+
+void vp9_d153_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_16x16 vp9_d153_predictor_16x16_c
+
+void vp9_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_16x16 vp9_v_predictor_16x16_c
+
+void vp9_tm_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_16x16 vp9_tm_predictor_16x16_c
+
+void vp9_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_16x16 vp9_dc_predictor_16x16_c
+
+void vp9_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_16x16 vp9_dc_top_predictor_16x16_c
+
+void vp9_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_16x16 vp9_dc_left_predictor_16x16_c
+
+void vp9_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_16x16 vp9_dc_128_predictor_16x16_c
+
+void vp9_d207_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_32x32 vp9_d207_predictor_32x32_c
+
+void vp9_d45_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_32x32 vp9_d45_predictor_32x32_c
+
+void vp9_d63_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_32x32 vp9_d63_predictor_32x32_c
+
+void vp9_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_32x32 vp9_h_predictor_32x32_c
+
+void vp9_d117_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_32x32 vp9_d117_predictor_32x32_c
+
+void vp9_d135_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_32x32 vp9_d135_predictor_32x32_c
+
+void vp9_d153_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_32x32 vp9_d153_predictor_32x32_c
+
+void vp9_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_32x32 vp9_v_predictor_32x32_c
+
+void vp9_tm_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_32x32 vp9_tm_predictor_32x32_c
+
+void vp9_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_32x32 vp9_dc_predictor_32x32_c
+
+void vp9_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_32x32 vp9_dc_top_predictor_32x32_c
+
+void vp9_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_32x32 vp9_dc_left_predictor_32x32_c
+
+void vp9_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_32x32 vp9_dc_128_predictor_32x32_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_c
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_c
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_c
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_c
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_c
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_c
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_c
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_c
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_c
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_copy vp9_convolve_copy_c
+
+void vp9_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_avg vp9_convolve_avg_c
+
+void vp9_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_c
+
+void vp9_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_c
+
+void vp9_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_c
+
+void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_c
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_c
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_c
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_c
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_c
+
+void vp9_short_idct8x8_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_1_add vp9_short_idct8x8_1_add_c
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_c
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_c
+
+void vp9_short_idct16x16_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_1_add vp9_short_idct16x16_1_add_c
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_c
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_c
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_c
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_c
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_c
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+#include "vpx_ports/arm.h"
+static void setup_rtcd_internal(void)
+{
+ int flags = arm_cpu_caps();
+
+ (void)flags;
+
+
+}
+#endif
+#endif
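
DEC_MVCOSTS above is a parameter-list macro: it expands to `int *mvjcost, int *mvcost[2]`, letting several encoder prototypes share the motion-vector cost arguments. A distilled sketch of the expansion (sum_mv_costs is hypothetical):

    #include <stdio.h>

    #define DEC_MVCOSTS int *mvjcost, int *mvcost[2]

    /* Equivalent to:
     * int sum_mv_costs(int *mvjcost, int *mvcost[2], int joint, int row, int col) */
    static int sum_mv_costs(DEC_MVCOSTS, int joint, int row, int col) {
        return mvjcost[joint] + mvcost[0][row] + mvcost[1][col];
    }

    int main(void) {
        int jc[4] = {1, 2, 3, 4};
        int rowc[8] = {0}, colc[8] = {0};
        int *mvc[2] = {rowc, colc};
        printf("%d\n", sum_mv_costs(jc, mvc, 1, 5, 6)); /* prints 2 */
        return 0;
    }
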
diff --git a/armv7a/vpx_config.c b/armv7a/vpx_config.c
index 559f9b0..ecdb0cf 100644
--- a/armv7a/vpx_config.c
+++ b/armv7a/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=armv7-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-examples --disable-docs --disable-neon --enable-realtime-only";
+static const char* const cfg = "--target=armv7-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/hkuang/Downloads/android-ndk-r8e --disable-vp9-encoder --disable-neon --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
diff --git a/armv7a/vpx_config.h b/armv7a/vpx_config.h
index ddb331f..c789546 100644
--- a/armv7a/vpx_config.h
+++ b/armv7a/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 1
#define ARCH_MIPS 0
#define ARCH_X86 0
@@ -34,10 +35,11 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
+#define CONFIG_USE_X86INC 1
#define CONFIG_DEBUG 0
#define CONFIG_GPROF 0
#define CONFIG_GCOV 0
@@ -57,11 +59,15 @@
#define CONFIG_DC_RECON 0
#define CONFIG_RUNTIME_CPU_DETECT 0
#define CONFIG_POSTPROC 0
+#define CONFIG_VP9_POSTPROC 0
#define CONFIG_MULTITHREAD 1
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +83,10 @@
#define CONFIG_UNIT_TESTS 0
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
#endif /* VPX_CONFIG_H */
diff --git a/armv7a/vpx_scale_rtcd.h b/armv7a/vpx_scale_rtcd.h
new file mode 100644
index 0000000..0df8b37
--- /dev/null
+++ b/armv7a/vpx_scale_rtcd.h
@@ -0,0 +1,62 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
+
+void vp8_yv12_copy_frame_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
+
+void vpx_yv12_copy_y_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vpx_yv12_copy_y vpx_yv12_copy_y_c
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vp9_extend_frame_inner_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_inner_borders vp9_extend_frame_inner_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+#include "vpx_ports/arm.h"
+static void setup_rtcd_internal(void)
+{
+ int flags = arm_cpu_caps();
+
+ (void)flags;
+
+
+}
+#endif
+#endif
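
The yv12_buffer_config routines above operate on frames allocated with a guard border so the extend/copy functions can write past the visible edges. A hedged usage sketch, assuming the long-standing allocator signatures from vpx_scale/yv12config.h (vp8_yv12_alloc_frame_buffer returning nonzero on failure, VP8BORDERINPIXELS as the usual 32-pixel border):

    #include "vpx_scale/yv12config.h"
    #include "vpx_scale_rtcd.h"

    int make_bordered_frame(YV12_BUFFER_CONFIG *buf, int w, int h) {
        /* allocate y/u/v planes plus the guard border */
        if (vp8_yv12_alloc_frame_buffer(buf, w, h, VP8BORDERINPIXELS))
            return -1;                      /* allocation failed */
        /* fill buf->y_buffer / u_buffer / v_buffer here, then
         * replicate edge pixels into the border region: */
        vp8_yv12_extend_frame_borders(buf); /* maps to the _c variant here */
        return 0;
    }
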
diff --git a/armv7a/vpx_version.h b/armv7a/vpx_version.h
index 663dd49..512851c 100644
--- a/armv7a/vpx_version.h
+++ b/armv7a/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"
diff --git a/generic/libvpx_srcs.txt b/generic/libvpx_srcs.txt
index 5756427..8e6fad7 100644
--- a/generic/libvpx_srcs.txt
+++ b/generic/libvpx_srcs.txt
@@ -4,7 +4,6 @@ CHANGELOG
libs.mk
vp8/common/alloccommon.c
vp8/common/alloccommon.h
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -62,7 +61,6 @@ vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -75,7 +73,6 @@ vp8/decoder/onyxd_if.c
vp8/decoder/onyxd_int.h
vp8/decoder/threading.c
vp8/decoder/treereader.h
-vp8/encoder/asm_enc_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -122,11 +119,93 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common_data.c
+vp9/common/vp9_common_data.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_scale.c
+vp9/common/vp9_scale.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_dsubexp.c
+vp9/decoder/vp9_dsubexp.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_thread.c
+vp9/decoder/vp9_thread.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -134,17 +213,20 @@ vpx_mem/vpx_mem.c
vpx_mem/vpx_mem.h
vpx_mem/vpx_mem.mk
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/generic/vpx_rtcd.h b/generic/vp8_rtcd.h
index ab83978..97ef714 100644
--- a/generic/vpx_rtcd.h
+++ b/generic/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c();
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
#define vp8_dequantize_b vp8_dequantize_b_c
@@ -83,7 +88,7 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_c
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -284,82 +289,7 @@ void vp8_yv12_copy_partial_frame_c(struct yv12_buffer_config *src_ybc, struct yv
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_c
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
diff --git a/generic/vp9_rtcd.h b/generic/vp9_rtcd.h
new file mode 100644
index 0000000..4dcc1f6
--- /dev/null
+++ b/generic/vp9_rtcd.h
@@ -0,0 +1,312 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_4x4 vp9_d207_predictor_4x4_c
+
+void vp9_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_4x4 vp9_d45_predictor_4x4_c
+
+void vp9_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_4x4 vp9_d63_predictor_4x4_c
+
+void vp9_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_4x4 vp9_h_predictor_4x4_c
+
+void vp9_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_4x4 vp9_d117_predictor_4x4_c
+
+void vp9_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_4x4 vp9_d135_predictor_4x4_c
+
+void vp9_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_4x4 vp9_d153_predictor_4x4_c
+
+void vp9_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_4x4 vp9_v_predictor_4x4_c
+
+void vp9_tm_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_4x4 vp9_tm_predictor_4x4_c
+
+void vp9_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_4x4 vp9_dc_predictor_4x4_c
+
+void vp9_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_4x4 vp9_dc_top_predictor_4x4_c
+
+void vp9_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_4x4 vp9_dc_left_predictor_4x4_c
+
+void vp9_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_4x4 vp9_dc_128_predictor_4x4_c
+
+void vp9_d207_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_8x8 vp9_d207_predictor_8x8_c
+
+void vp9_d45_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_8x8 vp9_d45_predictor_8x8_c
+
+void vp9_d63_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_8x8 vp9_d63_predictor_8x8_c
+
+void vp9_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_8x8 vp9_h_predictor_8x8_c
+
+void vp9_d117_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_8x8 vp9_d117_predictor_8x8_c
+
+void vp9_d135_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_8x8 vp9_d135_predictor_8x8_c
+
+void vp9_d153_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_8x8 vp9_d153_predictor_8x8_c
+
+void vp9_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_8x8 vp9_v_predictor_8x8_c
+
+void vp9_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_8x8 vp9_tm_predictor_8x8_c
+
+void vp9_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_8x8 vp9_dc_predictor_8x8_c
+
+void vp9_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_8x8 vp9_dc_top_predictor_8x8_c
+
+void vp9_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_8x8 vp9_dc_left_predictor_8x8_c
+
+void vp9_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_8x8 vp9_dc_128_predictor_8x8_c
+
+void vp9_d207_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_16x16 vp9_d207_predictor_16x16_c
+
+void vp9_d45_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_16x16 vp9_d45_predictor_16x16_c
+
+void vp9_d63_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_16x16 vp9_d63_predictor_16x16_c
+
+void vp9_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_16x16 vp9_h_predictor_16x16_c
+
+void vp9_d117_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_16x16 vp9_d117_predictor_16x16_c
+
+void vp9_d135_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_16x16 vp9_d135_predictor_16x16_c
+
+void vp9_d153_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_16x16 vp9_d153_predictor_16x16_c
+
+void vp9_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_16x16 vp9_v_predictor_16x16_c
+
+void vp9_tm_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_16x16 vp9_tm_predictor_16x16_c
+
+void vp9_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_16x16 vp9_dc_predictor_16x16_c
+
+void vp9_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_16x16 vp9_dc_top_predictor_16x16_c
+
+void vp9_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_16x16 vp9_dc_left_predictor_16x16_c
+
+void vp9_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_16x16 vp9_dc_128_predictor_16x16_c
+
+void vp9_d207_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_32x32 vp9_d207_predictor_32x32_c
+
+void vp9_d45_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_32x32 vp9_d45_predictor_32x32_c
+
+void vp9_d63_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_32x32 vp9_d63_predictor_32x32_c
+
+void vp9_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_32x32 vp9_h_predictor_32x32_c
+
+void vp9_d117_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_32x32 vp9_d117_predictor_32x32_c
+
+void vp9_d135_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_32x32 vp9_d135_predictor_32x32_c
+
+void vp9_d153_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_32x32 vp9_d153_predictor_32x32_c
+
+void vp9_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_32x32 vp9_v_predictor_32x32_c
+
+void vp9_tm_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_32x32 vp9_tm_predictor_32x32_c
+
+void vp9_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_32x32 vp9_dc_predictor_32x32_c
+
+void vp9_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_32x32 vp9_dc_top_predictor_32x32_c
+
+void vp9_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_32x32 vp9_dc_left_predictor_32x32_c
+
+void vp9_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_32x32 vp9_dc_128_predictor_32x32_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_c
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_c
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_c
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_c
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_c
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_c
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_c
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_c
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_c
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_copy vp9_convolve_copy_c
+
+void vp9_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_avg vp9_convolve_avg_c
+
+void vp9_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_c
+
+void vp9_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_c
+
+void vp9_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_c
+
+void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_c
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_c
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_c
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_c
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_c
+
+void vp9_short_idct8x8_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_1_add vp9_short_idct8x8_1_add_c
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_c
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_c
+
+void vp9_short_idct16x16_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_1_add vp9_short_idct16x16_1_add_c
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_c
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_c
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_c
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_c
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_c
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+}
+#endif
+#endif
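
All of the intra predictors declared above share one signature: they fill a square block at dst (row pitch y_stride) from the reconstructed pixels above and to the left of it. Purely as an illustration of that convention, not the library source, a 4x4 DC predictor can be sketched as below; the _top/_left/_128 variants fall back to one border, or to the constant 128, when a neighbor row or column is unavailable:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: average the four pixels above and the four to the left,
     * round to nearest, and flood the 4x4 block with the result. */
    static void dc_predictor_4x4_sketch(uint8_t *dst, ptrdiff_t y_stride,
                                        const uint8_t *above,
                                        const uint8_t *left) {
        int i, r, c, sum = 0;
        for (i = 0; i < 4; i++)
            sum += above[i] + left[i];
        for (r = 0; r < 4; r++, dst += y_stride)
            for (c = 0; c < 4; c++)
                dst[c] = (uint8_t)((sum + 4) >> 3);  /* (sum + half) / 8 */
    }
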
diff --git a/generic/vpx_config.c b/generic/vpx_config.c
index 61f4fd8..c87cb91 100644
--- a/generic/vpx_config.c
+++ b/generic/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=generic-gnu --disable-examples --disable-docs --enable-realtime-only";
+static const char* const cfg = "--force-target=generic-gnu --disable-vp9-encoder --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
diff --git a/generic/vpx_config.h b/generic/vpx_config.h
index 9c6da62..c856d4d 100644
--- a/generic/vpx_config.h
+++ b/generic/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 0
#define ARCH_MIPS 0
#define ARCH_X86 0
@@ -34,10 +35,11 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
+#define CONFIG_USE_X86INC 1
#define CONFIG_DEBUG 0
#define CONFIG_GPROF 0
#define CONFIG_GCOV 0
@@ -57,11 +59,15 @@
#define CONFIG_DC_RECON 0
#define CONFIG_RUNTIME_CPU_DETECT 0
#define CONFIG_POSTPROC 0
+#define CONFIG_VP9_POSTPROC 0
#define CONFIG_MULTITHREAD 1
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +83,10 @@
#define CONFIG_UNIT_TESTS 1
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
#endif /* VPX_CONFIG_H */
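
vpx_config.h now also carries the INLINE keyword probed by configure (see the configure.sh hunk further down) alongside the new VP9 toggles. A hypothetical consumer is sketched below; the helper name is made up, but guarding on CONFIG_* and using INLINE is how the library's sources key off this header:

    #include "vpx_config.h"
    #include <stdint.h>

    /* INLINE expands to the strongest inline keyword configure found. */
    static INLINE uint8_t clamp_pixel(int v) {
        return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    #if CONFIG_VP9_DECODER
    /* VP9 decode-only paths compile in this build; the encoder is
     * disabled via --disable-vp9-encoder above. */
    #endif
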
diff --git a/generic/vpx_scale_rtcd.h b/generic/vpx_scale_rtcd.h
new file mode 100644
index 0000000..472a290
--- /dev/null
+++ b/generic/vpx_scale_rtcd.h
@@ -0,0 +1,57 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
+
+void vp8_yv12_copy_frame_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
+
+void vpx_yv12_copy_y_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vpx_yv12_copy_y vpx_yv12_copy_y_c
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vp9_extend_frame_inner_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_inner_borders vp9_extend_frame_inner_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+}
+#endif
+#endif
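
The vp8_*_scale entry points encode fixed scaling ratios in their names (5-to-4, 5-to-3, 2-to-1) and operate on one line or band at a time. Purely as an illustration of the interface shape, and assuming simple point sampling where the real routine may filter, a 2:1 horizontal pass could look like:

    /* Illustrative sketch only: write every other source pixel. */
    static void horizontal_line_2_1_scale_sketch(const unsigned char *source,
                                                 unsigned int source_width,
                                                 unsigned char *dest,
                                                 unsigned int dest_width) {
        unsigned int i;
        (void)source_width;  /* implied by dest_width * 2 in this sketch */
        for (i = 0; i < dest_width; i++)
            dest[i] = source[i * 2];
    }
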
diff --git a/generic/vpx_version.h b/generic/vpx_version.h
index 663dd49..512851c 100644
--- a/generic/vpx_version.h
+++ b/generic/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"
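
VERSION_PACKED folds the three components into a single comparable integer, so the new v1.2.0 packs to (1<<16)|(2<<8)|0 = 0x010200 = 66048. A standalone check of the arithmetic:

    #include <stdio.h>

    int main(void) {
        const int packed = (1 << 16) | (2 << 8) | 0;  /* v1.2.0 */
        printf("major=%d minor=%d patch=%d (0x%06x)\n",
               (packed >> 16) & 0xff, (packed >> 8) & 0xff,
               packed & 0xff, packed);
        return 0;
    }
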
diff --git a/libvpx.mk b/libvpx.mk
index 5cb7820..ec8e69a 100644
--- a/libvpx.mk
+++ b/libvpx.mk
@@ -60,14 +60,12 @@ LOCAL_SRC_FILES += $(libvpx_target)/vpx_config.c
# used yet but are included in the comments for future reference.
libvpx_asm_offsets_intermediates := \
- vp8/common/asm_com_offsets.intermediate \
- vp8/decoder/asm_dec_offsets.intermediate \
- vp8/encoder/asm_enc_offsets.intermediate \
+ vp8/encoder/vp8_asm_enc_offsets.intermediate \
+ vpx_scale/vpx_scale_asm_offsets.intermediate \
libvpx_asm_offsets_files := \
- vp8/common/asm_com_offsets.asm \
- vp8/decoder/asm_dec_offsets.asm \
- vp8/encoder/asm_enc_offsets.asm \
+ vp8/encoder/vp8_asm_enc_offsets.asm \
+ vpx_scale/vpx_scale_asm_offsets.asm \
# Build the S files with inline assembly.
COMPILE_TO_S := $(addprefix $(libvpx_intermediates)/, $(libvpx_asm_offsets_intermediates))
@@ -107,6 +105,7 @@ LOCAL_C_INCLUDES := \
$(libvpx_intermediates)/vp8/common \
$(libvpx_intermediates)/vp8/decoder \
$(libvpx_intermediates)/vp8/encoder \
+ $(libvpx_intermediates)/vpx_scale \
libvpx_target :=
libvpx_asm :=
diff --git a/libvpx/CHANGELOG b/libvpx/CHANGELOG
index dcb9f73..ef64a96 100644
--- a/libvpx/CHANGELOG
+++ b/libvpx/CHANGELOG
@@ -1,3 +1,32 @@
+2012-12-21 v1.2.0
+ This release acts as a checkpoint for a large amount of internal refactoring
+ and testing. It also contains a number of small bugfixes, so all users are
+ encouraged to upgrade.
+
+ - Upgrading:
+ This release is ABI and API compatible with Duclair (v1.0.0). Users
+ of older releases should refer to the Upgrading notes in this
+ document for that release.
+
+ - Enhancements:
+ VP8 optimizations for MIPS dspr2
+ vpxenc: add -quiet option
+
+ - Speed:
+ Encoder and decoder speed is consistent with the Eider release.
+
+ - Quality:
+ In general, quality is consistent with the Eider release.
+
+ Minor tweaks to ARNR filtering
+ Minor improvements to real time encoding with multiple temporal layers
+
+ - Bug Fixes:
+ Fixes multithreaded encoder race condition in loopfilter
+ Fixes multi-resolution threaded encoding
+ Fixes potential encoder deadlock after picture resize
+
+
2012-05-09 v1.1.0 "Eider"
This introduces a number of enhancements, mostly focused on real-time
encoding. In addition, it fixes a decoder bug (first introduced in
diff --git a/libvpx/README b/libvpx/README
index 0475dad..d7cb11a 100644
--- a/libvpx/README
+++ b/libvpx/README
@@ -1,7 +1,7 @@
vpx Multi-Format Codec SDK
-README - 21 June 2012
+README - 1 August 2013
-Welcome to the WebM VP8 Codec SDK!
+Welcome to the WebM VP8/VP9 Codec SDK!
COMPILING THE APPLICATIONS/LIBRARIES:
The build system used is similar to autotools. Building generally consists of
@@ -53,33 +53,63 @@ COMPILING THE APPLICATIONS/LIBRARIES:
armv5te-android-gcc
armv5te-linux-rvct
armv5te-linux-gcc
+ armv5te-none-rvct
armv6-darwin-gcc
armv6-linux-rvct
armv6-linux-gcc
+ armv6-none-rvct
armv7-android-gcc
+ armv7-darwin-gcc
armv7-linux-rvct
armv7-linux-gcc
+ armv7-none-rvct
+ armv7-win32-vs11
mips32-linux-gcc
ppc32-darwin8-gcc
ppc32-darwin9-gcc
+ ppc32-linux-gcc
ppc64-darwin8-gcc
ppc64-darwin9-gcc
ppc64-linux-gcc
+ sparc-solaris-gcc
+ x86-android-gcc
x86-darwin8-gcc
x86-darwin8-icc
x86-darwin9-gcc
x86-darwin9-icc
+ x86-darwin10-gcc
+ x86-darwin11-gcc
+ x86-darwin12-gcc
+ x86-darwin13-gcc
x86-linux-gcc
x86-linux-icc
+ x86-os2-gcc
x86-solaris-gcc
+ x86-win32-gcc
x86-win32-vs7
x86-win32-vs8
+ x86-win32-vs9
+ x86-win32-vs10
+ x86-win32-vs11
x86_64-darwin9-gcc
+ x86_64-darwin10-gcc
+ x86_64-darwin11-gcc
+ x86_64-darwin12-gcc
+ x86_64-darwin13-gcc
x86_64-linux-gcc
+ x86_64-linux-icc
x86_64-solaris-gcc
+ x86_64-win64-gcc
x86_64-win64-vs8
+ x86_64-win64-vs9
+ x86_64-win64-vs10
+ x86_64-win64-vs11
universal-darwin8-gcc
universal-darwin9-gcc
+ universal-darwin10-gcc
+ universal-darwin11-gcc
+ universal-darwin12-gcc
+ universal-darwin13-gcc
generic-gnu
The generic-gnu target, in conjunction with the CROSS environment variable,
@@ -97,7 +127,7 @@ COMPILING THE APPLICATIONS/LIBRARIES:
5. Configuration errors
If the configuration step fails, the first step is to look in the error log.
- This defaults to config.err. This should give a good indication of what went
+ This defaults to config.log. This should give a good indication of what went
wrong. If not, contact us for support.
SUPPORT
diff --git a/libvpx/args.c b/libvpx/args.c
index 37ba778..9dabc9b 100644
--- a/libvpx/args.c
+++ b/libvpx/args.c
@@ -25,241 +25,214 @@ extern void die(const char *fmt, ...);
#endif
-struct arg arg_init(char **argv)
-{
- struct arg a;
-
- a.argv = argv;
- a.argv_step = 1;
- a.name = NULL;
- a.val = NULL;
- a.def = NULL;
- return a;
+struct arg arg_init(char **argv) {
+ struct arg a;
+
+ a.argv = argv;
+ a.argv_step = 1;
+ a.name = NULL;
+ a.val = NULL;
+ a.def = NULL;
+ return a;
}
-int arg_match(struct arg *arg_, const struct arg_def *def, char **argv)
-{
- struct arg arg;
+int arg_match(struct arg *arg_, const struct arg_def *def, char **argv) {
+ struct arg arg;
- if (!argv[0] || argv[0][0] != '-')
- return 0;
+ if (!argv[0] || argv[0][0] != '-')
+ return 0;
- arg = arg_init(argv);
+ arg = arg_init(argv);
- if (def->short_name
- && strlen(arg.argv[0]) == strlen(def->short_name) + 1
- && !strcmp(arg.argv[0] + 1, def->short_name))
- {
+ if (def->short_name
+ && strlen(arg.argv[0]) == strlen(def->short_name) + 1
+ && !strcmp(arg.argv[0] + 1, def->short_name)) {
- arg.name = arg.argv[0] + 1;
- arg.val = def->has_val ? arg.argv[1] : NULL;
- arg.argv_step = def->has_val ? 2 : 1;
- }
- else if (def->long_name)
- {
- const size_t name_len = strlen(def->long_name);
-
- if (strlen(arg.argv[0]) >= name_len + 2
- && arg.argv[0][1] == '-'
- && !strncmp(arg.argv[0] + 2, def->long_name, name_len)
- && (arg.argv[0][name_len+2] == '='
- || arg.argv[0][name_len+2] == '\0'))
- {
-
- arg.name = arg.argv[0] + 2;
- arg.val = arg.name[name_len] == '=' ? arg.name + name_len + 1 : NULL;
- arg.argv_step = 1;
- }
+ arg.name = arg.argv[0] + 1;
+ arg.val = def->has_val ? arg.argv[1] : NULL;
+ arg.argv_step = def->has_val ? 2 : 1;
+ } else if (def->long_name) {
+ const size_t name_len = strlen(def->long_name);
+
+ if (strlen(arg.argv[0]) >= name_len + 2
+ && arg.argv[0][1] == '-'
+ && !strncmp(arg.argv[0] + 2, def->long_name, name_len)
+ && (arg.argv[0][name_len + 2] == '='
+ || arg.argv[0][name_len + 2] == '\0')) {
+
+ arg.name = arg.argv[0] + 2;
+ arg.val = arg.name[name_len] == '=' ? arg.name + name_len + 1 : NULL;
+ arg.argv_step = 1;
}
+ }
- if (arg.name && !arg.val && def->has_val)
- die("Error: option %s requires argument.\n", arg.name);
+ if (arg.name && !arg.val && def->has_val)
+ die("Error: option %s requires argument.\n", arg.name);
- if (arg.name && arg.val && !def->has_val)
- die("Error: option %s requires no argument.\n", arg.name);
+ if (arg.name && arg.val && !def->has_val)
+ die("Error: option %s requires no argument.\n", arg.name);
- if (arg.name
- && (arg.val || !def->has_val))
- {
- arg.def = def;
- *arg_ = arg;
- return 1;
- }
+ if (arg.name
+ && (arg.val || !def->has_val)) {
+ arg.def = def;
+ *arg_ = arg;
+ return 1;
+ }
- return 0;
+ return 0;
}
-const char *arg_next(struct arg *arg)
-{
- if (arg->argv[0])
- arg->argv += arg->argv_step;
+const char *arg_next(struct arg *arg) {
+ if (arg->argv[0])
+ arg->argv += arg->argv_step;
- return *arg->argv;
+ return *arg->argv;
}
-char **argv_dup(int argc, const char **argv)
-{
- char **new_argv = malloc((argc + 1) * sizeof(*argv));
+char **argv_dup(int argc, const char **argv) {
+ char **new_argv = malloc((argc + 1) * sizeof(*argv));
- memcpy(new_argv, argv, argc * sizeof(*argv));
- new_argv[argc] = NULL;
- return new_argv;
+ memcpy(new_argv, argv, argc * sizeof(*argv));
+ new_argv[argc] = NULL;
+ return new_argv;
}
-void arg_show_usage(FILE *fp, const struct arg_def *const *defs)
-{
- char option_text[40] = {0};
+void arg_show_usage(FILE *fp, const struct arg_def *const *defs) {
+ char option_text[40] = {0};
- for (; *defs; defs++)
- {
- const struct arg_def *def = *defs;
- char *short_val = def->has_val ? " <arg>" : "";
- char *long_val = def->has_val ? "=<arg>" : "";
+ for (; *defs; defs++) {
+ const struct arg_def *def = *defs;
+ char *short_val = def->has_val ? " <arg>" : "";
+ char *long_val = def->has_val ? "=<arg>" : "";
- if (def->short_name && def->long_name)
- {
- char *comma = def->has_val ? "," : ", ";
+ if (def->short_name && def->long_name) {
+ char *comma = def->has_val ? "," : ", ";
- snprintf(option_text, 37, "-%s%s%s --%s%6s",
- def->short_name, short_val, comma,
- def->long_name, long_val);
- }
- else if (def->short_name)
- snprintf(option_text, 37, "-%s%s",
- def->short_name, short_val);
- else if (def->long_name)
- snprintf(option_text, 37, " --%s%s",
- def->long_name, long_val);
+ snprintf(option_text, 37, "-%s%s%s --%s%6s",
+ def->short_name, short_val, comma,
+ def->long_name, long_val);
+ } else if (def->short_name)
+ snprintf(option_text, 37, "-%s%s",
+ def->short_name, short_val);
+ else if (def->long_name)
+ snprintf(option_text, 37, " --%s%s",
+ def->long_name, long_val);
- fprintf(fp, " %-37s\t%s\n", option_text, def->desc);
+ fprintf(fp, " %-37s\t%s\n", option_text, def->desc);
- if(def->enums)
- {
- const struct arg_enum_list *listptr;
+ if (def->enums) {
+ const struct arg_enum_list *listptr;
- fprintf(fp, " %-37s\t ", "");
+ fprintf(fp, " %-37s\t ", "");
- for(listptr = def->enums; listptr->name; listptr++)
- fprintf(fp, "%s%s", listptr->name,
- listptr[1].name ? ", " : "\n");
- }
+ for (listptr = def->enums; listptr->name; listptr++)
+ fprintf(fp, "%s%s", listptr->name,
+ listptr[1].name ? ", " : "\n");
}
+ }
}
-unsigned int arg_parse_uint(const struct arg *arg)
-{
- long int rawval;
- char *endptr;
+unsigned int arg_parse_uint(const struct arg *arg) {
+ long int rawval;
+ char *endptr;
- rawval = strtol(arg->val, &endptr, 10);
+ rawval = strtol(arg->val, &endptr, 10);
- if (arg->val[0] != '\0' && endptr[0] == '\0')
- {
- if (rawval >= 0 && rawval <= UINT_MAX)
- return rawval;
+ if (arg->val[0] != '\0' && endptr[0] == '\0') {
+ if (rawval >= 0 && rawval <= UINT_MAX)
+ return rawval;
- die("Option %s: Value %ld out of range for unsigned int\n",
- arg->name, rawval);
- }
+ die("Option %s: Value %ld out of range for unsigned int\n",
+ arg->name, rawval);
+ }
- die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
- return 0;
+ die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
+ return 0;
}
-int arg_parse_int(const struct arg *arg)
-{
- long int rawval;
- char *endptr;
+int arg_parse_int(const struct arg *arg) {
+ long int rawval;
+ char *endptr;
- rawval = strtol(arg->val, &endptr, 10);
+ rawval = strtol(arg->val, &endptr, 10);
- if (arg->val[0] != '\0' && endptr[0] == '\0')
- {
- if (rawval >= INT_MIN && rawval <= INT_MAX)
- return rawval;
+ if (arg->val[0] != '\0' && endptr[0] == '\0') {
+ if (rawval >= INT_MIN && rawval <= INT_MAX)
+ return rawval;
- die("Option %s: Value %ld out of range for signed int\n",
- arg->name, rawval);
- }
+ die("Option %s: Value %ld out of range for signed int\n",
+ arg->name, rawval);
+ }
- die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
- return 0;
+ die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
+ return 0;
}
-struct vpx_rational
-{
- int num; /**< fraction numerator */
- int den; /**< fraction denominator */
+struct vpx_rational {
+ int num; /**< fraction numerator */
+ int den; /**< fraction denominator */
};
-struct vpx_rational arg_parse_rational(const struct arg *arg)
-{
- long int rawval;
- char *endptr;
- struct vpx_rational rat;
-
- /* parse numerator */
- rawval = strtol(arg->val, &endptr, 10);
-
- if (arg->val[0] != '\0' && endptr[0] == '/')
- {
- if (rawval >= INT_MIN && rawval <= INT_MAX)
- rat.num = rawval;
- else die("Option %s: Value %ld out of range for signed int\n",
- arg->name, rawval);
- }
- else die("Option %s: Expected / at '%c'\n", arg->name, *endptr);
-
- /* parse denominator */
- rawval = strtol(endptr + 1, &endptr, 10);
-
- if (arg->val[0] != '\0' && endptr[0] == '\0')
- {
- if (rawval >= INT_MIN && rawval <= INT_MAX)
- rat.den = rawval;
- else die("Option %s: Value %ld out of range for signed int\n",
- arg->name, rawval);
- }
- else die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
-
- return rat;
+struct vpx_rational arg_parse_rational(const struct arg *arg) {
+ long int rawval;
+ char *endptr;
+ struct vpx_rational rat;
+
+ /* parse numerator */
+ rawval = strtol(arg->val, &endptr, 10);
+
+ if (arg->val[0] != '\0' && endptr[0] == '/') {
+ if (rawval >= INT_MIN && rawval <= INT_MAX)
+ rat.num = rawval;
+ else die("Option %s: Value %ld out of range for signed int\n",
+ arg->name, rawval);
+ } else die("Option %s: Expected / at '%c'\n", arg->name, *endptr);
+
+ /* parse denominator */
+ rawval = strtol(endptr + 1, &endptr, 10);
+
+ if (arg->val[0] != '\0' && endptr[0] == '\0') {
+ if (rawval >= INT_MIN && rawval <= INT_MAX)
+ rat.den = rawval;
+ else die("Option %s: Value %ld out of range for signed int\n",
+ arg->name, rawval);
+ } else die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
+
+ return rat;
}
-int arg_parse_enum(const struct arg *arg)
-{
- const struct arg_enum_list *listptr;
- long int rawval;
- char *endptr;
-
- /* First see if the value can be parsed as a raw value */
- rawval = strtol(arg->val, &endptr, 10);
- if (arg->val[0] != '\0' && endptr[0] == '\0')
- {
- /* Got a raw value, make sure it's valid */
- for(listptr = arg->def->enums; listptr->name; listptr++)
- if(listptr->val == rawval)
- return rawval;
- }
+int arg_parse_enum(const struct arg *arg) {
+ const struct arg_enum_list *listptr;
+ long int rawval;
+ char *endptr;
- /* Next see if it can be parsed as a string */
- for(listptr = arg->def->enums; listptr->name; listptr++)
- if(!strcmp(arg->val, listptr->name))
- return listptr->val;
+ /* First see if the value can be parsed as a raw value */
+ rawval = strtol(arg->val, &endptr, 10);
+ if (arg->val[0] != '\0' && endptr[0] == '\0') {
+ /* Got a raw value, make sure it's valid */
+ for (listptr = arg->def->enums; listptr->name; listptr++)
+ if (listptr->val == rawval)
+ return rawval;
+ }
- die("Option %s: Invalid value '%s'\n", arg->name, arg->val);
- return 0;
+ /* Next see if it can be parsed as a string */
+ for (listptr = arg->def->enums; listptr->name; listptr++)
+ if (!strcmp(arg->val, listptr->name))
+ return listptr->val;
+
+ die("Option %s: Invalid value '%s'\n", arg->name, arg->val);
+ return 0;
}
-int arg_parse_enum_or_int(const struct arg *arg)
-{
- if(arg->def->enums)
- return arg_parse_enum(arg);
- return arg_parse_int(arg);
+int arg_parse_enum_or_int(const struct arg *arg) {
+ if (arg->def->enums)
+ return arg_parse_enum(arg);
+ return arg_parse_int(arg);
}
diff --git a/libvpx/args.h b/libvpx/args.h
index 7963fa6..ad591af 100644
--- a/libvpx/args.h
+++ b/libvpx/args.h
@@ -13,29 +13,26 @@
#define ARGS_H
#include <stdio.h>
-struct arg
-{
- char **argv;
- const char *name;
- const char *val;
- unsigned int argv_step;
- const struct arg_def *def;
+struct arg {
+ char **argv;
+ const char *name;
+ const char *val;
+ unsigned int argv_step;
+ const struct arg_def *def;
};
-struct arg_enum_list
-{
- const char *name;
- int val;
+struct arg_enum_list {
+ const char *name;
+ int val;
};
#define ARG_ENUM_LIST_END {0}
-typedef struct arg_def
-{
- const char *short_name;
- const char *long_name;
- int has_val;
- const char *desc;
- const struct arg_enum_list *enums;
+typedef struct arg_def {
+ const char *short_name;
+ const char *long_name;
+ int has_val;
+ const char *desc;
+ const struct arg_enum_list *enums;
} arg_def_t;
#define ARG_DEF(s,l,v,d) {s,l,v,d, NULL}
#define ARG_DEF_ENUM(s,l,v,d,e) {s,l,v,d,e}
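
Taken together, args.h and args.c give the vpx command-line tools a small option parser: ARG_DEF builds an arg_def_t, arg_match tests the next argv slot against it, and the arg_parse_* helpers convert the matched value. A usage sketch follows; the option table and loop are illustrative, not lifted from vpxenc:

    #include <stdio.h>
    #include "args.h"

    static const arg_def_t limit_arg =
        ARG_DEF(NULL, "limit", 1, "Stop encoding after <arg> frames");
    static const arg_def_t *const my_args[] = { &limit_arg, NULL };

    static void parse(char **argv) {
        struct arg arg;
        unsigned int limit = 0;
        for (; *argv; argv += arg.argv_step) {
            arg.argv_step = 1;  /* default advance when nothing matches */
            if (arg_match(&arg, &limit_arg, argv))
                limit = arg_parse_uint(&arg);  /* sets argv_step to 2 */
            else
                arg_show_usage(stderr, my_args);
        }
        (void)limit;
    }
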
diff --git a/libvpx/build/arm-msvs/obj_int_extract.bat b/libvpx/build/arm-msvs/obj_int_extract.bat
new file mode 100644
index 0000000..7fd16a3
--- /dev/null
+++ b/libvpx/build/arm-msvs/obj_int_extract.bat
@@ -0,0 +1,14 @@
+REM Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+REM
+REM Use of this source code is governed by a BSD-style license
+REM that can be found in the LICENSE file in the root of the source
+REM tree. An additional intellectual property rights grant can be found
+REM in the file PATENTS. All contributing project authors may
+REM be found in the AUTHORS file in the root of the source tree.
+echo on
+
+cl /I "./" /I "%1" /nologo /c /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP "%1/vp8/encoder/vp8_asm_enc_offsets.c"
+obj_int_extract.exe rvds "vp8_asm_enc_offsets.obj" > "vp8_asm_enc_offsets.asm"
+
+cl /I "./" /I "%1" /nologo /c /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP "%1/vpx_scale/vpx_scale_asm_offsets.c"
+obj_int_extract.exe rvds "vpx_scale_asm_offsets.obj" > "vpx_scale_asm_offsets.asm"
diff --git a/libvpx/build/make/Android.mk b/libvpx/build/make/Android.mk
index d54639a..1ff0884 100644
--- a/libvpx/build/make/Android.mk
+++ b/libvpx/build/make/Android.mk
@@ -27,7 +27,7 @@
# Android.mk file in the libvpx directory:
# LOCAL_PATH := $(call my-dir)
# include $(CLEAR_VARS)
-# include libvpx/build/make/Android.mk
+# include jni/libvpx/build/make/Android.mk
#
# There are currently two TARGET_ARCH_ABI targets for ARM.
# armeabi and armeabi-v7a. armeabi-v7a is selected by creating an
@@ -48,7 +48,7 @@
# Running ndk-build will build libvpx and include it in your project.
#
-CONFIG_DIR := $(LOCAL_PATH)
+CONFIG_DIR := $(LOCAL_PATH)/
LIBVPX_PATH := $(LOCAL_PATH)/libvpx
ASM_CNV_PATH_LOCAL := $(TARGET_ARCH_ABI)/ads2gas
ASM_CNV_PATH := $(LOCAL_PATH)/$(ASM_CNV_PATH_LOCAL)
@@ -56,9 +56,9 @@ ASM_CNV_PATH := $(LOCAL_PATH)/$(ASM_CNV_PATH_LOCAL)
# Makefiles created by the libvpx configure process
# This will need to be fixed to handle x86.
ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
- include $(CONFIG_DIR)/libs-armv7-android-gcc.mk
+ include $(CONFIG_DIR)libs-armv7-android-gcc.mk
else
- include $(CONFIG_DIR)/libs-armv5te-android-gcc.mk
+ include $(CONFIG_DIR)libs-armv5te-android-gcc.mk
endif
# Rule that is normally in Makefile created by libvpx
@@ -106,26 +106,25 @@ $$(eval $$(call ev-build-file))
$(1) : $$(_OBJ) $(2)
@mkdir -p $$(dir $$@)
- @grep $(OFFSET_PATTERN) $$< | tr -d '\#' | $(CONFIG_DIR)/$(ASM_CONVERSION) > $$@
+ @grep $(OFFSET_PATTERN) $$< | tr -d '\#' | $(CONFIG_DIR)$(ASM_CONVERSION) > $$@
endef
# Use the ads2gas script to convert from RVCT format to GAS format. This
# puts the processed file under $(ASM_CNV_PATH). A local clean rule
# handles removing these files.
-ASM_CNV_OFFSETS_DEPEND = $(ASM_CNV_PATH)/asm_com_offsets.asm
-ifeq ($(CONFIG_VP8_DECODER), yes)
- ASM_CNV_OFFSETS_DEPEND += $(ASM_CNV_PATH)/asm_dec_offsets.asm
-endif
ifeq ($(CONFIG_VP8_ENCODER), yes)
- ASM_CNV_OFFSETS_DEPEND += $(ASM_CNV_PATH)/asm_enc_offsets.asm
+ ASM_CNV_OFFSETS_DEPEND += $(ASM_CNV_PATH)/vp8_asm_enc_offsets.asm
+endif
+ifeq ($(HAVE_NEON), yes)
+ ASM_CNV_OFFSETS_DEPEND += $(ASM_CNV_PATH)/vpx_scale_asm_offsets.asm
endif
.PRECIOUS: %.asm.s
$(ASM_CNV_PATH)/libvpx/%.asm.s: $(LIBVPX_PATH)/%.asm $(ASM_CNV_OFFSETS_DEPEND)
@mkdir -p $(dir $@)
- @$(CONFIG_DIR)/$(ASM_CONVERSION) <$< > $@
+ @$(CONFIG_DIR)$(ASM_CONVERSION) <$< > $@
-# For building vpx_rtcd.h, which has a rule in libs.mk
+# For building *_rtcd.h, which have rules in libs.mk
TGT_ISA:=$(word 1, $(subst -, ,$(TOOLCHAIN)))
target := libs
@@ -177,7 +176,14 @@ ifeq ($(CONFIG_RUNTIME_CPU_DETECT),yes)
LOCAL_STATIC_LIBRARIES := cpufeatures
endif
-$(foreach file, $(LOCAL_SRC_FILES), $(LOCAL_PATH)/$(file)): vpx_rtcd.h
+# Add a dependency to force generation of the RTCD files.
+ifeq ($(CONFIG_VP8), yes)
+$(foreach file, $(LOCAL_SRC_FILES), $(LOCAL_PATH)/$(file)): vp8_rtcd.h
+endif
+ifeq ($(CONFIG_VP9), yes)
+$(foreach file, $(LOCAL_SRC_FILES), $(LOCAL_PATH)/$(file)): vp9_rtcd.h
+endif
+$(foreach file, $(LOCAL_SRC_FILES), $(LOCAL_PATH)/$(file)): vpx_scale_rtcd.h
.PHONY: clean
clean:
@@ -189,23 +195,18 @@ clean:
include $(BUILD_SHARED_LIBRARY)
-$(eval $(call asm_offsets_template,\
- $(ASM_CNV_PATH)/asm_com_offsets.asm, \
- $(LIBVPX_PATH)/vp8/common/asm_com_offsets.c))
-
-ifeq ($(CONFIG_VP8_DECODER), yes)
+ifeq ($(HAVE_NEON), yes)
$(eval $(call asm_offsets_template,\
- $(ASM_CNV_PATH)/asm_dec_offsets.asm, \
- $(LIBVPX_PATH)/vp8/decoder/asm_dec_offsets.c))
+ $(ASM_CNV_PATH)/vpx_scale_asm_offsets.asm, \
+ $(LIBVPX_PATH)/vpx_scale/vpx_scale_asm_offsets.c))
endif
ifeq ($(CONFIG_VP8_ENCODER), yes)
$(eval $(call asm_offsets_template,\
- $(ASM_CNV_PATH)/asm_enc_offsets.asm, \
- $(LIBVPX_PATH)/vp8/encoder/asm_enc_offsets.c))
+ $(ASM_CNV_PATH)/vp8_asm_enc_offsets.asm, \
+ $(LIBVPX_PATH)/vp8/encoder/vp8_asm_enc_offsets.c))
endif
ifeq ($(CONFIG_RUNTIME_CPU_DETECT),yes)
$(call import-module,cpufeatures)
endif
-
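
The asm_offsets machinery this hunk renames works by compiling a small C file whose only job is to emit struct offsets into the compiler's assembly output, where the grep/ads2gas rule above (or obj_int_extract on MSVS) harvests them for hand-written .asm sources. A generic sketch of the trick, using made-up names rather than the exact libvpx macros:

    #include <stddef.h>

    typedef struct {
        int rows;
        int cols;
        unsigned char *buffer;
    } example_buf;

    /* Emit each offset as an assembler symbol; the -S output then holds
     * lines like "example_buf_rows EQU #0". ARM gcc prints immediates
     * with a '#' prefix, which is why the rule pipes through tr -d '#'. */
    #define EMIT(sym, val) __asm__("\n" #sym " EQU %0" : : "i"(val))

    void emit_offsets(void) {
        EMIT(example_buf_rows, offsetof(example_buf, rows));
        EMIT(example_buf_buffer, offsetof(example_buf, buffer));
    }
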
diff --git a/libvpx/build/make/Makefile b/libvpx/build/make/Makefile
index 1088c84..7a25239 100644
--- a/libvpx/build/make/Makefile
+++ b/libvpx/build/make/Makefile
@@ -74,7 +74,7 @@ HOSTCC?=gcc
TGT_ISA:=$(word 1, $(subst -, ,$(TOOLCHAIN)))
TGT_OS:=$(word 2, $(subst -, ,$(TOOLCHAIN)))
TGT_CC:=$(word 3, $(subst -, ,$(TOOLCHAIN)))
-quiet:=$(if $(verbose),,yes)
+quiet:=$(if $(or $(verbose), $(V)),, yes)
qexec=$(if $(quiet),@)
# Cancel built-in implicit rules
@@ -103,6 +103,18 @@ test::
.PHONY: testdata
testdata::
+# Add compiler flags for intrinsic files
+$(BUILD_PFX)%_mmx.c.d: CFLAGS += -mmmx
+$(BUILD_PFX)%_mmx.c.o: CFLAGS += -mmmx
+$(BUILD_PFX)%_sse2.c.d: CFLAGS += -msse2
+$(BUILD_PFX)%_sse2.c.o: CFLAGS += -msse2
+$(BUILD_PFX)%_sse3.c.d: CFLAGS += -msse3
+$(BUILD_PFX)%_sse3.c.o: CFLAGS += -msse3
+$(BUILD_PFX)%_ssse3.c.d: CFLAGS += -mssse3
+$(BUILD_PFX)%_ssse3.c.o: CFLAGS += -mssse3
+$(BUILD_PFX)%_sse4.c.d: CFLAGS += -msse4.1
+$(BUILD_PFX)%_sse4.c.o: CFLAGS += -msse4.1
+
$(BUILD_PFX)%.c.d: %.c
$(if $(quiet),@echo " [DEP] $@")
$(qexec)mkdir -p $(dir $@)
@@ -253,10 +265,25 @@ $(1):
$(if $(quiet),@echo " [LD] $$@")
$(qexec)$$(LD) -shared $$(LDFLAGS) \
-Wl,--no-undefined -Wl,-soname,$$(SONAME) \
- -Wl,--version-script,$$(SO_VERSION_SCRIPT) -o $$@ \
- $$(filter %.o,$$?) $$(extralibs)
+ -Wl,--version-script,$$(EXPORTS_FILE) -o $$@ \
+ $$(filter %.o,$$^) $$(extralibs)
+endef
+
+define dl_template
+# Not using a pattern rule here because we don't want to generate empty
+# archives when they are listed as a dependency in files not responsible
+# for creating them.
+$(1):
+ $(if $(quiet),@echo " [LD] $$@")
+ $(qexec)$$(LD) -dynamiclib $$(LDFLAGS) \
+ -exported_symbols_list $$(EXPORTS_FILE) \
+ -Wl,-headerpad_max_install_names,-compatibility_version,1.0,-current_version,$$(VERSION_MAJOR) \
+ -o $$@ \
+ $$(filter %.o,$$^) $$(extralibs)
endef
+
+
define lipo_lib_template
$(1): $(addsuffix /$(1),$(FAT_ARCHS))
$(if $(quiet),@echo " [LIPO] $$@")
@@ -321,6 +348,7 @@ LIBS=$(call enabled,LIBS)
@touch $@
$(foreach lib,$(filter %_g.a,$(LIBS)),$(eval $(call archive_template,$(lib))))
$(foreach lib,$(filter %so.$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH),$(LIBS)),$(eval $(call so_template,$(lib))))
+$(foreach lib,$(filter %$(VERSION_MAJOR).dylib,$(LIBS)),$(eval $(call dl_template,$(lib))))
INSTALL-LIBS=$(call cond_enabled,CONFIG_INSTALL_LIBS,INSTALL-LIBS)
ifeq ($(MAKECMDGOALS),dist)
@@ -360,10 +388,14 @@ ifneq ($(call enabled,DIST-SRCS),)
DIST-SRCS-$(CONFIG_MSVS) += build/make/gen_msvs_sln.sh
DIST-SRCS-$(CONFIG_MSVS) += build/x86-msvs/yasm.rules
DIST-SRCS-$(CONFIG_MSVS) += build/x86-msvs/obj_int_extract.bat
+ DIST-SRCS-$(CONFIG_MSVS) += build/arm-msvs/obj_int_extract.bat
DIST-SRCS-$(CONFIG_RVCT) += build/make/armlink_adapter.sh
- # Include obj_int_extract if we use offsets from asm_*_offsets
+ # Include obj_int_extract if we use offsets from *_asm_*_offsets
DIST-SRCS-$(ARCH_ARM)$(ARCH_X86)$(ARCH_X86_64) += build/make/obj_int_extract.c
DIST-SRCS-$(ARCH_ARM) += build/make/ads2gas.pl
+ DIST-SRCS-$(ARCH_ARM) += build/make/ads2gas_apple.pl
+ DIST-SRCS-$(ARCH_ARM) += build/make/ads2armasm_ms.pl
+ DIST-SRCS-$(ARCH_ARM) += build/make/thumb.pm
DIST-SRCS-yes += $(target:-$(TOOLCHAIN)=).mk
endif
INSTALL-SRCS := $(call cond_enabled,CONFIG_INSTALL_SRCS,INSTALL-SRCS)
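
The new pattern rules key compiler flags off the file-name suffix, so only translation units that actually use intrinsics get -msse2 and friends. A hypothetical *_sse2.c file showing why that flag is needed for exactly that one object:

    /* Hypothetical contents of some vp9_foo_sse2.c: the emmintrin.h
     * intrinsics below fail to compile without -msse2 on 32-bit x86. */
    #include <emmintrin.h>
    #include <stdint.h>

    void add_16_u8_sse2(const uint8_t *a, const uint8_t *b, uint8_t *out) {
        const __m128i va = _mm_loadu_si128((const __m128i *)a);
        const __m128i vb = _mm_loadu_si128((const __m128i *)b);
        _mm_storeu_si128((__m128i *)out, _mm_adds_epu8(va, vb));  /* saturating */
    }
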
diff --git a/libvpx/build/make/ads2armasm_ms.pl b/libvpx/build/make/ads2armasm_ms.pl
new file mode 100755
index 0000000..1def539
--- /dev/null
+++ b/libvpx/build/make/ads2armasm_ms.pl
@@ -0,0 +1,38 @@
+#!/usr/bin/perl
+##
+## Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+use FindBin;
+use lib $FindBin::Bin;
+use thumb;
+
+print "; This file was created from a .asm file\n";
+print "; using the ads2armasm_ms.pl script.\n";
+
+while (<STDIN>)
+{
+ undef $comment;
+ undef $line;
+
+ s/REQUIRE8//;
+ s/PRESERVE8//;
+ s/^\s*ARM\s*$//;
+ s/AREA\s+\|\|(.*)\|\|/AREA |$1|/;
+ s/qsubaddx/qsax/i;
+ s/qaddsubx/qasx/i;
+
+ thumb::FixThumbInstructions($_, 1);
+
+ s/ldrneb/ldrbne/i;
+ s/ldrneh/ldrhne/i;
+
+ print;
+}
+
diff --git a/libvpx/build/make/ads2gas.pl b/libvpx/build/make/ads2gas.pl
index 95be467..9c41901 100755
--- a/libvpx/build/make/ads2gas.pl
+++ b/libvpx/build/make/ads2gas.pl
@@ -17,9 +17,24 @@
#
# Usage: cat inputfile | perl ads2gas.pl > outputfile
#
+
+use FindBin;
+use lib $FindBin::Bin;
+use thumb;
+
+my $thumb = 0;
+
+foreach my $arg (@ARGV) {
+ $thumb = 1 if ($arg eq "-thumb");
+}
+
print "@ This file was created from a .asm file\n";
print "@ using the ads2gas.pl script.\n";
print "\t.equ DO1STROUNDING, 0\n";
+if ($thumb) {
+ print "\t.syntax unified\n";
+ print "\t.thumb\n";
+}
# Stack of procedure names.
@proc_stack = ();
@@ -151,8 +166,13 @@ while (<STDIN>)
# ALIGN directive
s/\bALIGN\b/.balign/g;
- # ARM code
- s/\sARM/.arm/g;
+ if ($thumb) {
+ # ARM code - we force everything to thumb with the declaration in the header
+ s/\sARM//g;
+ } else {
+ # ARM code
+ s/\sARM/.arm/g;
+ }
# push/pop
s/(push\s+)(r\d+)/stmdb sp\!, \{$2\}/g;
@@ -162,6 +182,10 @@ while (<STDIN>)
s/(vld1.\d+\s+)(q\d+)/$1\{$2\}/g;
s/(vtbl.\d+\s+[^,]+),([^,]+)/$1,\{$2\}/g;
+ if ($thumb) {
+ thumb::FixThumbInstructions($_, 0);
+ }
+
# eabi_attributes numerical equivalents can be found in the
# "ARM IHI 0045C" document.
diff --git a/libvpx/build/make/ads2gas_apple.pl b/libvpx/build/make/ads2gas_apple.pl
index 81280bf..51e6fbc 100755
--- a/libvpx/build/make/ads2gas_apple.pl
+++ b/libvpx/build/make/ads2gas_apple.pl
@@ -10,12 +10,12 @@
##
-# ads2gas.pl
+# ads2gas_apple.pl
# Author: Eric Fung (efung (at) acm.org)
#
# Convert ARM Developer Suite 1.0.1 syntax assembly source to GNU as format
#
-# Usage: cat inputfile | perl ads2gas.pl > outputfile
+# Usage: cat inputfile | perl ads2gas_apple.pl > outputfile
#
print "@ This file was created from a .asm file\n";
print "@ using the ads2gas_apple.pl script.\n\n";
diff --git a/libvpx/build/make/armlink_adapter.sh b/libvpx/build/make/armlink_adapter.sh
index b53669c..75c342e 100755
--- a/libvpx/build/make/armlink_adapter.sh
+++ b/libvpx/build/make/armlink_adapter.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
##
## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
##
@@ -13,20 +13,20 @@
verbose=0
set -- $*
for i; do
- if [ "$i" == "-o" ]; then
+ if [ "$i" = "-o" ]; then
on_of=1
- elif [ "$i" == "-v" ]; then
+ elif [ "$i" = "-v" ]; then
verbose=1
- elif [ "$i" == "-g" ]; then
+ elif [ "$i" = "-g" ]; then
args="${args} --debug"
- elif [ "$on_of" == "1" ]; then
+ elif [ "$on_of" = "1" ]; then
outfile=$i
on_of=0
elif [ -f "$i" ]; then
infiles="$infiles $i"
- elif [ "${i:0:2}" == "-l" ]; then
+ elif [ "${i#-l}" != "$i" ]; then
libs="$libs ${i#-l}"
- elif [ "${i:0:2}" == "-L" ]; then
+ elif [ "${i#-L}" != "$i" ]; then
libpaths="${libpaths} ${i#-L}"
else
args="${args} ${i}"
diff --git a/libvpx/build/make/configure.sh b/libvpx/build/make/configure.sh
index 05bbabe..bb7ab41 100755
--- a/libvpx/build/make/configure.sh
+++ b/libvpx/build/make/configure.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
##
## configure.sh
##
@@ -75,7 +75,7 @@ Options:
Build options:
--help print this message
- --log=yes|no|FILE file configure log is written to [config.err]
+ --log=yes|no|FILE file configure log is written to [config.log]
--target=TARGET target platform tuple [generic-gnu]
--cpu=CPU optimize for a specific cpu rather than a family
--extra-cflags=ECFLAGS add ECFLAGS to CFLAGS [$CFLAGS]
@@ -88,6 +88,7 @@ Build options:
${toggle_debug} enable/disable debug mode
${toggle_gprof} enable/disable gprof profiling instrumentation
${toggle_gcov} enable/disable gcov coverage instrumentation
+ ${toggle_thumb} enable/disable building arm assembly in thumb mode
Install options:
${toggle_install_docs} control whether docs are installed
@@ -197,11 +198,11 @@ add_extralibs() {
#
# Boolean Manipulation Functions
#
-enable(){
+enable_feature(){
set_all yes $*
}
-disable(){
+disable_feature(){
set_all no $*
}
@@ -218,7 +219,7 @@ soft_enable() {
for var in $*; do
if ! disabled $var; then
log_echo " enabling $var"
- enable $var
+ enable_feature $var
fi
done
}
@@ -227,7 +228,7 @@ soft_disable() {
for var in $*; do
if ! enabled $var; then
log_echo " disabling $var"
- disable $var
+ disable_feature $var
fi
done
}
@@ -250,10 +251,10 @@ tolower(){
# Temporary File Functions
#
source_path=${0%/*}
-enable source_path_used
+enable_feature source_path_used
if test -z "$source_path" -o "$source_path" = "." ; then
source_path="`pwd`"
- disable source_path_used
+ disable_feature source_path_used
fi
if test ! -z "$TMPDIR" ; then
@@ -263,20 +264,23 @@ elif test ! -z "$TEMPDIR" ; then
else
TMPDIRx="/tmp"
fi
-TMP_H="${TMPDIRx}/vpx-conf-$$-${RANDOM}.h"
-TMP_C="${TMPDIRx}/vpx-conf-$$-${RANDOM}.c"
-TMP_O="${TMPDIRx}/vpx-conf-$$-${RANDOM}.o"
-TMP_X="${TMPDIRx}/vpx-conf-$$-${RANDOM}.x"
-TMP_ASM="${TMPDIRx}/vpx-conf-$$-${RANDOM}.asm"
+RAND=$(awk 'BEGIN { srand(); printf "%d\n",(rand() * 32768)}')
+TMP_H="${TMPDIRx}/vpx-conf-$$-${RAND}.h"
+TMP_C="${TMPDIRx}/vpx-conf-$$-${RAND}.c"
+TMP_CC="${TMPDIRx}/vpx-conf-$$-${RAND}.cc"
+TMP_O="${TMPDIRx}/vpx-conf-$$-${RAND}.o"
+TMP_X="${TMPDIRx}/vpx-conf-$$-${RAND}.x"
+TMP_ASM="${TMPDIRx}/vpx-conf-$$-${RAND}.asm"
clean_temp_files() {
- rm -f ${TMP_C} ${TMP_H} ${TMP_O} ${TMP_X} ${TMP_ASM}
+ rm -f ${TMP_C} ${TMP_CC} ${TMP_H} ${TMP_O} ${TMP_X} ${TMP_ASM}
}
#
# Toolchain Check Functions
#
check_cmd() {
+ enabled external_build && return
log "$@"
"$@" >>${logfile} 2>&1
}
@@ -290,9 +294,9 @@ check_cc() {
check_cxx() {
log check_cxx "$@"
- cat >${TMP_C}
- log_file ${TMP_C}
- check_cmd ${CXX} ${CXXFLAGS} "$@" -c -o ${TMP_O} ${TMP_C}
+ cat >${TMP_CC}
+ log_file ${TMP_CC}
+ check_cmd ${CXX} ${CXXFLAGS} "$@" -c -o ${TMP_O} ${TMP_CC}
}
check_cpp() {
@@ -313,8 +317,8 @@ check_header(){
header=$1
shift
var=`echo $header | sed 's/[^A-Za-z0-9_]/_/g'`
- disable $var
- check_cpp "$@" <<EOF && enable $var
+ disable_feature $var
+ check_cpp "$@" <<EOF && enable_feature $var
#include "$header"
int x;
EOF
@@ -414,6 +418,8 @@ SRC_PATH_BARE=$source_path
BUILD_PFX=${BUILD_PFX}
TOOLCHAIN=${toolchain}
ASM_CONVERSION=${asm_conversion_cmd:-${source_path}/build/make/ads2gas.pl}
+GEN_VCPROJ=${gen_vcproj_cmd}
+MSVS_ARCH_DIR=${msvs_arch_dir}
CC=${CC}
CXX=${CXX}
@@ -431,14 +437,15 @@ ASFLAGS = ${ASFLAGS}
extralibs = ${extralibs}
AS_SFX = ${AS_SFX:-.asm}
EXE_SFX = ${EXE_SFX}
+VCPROJ_SFX = ${VCPROJ_SFX}
RTCD_OPTIONS = ${RTCD_OPTIONS}
EOF
if enabled rvct; then cat >> $1 << EOF
-fmt_deps = sed -e 's;^__image.axf;\$(dir \$@)\$(notdir \$<).o \$@;' #hide
+fmt_deps = sed -e 's;^__image.axf;\${@:.d=.o} \$@;' #hide
EOF
else cat >> $1 << EOF
-fmt_deps = sed -e 's;^\([a-zA-Z0-9_]*\)\.o;\$(dir \$@)\1\$(suffix \$<).o \$@;'
+fmt_deps = sed -e 's;^\([a-zA-Z0-9_]*\)\.o;\${@:.d=.o} \$@;'
EOF
fi
@@ -459,6 +466,7 @@ write_common_target_config_h() {
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT ${RESTRICT}
+#define INLINE ${INLINE}
EOF
print_config_h ARCH "${TMP_H}" ${ARCH_LIST}
print_config_h HAVE "${TMP_H}" ${HAVE_LIST}
@@ -472,7 +480,7 @@ process_common_cmdline() {
for opt in "$@"; do
optval="${opt#*=}"
case "$opt" in
- --child) enable child
+ --child) enable_feature child
;;
--log*)
logging="$optval"
@@ -484,7 +492,7 @@ process_common_cmdline() {
;;
--target=*) toolchain="${toolchain:-${optval}}"
;;
- --force-target=*) toolchain="${toolchain:-${optval}}"; enable force_toolchain
+ --force-target=*) toolchain="${toolchain:-${optval}}"; enable_feature force_toolchain
;;
--cpu)
;;
@@ -504,7 +512,7 @@ process_common_cmdline() {
echo "${CMDLINE_SELECT}" | grep "^ *$option\$" >/dev/null ||
die_unknown $opt
fi
- $action $option
+ ${action}_feature $option
;;
--require-?*)
eval `echo "$opt" | sed 's/--/action=/;s/-/ option=/;s/-/_/g'`
@@ -516,11 +524,11 @@ process_common_cmdline() {
;;
--force-enable-?*|--force-disable-?*)
eval `echo "$opt" | sed 's/--force-/action=/;s/-/ option=/;s/-/_/g'`
- $action $option
+ ${action}_feature $option
;;
--libc=*)
[ -d "${optval}" ] || die "Not a directory: ${optval}"
- disable builtin_libc
+ disable_feature builtin_libc
alt_libc="${optval}"
;;
--as=*)
@@ -596,8 +604,13 @@ process_common_toolchain() {
armv6*)
tgt_isa=armv6
;;
+ armv7*-hardfloat*)
+ tgt_isa=armv7
+ float_abi=hard
+ ;;
armv7*)
tgt_isa=armv7
+ float_abi=softfp
;;
armv5te*)
tgt_isa=armv5te
@@ -641,6 +654,13 @@ process_common_toolchain() {
tgt_isa=x86_64
tgt_os=darwin12
;;
+ *darwin13*)
+ tgt_isa=x86_64
+ tgt_os=darwin13
+ ;;
+ x86_64*mingw32*)
+ tgt_os=win64
+ ;;
*mingw32*|*cygwin*)
[ -z "$tgt_isa" ] && tgt_isa=x86
tgt_os=win32
@@ -677,13 +697,13 @@ process_common_toolchain() {
# Mark the specific ISA requested as enabled
soft_enable ${tgt_isa}
- enable ${tgt_os}
- enable ${tgt_cc}
+ enable_feature ${tgt_os}
+ enable_feature ${tgt_cc}
# Enable the architecture family
case ${tgt_isa} in
- arm*) enable arm;;
- mips*) enable mips;;
+ arm*) enable_feature arm;;
+ mips*) enable_feature mips;;
esac
# PIC is probably what we want when building shared libs
@@ -736,13 +756,17 @@ process_common_toolchain() {
add_cflags "-mmacosx-version-min=10.8"
add_ldflags "-mmacosx-version-min=10.8"
;;
+ *-darwin13-*)
+ add_cflags "-mmacosx-version-min=10.9"
+ add_ldflags "-mmacosx-version-min=10.9"
+ ;;
esac
# Handle Solaris variants. Solaris 10 needs -lposix4
case ${toolchain} in
sparc-solaris-*)
add_extralibs -lposix4
- disable fast_unaligned
+ disable_feature fast_unaligned
;;
*-solaris-*)
add_extralibs -lposix4
@@ -767,6 +791,7 @@ process_common_toolchain() {
;;
armv5te)
soft_enable edsp
+ disable_feature fast_unaligned
;;
esac
@@ -781,9 +806,16 @@ process_common_toolchain() {
arch_int=${arch_int%%te}
check_add_asflags --defsym ARCHITECTURE=${arch_int}
tune_cflags="-mtune="
- if [ ${tgt_isa} == "armv7" ]; then
- check_add_cflags -march=armv7-a -mfloat-abi=softfp
- check_add_asflags -march=armv7-a -mfloat-abi=softfp
+ if [ ${tgt_isa} = "armv7" ]; then
+ if [ -z "${float_abi}" ]; then
+ check_cpp <<EOF && float_abi=hard || float_abi=softfp
+#ifndef __ARM_PCS_VFP
+#error "not hardfp"
+#endif
+EOF
+ fi
+ check_add_cflags -march=armv7-a -mfloat-abi=${float_abi}
+ check_add_asflags -march=armv7-a -mfloat-abi=${float_abi}
if enabled neon
then
@@ -801,6 +833,18 @@ process_common_toolchain() {
enabled debug && add_asflags -g
asm_conversion_cmd="${source_path}/build/make/ads2gas.pl"
+ if enabled thumb; then
+ asm_conversion_cmd="$asm_conversion_cmd -thumb"
+ check_add_cflags -mthumb
+ check_add_asflags -mthumb -mimplicit-it=always
+ fi
+ ;;
+ vs*)
+ asm_conversion_cmd="${source_path}/build/make/ads2armasm_ms.pl"
+ AS_SFX=.s
+ msvs_arch_dir=arm-msvs
+ disable_feature multithread
+ disable_feature unit_tests
;;
rvct)
CC=armcc
@@ -812,7 +856,7 @@ process_common_toolchain() {
tune_cflags="--cpu="
tune_asflags="--cpu="
if [ -z "${tune_cpu}" ]; then
- if [ ${tgt_isa} == "armv7" ]; then
+ if [ ${tgt_isa} = "armv7" ]; then
if enabled neon
then
check_add_cflags --fpu=softvfp+vfpv3
@@ -837,8 +881,8 @@ process_common_toolchain() {
case ${tgt_os} in
none*)
- disable multithread
- disable os_support
+ disable_feature multithread
+ disable_feature os_support
;;
android*)
@@ -870,9 +914,9 @@ process_common_toolchain() {
# Cortex-A8 implementations (NDK Dev Guide)
add_ldflags "-Wl,--fix-cortex-a8"
- enable pic
+ enable_feature pic
soft_enable realtime_only
- if [ ${tgt_isa} == "armv7" ]; then
+ if [ ${tgt_isa} = "armv7" ]; then
soft_enable runtime_cpu_detect
fi
if enabled runtime_cpu_detect; then
@@ -906,7 +950,7 @@ process_common_toolchain() {
add_ldflags -arch_only ${tgt_isa}
if [ -z "${alt_libc}" ]; then
- alt_libc=${SDK_PATH}/SDKs/iPhoneOS5.1.sdk
+ alt_libc=${SDK_PATH}/SDKs/iPhoneOS6.0.sdk
fi
add_cflags "-isysroot ${alt_libc}"
@@ -926,7 +970,7 @@ process_common_toolchain() {
;;
linux*)
- enable linux
+ enable_feature linux
if enabled rvct; then
# Check if we have CodeSourcery GCC in PATH. Needed for
# libraries
@@ -957,14 +1001,14 @@ process_common_toolchain() {
tune_cflags="-mtune="
if enabled dspr2; then
check_add_cflags -mips32r2 -mdspr2
- disable fast_unaligned
+ disable_feature fast_unaligned
fi
check_add_cflags -march=${tgt_isa}
check_add_asflags -march=${tgt_isa}
check_add_asflags -KPIC
;;
ppc*)
- enable ppc
+ enable_feature ppc
bits=${tgt_isa##ppc}
link_with_cc=gcc
setup_gnu_toolchain
@@ -994,13 +1038,6 @@ process_common_toolchain() {
#error "not x32"
#endif
EOF
- soft_enable runtime_cpu_detect
- soft_enable mmx
- soft_enable sse
- soft_enable sse2
- soft_enable sse3
- soft_enable ssse3
- soft_enable sse4_1
case ${tgt_os} in
win*)
@@ -1042,18 +1079,33 @@ EOF
add_ldflags -m${bits}
link_with_cc=gcc
tune_cflags="-march="
- setup_gnu_toolchain
+ setup_gnu_toolchain
#for 32 bit x86 builds, -O3 did not turn on this flag
- enabled optimizations && check_add_cflags -fomit-frame-pointer
+ enabled optimizations && disabled gprof && check_add_cflags -fomit-frame-pointer
;;
vs*)
# When building with Microsoft Visual Studio the assembler is
# invoked directly. Checking at configure time is unnecessary.
# Skip the check by setting AS arbitrarily
AS=msvs
+ msvs_arch_dir=x86-msvs
;;
esac
+ soft_enable runtime_cpu_detect
+ soft_enable mmx
+ soft_enable sse
+ soft_enable sse2
+ soft_enable sse3
+ soft_enable ssse3
+ # We can't use 'check_cflags' until the compiler is configured and CC is
+ # populated.
+ if enabled gcc && ! disabled sse4_1 && ! check_cflags -msse4; then
+ RTCD_OPTIONS="${RTCD_OPTIONS}--disable-sse4_1 "
+ else
+ soft_enable sse4_1
+ fi
+
case "${AS}" in
auto|"")
which nasm >/dev/null 2>&1 && AS=nasm
@@ -1069,12 +1121,14 @@ EOF
win32)
add_asflags -f win32
enabled debug && add_asflags -g cv8
+ EXE_SFX=.exe
;;
win64)
add_asflags -f x64
enabled debug && add_asflags -g cv8
+ EXE_SFX=.exe
;;
- linux*|solaris*)
+ linux*|solaris*|android*)
add_asflags -f elf${bits}
enabled debug && [ "${AS}" = yasm ] && add_asflags -g dwarf2
enabled debug && [ "${AS}" = nasm ] && add_asflags -g
@@ -1102,7 +1156,7 @@ EOF
;;
universal*|*-gcc|generic-gnu)
link_with_cc=gcc
- enable gcc
+ enable_feature gcc
setup_gnu_toolchain
;;
esac
@@ -1136,6 +1190,12 @@ EOF
fi
fi
+ # default use_x86inc to yes if pic is no or 64bit or we are not on darwin
+ echo " checking here for x86inc \"${tgt_isa}\" \"$pic\" "
+ if [ ${tgt_isa} = x86_64 -o ! "$pic" = "yes" -o "${tgt_os#darwin}" = "${tgt_os}" ]; then
+ soft_enable use_x86inc
+ fi
+
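Put differently, the only combination that condition leaves use_x86inc disabled is a 32-bit x86 darwin target built with pic=yes; an x86_64 target, a non-PIC build, or any OS whose name does not start with "darwin" all qualify for the soft-enable.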
# Position Independent Code (PIC) support, for building relocatable
# shared objects
enabled gcc && enabled pic && check_add_cflags -fPIC
@@ -1145,14 +1205,22 @@ EOF
enabled linux && check_add_cflags -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0
# Check for strip utility variant
- ${STRIP} -V 2>/dev/null | grep GNU >/dev/null && enable gnu_strip
+ ${STRIP} -V 2>/dev/null | grep GNU >/dev/null && enable_feature gnu_strip
# Try to determine target endianness
check_cc <<EOF
unsigned int e = 'O'<<24 | '2'<<16 | 'B'<<8 | 'E';
EOF
[ -f "${TMP_O}" ] && od -A n -t x1 "${TMP_O}" | tr -d '\n' |
- grep '4f *32 *42 *45' >/dev/null 2>&1 && enable big_endian
+ grep '4f *32 *42 *45' >/dev/null 2>&1 && enable_feature big_endian
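The probe above compiles a C file that packs the bytes 'O','2','B','E' into one unsigned int, then scans the object file for the hex sequence 4f 32 42 45, which only appears in that order under big-endian storage. A runtime C equivalent of the same trick (illustrative sketch):

    #include <stdio.h>

    int main(void) {
      unsigned int e = 'O' << 24 | '2' << 16 | 'B' << 8 | 'E';
      const unsigned char *p = (const unsigned char *)&e;
      /* Big-endian storage puts 'O' (0x4f) in the first byte;
       * little-endian puts 'E' (0x45) there. */
      printf("%s endian\n", p[0] == 'O' ? "big" : "little");
      return 0;
    }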
+
+ # Try to find which inline keywords are supported
+ check_cc <<EOF && INLINE="inline"
+ static inline function() {}
+EOF
+ check_cc <<EOF && INLINE="__inline__ __attribute__((always_inline))"
+ static __attribute__((always_inline)) function() {}
+EOF
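The two test programs above decide what the INLINE macro expands to in the generated config header. A short sketch of how code then consumes it (the clamp helper is hypothetical, not from libvpx):

    /* Assuming configure selected the GCC spelling. */
    #define INLINE __inline__ __attribute__((always_inline))

    static INLINE int clamp255(int v) {
      return v < 0 ? 0 : (v > 255 ? 255 : v);
    }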
# Almost every platform uses pthreads.
if enabled multithread; then
@@ -1169,15 +1237,12 @@ EOF
if enabled dspr2; then
if enabled big_endian; then
echo "dspr2 optimizations are available only for little endian platforms"
- disable dspr2
+ disable_feature dspr2
fi
fi
;;
esac
- # for sysconf(3) and friends.
- check_header unistd.h
-
# glibc needs these
if enabled linux; then
add_cflags -D_LARGEFILE_SOURCE
@@ -1223,8 +1288,8 @@ print_config_h() {
print_webm_license() {
local destination=$1
- local prefix=$2
- local suffix=$3
+ local prefix="$2"
+ local suffix="$3"
shift 3
cat <<EOF > ${destination}
${prefix} Copyright (c) 2011 The WebM project authors. All Rights Reserved.${suffix}
@@ -1245,8 +1310,8 @@ process_detect() {
true;
}
-enable logging
-logfile="config.err"
+enable_feature logging
+logfile="config.log"
self=$0
process() {
cmdline_args="$@"
diff --git a/libvpx/build/make/gen_asm_deps.sh b/libvpx/build/make/gen_asm_deps.sh
index 0b4e3aa..6a7bff9 100755
--- a/libvpx/build/make/gen_asm_deps.sh
+++ b/libvpx/build/make/gen_asm_deps.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
##
## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
##
diff --git a/libvpx/build/make/gen_msvs_proj.sh b/libvpx/build/make/gen_msvs_proj.sh
index 6d42941..fc5011b 100755
--- a/libvpx/build/make/gen_msvs_proj.sh
+++ b/libvpx/build/make/gen_msvs_proj.sh
@@ -26,6 +26,7 @@ Options:
--help Print this message
--exe Generate a project for building an Application
--lib Generate a project for creating a static library
+ --dll Generate a project for creating a dll
--static-crt Use the static C runtime (/MT)
--target=isa-os-cc Target specifier (required)
--out=filename Write output to a file [stdout]
@@ -142,7 +143,9 @@ generate_filter() {
if [ "${f##*.}" == "$pat" ]; then
unset file_list[i]
+ objf=$(echo ${f%.*}.obj | sed -e 's/^[\./]\+//g' -e 's,/,_,g')
open_tag File RelativePath="./$f"
+
if [ "$pat" == "asm" ] && $asm_use_custom_step; then
for plat in "${platforms[@]}"; do
for cfg in Debug Release; do
@@ -152,14 +155,27 @@ generate_filter() {
tag Tool \
Name="VCCustomBuildTool" \
Description="Assembling \$(InputFileName)" \
- CommandLine="$(eval echo \$asm_${cfg}_cmdline)" \
- Outputs="\$(InputName).obj" \
+ CommandLine="$(eval echo \$asm_${cfg}_cmdline) -o \$(IntDir)$objf" \
+ Outputs="\$(IntDir)$objf" \
close_tag FileConfiguration
done
done
fi
+ if [ "$pat" == "c" ] || [ "$pat" == "cc" ] ; then
+ for plat in "${platforms[@]}"; do
+ for cfg in Debug Release; do
+ open_tag FileConfiguration \
+ Name="${cfg}|${plat}" \
+ tag Tool \
+ Name="VCCLCompilerTool" \
+ ObjectFile="\$(IntDir)$objf" \
+
+ close_tag FileConfiguration
+ done
+ done
+ fi
close_tag File
break
@@ -190,6 +206,8 @@ for opt in "$@"; do
;;
--exe) proj_kind="exe"
;;
+ --dll) proj_kind="dll"
+ ;;
--lib) proj_kind="lib"
;;
--src-path-bare=*) src_path_bare="$optval"
@@ -242,10 +260,15 @@ uses_asm=${uses_asm:-false}
case "${vs_ver:-8}" in
7) vs_ver_id="7.10"
asm_use_custom_step=$uses_asm
+ warn_64bit='Detect64BitPortabilityProblems=true'
;;
8) vs_ver_id="8.00"
+ asm_use_custom_step=$uses_asm
+ warn_64bit='Detect64BitPortabilityProblems=true'
;;
9) vs_ver_id="9.00"
+ asm_use_custom_step=$uses_asm
+ warn_64bit='Detect64BitPortabilityProblems=false'
;;
esac
@@ -284,10 +307,11 @@ esac
case "$target" in
x86_64*)
platforms[0]="x64"
+ asm_Debug_cmdline="yasm -Xvc -g cv8 -f \$(PlatformName) ${yasmincs} &quot;\$(InputPath)&quot;"
+ asm_Release_cmdline="yasm -Xvc -f \$(PlatformName) ${yasmincs} &quot;\$(InputPath)&quot;"
;;
x86*)
platforms[0]="Win32"
- # these are only used by vs7
asm_Debug_cmdline="yasm -Xvc -g cv8 -f \$(PlatformName) ${yasmincs} &quot;\$(InputPath)&quot;"
asm_Release_cmdline="yasm -Xvc -f \$(PlatformName) ${yasmincs} &quot;\$(InputPath)&quot;"
;;
@@ -299,6 +323,8 @@ generate_vcproj() {
case "$proj_kind" in
exe) vs_ConfigurationType=1
;;
+ dll) vs_ConfigurationType=2
+ ;;
*) vs_ConfigurationType=4
;;
esac
@@ -318,13 +344,6 @@ generate_vcproj() {
done
close_tag Platforms
- open_tag ToolFiles
- case "$target" in
- x86*) $uses_asm && tag ToolFile RelativePath="$self_dirname/../x86-msvs/yasm.rules"
- ;;
- esac
- close_tag ToolFiles
-
open_tag Configurations
for plat in "${platforms[@]}"; do
plat_no_ws=`echo $plat | sed 's/[^A-Za-z0-9_]/_/g'`
@@ -346,8 +365,8 @@ generate_vcproj() {
PreprocessorDefinitions="WIN32;DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE" \
RuntimeLibrary="$debug_runtime" \
WarningLevel="3" \
- Detect64BitPortabilityProblems="true" \
DebugInformationFormat="1" \
+ $warn_64bit \
;;
vpx)
tag Tool \
@@ -362,8 +381,8 @@ generate_vcproj() {
RuntimeLibrary="$debug_runtime" \
UsePrecompiledHeader="0" \
WarningLevel="3" \
- DebugInformationFormat="1" \
- Detect64BitPortabilityProblems="true" \
+ DebugInformationFormat="2" \
+ $warn_64bit \
$uses_asm && tag Tool Name="YASM" IncludePaths="$incs" Debug="true"
;;
@@ -376,8 +395,8 @@ generate_vcproj() {
RuntimeLibrary="$debug_runtime" \
UsePrecompiledHeader="0" \
WarningLevel="3" \
- DebugInformationFormat="1" \
- Detect64BitPortabilityProblems="true" \
+ DebugInformationFormat="2" \
+ $warn_64bit \
$uses_asm && tag Tool Name="YASM" IncludePaths="$incs" Debug="true"
;;
@@ -454,8 +473,8 @@ generate_vcproj() {
RuntimeLibrary="$release_runtime" \
UsePrecompiledHeader="0" \
WarningLevel="3" \
- Detect64BitPortabilityProblems="true" \
DebugInformationFormat="0" \
+ $warn_64bit \
;;
vpx)
tag Tool \
@@ -472,7 +491,7 @@ generate_vcproj() {
UsePrecompiledHeader="0" \
WarningLevel="3" \
DebugInformationFormat="0" \
- Detect64BitPortabilityProblems="true" \
+ $warn_64bit \
$uses_asm && tag Tool Name="YASM" IncludePaths="$incs"
;;
@@ -487,7 +506,7 @@ generate_vcproj() {
UsePrecompiledHeader="0" \
WarningLevel="3" \
DebugInformationFormat="0" \
- Detect64BitPortabilityProblems="true" \
+ $warn_64bit \
$uses_asm && tag Tool Name="YASM" IncludePaths="$incs"
;;
diff --git a/libvpx/build/make/gen_msvs_sln.sh b/libvpx/build/make/gen_msvs_sln.sh
index 240678b..0c269b1 100755
--- a/libvpx/build/make/gen_msvs_sln.sh
+++ b/libvpx/build/make/gen_msvs_sln.sh
@@ -25,7 +25,7 @@ files.
Options:
--help Print this message
--out=outfile Redirect output to a file
- --ver=version Version (7,8,9) of visual studio to generate for
+ --ver=version Version (7,8,9,10,11) of visual studio to generate for
--target=isa-os-cc Target specifier
EOF
exit 1
@@ -55,22 +55,38 @@ indent_pop() {
parse_project() {
local file=$1
- local name=`grep Name "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'`
- local guid=`grep ProjectGUID "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'`
+ if [ "$sfx" = "vcproj" ]; then
+ local name=`grep Name "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'`
+ local guid=`grep ProjectGUID "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'`
+ else
+ local name=`grep RootNamespace "$file" | sed 's,.*<.*>\(.*\)</.*>.*,\1,'`
+ local guid=`grep ProjectGuid "$file" | sed 's,.*<.*>\(.*\)</.*>.*,\1,'`
+ fi
# save the project GUID to a variable, normalizing to the basename of the
# vcproj file without the extension
local var
var=${file##*/}
- var=${var%%.vcproj}
+ var=${var%%.${sfx}}
eval "${var}_file=\"$1\""
eval "${var}_name=$name"
eval "${var}_guid=$guid"
- # assume that all projects have the same list of possible configurations,
- # so overwriting old config_lists is not a problem
- config_list=`grep -A1 '<Configuration' $file |
- grep Name | cut -d\" -f2`
+ if [ "$sfx" = "vcproj" ]; then
+ cur_config_list=`grep -A1 '<Configuration' $file |
+ grep Name | cut -d\" -f2`
+ else
+ cur_config_list=`grep -B1 'Label="Configuration"' $file |
+ grep Condition | cut -d\' -f4`
+ fi
+ new_config_list=$(for i in $config_list $cur_config_list; do
+ echo $i
+ done | sort | uniq)
+ if [ "$config_list" != "" ] && [ "$config_list" != "$new_config_list" ]; then
+ mixed_platforms=1
+ fi
+ config_list="$new_config_list"
+ eval "${var}_config_list=\"$cur_config_list\""
proj_list="${proj_list} ${var}"
}
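For instance (project names illustrative): if one project lists Debug|Win32 and Release|Win32 while another lists Debug|x64 and Release|x64, the merged sorted list no longer matches the first project's list, so mixed_platforms is set and the solution later advertises Debug|Mixed Platforms and Release|Mixed Platforms configurations instead.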
@@ -83,14 +99,14 @@ process_project() {
# vcproj file without the extension
local var
var=${file##*/}
- var=${var%%.vcproj}
+ var=${var%%.${sfx}}
eval "${var}_guid=$guid"
echo "Project(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"$name\", \"$file\", \"$guid\""
indent_push
eval "local deps=\"\${${var}_deps}\""
- if [ -n "$deps" ]; then
+ if [ -n "$deps" ] && [ "$sfx" = "vcproj" ]; then
echo "${indent}ProjectSection(ProjectDependencies) = postProject"
indent_push
@@ -120,6 +136,11 @@ process_global() {
indent_push
IFS_bak=${IFS}
IFS=$'\r'$'\n'
+ if [ "$mixed_platforms" != "" ]; then
+ config_list="
+Release|Mixed Platforms
+Debug|Mixed Platforms"
+ fi
for config in ${config_list}; do
echo "${indent}$config = $config"
done
@@ -134,10 +155,17 @@ process_global() {
indent_push
for proj in ${proj_list}; do
eval "local proj_guid=\${${proj}_guid}"
+ eval "local proj_config_list=\${${proj}_config_list}"
IFS=$'\r'$'\n'
- for config in ${config_list}; do
- echo "${indent}${proj_guid}.${config}.ActiveCfg = ${config}"
- echo "${indent}${proj_guid}.${config}.Build.0 = ${config}"
+ for config in ${proj_config_list}; do
+ if [ "$mixed_platforms" != "" ]; then
+ local c=${config%%|*}
+ echo "${indent}${proj_guid}.${c}|Mixed Platforms.ActiveCfg = ${config}"
+ echo "${indent}${proj_guid}.${c}|Mixed Platforms.Build.0 = ${config}"
+ else
+ echo "${indent}${proj_guid}.${config}.ActiveCfg = ${config}"
+ echo "${indent}${proj_guid}.${config}.Build.0 = ${config}"
+ fi
done
IFS=${IFS_bak}
@@ -163,9 +191,14 @@ process_makefile() {
IFS=$'\r'$'\n'
local TAB=$'\t'
cat <<EOF
-found_devenv := \$(shell which devenv.com >/dev/null 2>&1 && echo yes)
+ifeq (\$(CONFIG_VS_VERSION),7)
+MSBUILD_TOOL := devenv.com
+else
+MSBUILD_TOOL := msbuild.exe
+endif
+found_devenv := \$(shell which \$(MSBUILD_TOOL) >/dev/null 2>&1 && echo yes)
.nodevenv.once:
-${TAB}@echo " * devenv.com not found in path."
+${TAB}@echo " * \$(MSBUILD_TOOL) not found in path."
${TAB}@echo " * "
${TAB}@echo " * You will have to build all configurations manually using the"
${TAB}@echo " * Visual Studio IDE. To allow make to build them automatically,"
@@ -190,16 +223,17 @@ ${TAB}rm -rf "$platform"/"$config"
ifneq (\$(found_devenv),)
ifeq (\$(CONFIG_VS_VERSION),7)
$nows_sln_config: $outfile
-${TAB}devenv.com $outfile -build "$config"
+${TAB}\$(MSBUILD_TOOL) $outfile -build "$config"
else
$nows_sln_config: $outfile
-${TAB}devenv.com $outfile -build "$sln_config"
+${TAB}\$(MSBUILD_TOOL) $outfile -m -t:Build \\
+${TAB}${TAB}-p:Configuration="$config" -p:Platform="$platform"
endif
else
$nows_sln_config: $outfile .nodevenv.once
-${TAB}@echo " * Skipping build of $sln_config (devenv.com not in path)."
+${TAB}@echo " * Skipping build of $sln_config (\$(MSBUILD_TOOL) not in path)."
${TAB}@echo " * "
endif
@@ -221,7 +255,7 @@ for opt in "$@"; do
;;
--ver=*) vs_ver="$optval"
case $optval in
- [789])
+ [789]|10|11)
;;
*) die Unrecognized Visual Studio Version in $opt
;;
@@ -257,6 +291,20 @@ case "${vs_ver:-8}" in
9) sln_vers="10.00"
sln_vers_str="Visual Studio 2008"
;;
+ 10) sln_vers="11.00"
+ sln_vers_str="Visual Studio 2010"
+ ;;
+ 11) sln_vers="12.00"
+ sln_vers_str="Visual Studio 2012"
+ ;;
+esac
+case "${vs_ver:-8}" in
+ [789])
+ sfx=vcproj
+ ;;
+ 10|11)
+ sfx=vcxproj
+ ;;
esac
for f in "${file_list[@]}"; do
diff --git a/libvpx/build/make/gen_msvs_vcxproj.sh b/libvpx/build/make/gen_msvs_vcxproj.sh
new file mode 100755
index 0000000..4875915
--- /dev/null
+++ b/libvpx/build/make/gen_msvs_vcxproj.sh
@@ -0,0 +1,530 @@
+#!/bin/bash
+##
+## Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+
+self=$0
+self_basename=${self##*/}
+self_dirname=$(dirname "$0")
+EOL=$'\n'
+
+show_help() {
+ cat <<EOF
+Usage: ${self_basename} --name=projname [options] file1 [file2 ...]
+
+This script generates a Visual Studio project file from a list of source
+code files.
+
+Options:
+ --help Print this message
+ --exe Generate a project for building an Application
+ --lib Generate a project for creating a static library
+ --dll Generate a project for creating a dll
+ --static-crt Use the static C runtime (/MT)
+ --target=isa-os-cc Target specifier (required)
+ --out=filename Write output to a file [stdout]
+ --name=project_name Name of the project (required)
+ --proj-guid=GUID GUID to use for the project
+ --module-def=filename File containing export definitions (for DLLs)
+ --ver=version Version (10,11) of visual studio to generate for
+ --src-path-bare=dir Path to root of source tree
+ -Ipath/to/include Additional include directories
+ -DFLAG[=value] Preprocessor macros to define
+ -Lpath/to/lib Additional library search paths
+ -llibname Library to link against
+EOF
+ exit 1
+}
+
+die() {
+ echo "${self_basename}: $@" >&2
+ exit 1
+}
+
+die_unknown(){
+ echo "Unknown option \"$1\"." >&2
+ echo "See ${self_basename} --help for available options." >&2
+ exit 1
+}
+
+generate_uuid() {
+ local hex="0123456789ABCDEF"
+ local i
+ local uuid=""
+ local j
+ #93995380-89BD-4b04-88EB-625FBE52EBFB
+ for ((i=0; i<32; i++)); do
+ (( j = $RANDOM % 16 ))
+ uuid="${uuid}${hex:$j:1}"
+ done
+ echo "${uuid:0:8}-${uuid:8:4}-${uuid:12:4}-${uuid:16:4}-${uuid:20:12}"
+}
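A C sketch of the same idea, 32 random hex digits grouped 8-4-4-4-12 (like the shell version above, it makes no attempt to set the RFC 4122 version/variant bits; the GUID only needs to be unique within the solution):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static void generate_uuid(char out[37]) {
      static const char hex[] = "0123456789ABCDEF";
      const int dash_before[] = {8, 12, 16, 20};
      int i, d = 0, pos = 0;
      for (i = 0; i < 32; i++) {
        if (d < 4 && i == dash_before[d]) {
          out[pos++] = '-';
          d++;
        }
        out[pos++] = hex[rand() % 16];
      }
      out[pos] = '\0';
    }

    int main(void) {
      char uuid[37];
      srand((unsigned)time(NULL));
      generate_uuid(uuid);
      puts(uuid);
      return 0;
    }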
+
+indent1=" "
+indent=""
+indent_push() {
+ indent="${indent}${indent1}"
+}
+indent_pop() {
+ indent="${indent%${indent1}}"
+}
+
+tag_attributes() {
+ for opt in "$@"; do
+ optval="${opt#*=}"
+ [ -n "${optval}" ] ||
+ die "Missing attribute value in '$opt' while generating $tag tag"
+ echo "${indent}${opt%%=*}=\"${optval}\""
+ done
+}
+
+open_tag() {
+ local tag=$1
+ shift
+ if [ $# -ne 0 ]; then
+ echo "${indent}<${tag}"
+ indent_push
+ tag_attributes "$@"
+ echo "${indent}>"
+ else
+ echo "${indent}<${tag}>"
+ indent_push
+ fi
+}
+
+close_tag() {
+ local tag=$1
+ indent_pop
+ echo "${indent}</${tag}>"
+}
+
+tag() {
+ local tag=$1
+ shift
+ if [ $# -ne 0 ]; then
+ echo "${indent}<${tag}"
+ indent_push
+ tag_attributes "$@"
+ indent_pop
+ echo "${indent}/>"
+ else
+ echo "${indent}<${tag}/>"
+ fi
+}
+
+tag_content() {
+ local tag=$1
+ local content=$2
+ shift
+ shift
+ if [ $# -ne 0 ]; then
+ echo "${indent}<${tag}"
+ indent_push
+ tag_attributes "$@"
+ echo "${indent}>${content}</${tag}>"
+ indent_pop
+ else
+ echo "${indent}<${tag}>${content}</${tag}>"
+ fi
+}
+
+generate_filter() {
+ local name=$1
+ local pats=$2
+ local file_list_sz
+ local i
+ local f
+ local saveIFS="$IFS"
+ local pack
+ echo "generating filter '$name' from ${#file_list[@]} files" >&2
+ IFS=*
+
+ file_list_sz=${#file_list[@]}
+ for i in ${!file_list[@]}; do
+ f=${file_list[i]}
+ for pat in ${pats//;/$IFS}; do
+ if [ "${f##*.}" == "$pat" ]; then
+ unset file_list[i]
+
+ objf=$(echo ${f%.*}.obj | sed -e 's/^[\./]\+//g' -e 's,/,_,g')
+
+ if ([ "$pat" == "asm" ] || [ "$pat" == "s" ]) && $asm_use_custom_step; then
+ open_tag CustomBuild \
+ Include=".\\$f"
+ for plat in "${platforms[@]}"; do
+ for cfg in Debug Release; do
+ tag_content Message "Assembling %(Filename)%(Extension)" \
+ Condition="'\$(Configuration)|\$(Platform)'=='$cfg|$plat'"
+ tag_content Command "$(eval echo \$asm_${cfg}_cmdline) -o \$(IntDir)$objf" \
+ Condition="'\$(Configuration)|\$(Platform)'=='$cfg|$plat'"
+ tag_content Outputs "\$(IntDir)$objf" \
+ Condition="'\$(Configuration)|\$(Platform)'=='$cfg|$plat'"
+ done
+ done
+ close_tag CustomBuild
+ elif [ "$pat" == "c" ] || [ "$pat" == "cc" ] ; then
+ open_tag ClCompile \
+ Include=".\\$f"
+ # Separate file names with Condition?
+ tag_content ObjectFileName "\$(IntDir)$objf"
+ close_tag ClCompile
+ elif [ "$pat" == "h" ] ; then
+ tag ClInclude \
+ Include=".\\$f"
+ elif [ "$pat" == "vcxproj" ] ; then
+ open_tag ProjectReference \
+ Include="$f"
+ depguid=`grep ProjectGuid "$f" | sed 's,.*<.*>\(.*\)</.*>.*,\1,'`
+ tag_content Project "$depguid"
+ tag_content ReferenceOutputAssembly false
+ close_tag ProjectReference
+ else
+ tag None \
+ Include=".\\$f"
+ fi
+
+ break
+ fi
+ done
+ done
+
+ IFS="$saveIFS"
+}
+
+# Process command line
+unset target
+for opt in "$@"; do
+ optval="${opt#*=}"
+ case "$opt" in
+ --help|-h) show_help
+ ;;
+ --target=*) target="${optval}"
+ ;;
+ --out=*) outfile="$optval"
+ ;;
+ --name=*) name="${optval}"
+ ;;
+ --proj-guid=*) guid="${optval}"
+ ;;
+ --module-def=*) module_def="${optval}"
+ ;;
+ --exe) proj_kind="exe"
+ ;;
+ --dll) proj_kind="dll"
+ ;;
+ --lib) proj_kind="lib"
+ ;;
+ --src-path-bare=*) src_path_bare="$optval"
+ ;;
+ --static-crt) use_static_runtime=true
+ ;;
+ --ver=*)
+ vs_ver="$optval"
+ case "$optval" in
+ 10|11)
+ ;;
+ *) die Unrecognized Visual Studio Version in $opt
+ ;;
+ esac
+ ;;
+ -I*)
+ opt="${opt%/}"
+ incs="${incs}${incs:+;}${opt##-I}"
+ yasmincs="${yasmincs} ${opt}"
+ ;;
+ -D*) defines="${defines}${defines:+;}${opt##-D}"
+ ;;
+ -L*) # fudge . to $(OutDir)
+ if [ "${opt##-L}" == "." ]; then
+ libdirs="${libdirs}${libdirs:+;}\$(OutDir)"
+ else
+ # Also try directories for this platform/configuration
+ libdirs="${libdirs}${libdirs:+;}${opt##-L}"
+ libdirs="${libdirs}${libdirs:+;}${opt##-L}/\$(PlatformName)/\$(Configuration)"
+ libdirs="${libdirs}${libdirs:+;}${opt##-L}/\$(PlatformName)"
+ fi
+ ;;
+ -l*) libs="${libs}${libs:+ }${opt##-l}.lib"
+ ;;
+ -*) die_unknown $opt
+ ;;
+ *)
+ file_list[${#file_list[@]}]="$opt"
+ case "$opt" in
+ *.asm|*.s) uses_asm=true
+ ;;
+ esac
+ ;;
+ esac
+done
+outfile=${outfile:-/dev/stdout}
+guid=${guid:-`generate_uuid`}
+asm_use_custom_step=false
+uses_asm=${uses_asm:-false}
+case "${vs_ver:-11}" in
+ 10|11)
+ asm_use_custom_step=$uses_asm
+ ;;
+esac
+
+[ -n "$name" ] || die "Project name (--name) must be specified!"
+[ -n "$target" ] || die "Target (--target) must be specified!"
+
+if ${use_static_runtime:-false}; then
+ release_runtime=MultiThreaded
+ debug_runtime=MultiThreadedDebug
+ lib_sfx=mt
+else
+ release_runtime=MultiThreadedDLL
+ debug_runtime=MultiThreadedDebugDLL
+ lib_sfx=md
+fi
+
+# Calculate debug lib names: If a lib ends in ${lib_sfx}.lib, then rename
+# it to ${lib_sfx}d.lib. This precludes linking to release libs from a
+# debug exe, so this may need to be refactored later.
+for lib in ${libs}; do
+ if [ "$lib" != "${lib%${lib_sfx}.lib}" ]; then
+ lib=${lib%.lib}d.lib
+ fi
+ debug_libs="${debug_libs}${debug_libs:+ }${lib}"
+done
+debug_libs=${debug_libs// /;}
+libs=${libs// /;}
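As an example (library names illustrative): with lib_sfx=mt, a dependency given as vpxmt.lib is rewritten to vpxmtd.lib for the Debug configuration, while a library such as kernel32.lib carries no runtime suffix and passes through unchanged.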
+
+
+# List of all platforms supported for this target
+case "$target" in
+ x86_64*)
+ platforms[0]="x64"
+ asm_Debug_cmdline="yasm -Xvc -g cv8 -f \$(PlatformName) ${yasmincs} &quot;%(FullPath)&quot;"
+ asm_Release_cmdline="yasm -Xvc -f \$(PlatformName) ${yasmincs} &quot;%(FullPath)&quot;"
+ ;;
+ x86*)
+ platforms[0]="Win32"
+ asm_Debug_cmdline="yasm -Xvc -g cv8 -f \$(PlatformName) ${yasmincs} &quot;%(FullPath)&quot;"
+ asm_Release_cmdline="yasm -Xvc -f \$(PlatformName) ${yasmincs} &quot;%(FullPath)&quot;"
+ ;;
+ arm*)
+ asm_Debug_cmdline="armasm -nologo &quot;%(FullPath)&quot;"
+ asm_Release_cmdline="armasm -nologo &quot;%(FullPath)&quot;"
+ if [ "$name" = "obj_int_extract" ]; then
+ # We don't want to build this tool for the target architecture,
+ # but for an architecture we can run locally during the build.
+ platforms[0]="Win32"
+ else
+ platforms[0]="ARM"
+ fi
+ ;;
+ *) die "Unsupported target $target!"
+ ;;
+esac
+
+generate_vcxproj() {
+ echo "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
+ open_tag Project \
+ DefaultTargets="Build" \
+ ToolsVersion="4.0" \
+ xmlns="http://schemas.microsoft.com/developer/msbuild/2003" \
+
+ open_tag ItemGroup \
+ Label="ProjectConfigurations"
+ for plat in "${platforms[@]}"; do
+ for config in Debug Release; do
+ open_tag ProjectConfiguration \
+ Include="$config|$plat"
+ tag_content Configuration $config
+ tag_content Platform $plat
+ close_tag ProjectConfiguration
+ done
+ done
+ close_tag ItemGroup
+
+ open_tag PropertyGroup \
+ Label="Globals"
+ tag_content ProjectGuid "{${guid}}"
+ tag_content RootNamespace ${name}
+ tag_content Keyword ManagedCProj
+ close_tag PropertyGroup
+
+ tag Import \
+ Project="\$(VCTargetsPath)\\Microsoft.Cpp.Default.props"
+
+ for plat in "${platforms[@]}"; do
+ for config in Release Debug; do
+ open_tag PropertyGroup \
+ Condition="'\$(Configuration)|\$(Platform)'=='$config|$plat'" \
+ Label="Configuration"
+ if [ "$proj_kind" = "exe" ]; then
+ tag_content ConfigurationType Application
+ elif [ "$proj_kind" = "dll" ]; then
+ tag_content ConfigurationType DynamicLibrary
+ else
+ tag_content ConfigurationType StaticLibrary
+ fi
+ if [ "$vs_ver" = "11" ]; then
+ if [ "$plat" = "ARM" ]; then
+ # Setting the wp80 toolchain automatically sets the
+ # WINAPI_FAMILY define, which is required for building
+ # code for arm with the windows headers. Alternatively,
+ # one could add AppContainerApplication=true in the Globals
+ # section and add PrecompiledHeader=NotUsing and
+ # CompileAsWinRT=false in ClCompile and SubSystem=Console
+ # in Link.
+ tag_content PlatformToolset v110_wp80
+ else
+ tag_content PlatformToolset v110
+ fi
+ fi
+ tag_content CharacterSet Unicode
+ if [ "$config" = "Release" ]; then
+ tag_content WholeProgramOptimization true
+ fi
+ close_tag PropertyGroup
+ done
+ done
+
+ tag Import \
+ Project="\$(VCTargetsPath)\\Microsoft.Cpp.props"
+
+ open_tag ImportGroup \
+ Label="PropertySheets"
+ tag Import \
+ Project="\$(UserRootDir)\\Microsoft.Cpp.\$(Platform).user.props" \
+ Condition="exists('\$(UserRootDir)\\Microsoft.Cpp.\$(Platform).user.props')" \
+ Label="LocalAppDataPlatform"
+ close_tag ImportGroup
+
+ tag PropertyGroup \
+ Label="UserMacros"
+
+ for plat in "${platforms[@]}"; do
+ plat_no_ws=`echo $plat | sed 's/[^A-Za-z0-9_]/_/g'`
+ for config in Debug Release; do
+ open_tag PropertyGroup \
+ Condition="'\$(Configuration)|\$(Platform)'=='$config|$plat'"
+ tag_content OutDir "\$(SolutionDir)$plat_no_ws\\\$(Configuration)\\"
+ tag_content IntDir "$plat_no_ws\\\$(Configuration)\\${name}\\"
+ close_tag PropertyGroup
+ done
+ done
+
+ for plat in "${platforms[@]}"; do
+ for config in Debug Release; do
+ open_tag ItemDefinitionGroup \
+ Condition="'\$(Configuration)|\$(Platform)'=='$config|$plat'"
+ if [ "$name" = "vpx" ]; then
+ open_tag PreBuildEvent
+ tag_content Command "call obj_int_extract.bat $src_path_bare"
+ close_tag PreBuildEvent
+ fi
+ open_tag ClCompile
+ if [ "$config" = "Debug" ]; then
+ opt=Disabled
+ runtime=$debug_runtime
+ curlibs=$debug_libs
+ confsuffix=d
+ case "$name" in
+ obj_int_extract)
+ debug=DEBUG
+ ;;
+ *)
+ debug=_DEBUG
+ ;;
+ esac
+ else
+ opt=MaxSpeed
+ runtime=$release_runtime
+ curlibs=$libs
+ confsuffix=""
+ tag_content FavorSizeOrSpeed Speed
+ debug=NDEBUG
+ fi
+ case "$name" in
+ obj_int_extract)
+ extradefines=";_CONSOLE"
+ ;;
+ *)
+ extradefines=";$defines"
+ ;;
+ esac
+ tag_content Optimization $opt
+ tag_content AdditionalIncludeDirectories "$incs;%(AdditionalIncludeDirectories)"
+ tag_content PreprocessorDefinitions "WIN32;$debug;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE$extradefines;%(PreprocessorDefinitions)"
+ tag_content RuntimeLibrary $runtime
+ tag_content WarningLevel Level3
+ # DebugInformationFormat
+ close_tag ClCompile
+ case "$proj_kind" in
+ exe)
+ open_tag Link
+ if [ "$name" = "obj_int_extract" ]; then
+ tag_content OutputFile "${name}.exe"
+ else
+ tag_content AdditionalDependencies "$curlibs"
+ tag_content AdditionalLibraryDirectories "$libdirs;%(AdditionalLibraryDirectories)"
+ fi
+ tag_content GenerateDebugInformation true
+ close_tag Link
+ ;;
+ dll)
+ open_tag Link
+ tag_content GenerateDebugInformation true
+ tag_content ModuleDefinitionFile $module_def
+ close_tag Link
+ ;;
+ lib)
+ open_tag Lib
+ tag_content OutputFile "\$(OutDir)${name}${lib_sfx}${confsuffix}.lib"
+ close_tag Lib
+ ;;
+ esac
+ close_tag ItemDefinitionGroup
+ done
+
+ done
+
+ open_tag ItemGroup
+ generate_filter "Source Files" "c;cc;def;odl;idl;hpj;bat;asm;asmx;s"
+ close_tag ItemGroup
+ open_tag ItemGroup
+ generate_filter "Header Files" "h;hm;inl;inc;xsd"
+ close_tag ItemGroup
+ open_tag ItemGroup
+ generate_filter "Build Files" "mk"
+ close_tag ItemGroup
+ open_tag ItemGroup
+ generate_filter "References" "vcxproj"
+ close_tag ItemGroup
+
+ tag Import \
+ Project="\$(VCTargetsPath)\\Microsoft.Cpp.targets"
+
+ open_tag ImportGroup \
+ Label="ExtensionTargets"
+ close_tag ImportGroup
+
+ close_tag Project
+
+ # This must be done from within the {} subshell
+ echo "Ignored files list (${#file_list[@]} items) is:" >&2
+ for f in "${file_list[@]}"; do
+ echo " $f" >&2
+ done
+}
+
+# This regexp doesn't catch most of the strings in the vcxproj format,
+# since they're like <tag>path</tag> instead of <tag attr="path" />
+# as previously. It still seems to work ok despite this.
+generate_vcxproj |
+ sed -e '/"/s;\([^ "]\)/;\1\\;g' |
+ sed -e '/xmlns/s;\\;/;g' > ${outfile}
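Concretely (path illustrative): an attribute like Include="./vp8/common/alloccommon.c" comes out as Include=".\vp8\common\alloccommon.c", and the second sed pass then restores the forward slashes that the first pass wrongly flipped in the xmlns URL.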
+
+exit
diff --git a/libvpx/build/make/obj_int_extract.c b/libvpx/build/make/obj_int_extract.c
index bf317bd..feed9d9 100644
--- a/libvpx/build/make/obj_int_extract.c
+++ b/libvpx/build/make/obj_int_extract.c
@@ -17,21 +17,19 @@
#include "vpx_config.h"
#include "vpx/vpx_integer.h"
-typedef enum
-{
- OUTPUT_FMT_PLAIN,
- OUTPUT_FMT_RVDS,
- OUTPUT_FMT_GAS,
+typedef enum {
+ OUTPUT_FMT_PLAIN,
+ OUTPUT_FMT_RVDS,
+ OUTPUT_FMT_GAS,
} output_fmt_t;
-int log_msg(const char *fmt, ...)
-{
- int res;
- va_list ap;
- va_start(ap, fmt);
- res = vfprintf(stderr, fmt, ap);
- va_end(ap);
- return res;
+int log_msg(const char *fmt, ...) {
+ int res;
+ va_list ap;
+ va_start(ap, fmt);
+ res = vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ return res;
}
#if defined(__GNUC__) && __GNUC__
@@ -40,175 +38,160 @@ int log_msg(const char *fmt, ...)
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
-int parse_macho(uint8_t *base_buf, size_t sz)
-{
- int i, j;
- struct mach_header header;
- uint8_t *buf = base_buf;
- int base_data_section = 0;
- int bits = 0;
-
- /* We can read in mach_header for 32 and 64 bit architectures
- * because it's identical to mach_header_64 except for the last
- * element (uint32_t reserved), which we don't use. Then, when
- * we know which architecture we're looking at, increment buf
- * appropriately.
- */
- memcpy(&header, buf, sizeof(struct mach_header));
-
- if (header.magic == MH_MAGIC)
- {
- if (header.cputype == CPU_TYPE_ARM
- || header.cputype == CPU_TYPE_X86)
- {
- bits = 32;
- buf += sizeof(struct mach_header);
- }
- else
- {
- log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_[ARM|X86].\n");
- goto bail;
- }
+int print_macho_equ(output_fmt_t mode, uint8_t* name, int val) {
+ switch (mode) {
+ case OUTPUT_FMT_RVDS:
+ printf("%-40s EQU %5d\n", name, val);
+ return 0;
+ case OUTPUT_FMT_GAS:
+ printf(".set %-40s, %5d\n", name, val);
+ return 0;
+ default:
+ log_msg("Unsupported mode: %d", mode);
+ return 1;
+ }
+}
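So for a symbol NAME with value VAL (both illustrative), OUTPUT_FMT_RVDS emits a line of the form "NAME EQU VAL" and OUTPUT_FMT_GAS emits ".set NAME, VAL", each padded for alignment; any other mode logs an error and returns 1.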
+
+int parse_macho(uint8_t *base_buf, size_t sz, output_fmt_t mode) {
+ int i, j;
+ struct mach_header header;
+ uint8_t *buf = base_buf;
+ int base_data_section = 0;
+ int bits = 0;
+
+ /* We can read in mach_header for 32 and 64 bit architectures
+ * because it's identical to mach_header_64 except for the last
+ * element (uint32_t reserved), which we don't use. Then, when
+ * we know which architecture we're looking at, increment buf
+ * appropriately.
+ */
+ memcpy(&header, buf, sizeof(struct mach_header));
+
+ if (header.magic == MH_MAGIC) {
+ if (header.cputype == CPU_TYPE_ARM
+ || header.cputype == CPU_TYPE_X86) {
+ bits = 32;
+ buf += sizeof(struct mach_header);
+ } else {
+ log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_[ARM|X86].\n");
+ goto bail;
}
- else if (header.magic == MH_MAGIC_64)
- {
- if (header.cputype == CPU_TYPE_X86_64)
- {
- bits = 64;
- buf += sizeof(struct mach_header_64);
- }
- else
- {
- log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_X86_64.\n");
- goto bail;
- }
+ } else if (header.magic == MH_MAGIC_64) {
+ if (header.cputype == CPU_TYPE_X86_64) {
+ bits = 64;
+ buf += sizeof(struct mach_header_64);
+ } else {
+ log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_X86_64.\n");
+ goto bail;
}
- else
- {
- log_msg("Bad magic number for object file. 0x%x or 0x%x expected, 0x%x found.\n",
- MH_MAGIC, MH_MAGIC_64, header.magic);
+ } else {
+ log_msg("Bad magic number for object file. 0x%x or 0x%x expected, 0x%x found.\n",
+ MH_MAGIC, MH_MAGIC_64, header.magic);
+ goto bail;
+ }
+
+ if (header.filetype != MH_OBJECT) {
+ log_msg("Bad filetype for object file. Currently only tested for MH_OBJECT.\n");
+ goto bail;
+ }
+
+ for (i = 0; i < header.ncmds; i++) {
+ struct load_command lc;
+
+ memcpy(&lc, buf, sizeof(struct load_command));
+
+ if (lc.cmd == LC_SEGMENT) {
+ uint8_t *seg_buf = buf;
+ struct section s;
+ struct segment_command seg_c;
+
+ memcpy(&seg_c, seg_buf, sizeof(struct segment_command));
+ seg_buf += sizeof(struct segment_command);
+
+      /* Although each section is given its own offset, nlist.n_value
+ * references the offset of the first section. This isn't
+ * apparent without debug information because the offset of the
+ * data section is the same as the first section. However, with
+ * debug sections mixed in, the offset of the debug section
+ * increases but n_value still references the first section.
+ */
+ if (seg_c.nsects < 1) {
+ log_msg("Not enough sections\n");
goto bail;
- }
+ }
- if (header.filetype != MH_OBJECT)
- {
- log_msg("Bad filetype for object file. Currently only tested for MH_OBJECT.\n");
+ memcpy(&s, seg_buf, sizeof(struct section));
+ base_data_section = s.offset;
+ } else if (lc.cmd == LC_SEGMENT_64) {
+ uint8_t *seg_buf = buf;
+ struct section_64 s;
+ struct segment_command_64 seg_c;
+
+ memcpy(&seg_c, seg_buf, sizeof(struct segment_command_64));
+ seg_buf += sizeof(struct segment_command_64);
+
+      /* Explanation in LC_SEGMENT */
+ if (seg_c.nsects < 1) {
+ log_msg("Not enough sections\n");
goto bail;
- }
+ }
- for (i = 0; i < header.ncmds; i++)
- {
- struct load_command lc;
-
- memcpy(&lc, buf, sizeof(struct load_command));
-
- if (lc.cmd == LC_SEGMENT)
- {
- uint8_t *seg_buf = buf;
- struct section s;
- struct segment_command seg_c;
-
- memcpy(&seg_c, seg_buf, sizeof(struct segment_command));
- seg_buf += sizeof(struct segment_command);
-
- /* Although each section is given it's own offset, nlist.n_value
- * references the offset of the first section. This isn't
- * apparent without debug information because the offset of the
- * data section is the same as the first section. However, with
- * debug sections mixed in, the offset of the debug section
- * increases but n_value still references the first section.
- */
- if (seg_c.nsects < 1)
- {
- log_msg("Not enough sections\n");
- goto bail;
- }
+ memcpy(&s, seg_buf, sizeof(struct section_64));
+ base_data_section = s.offset;
+ } else if (lc.cmd == LC_SYMTAB) {
+ if (base_data_section != 0) {
+ struct symtab_command sc;
+ uint8_t *sym_buf = base_buf;
+ uint8_t *str_buf = base_buf;
- memcpy(&s, seg_buf, sizeof(struct section));
- base_data_section = s.offset;
- }
- else if (lc.cmd == LC_SEGMENT_64)
- {
- uint8_t *seg_buf = buf;
- struct section_64 s;
- struct segment_command_64 seg_c;
-
- memcpy(&seg_c, seg_buf, sizeof(struct segment_command_64));
- seg_buf += sizeof(struct segment_command_64);
-
- /* Explanation in LG_SEGMENT */
- if (seg_c.nsects < 1)
- {
- log_msg("Not enough sections\n");
- goto bail;
- }
+ memcpy(&sc, buf, sizeof(struct symtab_command));
- memcpy(&s, seg_buf, sizeof(struct section_64));
- base_data_section = s.offset;
- }
- else if (lc.cmd == LC_SYMTAB)
- {
- if (base_data_section != 0)
- {
- struct symtab_command sc;
- uint8_t *sym_buf = base_buf;
- uint8_t *str_buf = base_buf;
-
- memcpy(&sc, buf, sizeof(struct symtab_command));
-
- if (sc.cmdsize != sizeof(struct symtab_command))
- {
- log_msg("Can't find symbol table!\n");
- goto bail;
- }
-
- sym_buf += sc.symoff;
- str_buf += sc.stroff;
-
- for (j = 0; j < sc.nsyms; j++)
- {
- /* Location of string is cacluated each time from the
- * start of the string buffer. On darwin the symbols
- * are prefixed by "_", so we bump the pointer by 1.
- * The target value is defined as an int in asm_*_offsets.c,
- * which is 4 bytes on all targets we currently use.
- */
- if (bits == 32)
- {
- struct nlist nl;
- int val;
-
- memcpy(&nl, sym_buf, sizeof(struct nlist));
- sym_buf += sizeof(struct nlist);
-
- memcpy(&val, base_buf + base_data_section + nl.n_value,
- sizeof(val));
- printf("%-40s EQU %5d\n",
- str_buf + nl.n_un.n_strx + 1, val);
- }
- else /* if (bits == 64) */
- {
- struct nlist_64 nl;
- int val;
-
- memcpy(&nl, sym_buf, sizeof(struct nlist_64));
- sym_buf += sizeof(struct nlist_64);
-
- memcpy(&val, base_buf + base_data_section + nl.n_value,
- sizeof(val));
- printf("%-40s EQU %5d\n",
- str_buf + nl.n_un.n_strx + 1, val);
- }
- }
- }
+ if (sc.cmdsize != sizeof(struct symtab_command)) {
+ log_msg("Can't find symbol table!\n");
+ goto bail;
}
- buf += lc.cmdsize;
+ sym_buf += sc.symoff;
+ str_buf += sc.stroff;
+
+ for (j = 0; j < sc.nsyms; j++) {
+          /* Location of string is calculated each time from the
+ * start of the string buffer. On darwin the symbols
+ * are prefixed by "_", so we bump the pointer by 1.
+ * The target value is defined as an int in *_asm_*_offsets.c,
+ * which is 4 bytes on all targets we currently use.
+ */
+ if (bits == 32) {
+ struct nlist nl;
+ int val;
+
+ memcpy(&nl, sym_buf, sizeof(struct nlist));
+ sym_buf += sizeof(struct nlist);
+
+ memcpy(&val, base_buf + base_data_section + nl.n_value,
+ sizeof(val));
+ print_macho_equ(mode, str_buf + nl.n_un.n_strx + 1, val);
+ } else { /* if (bits == 64) */
+ struct nlist_64 nl;
+ int val;
+
+ memcpy(&nl, sym_buf, sizeof(struct nlist_64));
+ sym_buf += sizeof(struct nlist_64);
+
+ memcpy(&val, base_buf + base_data_section + nl.n_value,
+ sizeof(val));
+ print_macho_equ(mode, str_buf + nl.n_un.n_strx + 1, val);
+ }
+ }
+ }
}
- return 0;
+ buf += lc.cmdsize;
+ }
+
+ return 0;
bail:
- return 1;
+ return 1;
}
@@ -216,448 +199,400 @@ bail:
#include "elf.h"
#define COPY_STRUCT(dst, buf, ofst, sz) do {\
- if(ofst + sizeof((*(dst))) > sz) goto bail;\
- memcpy(dst, buf+ofst, sizeof((*(dst))));\
- } while(0)
+ if(ofst + sizeof((*(dst))) > sz) goto bail;\
+ memcpy(dst, buf+ofst, sizeof((*(dst))));\
+ } while(0)
#define ENDIAN_ASSIGN(val, memb) do {\
- if(!elf->le_data) {log_msg("Big Endian data not supported yet!\n");goto bail;}\
- (val) = (memb);\
- } while(0)
+ if(!elf->le_data) {log_msg("Big Endian data not supported yet!\n");goto bail;}\
+ (val) = (memb);\
+ } while(0)
#define ENDIAN_ASSIGN_IN_PLACE(memb) do {\
- ENDIAN_ASSIGN(memb, memb);\
- } while(0)
-
-typedef struct
-{
- uint8_t *buf; /* Buffer containing ELF data */
- size_t sz; /* Buffer size */
- int le_data; /* Data is little-endian */
- unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
- int bits; /* 32 or 64 */
- Elf32_Ehdr hdr32;
- Elf64_Ehdr hdr64;
+ ENDIAN_ASSIGN(memb, memb);\
+ } while(0)
+
+typedef struct {
+ uint8_t *buf; /* Buffer containing ELF data */
+ size_t sz; /* Buffer size */
+ int le_data; /* Data is little-endian */
+ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
+ int bits; /* 32 or 64 */
+ Elf32_Ehdr hdr32;
+ Elf64_Ehdr hdr64;
} elf_obj_t;
-int parse_elf_header(elf_obj_t *elf)
-{
- int res;
- /* Verify ELF Magic numbers */
- COPY_STRUCT(&elf->e_ident, elf->buf, 0, elf->sz);
- res = elf->e_ident[EI_MAG0] == ELFMAG0;
- res &= elf->e_ident[EI_MAG1] == ELFMAG1;
- res &= elf->e_ident[EI_MAG2] == ELFMAG2;
- res &= elf->e_ident[EI_MAG3] == ELFMAG3;
- res &= elf->e_ident[EI_CLASS] == ELFCLASS32
- || elf->e_ident[EI_CLASS] == ELFCLASS64;
- res &= elf->e_ident[EI_DATA] == ELFDATA2LSB;
-
- if (!res) goto bail;
-
- elf->le_data = elf->e_ident[EI_DATA] == ELFDATA2LSB;
-
- /* Read in relevant values */
- if (elf->e_ident[EI_CLASS] == ELFCLASS32)
- {
- elf->bits = 32;
- COPY_STRUCT(&elf->hdr32, elf->buf, 0, elf->sz);
-
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_type);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_machine);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_version);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_entry);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phoff);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shoff);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_flags);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_ehsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phentsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phnum);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shentsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shnum);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shstrndx);
- }
- else /* if (elf->e_ident[EI_CLASS] == ELFCLASS64) */
- {
- elf->bits = 64;
- COPY_STRUCT(&elf->hdr64, elf->buf, 0, elf->sz);
-
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_type);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_machine);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_version);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_entry);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phoff);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shoff);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_flags);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_ehsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phentsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phnum);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shentsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shnum);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shstrndx);
- }
-
- return 0;
+int parse_elf_header(elf_obj_t *elf) {
+ int res;
+ /* Verify ELF Magic numbers */
+ COPY_STRUCT(&elf->e_ident, elf->buf, 0, elf->sz);
+ res = elf->e_ident[EI_MAG0] == ELFMAG0;
+ res &= elf->e_ident[EI_MAG1] == ELFMAG1;
+ res &= elf->e_ident[EI_MAG2] == ELFMAG2;
+ res &= elf->e_ident[EI_MAG3] == ELFMAG3;
+ res &= elf->e_ident[EI_CLASS] == ELFCLASS32
+ || elf->e_ident[EI_CLASS] == ELFCLASS64;
+ res &= elf->e_ident[EI_DATA] == ELFDATA2LSB;
+
+ if (!res) goto bail;
+
+ elf->le_data = elf->e_ident[EI_DATA] == ELFDATA2LSB;
+
+ /* Read in relevant values */
+ if (elf->e_ident[EI_CLASS] == ELFCLASS32) {
+ elf->bits = 32;
+ COPY_STRUCT(&elf->hdr32, elf->buf, 0, elf->sz);
+
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_type);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_machine);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_version);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_entry);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phoff);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shoff);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_flags);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_ehsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phentsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phnum);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shentsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shnum);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shstrndx);
+ } else { /* if (elf->e_ident[EI_CLASS] == ELFCLASS64) */
+ elf->bits = 64;
+ COPY_STRUCT(&elf->hdr64, elf->buf, 0, elf->sz);
+
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_type);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_machine);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_version);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_entry);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phoff);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shoff);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_flags);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_ehsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phentsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phnum);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shentsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shnum);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shstrndx);
+ }
+
+ return 0;
bail:
- log_msg("Failed to parse ELF file header");
- return 1;
+ log_msg("Failed to parse ELF file header");
+ return 1;
}
-int parse_elf_section(elf_obj_t *elf, int idx, Elf32_Shdr *hdr32, Elf64_Shdr *hdr64)
-{
- if (hdr32)
- {
- if (idx >= elf->hdr32.e_shnum)
- goto bail;
-
- COPY_STRUCT(hdr32, elf->buf, elf->hdr32.e_shoff + idx * elf->hdr32.e_shentsize,
- elf->sz);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_name);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_type);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_flags);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addr);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_offset);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_size);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_link);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_info);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addralign);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_entsize);
- }
- else /* if (hdr64) */
- {
- if (idx >= elf->hdr64.e_shnum)
- goto bail;
-
- COPY_STRUCT(hdr64, elf->buf, elf->hdr64.e_shoff + idx * elf->hdr64.e_shentsize,
- elf->sz);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_name);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_type);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_flags);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addr);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_offset);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_size);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_link);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_info);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addralign);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_entsize);
- }
+int parse_elf_section(elf_obj_t *elf, int idx, Elf32_Shdr *hdr32, Elf64_Shdr *hdr64) {
+ if (hdr32) {
+ if (idx >= elf->hdr32.e_shnum)
+ goto bail;
- return 0;
+ COPY_STRUCT(hdr32, elf->buf, elf->hdr32.e_shoff + idx * elf->hdr32.e_shentsize,
+ elf->sz);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_name);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_type);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_flags);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addr);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_offset);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_size);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_link);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_info);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addralign);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_entsize);
+ } else { /* if (hdr64) */
+ if (idx >= elf->hdr64.e_shnum)
+ goto bail;
+
+ COPY_STRUCT(hdr64, elf->buf, elf->hdr64.e_shoff + idx * elf->hdr64.e_shentsize,
+ elf->sz);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_name);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_type);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_flags);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addr);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_offset);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_size);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_link);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_info);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addralign);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_entsize);
+ }
+
+ return 0;
bail:
- return 1;
+ return 1;
}
-char *parse_elf_string_table(elf_obj_t *elf, int s_idx, int idx)
-{
- if (elf->bits == 32)
- {
- Elf32_Shdr shdr;
-
- if (parse_elf_section(elf, s_idx, &shdr, NULL))
- {
- log_msg("Failed to parse ELF string table: section %d, index %d\n",
- s_idx, idx);
- return "";
- }
+char *parse_elf_string_table(elf_obj_t *elf, int s_idx, int idx) {
+ if (elf->bits == 32) {
+ Elf32_Shdr shdr;
- return (char *)(elf->buf + shdr.sh_offset + idx);
+ if (parse_elf_section(elf, s_idx, &shdr, NULL)) {
+ log_msg("Failed to parse ELF string table: section %d, index %d\n",
+ s_idx, idx);
+ return "";
}
- else /* if (elf->bits == 64) */
- {
- Elf64_Shdr shdr;
-
- if (parse_elf_section(elf, s_idx, NULL, &shdr))
- {
- log_msg("Failed to parse ELF string table: section %d, index %d\n",
- s_idx, idx);
- return "";
- }
- return (char *)(elf->buf + shdr.sh_offset + idx);
+ return (char *)(elf->buf + shdr.sh_offset + idx);
+ } else { /* if (elf->bits == 64) */
+ Elf64_Shdr shdr;
+
+ if (parse_elf_section(elf, s_idx, NULL, &shdr)) {
+ log_msg("Failed to parse ELF string table: section %d, index %d\n",
+ s_idx, idx);
+ return "";
}
+
+ return (char *)(elf->buf + shdr.sh_offset + idx);
+ }
}
-int parse_elf_symbol(elf_obj_t *elf, unsigned int ofst, Elf32_Sym *sym32, Elf64_Sym *sym64)
-{
- if (sym32)
- {
- COPY_STRUCT(sym32, elf->buf, ofst, elf->sz);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_name);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_value);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_size);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_info);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_other);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_shndx);
- }
- else /* if (sym64) */
- {
- COPY_STRUCT(sym64, elf->buf, ofst, elf->sz);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_name);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_value);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_size);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_info);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_other);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_shndx);
- }
- return 0;
+int parse_elf_symbol(elf_obj_t *elf, unsigned int ofst, Elf32_Sym *sym32, Elf64_Sym *sym64) {
+ if (sym32) {
+ COPY_STRUCT(sym32, elf->buf, ofst, elf->sz);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_name);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_value);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_size);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_info);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_other);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_shndx);
+ } else { /* if (sym64) */
+ COPY_STRUCT(sym64, elf->buf, ofst, elf->sz);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_name);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_value);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_size);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_info);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_other);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_shndx);
+ }
+ return 0;
bail:
- return 1;
+ return 1;
}
-int parse_elf(uint8_t *buf, size_t sz, output_fmt_t mode)
-{
- elf_obj_t elf;
- unsigned int ofst;
- int i;
- Elf32_Off strtab_off32;
- Elf64_Off strtab_off64; /* save String Table offset for later use */
+int parse_elf(uint8_t *buf, size_t sz, output_fmt_t mode) {
+ elf_obj_t elf;
+ unsigned int ofst;
+ int i;
+ Elf32_Off strtab_off32;
+ Elf64_Off strtab_off64; /* save String Table offset for later use */
- memset(&elf, 0, sizeof(elf));
- elf.buf = buf;
- elf.sz = sz;
+ memset(&elf, 0, sizeof(elf));
+ elf.buf = buf;
+ elf.sz = sz;
- /* Parse Header */
- if (parse_elf_header(&elf))
- goto bail;
+ /* Parse Header */
+ if (parse_elf_header(&elf))
+ goto bail;
- if (elf.bits == 32)
- {
- Elf32_Shdr shdr;
- for (i = 0; i < elf.hdr32.e_shnum; i++)
- {
- parse_elf_section(&elf, i, &shdr, NULL);
-
- if (shdr.sh_type == SHT_STRTAB)
- {
- char strtsb_name[128];
-
- strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
-
- if (!(strcmp(strtsb_name, ".shstrtab")))
- {
- /* log_msg("found section: %s\n", strtsb_name); */
- strtab_off32 = shdr.sh_offset;
- break;
- }
- }
+ if (elf.bits == 32) {
+ Elf32_Shdr shdr;
+ for (i = 0; i < elf.hdr32.e_shnum; i++) {
+ parse_elf_section(&elf, i, &shdr, NULL);
+
+ if (shdr.sh_type == SHT_STRTAB) {
+ char strtsb_name[128];
+
+ strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
+
+ if (!(strcmp(strtsb_name, ".shstrtab"))) {
+ /* log_msg("found section: %s\n", strtsb_name); */
+ strtab_off32 = shdr.sh_offset;
+ break;
}
+ }
}
- else /* if (elf.bits == 64) */
- {
- Elf64_Shdr shdr;
- for (i = 0; i < elf.hdr64.e_shnum; i++)
- {
- parse_elf_section(&elf, i, NULL, &shdr);
-
- if (shdr.sh_type == SHT_STRTAB)
- {
- char strtsb_name[128];
-
- strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
-
- if (!(strcmp(strtsb_name, ".shstrtab")))
- {
- /* log_msg("found section: %s\n", strtsb_name); */
- strtab_off64 = shdr.sh_offset;
- break;
- }
- }
+ } else { /* if (elf.bits == 64) */
+ Elf64_Shdr shdr;
+ for (i = 0; i < elf.hdr64.e_shnum; i++) {
+ parse_elf_section(&elf, i, NULL, &shdr);
+
+ if (shdr.sh_type == SHT_STRTAB) {
+ char strtsb_name[128];
+
+ strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
+
+ if (!(strcmp(strtsb_name, ".shstrtab"))) {
+ /* log_msg("found section: %s\n", strtsb_name); */
+ strtab_off64 = shdr.sh_offset;
+ break;
}
+ }
}
+ }
+
+ /* Parse all Symbol Tables */
+ if (elf.bits == 32) {
+ Elf32_Shdr shdr;
+ for (i = 0; i < elf.hdr32.e_shnum; i++) {
+ parse_elf_section(&elf, i, &shdr, NULL);
+
+ if (shdr.sh_type == SHT_SYMTAB) {
+ for (ofst = shdr.sh_offset;
+ ofst < shdr.sh_offset + shdr.sh_size;
+ ofst += shdr.sh_entsize) {
+ Elf32_Sym sym;
+
+ parse_elf_symbol(&elf, ofst, &sym, NULL);
+
+ /* For all OBJECTS (data objects), extract the value from the
+ * proper data segment.
+ */
+ /* if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
+ log_msg("found data object %s\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name));
+ */
+
+ if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT
+ && sym.st_size == 4) {
+ Elf32_Shdr dhdr;
+ int val = 0;
+ char section_name[128];
+
+ parse_elf_section(&elf, sym.st_shndx, &dhdr, NULL);
+
+          /* For explanation - refer to _MSC_VER version of code */
+ strcpy(section_name, (char *)(elf.buf + strtab_off32 + dhdr.sh_name));
+ /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
+
+ if (strcmp(section_name, ".bss")) {
+ if (sizeof(val) != sym.st_size) {
+ /* The target value is declared as an int in
+ * *_asm_*_offsets.c, which is 4 bytes on all
+ * targets we currently use. Complain loudly if
+ * this is not true.
+ */
+ log_msg("Symbol size is wrong\n");
+ goto bail;
+ }
- /* Parse all Symbol Tables */
- if (elf.bits == 32)
- {
- Elf32_Shdr shdr;
- for (i = 0; i < elf.hdr32.e_shnum; i++)
- {
- parse_elf_section(&elf, i, &shdr, NULL);
-
- if (shdr.sh_type == SHT_SYMTAB)
- {
- for (ofst = shdr.sh_offset;
- ofst < shdr.sh_offset + shdr.sh_size;
- ofst += shdr.sh_entsize)
- {
- Elf32_Sym sym;
-
- parse_elf_symbol(&elf, ofst, &sym, NULL);
-
- /* For all OBJECTS (data objects), extract the value from the
- * proper data segment.
- */
- /* if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
- log_msg("found data object %s\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name));
- */
-
- if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT
- && sym.st_size == 4)
- {
- Elf32_Shdr dhdr;
- int val = 0;
- char section_name[128];
-
- parse_elf_section(&elf, sym.st_shndx, &dhdr, NULL);
-
- /* For explanition - refer to _MSC_VER version of code */
- strcpy(section_name, (char *)(elf.buf + strtab_off32 + dhdr.sh_name));
- /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
-
- if (strcmp(section_name, ".bss"))
- {
- if (sizeof(val) != sym.st_size)
- {
- /* The target value is declared as an int in
- * asm_*_offsets.c, which is 4 bytes on all
- * targets we currently use. Complain loudly if
- * this is not true.
- */
- log_msg("Symbol size is wrong\n");
- goto bail;
- }
-
- memcpy(&val,
- elf.buf + dhdr.sh_offset + sym.st_value,
- sym.st_size);
- }
-
- if (!elf.le_data)
- {
- log_msg("Big Endian data not supported yet!\n");
- goto bail;
- }
-
- switch (mode)
- {
- case OUTPUT_FMT_RVDS:
- printf("%-40s EQU %5d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- break;
- case OUTPUT_FMT_GAS:
- printf(".equ %-40s, %5d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- break;
- default:
- printf("%s = %d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- }
- }
- }
+ memcpy(&val,
+ elf.buf + dhdr.sh_offset + sym.st_value,
+ sym.st_size);
}
+
+ if (!elf.le_data) {
+ log_msg("Big Endian data not supported yet!\n");
+ goto bail;
+ }
+
+ switch (mode) {
+ case OUTPUT_FMT_RVDS:
+ printf("%-40s EQU %5d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
+ break;
+ case OUTPUT_FMT_GAS:
+ printf(".equ %-40s, %5d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
+ break;
+ default:
+ printf("%s = %d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
+ }
+ }
}
+ }
}
- else /* if (elf.bits == 64) */
- {
- Elf64_Shdr shdr;
- for (i = 0; i < elf.hdr64.e_shnum; i++)
- {
- parse_elf_section(&elf, i, NULL, &shdr);
-
- if (shdr.sh_type == SHT_SYMTAB)
- {
- for (ofst = shdr.sh_offset;
- ofst < shdr.sh_offset + shdr.sh_size;
- ofst += shdr.sh_entsize)
- {
- Elf64_Sym sym;
-
- parse_elf_symbol(&elf, ofst, NULL, &sym);
-
- /* For all OBJECTS (data objects), extract the value from the
- * proper data segment.
- */
- /* if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
- log_msg("found data object %s\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name));
- */
-
- if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT
- && sym.st_size == 4)
- {
- Elf64_Shdr dhdr;
- int val = 0;
- char section_name[128];
-
- parse_elf_section(&elf, sym.st_shndx, NULL, &dhdr);
-
- /* For explanition - refer to _MSC_VER version of code */
- strcpy(section_name, (char *)(elf.buf + strtab_off64 + dhdr.sh_name));
- /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
-
- if ((strcmp(section_name, ".bss")))
- {
- if (sizeof(val) != sym.st_size)
- {
- /* The target value is declared as an int in
- * asm_*_offsets.c, which is 4 bytes on all
- * targets we currently use. Complain loudly if
- * this is not true.
- */
- log_msg("Symbol size is wrong\n");
- goto bail;
- }
-
- memcpy(&val,
- elf.buf + dhdr.sh_offset + sym.st_value,
- sym.st_size);
- }
-
- if (!elf.le_data)
- {
- log_msg("Big Endian data not supported yet!\n");
- goto bail;
- }
-
- switch (mode)
- {
- case OUTPUT_FMT_RVDS:
- printf("%-40s EQU %5d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- break;
- case OUTPUT_FMT_GAS:
- printf(".equ %-40s, %5d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- break;
- default:
- printf("%s = %d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- }
- }
- }
+ } else { /* if (elf.bits == 64) */
+ Elf64_Shdr shdr;
+ for (i = 0; i < elf.hdr64.e_shnum; i++) {
+ parse_elf_section(&elf, i, NULL, &shdr);
+
+ if (shdr.sh_type == SHT_SYMTAB) {
+ for (ofst = shdr.sh_offset;
+ ofst < shdr.sh_offset + shdr.sh_size;
+ ofst += shdr.sh_entsize) {
+ Elf64_Sym sym;
+
+ parse_elf_symbol(&elf, ofst, NULL, &sym);
+
+ /* For all OBJECTS (data objects), extract the value from the
+ * proper data segment.
+ */
+ /* if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
+ log_msg("found data object %s\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name));
+ */
+
+ if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT
+ && sym.st_size == 4) {
+ Elf64_Shdr dhdr;
+ int val = 0;
+ char section_name[128];
+
+ parse_elf_section(&elf, sym.st_shndx, NULL, &dhdr);
+
+          /* For explanation - refer to _MSC_VER version of code */
+ strcpy(section_name, (char *)(elf.buf + strtab_off64 + dhdr.sh_name));
+ /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
+
+ if ((strcmp(section_name, ".bss"))) {
+ if (sizeof(val) != sym.st_size) {
+ /* The target value is declared as an int in
+ * *_asm_*_offsets.c, which is 4 bytes on all
+ * targets we currently use. Complain loudly if
+ * this is not true.
+ */
+ log_msg("Symbol size is wrong\n");
+ goto bail;
+ }
+
+ memcpy(&val,
+ elf.buf + dhdr.sh_offset + sym.st_value,
+ sym.st_size);
+ }
+
+ if (!elf.le_data) {
+ log_msg("Big Endian data not supported yet!\n");
+ goto bail;
+ }
+
+ switch (mode) {
+ case OUTPUT_FMT_RVDS:
+ printf("%-40s EQU %5d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
+ break;
+ case OUTPUT_FMT_GAS:
+ printf(".equ %-40s, %5d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
+ break;
+ default:
+ printf("%s = %d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
}
+ }
}
+ }
}
+ }
- if (mode == OUTPUT_FMT_RVDS)
- printf(" END\n");
+ if (mode == OUTPUT_FMT_RVDS)
+ printf(" END\n");
- return 0;
+ return 0;
bail:
- log_msg("Parse error: File does not appear to be valid ELF32 or ELF64\n");
- return 1;
+ log_msg("Parse error: File does not appear to be valid ELF32 or ELF64\n");
+ return 1;
}
#endif
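The ELF64 branch above mirrors the ELF32 path: walk the section headers, find SHT_SYMTAB, and for every 4-byte STT_OBJECT symbol copy its value out of the section that contains it. A minimal standalone sketch of that lookup (C, assuming standard <elf.h> definitions and a little-endian image; the helper name is hypothetical, and the SHT_NOBITS test stands in for the tool's ".bss" name comparison):

    #include <elf.h>
    #include <stdint.h>
    #include <string.h>

    /* Sketch: read the value of a 4-byte data object out of an ELF64
     * image already loaded at buf. Bounds checks omitted for brevity.
     */
    static int read_object_value(const uint8_t *buf, const Elf64_Sym *sym,
                                 int *out) {
        const Elf64_Ehdr *ehdr = (const Elf64_Ehdr *)buf;
        const Elf64_Shdr *shdrs = (const Elf64_Shdr *)(buf + ehdr->e_shoff);
        const Elf64_Shdr *dhdr = &shdrs[sym->st_shndx];

        if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT || sym->st_size != 4)
            return -1;              /* only int-sized data objects */

        if (dhdr->sh_type == SHT_NOBITS) {
            *out = 0;               /* .bss carries no file contents */
            return 0;
        }

        /* st_value is the object's offset within its section. */
        memcpy(out, buf + dhdr->sh_offset + sym->st_value, sizeof(*out));
        return 0;
    }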
@@ -671,244 +606,222 @@ bail:
#define get_le32(x) ((*(x)) | (*(x+1)) << 8 |(*(x+2)) << 16 | (*(x+3)) << 24 )
#define get_le16(x) ((*(x)) | (*(x+1)) << 8)
-int parse_coff(uint8_t *buf, size_t sz)
-{
- unsigned int nsections, symtab_ptr, symtab_sz, strtab_ptr;
- unsigned int sectionrawdata_ptr;
- unsigned int i;
- uint8_t *ptr;
- uint32_t symoffset;
-
- char **sectionlist; //this array holds all section names in their correct order.
- //it is used to check if the symbol is in .bss or .rdata section.
-
- nsections = get_le16(buf + 2);
- symtab_ptr = get_le32(buf + 8);
- symtab_sz = get_le32(buf + 12);
- strtab_ptr = symtab_ptr + symtab_sz * 18;
-
- if (nsections > 96)
- {
- log_msg("Too many sections\n");
- return 1;
- }
+int parse_coff(uint8_t *buf, size_t sz) {
+ unsigned int nsections, symtab_ptr, symtab_sz, strtab_ptr;
+ unsigned int sectionrawdata_ptr;
+ unsigned int i;
+ uint8_t *ptr;
+ uint32_t symoffset;
- sectionlist = malloc(nsections * sizeof(sectionlist));
+  char **sectionlist; // This array holds all section names in their correct order.
+  // It is used to check whether the symbol is in the .bss or .rdata section.
- if (sectionlist == NULL)
- {
- log_msg("Allocating first level of section list failed\n");
- return 1;
- }
-
- //log_msg("COFF: Found %u symbols in %u sections.\n", symtab_sz, nsections);
+ nsections = get_le16(buf + 2);
+ symtab_ptr = get_le32(buf + 8);
+ symtab_sz = get_le32(buf + 12);
+ strtab_ptr = symtab_ptr + symtab_sz * 18;
- /*
- The size of optional header is always zero for an obj file. So, the section header
- follows the file header immediately.
- */
+ if (nsections > 96) {
+ log_msg("Too many sections\n");
+ return 1;
+ }
- ptr = buf + 20; //section header
+ sectionlist = malloc(nsections * sizeof(sectionlist));
- for (i = 0; i < nsections; i++)
- {
- char sectionname[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
- strncpy(sectionname, ptr, 8);
- //log_msg("COFF: Parsing section %s\n",sectionname);
+ if (sectionlist == NULL) {
+ log_msg("Allocating first level of section list failed\n");
+ return 1;
+ }
- sectionlist[i] = malloc(strlen(sectionname) + 1);
+ // log_msg("COFF: Found %u symbols in %u sections.\n", symtab_sz, nsections);
- if (sectionlist[i] == NULL)
- {
- log_msg("Allocating storage for %s failed\n", sectionname);
- goto bail;
- }
- strcpy(sectionlist[i], sectionname);
+  /*
+      The size of the optional header is always zero for an obj file, so the
+      section header follows the file header immediately.
+  */
- if (!strcmp(sectionname, ".rdata")) sectionrawdata_ptr = get_le32(ptr + 20);
+ ptr = buf + 20; // section header
- ptr += 40;
- }
+ for (i = 0; i < nsections; i++) {
+ char sectionname[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+ strncpy(sectionname, ptr, 8);
+ // log_msg("COFF: Parsing section %s\n",sectionname);
- //log_msg("COFF: Symbol table at offset %u\n", symtab_ptr);
- //log_msg("COFF: raw data pointer ofset for section .rdata is %u\n", sectionrawdata_ptr);
-
- /* The compiler puts the data with non-zero offset in .rdata section, but puts the data with
- zero offset in .bss section. So, if the data in in .bss section, set offset=0.
- Note from Wiki: In an object module compiled from C, the bss section contains
- the local variables (but not functions) that were declared with the static keyword,
- except for those with non-zero initial values. (In C, static variables are initialized
- to zero by default.) It also contains the non-local (both extern and static) variables
- that are also initialized to zero (either explicitly or by default).
- */
- //move to symbol table
- /* COFF symbol table:
- offset field
- 0 Name(*)
- 8 Value
- 12 SectionNumber
- 14 Type
- 16 StorageClass
- 17 NumberOfAuxSymbols
- */
- ptr = buf + symtab_ptr;
-
- for (i = 0; i < symtab_sz; i++)
- {
- int16_t section = get_le16(ptr + 12); //section number
-
- if (section > 0 && ptr[16] == 2)
- {
- //if(section > 0 && ptr[16] == 3 && get_le32(ptr+8)) {
-
- if (get_le32(ptr))
- {
- char name[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
- strncpy(name, ptr, 8);
- //log_msg("COFF: Parsing symbol %s\n",name);
- /* The 64bit Windows compiler doesn't prefix with an _.
- * Check what's there, and bump if necessary
- */
- if (name[0] == '_')
- printf("%-40s EQU ", name + 1);
- else
- printf("%-40s EQU ", name);
- }
- else
- {
- //log_msg("COFF: Parsing symbol %s\n",
- // buf + strtab_ptr + get_le32(ptr+4));
- if ((buf + strtab_ptr + get_le32(ptr + 4))[0] == '_')
- printf("%-40s EQU ",
- buf + strtab_ptr + get_le32(ptr + 4) + 1);
- else
- printf("%-40s EQU ", buf + strtab_ptr + get_le32(ptr + 4));
- }
+ sectionlist[i] = malloc(strlen(sectionname) + 1);
- if (!(strcmp(sectionlist[section-1], ".bss")))
- {
- symoffset = 0;
- }
- else
- {
- symoffset = get_le32(buf + sectionrawdata_ptr + get_le32(ptr + 8));
- }
+ if (sectionlist[i] == NULL) {
+ log_msg("Allocating storage for %s failed\n", sectionname);
+ goto bail;
+ }
+ strcpy(sectionlist[i], sectionname);
+
+ if (!strcmp(sectionname, ".rdata")) sectionrawdata_ptr = get_le32(ptr + 20);
+
+ ptr += 40;
+ }
+
+ // log_msg("COFF: Symbol table at offset %u\n", symtab_ptr);
+ // log_msg("COFF: raw data pointer ofset for section .rdata is %u\n", sectionrawdata_ptr);
+
+  /* The compiler puts data with a non-zero offset in the .rdata section, but puts data with
+     a zero offset in the .bss section. So, if the data is in the .bss section, set offset=0.
+ Note from Wiki: In an object module compiled from C, the bss section contains
+ the local variables (but not functions) that were declared with the static keyword,
+ except for those with non-zero initial values. (In C, static variables are initialized
+ to zero by default.) It also contains the non-local (both extern and static) variables
+ that are also initialized to zero (either explicitly or by default).
+ */
+ // move to symbol table
+ /* COFF symbol table:
+ offset field
+ 0 Name(*)
+ 8 Value
+ 12 SectionNumber
+ 14 Type
+ 16 StorageClass
+ 17 NumberOfAuxSymbols
+ */
+ ptr = buf + symtab_ptr;
+
+ for (i = 0; i < symtab_sz; i++) {
+ int16_t section = get_le16(ptr + 12); // section number
+
+ if (section > 0 && ptr[16] == 2) {
+ // if(section > 0 && ptr[16] == 3 && get_le32(ptr+8)) {
+
+ if (get_le32(ptr)) {
+ char name[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+ strncpy(name, ptr, 8);
+ // log_msg("COFF: Parsing symbol %s\n",name);
+        /* The 64-bit Windows compiler doesn't prefix symbols with an _.
+         * Check what's there, and skip the underscore if necessary.
+ */
+ if (name[0] == '_')
+ printf("%-40s EQU ", name + 1);
+ else
+ printf("%-40s EQU ", name);
+ } else {
+ // log_msg("COFF: Parsing symbol %s\n",
+ // buf + strtab_ptr + get_le32(ptr+4));
+ if ((buf + strtab_ptr + get_le32(ptr + 4))[0] == '_')
+ printf("%-40s EQU ",
+ buf + strtab_ptr + get_le32(ptr + 4) + 1);
+ else
+ printf("%-40s EQU ", buf + strtab_ptr + get_le32(ptr + 4));
+ }
- //log_msg(" Section: %d\n",section);
- //log_msg(" Class: %d\n",ptr[16]);
- //log_msg(" Address: %u\n",get_le32(ptr+8));
- //log_msg(" Offset: %u\n", symoffset);
+ if (!(strcmp(sectionlist[section - 1], ".bss"))) {
+ symoffset = 0;
+ } else {
+ symoffset = get_le32(buf + sectionrawdata_ptr + get_le32(ptr + 8));
+ }
- printf("%5d\n", symoffset);
- }
+ // log_msg(" Section: %d\n",section);
+ // log_msg(" Class: %d\n",ptr[16]);
+ // log_msg(" Address: %u\n",get_le32(ptr+8));
+ // log_msg(" Offset: %u\n", symoffset);
- ptr += 18;
+ printf("%5d\n", symoffset);
}
- printf(" END\n");
+ ptr += 18;
+ }
- for (i = 0; i < nsections; i++)
- {
- free(sectionlist[i]);
- }
+ printf(" END\n");
- free(sectionlist);
+ for (i = 0; i < nsections; i++) {
+ free(sectionlist[i]);
+ }
- return 0;
+ free(sectionlist);
+
+ return 0;
bail:
- for (i = 0; i < nsections; i++)
- {
- free(sectionlist[i]);
- }
+ for (i = 0; i < nsections; i++) {
+ free(sectionlist[i]);
+ }
- free(sectionlist);
+ free(sectionlist);
- return 1;
+ return 1;
}
#endif /* defined(_MSC_VER) || defined(__MINGW32__) || defined(__CYGWIN__) */
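parse_coff() above rests on two things: byte-wise little-endian readers, so the tool behaves the same on any host, and the fixed 18-byte COFF symbol record whose field offsets are listed in the comment block. A condensed sketch of the same walk (C; the reader macros are restated, storage class 2 is IMAGE_SYM_CLASS_EXTERNAL, and the function name is hypothetical):

    #include <stdint.h>

    #define GET_LE16(p) ((uint16_t)((p)[0] | ((p)[1] << 8)))
    #define GET_LE32(p) ((uint32_t)((p)[0] | ((p)[1] << 8) | \
                                    ((p)[2] << 16) | ((p)[3] << 24)))

    /* Sketch: visit every defined external symbol in a COFF object. */
    static void walk_coff_symbols(const uint8_t *buf) {
        uint32_t symtab_ptr = GET_LE32(buf + 8);  /* PointerToSymbolTable */
        uint32_t nsyms = GET_LE32(buf + 12);      /* NumberOfSymbols */
        const uint8_t *sym = buf + symtab_ptr;
        uint32_t i;

        for (i = 0; i < nsyms; i++, sym += 18) {  /* records are 18 bytes */
            int16_t section = (int16_t)GET_LE16(sym + 12);

            if (section > 0 && sym[16] == 2) {    /* defined and external */
                /* sym + 0: 8-byte short name, or a string-table offset
                 * at sym + 4 when the first 4 bytes are zero;
                 * sym + 8: the symbol's value.
                 */
            }
        }
    }

Auxiliary records occupy the same 18-byte slots; like the tool, the sketch simply steps through them.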
-int main(int argc, char **argv)
-{
- output_fmt_t mode = OUTPUT_FMT_PLAIN;
- const char *f;
- uint8_t *file_buf;
- int res;
- FILE *fp;
- long int file_size;
-
- if (argc < 2 || argc > 3)
- {
- fprintf(stderr, "Usage: %s [output format] <obj file>\n\n", argv[0]);
- fprintf(stderr, " <obj file>\tobject file to parse\n");
- fprintf(stderr, "Output Formats:\n");
- fprintf(stderr, " gas - compatible with GNU assembler\n");
- fprintf(stderr, " rvds - compatible with armasm\n");
- goto bail;
- }
-
- f = argv[2];
-
- if (!strcmp(argv[1], "rvds"))
- mode = OUTPUT_FMT_RVDS;
- else if (!strcmp(argv[1], "gas"))
- mode = OUTPUT_FMT_GAS;
- else
- f = argv[1];
-
- fp = fopen(f, "rb");
-
- if (!fp)
- {
- perror("Unable to open file");
- goto bail;
- }
-
- if (fseek(fp, 0, SEEK_END))
- {
- perror("stat");
- goto bail;
- }
-
- file_size = ftell(fp);
- file_buf = malloc(file_size);
-
- if (!file_buf)
- {
- perror("malloc");
- goto bail;
- }
-
- rewind(fp);
-
- if (fread(file_buf, sizeof(char), file_size, fp) != file_size)
- {
- perror("read");
- goto bail;
- }
-
- if (fclose(fp))
- {
- perror("close");
- goto bail;
- }
+int main(int argc, char **argv) {
+ output_fmt_t mode = OUTPUT_FMT_PLAIN;
+ const char *f;
+ uint8_t *file_buf;
+ int res;
+ FILE *fp;
+ long int file_size;
+
+ if (argc < 2 || argc > 3) {
+ fprintf(stderr, "Usage: %s [output format] <obj file>\n\n", argv[0]);
+ fprintf(stderr, " <obj file>\tobject file to parse\n");
+ fprintf(stderr, "Output Formats:\n");
+ fprintf(stderr, " gas - compatible with GNU assembler\n");
+ fprintf(stderr, " rvds - compatible with armasm\n");
+ goto bail;
+ }
+
+ f = argv[2];
+
+ if (!strcmp(argv[1], "rvds"))
+ mode = OUTPUT_FMT_RVDS;
+ else if (!strcmp(argv[1], "gas"))
+ mode = OUTPUT_FMT_GAS;
+ else
+ f = argv[1];
+
+ fp = fopen(f, "rb");
+
+ if (!fp) {
+ perror("Unable to open file");
+ goto bail;
+ }
+
+ if (fseek(fp, 0, SEEK_END)) {
+ perror("stat");
+ goto bail;
+ }
+
+ file_size = ftell(fp);
+ file_buf = malloc(file_size);
+
+ if (!file_buf) {
+ perror("malloc");
+ goto bail;
+ }
+
+ rewind(fp);
+
+ if (fread(file_buf, sizeof(char), file_size, fp) != file_size) {
+ perror("read");
+ goto bail;
+ }
+
+ if (fclose(fp)) {
+ perror("close");
+ goto bail;
+ }
#if defined(__GNUC__) && __GNUC__
#if defined(__MACH__)
- res = parse_macho(file_buf, file_size);
+ res = parse_macho(file_buf, file_size, mode);
#elif defined(__ELF__)
- res = parse_elf(file_buf, file_size, mode);
+ res = parse_elf(file_buf, file_size, mode);
#endif
#endif
#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__CYGWIN__)
- res = parse_coff(file_buf, file_size);
+ res = parse_coff(file_buf, file_size);
#endif
- free(file_buf);
+ free(file_buf);
- if (!res)
- return EXIT_SUCCESS;
+ if (!res)
+ return EXIT_SUCCESS;
bail:
- return EXIT_FAILURE;
+ return EXIT_FAILURE;
}
diff --git a/libvpx/build/make/rtcd.sh b/libvpx/build/make/rtcd.sh
index ddf9e09..6cc3684 100755
--- a/libvpx/build/make/rtcd.sh
+++ b/libvpx/build/make/rtcd.sh
@@ -59,13 +59,13 @@ for f in $defs_file; do [ -f "$f" ] || usage; done
# Routines for the RTCD DSL to call
#
prototype() {
- local rtyp
+ rtyp=""
case "$1" in
unsigned) rtyp="$1 "; shift;;
esac
rtyp="${rtyp}$1"
- local fn="$2"
- local args="$3"
+ fn="$2"
+ args="$3"
eval "${2}_rtyp='$rtyp'"
eval "${2}_args='$3'"
@@ -74,7 +74,7 @@ prototype() {
}
specialize() {
- local fn="$1"
+ fn="$1"
shift
for opt in "$@"; do
eval "${fn}_${opt}=${fn}_${opt}"
@@ -84,13 +84,13 @@ specialize() {
require() {
for fn in $ALL_FUNCS; do
for opt in "$@"; do
- local ofn=$(eval "echo \$${fn}_${opt}")
+ ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
# if we already have a default, then we can disable it, as we know
# we can do better.
- local best=$(eval "echo \$${fn}_default")
- local best_ofn=$(eval "echo \$${best}")
+ best=$(eval "echo \$${fn}_default")
+ best_ofn=$(eval "echo \$${best}")
[ -n "$best" ] && [ "$best_ofn" != "$ofn" ] && eval "${best}_link=false"
eval "${fn}_default=${fn}_${opt}"
eval "${fn}_${opt}_link=true"
@@ -121,15 +121,15 @@ process_forward_decls() {
determine_indirection() {
[ "$CONFIG_RUNTIME_CPU_DETECT" = "yes" ] || require $ALL_ARCHS
for fn in $ALL_FUNCS; do
- local n=""
- local rtyp="$(eval "echo \$${fn}_rtyp")"
- local args="$(eval "echo \"\$${fn}_args\"")"
- local dfn="$(eval "echo \$${fn}_default")"
+ n=""
+ rtyp="$(eval "echo \$${fn}_rtyp")"
+ args="$(eval "echo \"\$${fn}_args\"")"
+ dfn="$(eval "echo \$${fn}_default")"
dfn=$(eval "echo \$${dfn}")
for opt in "$@"; do
- local ofn=$(eval "echo \$${fn}_${opt}")
+ ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
- local link=$(eval "echo \$${fn}_${opt}_link")
+ link=$(eval "echo \$${fn}_${opt}_link")
[ "$link" = "false" ] && continue
n="${n}x"
done
@@ -143,12 +143,12 @@ determine_indirection() {
declare_function_pointers() {
for fn in $ALL_FUNCS; do
- local rtyp="$(eval "echo \$${fn}_rtyp")"
- local args="$(eval "echo \"\$${fn}_args\"")"
- local dfn="$(eval "echo \$${fn}_default")"
+ rtyp="$(eval "echo \$${fn}_rtyp")"
+ args="$(eval "echo \"\$${fn}_args\"")"
+ dfn="$(eval "echo \$${fn}_default")"
dfn=$(eval "echo \$${dfn}")
for opt in "$@"; do
- local ofn=$(eval "echo \$${fn}_${opt}")
+ ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
echo "$rtyp ${ofn}($args);"
done
@@ -163,20 +163,20 @@ declare_function_pointers() {
set_function_pointers() {
for fn in $ALL_FUNCS; do
- local n=""
- local rtyp="$(eval "echo \$${fn}_rtyp")"
- local args="$(eval "echo \"\$${fn}_args\"")"
- local dfn="$(eval "echo \$${fn}_default")"
+ n=""
+ rtyp="$(eval "echo \$${fn}_rtyp")"
+ args="$(eval "echo \"\$${fn}_args\"")"
+ dfn="$(eval "echo \$${fn}_default")"
dfn=$(eval "echo \$${dfn}")
if $(eval "echo \$${fn}_indirect"); then
echo " $fn = $dfn;"
for opt in "$@"; do
- local ofn=$(eval "echo \$${fn}_${opt}")
+ ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
[ "$ofn" = "$dfn" ] && continue;
- local link=$(eval "echo \$${fn}_${opt}_link")
+ link=$(eval "echo \$${fn}_${opt}_link")
[ "$link" = "false" ] && continue
- local cond="$(eval "echo \$have_${opt}")"
+ cond="$(eval "echo \$have_${opt}")"
echo " if (${cond}) $fn = $ofn;"
done
fi
@@ -185,7 +185,7 @@ set_function_pointers() {
}
filter() {
- local filtered
+ filtered=""
for opt in "$@"; do
[ -z $(eval "echo \$disable_${opt}") ] && filtered="$filtered $opt"
done
@@ -196,8 +196,9 @@ filter() {
# Helper functions for generating the arch specific RTCD files
#
common_top() {
- local outfile_basename=$(basename ${symbol:-rtcd.h})
- local include_guard=$(echo $outfile_basename | tr '[a-z]' '[A-Z]' | tr -c '[A-Z]' _)
+ outfile_basename=$(basename ${symbol:-rtcd})
+ include_guard=$(echo $outfile_basename | tr '[a-z]' '[A-Z]' | \
+ tr -c '[A-Z0-9]' _)H_
cat <<EOF
#ifndef ${include_guard}
#define ${include_guard}
@@ -227,7 +228,7 @@ x86() {
# Assign the helper variable for each enabled extension
for opt in $ALL_ARCHS; do
- local uc=$(echo $opt | tr '[a-z]' '[A-Z]')
+ uc=$(echo $opt | tr '[a-z]' '[A-Z]')
eval "have_${opt}=\"flags & HAS_${uc}\""
done
@@ -254,7 +255,7 @@ arm() {
# Assign the helper variable for each enabled extension
for opt in $ALL_ARCHS; do
- local uc=$(echo $opt | tr '[a-z]' '[A-Z]')
+ uc=$(echo $opt | tr '[a-z]' '[A-Z]')
eval "have_${opt}=\"flags & HAS_${uc}\""
done
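The rtcd.sh hunks above replace the bash-only local keyword with plain assignments, matching the #!/bin/bash to #!/bin/sh shebang changes elsewhere in this merge so the script runs under a strictly POSIX shell. What the script emits is C: one function pointer per RTCD entry plus a setup routine that retargets it from the detected CPU flags. A hedged sketch of that generated shape (the function name and signature are hypothetical; arm_cpu_caps() and HAS_NEON are the real vpx_ports helpers):

    #include "vpx_ports/arm.h"  /* arm_cpu_caps(), HAS_NEON */

    /* Hypothetical RTCD entry: a C default plus a NEON specialization. */
    void vpx_example_filter_c(unsigned char *buf, int len);
    void vpx_example_filter_neon(unsigned char *buf, int len);

    /* The generated header declares the pointer that callers use... */
    void (*vpx_example_filter)(unsigned char *buf, int len);

    /* ...and a setup routine that picks the best implementation once. */
    static void setup_rtcd_internal(void) {
        int flags = arm_cpu_caps();

        vpx_example_filter = vpx_example_filter_c;  /* safe default */
        if (flags & HAS_NEON)
            vpx_example_filter = vpx_example_filter_neon;
    }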
diff --git a/libvpx/build/make/thumb.pm b/libvpx/build/make/thumb.pm
new file mode 100644
index 0000000..f347287
--- /dev/null
+++ b/libvpx/build/make/thumb.pm
@@ -0,0 +1,70 @@
+#!/usr/bin/perl
+##
+## Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+package thumb;
+
+sub FixThumbInstructions($$)
+{
+ my $short_branches = $_[1];
+ my $branch_shift_offset = $short_branches ? 1 : 0;
+
+ # Write additions with shifts, such as "add r10, r11, lsl #8",
+ # in three operand form, "add r10, r10, r11, lsl #8".
+ s/(add\s+)(r\d+),\s*(r\d+),\s*(lsl #\d+)/$1$2, $2, $3, $4/g;
+
+ # Convert additions with a non-constant shift into a sequence
+ # with left shift, addition and a right shift (to restore the
+ # register to the original value). Currently the right shift
+ # isn't necessary in the code base since the values in these
+    # registers aren't used, but we do the shift for consistency.
+ # This converts instructions such as "add r12, r12, r5, lsl r4"
+ # into the sequence "lsl r5, r4", "add r12, r12, r5", "lsr r5, r4".
+ s/^(\s*)(add)(\s+)(r\d+),\s*(r\d+),\s*(r\d+),\s*lsl (r\d+)/$1lsl$3$6, $7\n$1$2$3$4, $5, $6\n$1lsr$3$6, $7/g;
+
+ # Convert loads with right shifts in the indexing into a
+ # sequence of an add, load and sub. This converts
+ # "ldrb r4, [r9, lr, asr #1]" into "add r9, r9, lr, asr #1",
+ # "ldrb r9, [r9]", "sub r9, r9, lr, asr #1".
+ s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+),\s*(asr #\d+)\]/$1add $3$5, $5, $6, $7\n$1$2$3$4, [$5]\n$1sub $3$5, $5, $6, $7/g;
+
+ # Convert register indexing with writeback into a separate add
+ # instruction. This converts "ldrb r12, [r1, r2]!" into
+ # "ldrb r12, [r1, r2]", "add r1, r1, r2".
+ s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+)\]!/$1$2$3$4, [$5, $6]\n$1add $3$5, $6/g;
+
+ # Convert negative register indexing into separate sub/add instructions.
+ # This converts "ldrne r4, [src, -pstep, lsl #1]" into
+ # "subne src, src, pstep, lsl #1", "ldrne r4, [src]",
+ # "addne src, src, pstep, lsl #1". In a couple of cases where
+ # this is used, it's used for two subsequent load instructions,
+ # where a hand-written version of it could merge two subsequent
+ # add and sub instructions.
+ s/^(\s*)((ldr|str)(ne)?)(\s+)(r\d+),\s*\[(\w+), -([^\]]+)\]/$1sub$4$5$7, $7, $8\n$1$2$5$6, [$7]\n$1add$4$5$7, $7, $8/g;
+
+ # Convert register post indexing to a separate add instruction.
+ # This converts "ldrneb r9, [r0], r2" into "ldrneb r9, [r0]",
+ # "add r0, r2".
+ s/^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),(\s*\w+,)?\s*\[(\w+)\],\s*(\w+)/$1$2$5$6,$7 [$8]\n$1add$4$5$8, $8, $9/g;
+
+ # Convert a conditional addition to the pc register into a series of
+ # instructions. This converts "addlt pc, pc, r3, lsl #2" into
+ # "itttt lt", "movlt.n r12, pc", "addlt.w r12, #12",
+ # "addlt.w r12, r12, r3, lsl #2", "movlt.n pc, r12".
+ # This assumes that r12 is free at this point.
+ s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1itttt$2lt\n$1movlt.n$2r12, pc\n$1addlt.w$2r12, #12\n$1addlt.w$2r12, r12, $3, lsl #($4-$branch_shift_offset)\n$1movlt.n$2pc, r12/g;
+
+ # Convert "mov pc, lr" into "bx lr", since the former only works
+ # for switching from arm to thumb (and only in armv7), but not
+ # from thumb to arm.
+ s/mov(\s*)pc\s*,\s*lr/bx$1lr/g;
+}
+
+1;
diff --git a/libvpx/build/make/version.sh b/libvpx/build/make/version.sh
index 3efb956..e31e568 100755
--- a/libvpx/build/make/version.sh
+++ b/libvpx/build/make/version.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
##
## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
##
diff --git a/libvpx/build/x86-msvs/obj_int_extract.bat b/libvpx/build/x86-msvs/obj_int_extract.bat
index 1bb8653..4e9b0ec 100644
--- a/libvpx/build/x86-msvs/obj_int_extract.bat
+++ b/libvpx/build/x86-msvs/obj_int_extract.bat
@@ -7,9 +7,6 @@ REM in the file PATENTS. All contributing project authors may
REM be found in the AUTHORS file in the root of the source tree.
echo on
-cl /I "./" /I "%1" /nologo /c "%1/vp8/common/asm_com_offsets.c"
-cl /I "./" /I "%1" /nologo /c "%1/vp8/decoder/asm_dec_offsets.c"
-cl /I "./" /I "%1" /nologo /c "%1/vp8/encoder/asm_enc_offsets.c"
-obj_int_extract.exe rvds "asm_com_offsets.obj" > "asm_com_offsets.asm"
-obj_int_extract.exe rvds "asm_dec_offsets.obj" > "asm_dec_offsets.asm"
-obj_int_extract.exe rvds "asm_enc_offsets.obj" > "asm_enc_offsets.asm"
+cl /I "./" /I "%1" /nologo /c "%1/vp8/encoder/vp8_asm_enc_offsets.c"
+obj_int_extract.exe rvds "vp8_asm_enc_offsets.obj" > "vp8_asm_enc_offsets.asm"
+
diff --git a/libvpx/build/x86-msvs/yasm.rules b/libvpx/build/x86-msvs/yasm.rules
deleted file mode 100644
index ee1fefb..0000000
--- a/libvpx/build/x86-msvs/yasm.rules
+++ /dev/null
@@ -1,115 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<VisualStudioToolFile
- Name="Yasm"
- Version="8.00"
- >
- <Rules>
- <CustomBuildRule
- Name="YASM"
- DisplayName="Yasm Assembler"
- CommandLine="yasm -Xvc -f $(PlatformName) [AllOptions] [AdditionalOptions] [Inputs]"
- Outputs="[$ObjectFileName]"
- FileExtensions="*.asm"
- ExecutionDescription="Assembling $(InputFileName)"
- ShowOnlyRuleProperties="false"
- >
- <Properties>
- <StringProperty
- Name="Defines"
- DisplayName="Definitions"
- Category="Pre-Defined Symbols"
- Description="Specify pre-defined symbols (&apos;symbol&apos; or &apos;symbol = value&apos;) "
- Switch="-D [value]"
- Delimited="true"
- Inheritable="true"
- />
- <StringProperty
- Name="IncludePaths"
- DisplayName="Include Paths"
- Category="Configuration"
- Description="Set the paths for any additional include files"
- Switch="-I [value]"
- Delimited="true"
- Inheritable="true"
- />
- <StringProperty
- Name="UnDefines"
- DisplayName="Remove Definitions"
- Category="Pre-Defined Symbols"
- Description="Remove pre-defined symbols "
- Switch="-U [value]"
- Delimited="true"
- Inheritable="true"
- />
- <StringProperty
- Name="ObjectFileName"
- DisplayName="Object File Name"
- Category="Output"
- Description="Select the output file name"
- Switch="-o [value]"
- DefaultValue="$(IntDir)\$(InputName).obj"
- />
- <StringProperty
- Name="ListFileName"
- DisplayName="List File Name"
- Category="Output"
- Description="Select an output listing by setting its file name"
- Switch="-l [value]"
- />
- <StringProperty
- Name="PreIncludeFile"
- DisplayName="Pre Include File"
- Category="Configuration"
- Description="Select a pre-included file by setting its name"
- Switch="-P [value]"
- />
- <BooleanProperty
- Name="Debug"
- DisplayName="Debug Information"
- Category="Output"
- Description="Generate debugging information"
- Switch="-g cv8"
- />
- <EnumProperty
- Name="PreProc"
- DisplayName="Pre-Processor"
- Category="Configuration"
- Description="Select the pre-processor (&apos;nasm&apos; or &apos;raw&apos;)"
- >
- <Values>
- <EnumValue
- Value="0"
- Switch="-rnasm"
- DisplayName="Nasm "
- />
- <EnumValue
- Value="1"
- Switch="-rraw"
- DisplayName="Raw"
- />
- </Values>
- </EnumProperty>
- <EnumProperty
- Name="Parser"
- DisplayName="Parser"
- Category="Configuration"
- Description="Select the parser for Intel (&apos;nasm&apos;) or AT&amp;T ( &apos;gas&apos;) syntax"
- >
- <Values>
- <EnumValue
- Value="0"
- Switch="-pnasm"
- DisplayName="Nasm"
- />
- <EnumValue
- Value="1"
- Switch="-pgas"
- DisplayName="Gas"
- />
- </Values>
- </EnumProperty>
- </Properties>
- </CustomBuildRule>
- </Rules>
-</VisualStudioToolFile>
-
diff --git a/libvpx/configure b/libvpx/configure
index b3c5fe9..297cec4 100755
--- a/libvpx/configure
+++ b/libvpx/configure
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
##
## configure
##
@@ -34,9 +34,11 @@ Advanced options:
${toggle_md5} support for output of checksum data
${toggle_static_msvcrt} use static MSVCRT (VS builds only)
${toggle_vp8} VP8 codec support
+ ${toggle_vp9} VP9 codec support
${toggle_internal_stats} output of encoder internal stats for debug, if supported (encoders)
${toggle_mem_tracker} track memory usage
${toggle_postproc} postprocessing
+ ${toggle_vp9_postproc} vp9 specific postprocessing
${toggle_multithread} multithreaded encoding and decoding
${toggle_spatial_resampling} spatial sampling (scaling) support
${toggle_realtime_only} enable this option while building for real-time encoding
@@ -97,6 +99,7 @@ all_platforms="${all_platforms} armv7-darwin-gcc" #neon Cortex-A8
all_platforms="${all_platforms} armv7-linux-rvct" #neon Cortex-A8
all_platforms="${all_platforms} armv7-linux-gcc" #neon Cortex-A8
all_platforms="${all_platforms} armv7-none-rvct" #neon Cortex-A8
+all_platforms="${all_platforms} armv7-win32-vs11"
all_platforms="${all_platforms} mips32-linux-gcc"
all_platforms="${all_platforms} ppc32-darwin8-gcc"
all_platforms="${all_platforms} ppc32-darwin9-gcc"
@@ -105,6 +108,7 @@ all_platforms="${all_platforms} ppc64-darwin8-gcc"
all_platforms="${all_platforms} ppc64-darwin9-gcc"
all_platforms="${all_platforms} ppc64-linux-gcc"
all_platforms="${all_platforms} sparc-solaris-gcc"
+all_platforms="${all_platforms} x86-android-gcc"
all_platforms="${all_platforms} x86-darwin8-gcc"
all_platforms="${all_platforms} x86-darwin8-icc"
all_platforms="${all_platforms} x86-darwin9-gcc"
@@ -112,6 +116,7 @@ all_platforms="${all_platforms} x86-darwin9-icc"
all_platforms="${all_platforms} x86-darwin10-gcc"
all_platforms="${all_platforms} x86-darwin11-gcc"
all_platforms="${all_platforms} x86-darwin12-gcc"
+all_platforms="${all_platforms} x86-darwin13-gcc"
all_platforms="${all_platforms} x86-linux-gcc"
all_platforms="${all_platforms} x86-linux-icc"
all_platforms="${all_platforms} x86-os2-gcc"
@@ -120,21 +125,27 @@ all_platforms="${all_platforms} x86-win32-gcc"
all_platforms="${all_platforms} x86-win32-vs7"
all_platforms="${all_platforms} x86-win32-vs8"
all_platforms="${all_platforms} x86-win32-vs9"
+all_platforms="${all_platforms} x86-win32-vs10"
+all_platforms="${all_platforms} x86-win32-vs11"
all_platforms="${all_platforms} x86_64-darwin9-gcc"
all_platforms="${all_platforms} x86_64-darwin10-gcc"
all_platforms="${all_platforms} x86_64-darwin11-gcc"
all_platforms="${all_platforms} x86_64-darwin12-gcc"
+all_platforms="${all_platforms} x86_64-darwin13-gcc"
all_platforms="${all_platforms} x86_64-linux-gcc"
all_platforms="${all_platforms} x86_64-linux-icc"
all_platforms="${all_platforms} x86_64-solaris-gcc"
all_platforms="${all_platforms} x86_64-win64-gcc"
all_platforms="${all_platforms} x86_64-win64-vs8"
all_platforms="${all_platforms} x86_64-win64-vs9"
+all_platforms="${all_platforms} x86_64-win64-vs10"
+all_platforms="${all_platforms} x86_64-win64-vs11"
all_platforms="${all_platforms} universal-darwin8-gcc"
all_platforms="${all_platforms} universal-darwin9-gcc"
all_platforms="${all_platforms} universal-darwin10-gcc"
all_platforms="${all_platforms} universal-darwin11-gcc"
all_platforms="${all_platforms} universal-darwin12-gcc"
+all_platforms="${all_platforms} universal-darwin13-gcc"
all_platforms="${all_platforms} generic-gnu"
# all_targets is a list of all targets that can be configured
@@ -143,7 +154,7 @@ all_targets="libs examples docs"
# all targets available are enabled, by default.
for t in ${all_targets}; do
- [ -f ${source_path}/${t}.mk ] && enable ${t}
+ [ -f ${source_path}/${t}.mk ] && enable_feature ${t}
done
# check installed doxygen version
@@ -154,41 +165,46 @@ if [ ${doxy_major:-0} -ge 1 ]; then
doxy_minor=${doxy_version%%.*}
doxy_patch=${doxy_version##*.}
- [ $doxy_major -gt 1 ] && enable doxygen
- [ $doxy_minor -gt 5 ] && enable doxygen
- [ $doxy_minor -eq 5 ] && [ $doxy_patch -ge 3 ] && enable doxygen
+ [ $doxy_major -gt 1 ] && enable_feature doxygen
+ [ $doxy_minor -gt 5 ] && enable_feature doxygen
+ [ $doxy_minor -eq 5 ] && [ $doxy_patch -ge 3 ] && enable_feature doxygen
fi
# install everything except the sources, by default. sources will have
# to be enabled when doing dist builds, since that's no longer a common
# case.
-enabled doxygen && php -v >/dev/null 2>&1 && enable install_docs
-enable install_bins
-enable install_libs
-
-enable static
-enable optimizations
-enable fast_unaligned #allow unaligned accesses, if supported by hw
-enable md5
-enable spatial_resampling
-enable multithread
-enable os_support
-enable temporal_denoising
-
-[ -d ${source_path}/../include ] && enable alt_tree_layout
-for d in vp8; do
- [ -d ${source_path}/${d} ] && disable alt_tree_layout;
+enabled doxygen && php -v >/dev/null 2>&1 && enable_feature install_docs
+enable_feature install_bins
+enable_feature install_libs
+
+enable_feature static
+enable_feature optimizations
+enable_feature fast_unaligned #allow unaligned accesses, if supported by hw
+enable_feature md5
+enable_feature spatial_resampling
+enable_feature multithread
+enable_feature os_support
+enable_feature temporal_denoising
+
+[ -d ${source_path}/../include ] && enable_feature alt_tree_layout
+for d in vp8 vp9; do
+ [ -d ${source_path}/${d} ] && disable_feature alt_tree_layout;
done
if ! enabled alt_tree_layout; then
# development environment
[ -d ${source_path}/vp8 ] && CODECS="${CODECS} vp8_encoder vp8_decoder"
+[ -d ${source_path}/vp9 ] && CODECS="${CODECS} vp9_encoder vp9_decoder"
else
# customer environment
[ -f ${source_path}/../include/vpx/vp8cx.h ] && CODECS="${CODECS} vp8_encoder"
[ -f ${source_path}/../include/vpx/vp8dx.h ] && CODECS="${CODECS} vp8_decoder"
-[ -f ${source_path}/../include/vpx/vp8cx.h ] || disable vp8_encoder
-[ -f ${source_path}/../include/vpx/vp8dx.h ] || disable vp8_decoder
+[ -f ${source_path}/../include/vpx/vp9cx.h ] && CODECS="${CODECS} vp9_encoder"
+[ -f ${source_path}/../include/vpx/vp9dx.h ] && CODECS="${CODECS} vp9_decoder"
+[ -f ${source_path}/../include/vpx/vp8cx.h ] || disable_feature vp8_encoder
+[ -f ${source_path}/../include/vpx/vp8dx.h ] || disable_feature vp8_decoder
+[ -f ${source_path}/../include/vpx/vp9cx.h ] || disable_feature vp9_encoder
+[ -f ${source_path}/../include/vpx/vp9dx.h ] || disable_feature vp9_decoder
[ -f ${source_path}/../lib/*/*mt.lib ] && soft_enable static_msvcrt
fi
@@ -230,12 +246,19 @@ HAVE_LIST="
sys_mman_h
unistd_h
"
+EXPERIMENT_LIST="
+ oneshotq
+ multiple_arf
+ non420
+ alpha
+"
CONFIG_LIST="
external_build
install_docs
install_bins
install_libs
install_srcs
+ use_x86inc
debug
gprof
gcov
@@ -257,6 +280,7 @@ CONFIG_LIST="
dc_recon
runtime_cpu_detect
postproc
+ vp9_postproc
multithread
internal_stats
${CODECS}
@@ -276,8 +300,12 @@ CONFIG_LIST="
unit_tests
multi_res_encoding
temporal_denoising
+ experimental
+ decrypt
+ ${EXPERIMENT_LIST}
"
CMDLINE_SELECT="
+ external_build
extra_warnings
werror
install_docs
@@ -288,9 +316,11 @@ CMDLINE_SELECT="
gprof
gcov
pic
+ use_x86inc
optimizations
ccache
runtime_cpu_detect
+ thumb
libs
examples
@@ -305,6 +335,7 @@ CMDLINE_SELECT="
dequant_tokens
dc_recon
postproc
+ vp9_postproc
multithread
internal_stats
${CODECS}
@@ -322,13 +353,27 @@ CMDLINE_SELECT="
unit_tests
multi_res_encoding
temporal_denoising
+ experimental
+ decrypt
"
process_cmdline() {
for opt do
optval="${opt#*=}"
case "$opt" in
- --disable-codecs) for c in ${CODECS}; do disable $c; done ;;
+ --disable-codecs) for c in ${CODECS}; do disable_feature $c; done ;;
+ --enable-?*|--disable-?*)
+ eval `echo "$opt" | sed 's/--/action=/;s/-/ option=/;s/-/_/g'`
+ if echo "${EXPERIMENT_LIST}" | grep "^ *$option\$" >/dev/null; then
+ if enabled experimental; then
+ ${action}_feature $option
+ else
+ log_echo "Ignoring $opt -- not in experimental mode."
+ fi
+ else
+ process_common_cmdline $opt
+ fi
+ ;;
*) process_common_cmdline "$opt"
;;
esac
@@ -342,8 +387,8 @@ post_process_cmdline() {
# If the codec family is enabled, enable all components of that family.
log_echo "Configuring selected codecs"
for c in ${CODECS}; do
- disabled ${c%%_*} && disable ${c}
- enabled ${c%%_*} && enable ${c}
+ disabled ${c%%_*} && disable_feature ${c}
+ enabled ${c%%_*} && enable_feature ${c}
done
# Enable all detected codecs, if they haven't been disabled
@@ -351,12 +396,12 @@ post_process_cmdline() {
# Enable the codec family if any component of that family is enabled
for c in ${CODECS}; do
- enabled $c && enable ${c%_*}
+ enabled $c && enable_feature ${c%_*}
done
# Set the {en,de}coders variable if any algorithm in that class is enabled
for c in ${CODECS}; do
- enabled ${c} && enable ${c##*_}s
+ enabled ${c} && enable_feature ${c##*_}s
done
}
@@ -396,7 +441,7 @@ process_targets() {
done
enabled debug_libs && DIST_DIR="${DIST_DIR}-debug"
enabled codec_srcs && DIST_DIR="${DIST_DIR}-src"
- ! enabled postproc && DIST_DIR="${DIST_DIR}-nopost"
+ ! enabled postproc && ! enabled vp9_postproc && DIST_DIR="${DIST_DIR}-nopost"
! enabled multithread && DIST_DIR="${DIST_DIR}-nomt"
! enabled install_docs && DIST_DIR="${DIST_DIR}-nodocs"
DIST_DIR="${DIST_DIR}-${tgt_isa}-${tgt_os}"
@@ -464,15 +509,16 @@ process_detect() {
fi
fi
fi
- if [ -z "$CC" ]; then
+ if [ -z "$CC" ] || enabled external_build; then
echo "Bypassing toolchain for environment detection."
- enable external_build
+ enable_feature external_build
check_header() {
log fake_check_header "$@"
header=$1
shift
var=`echo $header | sed 's/[^A-Za-z0-9_]/_/g'`
- disable $var
+ disable_feature $var
+ # Headers common to all environments
case $header in
stdio.h)
true;
@@ -483,7 +529,26 @@ process_detect() {
[ -f "${d##-I}/$header" ] && result=true && break
done
${result:-true}
- esac && enable $var
+ esac && enable_feature $var
+
+ # Specialize windows and POSIX environments.
+ case $toolchain in
+ *-win*-*)
+ case $header-$toolchain in
+ stdint*-gcc) true;;
+ *) false;;
+ esac && enable_feature $var
+ ;;
+ *)
+ case $header in
+ stdint.h) true;;
+ pthread.h) true;;
+ sys/mman.h) true;;
+ unistd.h) true;;
+ *) false;;
+ esac && enable_feature $var
+ esac
+ enabled $var
}
check_ld() {
true
@@ -497,8 +562,9 @@ EOF
check_header stdint.h
check_header pthread.h
check_header sys/mman.h
+ check_header unistd.h # for sysconf(3) and friends.
- check_header vpx/vpx_integer.h -I${source_path} && enable vpx_ports
+ check_header vpx/vpx_integer.h -I${source_path} && enable_feature vpx_ports
}
process_toolchain() {
@@ -537,10 +603,14 @@ process_toolchain() {
check_add_cflags -Wpointer-arith
check_add_cflags -Wtype-limits
check_add_cflags -Wcast-qual
+ check_add_cflags -Wvla
check_add_cflags -Wimplicit-function-declaration
check_add_cflags -Wuninitialized
check_add_cflags -Wunused-variable
- check_add_cflags -Wunused-but-set-variable
+ case ${CC} in
+ *clang*) ;;
+ *) check_add_cflags -Wunused-but-set-variable ;;
+ esac
enabled extra_warnings || check_add_cflags -Wno-unused-function
fi
@@ -576,16 +646,31 @@ process_toolchain() {
# ccache only really works on gcc toolchains
enabled gcc || soft_disable ccache
if enabled mips; then
- enable dequant_tokens
- enable dc_recon
+ enable_feature dequant_tokens
+ enable_feature dc_recon
+ fi
+
+ if enabled internal_stats; then
+ enable_feature vp9_postproc
fi
# Enable the postbuild target if building for visual studio.
case "$tgt_cc" in
- vs*) enable msvs
- enable solution
+ vs*) enable_feature msvs
+ enable_feature solution
vs_version=${tgt_cc##vs}
+ case $vs_version in
+ [789])
+ VCPROJ_SFX=vcproj
+ gen_vcproj_cmd=${source_path}/build/make/gen_msvs_proj.sh
+ ;;
+ 10|11)
+ VCPROJ_SFX=vcxproj
+ gen_vcproj_cmd=${source_path}/build/make/gen_msvs_vcxproj.sh
+ ;;
+ esac
all_targets="${all_targets} solution"
+ INLINE="__forceinline"
;;
esac
@@ -596,7 +681,7 @@ process_toolchain() {
enabled postproc || die "postproc_visualizer requires postproc to be enabled"
fi
- # Enable unit tests if we have a working C++ compiler
+ # Enable unit tests by default if we have a working C++ compiler.
case "$toolchain" in
*-vs*)
soft_enable unit_tests
@@ -604,11 +689,23 @@ process_toolchain() {
*-android-*)
# GTestLog must be modified to use Android logging utilities.
;;
- *)
+ *-darwin-*)
+ # iOS/ARM builds do not work with gtest. This does not match
+ # x86 targets.
+ ;;
+ *-win*)
+ # Some mingw toolchains don't have pthread available by default.
+ # Treat these more like visual studio where threading in gtest
+ # would be disabled for the same reason.
check_cxx "$@" <<EOF && soft_enable unit_tests
int z;
EOF
;;
+ *)
+ enabled pthread_h && check_cxx "$@" <<EOF && soft_enable unit_tests
+int z;
+EOF
+ ;;
esac
}
diff --git a/libvpx/example_xma.c b/libvpx/example_xma.c
index 72eb470..7aa8798 100644
--- a/libvpx/example_xma.c
+++ b/libvpx/example_xma.c
@@ -18,197 +18,174 @@
#include "vpx_config.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_integer.h"
-#if CONFIG_VP8_DECODER
+#if CONFIG_VP9_DECODER
#include "vpx/vp8dx.h"
#endif
static char *exec_name;
static int verbose = 0;
-static const struct
-{
- const char *name;
- const vpx_codec_iface_t *iface;
-} ifaces[] =
-{
-#if CONFIG_VP8_DECODER
- {"vp8", &vpx_codec_vp8_dx_algo},
+static const struct {
+ const char *name;
+ const vpx_codec_iface_t *iface;
+} ifaces[] = {
+#if CONFIG_VP9_DECODER
+ {"vp9", &vpx_codec_vp8_dx_algo},
#endif
};
-static void usage_exit(void)
-{
- int i;
-
- printf("Usage: %s <options>\n\n"
- "Options:\n"
- "\t--codec <name>\tCodec to use (default=%s)\n"
- "\t-h <height>\tHeight of the simulated video frame, in pixels\n"
- "\t-w <width> \tWidth of the simulated video frame, in pixels\n"
- "\t-v \tVerbose mode (show individual segment sizes)\n"
- "\t--help \tShow this message\n"
- "\n"
- "Included decoders:\n"
- "\n",
- exec_name,
- ifaces[0].name);
-
- for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
- printf(" %-6s - %s\n",
- ifaces[i].name,
- vpx_codec_iface_name(ifaces[i].iface));
-
- exit(EXIT_FAILURE);
+static void usage_exit(void) {
+ int i;
+
+ printf("Usage: %s <options>\n\n"
+ "Options:\n"
+ "\t--codec <name>\tCodec to use (default=%s)\n"
+ "\t-h <height>\tHeight of the simulated video frame, in pixels\n"
+ "\t-w <width> \tWidth of the simulated video frame, in pixels\n"
+ "\t-v \tVerbose mode (show individual segment sizes)\n"
+ "\t--help \tShow this message\n"
+ "\n"
+ "Included decoders:\n"
+ "\n",
+ exec_name,
+ ifaces[0].name);
+
+ for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
+ printf(" %-6s - %s\n",
+ ifaces[i].name,
+ vpx_codec_iface_name(ifaces[i].iface));
+
+ exit(EXIT_FAILURE);
}
-static void usage_error(const char *fmt, ...)
-{
- va_list ap;
- va_start(ap, fmt);
- vprintf(fmt, ap);
- printf("\n");
- usage_exit();
+static void usage_error(const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ printf("\n");
+ usage_exit();
}
-void my_mem_dtor(vpx_codec_mmap_t *mmap)
-{
- if (verbose)
- printf("freeing segment %d\n", mmap->id);
+void my_mem_dtor(vpx_codec_mmap_t *mmap) {
+ if (verbose)
+ printf("freeing segment %d\n", mmap->id);
- free(mmap->priv);
+ free(mmap->priv);
}
-int main(int argc, char **argv)
-{
- vpx_codec_ctx_t decoder;
- vpx_codec_iface_t *iface = ifaces[0].iface;
- vpx_codec_iter_t iter;
- vpx_codec_dec_cfg_t cfg;
- vpx_codec_err_t res = VPX_CODEC_OK;
- unsigned int alloc_sz = 0;
- unsigned int w = 352;
- unsigned int h = 288;
- int i;
-
- exec_name = argv[0];
-
- for (i = 1; i < argc; i++)
- {
- if (!strcmp(argv[i], "--codec"))
- {
- if (i + 1 < argc)
- {
- int j, k = -1;
-
- i++;
-
- for (j = 0; j < sizeof(ifaces) / sizeof(ifaces[0]); j++)
- if (!strcmp(ifaces[j].name, argv[i]))
- k = j;
-
- if (k >= 0)
- iface = ifaces[k].iface;
- else
- usage_error("Error: Unrecognized argument (%s) to --codec\n",
- argv[i]);
- }
- else
- usage_error("Error: Option --codec requires argument.\n");
- }
- else if (!strcmp(argv[i], "-v"))
- verbose = 1;
- else if (!strcmp(argv[i], "-h"))
- if (i + 1 < argc)
- {
- h = atoi(argv[++i]);
- }
- else
- usage_error("Error: Option -h requires argument.\n");
- else if (!strcmp(argv[i], "-w"))
- if (i + 1 < argc)
- {
- w = atoi(argv[++i]);
- }
- else
- usage_error("Error: Option -w requires argument.\n");
- else if (!strcmp(argv[i], "--help"))
- usage_exit();
- else
- usage_error("Error: Unrecognized option %s\n\n", argv[i]);
- }
+int main(int argc, char **argv) {
+ vpx_codec_ctx_t decoder;
+ vpx_codec_iface_t *iface = ifaces[0].iface;
+ vpx_codec_iter_t iter;
+ vpx_codec_dec_cfg_t cfg;
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ unsigned int alloc_sz = 0;
+ unsigned int w = 352;
+ unsigned int h = 288;
+ int i;
- if (argc == 1)
- printf("Using built-in defaults. For options, rerun with --help\n\n");
+ exec_name = argv[0];
- /* XMA mode is not supported on all decoders! */
- if (!(vpx_codec_get_caps(iface) & VPX_CODEC_CAP_XMA))
- {
- printf("%s does not support XMA mode!\n", vpx_codec_iface_name(iface));
- return EXIT_FAILURE;
- }
+ for (i = 1; i < argc; i++) {
+ if (!strcmp(argv[i], "--codec")) {
+ if (i + 1 < argc) {
+ int j, k = -1;
- /* The codec knows how much memory to allocate based on the size of the
- * encoded frames. This data can be parsed from the bitstream with
- * vpx_codec_peek_stream_info() if a bitstream is available. Otherwise,
- * a fixed size can be used that will be the upper limit on the frame
- * size the decoder can decode.
- */
- cfg.w = w;
- cfg.h = h;
-
- /* Initialize the decoder in XMA mode. */
- if (vpx_codec_dec_init(&decoder, iface, &cfg, VPX_CODEC_USE_XMA))
- {
- printf("Failed to initialize decoder in XMA mode: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ i++;
+
+ for (j = 0; j < sizeof(ifaces) / sizeof(ifaces[0]); j++)
+ if (!strcmp(ifaces[j].name, argv[i]))
+ k = j;
- /* Iterate through the list of memory maps, allocating them with the
- * requested alignment.
- */
- iter = NULL;
-
- do
- {
- vpx_codec_mmap_t mmap;
- unsigned int align;
-
- res = vpx_codec_get_mem_map(&decoder, &mmap, &iter);
- align = mmap.align ? mmap.align - 1 : 0;
-
- if (!res)
- {
- if (verbose)
- printf("Allocating segment %u, size %lu, align %u %s\n",
- mmap.id, mmap.sz, mmap.align,
- mmap.flags & VPX_CODEC_MEM_ZERO ? "(ZEROED)" : "");
-
- if (mmap.flags & VPX_CODEC_MEM_ZERO)
- mmap.priv = calloc(1, mmap.sz + align);
- else
- mmap.priv = malloc(mmap.sz + align);
-
- mmap.base = (void *)((((uintptr_t)mmap.priv) + align) & ~(uintptr_t)align);
- mmap.dtor = my_mem_dtor;
- alloc_sz += mmap.sz + align;
-
- if (vpx_codec_set_mem_map(&decoder, &mmap, 1))
- {
- printf("Failed to set mmap: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
- }
- else if (res != VPX_CODEC_LIST_END)
- {
- printf("Failed to get mmap: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ if (k >= 0)
+ iface = ifaces[k].iface;
+ else
+ usage_error("Error: Unrecognized argument (%s) to --codec\n",
+ argv[i]);
+ } else
+ usage_error("Error: Option --codec requires argument.\n");
+ } else if (!strcmp(argv[i], "-v"))
+ verbose = 1;
+ else if (!strcmp(argv[i], "-h"))
+ if (i + 1 < argc) {
+ h = atoi(argv[++i]);
+ } else
+ usage_error("Error: Option -h requires argument.\n");
+ else if (!strcmp(argv[i], "-w"))
+ if (i + 1 < argc) {
+ w = atoi(argv[++i]);
+ } else
+ usage_error("Error: Option -w requires argument.\n");
+ else if (!strcmp(argv[i], "--help"))
+ usage_exit();
+ else
+ usage_error("Error: Unrecognized option %s\n\n", argv[i]);
+ }
+
+ if (argc == 1)
+ printf("Using built-in defaults. For options, rerun with --help\n\n");
+
+ /* XMA mode is not supported on all decoders! */
+ if (!(vpx_codec_get_caps(iface) & VPX_CODEC_CAP_XMA)) {
+ printf("%s does not support XMA mode!\n", vpx_codec_iface_name(iface));
+ return EXIT_FAILURE;
+ }
+
+ /* The codec knows how much memory to allocate based on the size of the
+ * encoded frames. This data can be parsed from the bitstream with
+ * vpx_codec_peek_stream_info() if a bitstream is available. Otherwise,
+ * a fixed size can be used that will be the upper limit on the frame
+ * size the decoder can decode.
+ */
+ cfg.w = w;
+ cfg.h = h;
+
+ /* Initialize the decoder in XMA mode. */
+ if (vpx_codec_dec_init(&decoder, iface, &cfg, VPX_CODEC_USE_XMA)) {
+ printf("Failed to initialize decoder in XMA mode: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ /* Iterate through the list of memory maps, allocating them with the
+ * requested alignment.
+ */
+ iter = NULL;
+
+ do {
+ vpx_codec_mmap_t mmap;
+ unsigned int align;
+
+ res = vpx_codec_get_mem_map(&decoder, &mmap, &iter);
+ align = mmap.align ? mmap.align - 1 : 0;
+
+ if (!res) {
+ if (verbose)
+ printf("Allocating segment %u, size %lu, align %u %s\n",
+ mmap.id, mmap.sz, mmap.align,
+ mmap.flags & VPX_CODEC_MEM_ZERO ? "(ZEROED)" : "");
+
+ if (mmap.flags & VPX_CODEC_MEM_ZERO)
+ mmap.priv = calloc(1, mmap.sz + align);
+ else
+ mmap.priv = malloc(mmap.sz + align);
+
+ mmap.base = (void *)((((uintptr_t)mmap.priv) + align) & ~(uintptr_t)align);
+ mmap.dtor = my_mem_dtor;
+ alloc_sz += mmap.sz + align;
+
+ if (vpx_codec_set_mem_map(&decoder, &mmap, 1)) {
+ printf("Failed to set mmap: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+ } else if (res != VPX_CODEC_LIST_END) {
+ printf("Failed to get mmap: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
}
- while (res != VPX_CODEC_LIST_END);
+ } while (res != VPX_CODEC_LIST_END);
- printf("%s\n %d bytes external memory required for %dx%d.\n",
- decoder.name, alloc_sz, cfg.w, cfg.h);
- vpx_codec_destroy(&decoder);
- return EXIT_SUCCESS;
+ printf("%s\n %d bytes external memory required for %dx%d.\n",
+ decoder.name, alloc_sz, cfg.w, cfg.h);
+ vpx_codec_destroy(&decoder);
+ return EXIT_SUCCESS;
}
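The allocation loop above over-allocates each segment by align bytes and rounds the base pointer up with (priv + align) & ~align, where align has already been reduced to mmap.align - 1. That idiom, isolated into a sketch (C; the helper name is illustrative, and the alignment must be a power of two):

    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch: allocate sz bytes aligned to 'alignment' (a power of two).
     * *raw receives the pointer that must later be passed to free().
     */
    static void *alloc_aligned(size_t sz, size_t alignment, void **raw) {
        uintptr_t mask = (uintptr_t)alignment - 1;

        *raw = malloc(sz + mask);  /* slack so we can round up */
        if (*raw == NULL)
            return NULL;

        return (void *)(((uintptr_t)*raw + mask) & ~mask);
    }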
diff --git a/libvpx/examples.mk b/libvpx/examples.mk
index 90913e6..c17fac9 100644
--- a/libvpx/examples.mk
+++ b/libvpx/examples.mk
@@ -8,6 +8,12 @@
## be found in the AUTHORS file in the root of the source tree.
##
+LIBYUV_SRCS += third_party/libyuv/include/libyuv/basic_types.h \
+ third_party/libyuv/include/libyuv/cpu_id.h \
+ third_party/libyuv/include/libyuv/scale.h \
+ third_party/libyuv/source/row.h \
+ third_party/libyuv/source/scale.c \
+ third_party/libyuv/source/cpu_id.c
# List of examples to build. UTILS are files that are taken from the source
# tree directly, and GEN_EXAMPLES are files that are created from the
@@ -25,6 +31,7 @@ vpxdec.SRCS += nestegg/halloc/src/hlist.h
vpxdec.SRCS += nestegg/halloc/src/macros.h
vpxdec.SRCS += nestegg/include/nestegg/nestegg.h
vpxdec.SRCS += nestegg/src/nestegg.c
+vpxdec.SRCS += $(LIBYUV_SRCS)
vpxdec.GUID = BA5FE66F-38DD-E034-F542-B1578C5FB950
vpxdec.DESCRIPTION = Full featured decoder
UTILS-$(CONFIG_ENCODERS) += vpxenc.c
@@ -36,11 +43,15 @@ vpxenc.SRCS += vpx_ports/vpx_timer.h
vpxenc.SRCS += libmkv/EbmlIDs.h
vpxenc.SRCS += libmkv/EbmlWriter.c
vpxenc.SRCS += libmkv/EbmlWriter.h
+vpxenc.SRCS += $(LIBYUV_SRCS)
vpxenc.GUID = 548DEC74-7A15-4B2B-AFC3-AA102E7C25C1
vpxenc.DESCRIPTION = Full featured encoder
-UTILS-$(CONFIG_ENCODERS) += vp8_scalable_patterns.c
+UTILS-$(CONFIG_VP8_ENCODER) += vp8_scalable_patterns.c
vp8_scalable_patterns.GUID = 0D6A210B-F482-4D6F-8570-4A9C01ACC88C
vp8_scalable_patterns.DESCRIPTION = Temporal Scalability Encoder
+UTILS-$(CONFIG_VP8_ENCODER) += vp9_spatial_scalable_encoder.c
+vp8_scalable_patterns.GUID = 4A38598D-627D-4505-9C7B-D4020C84100D
+vp8_scalable_patterns.DESCRIPTION = Spatial Scalable Encoder
# Clean up old ivfenc, ivfdec binaries.
ifeq ($(CONFIG_MSVS),yes)
@@ -56,37 +67,37 @@ endif
#example_xma.GUID = A955FC4A-73F1-44F7-135E-30D84D32F022
#example_xma.DESCRIPTION = External Memory Allocation mode usage
-GEN_EXAMPLES-$(CONFIG_DECODERS) += simple_decoder.c
+GEN_EXAMPLES-$(CONFIG_VP8_DECODER) += simple_decoder.c
simple_decoder.GUID = D3BBF1E9-2427-450D-BBFF-B2843C1D44CC
simple_decoder.DESCRIPTION = Simplified decoder loop
-GEN_EXAMPLES-$(CONFIG_DECODERS) += postproc.c
+GEN_EXAMPLES-$(CONFIG_VP8_DECODER) += postproc.c
postproc.GUID = 65E33355-F35E-4088-884D-3FD4905881D7
postproc.DESCRIPTION = Decoder postprocessor control
-GEN_EXAMPLES-$(CONFIG_DECODERS) += decode_to_md5.c
+GEN_EXAMPLES-$(CONFIG_VP8_DECODER) += decode_to_md5.c
decode_to_md5.SRCS += md5_utils.h md5_utils.c
decode_to_md5.GUID = 59120B9B-2735-4BFE-B022-146CA340FE42
decode_to_md5.DESCRIPTION = Frame by frame MD5 checksum
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += simple_encoder.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += simple_encoder.c
simple_encoder.GUID = 4607D299-8A71-4D2C-9B1D-071899B6FBFD
simple_encoder.DESCRIPTION = Simplified encoder loop
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += twopass_encoder.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += twopass_encoder.c
twopass_encoder.GUID = 73494FA6-4AF9-4763-8FBB-265C92402FD8
twopass_encoder.DESCRIPTION = Two-pass encoder loop
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += force_keyframe.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += force_keyframe.c
force_keyframe.GUID = 3C67CADF-029F-4C86-81F5-D6D4F51177F0
force_keyframe.DESCRIPTION = Force generation of keyframes
ifeq ($(CONFIG_DECODERS),yes)
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += decode_with_drops.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += decode_with_drops.c
endif
decode_with_drops.GUID = CE5C53C4-8DDA-438A-86ED-0DDD3CDB8D26
decode_with_drops.DESCRIPTION = Drops frames while decoding
-ifeq ($(CONFIG_DECODERS),yes)
+ifeq ($(CONFIG_VP8_DECODER),yes)
GEN_EXAMPLES-$(CONFIG_ERROR_CONCEALMENT) += decode_with_partial_drops.c
endif
decode_with_partial_drops.GUID = 61C2D026-5754-46AC-916F-1343ECC5537E
decode_with_partial_drops.DESCRIPTION = Drops parts of frames while decoding
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += error_resilient.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += error_resilient.c
error_resilient.GUID = DF5837B9-4145-4F92-A031-44E4F832E00C
error_resilient.DESCRIPTION = Error Resiliency Feature
@@ -99,13 +110,7 @@ vp8cx_set_ref.DESCRIPTION = VP8 set encoder reference frame
# C file is provided, not generated automatically.
UTILS-$(CONFIG_MULTI_RES_ENCODING) += vp8_multi_resolution_encoder.c
-vp8_multi_resolution_encoder.SRCS \
- += third_party/libyuv/include/libyuv/basic_types.h \
- third_party/libyuv/include/libyuv/cpu_id.h \
- third_party/libyuv/include/libyuv/scale.h \
- third_party/libyuv/source/row.h \
- third_party/libyuv/source/scale.c \
- third_party/libyuv/source/cpu_id.c
+vp8_multi_resolution_encoder.SRCS += $(LIBYUV_SRCS)
vp8_multi_resolution_encoder.GUID = 04f8738e-63c8-423b-90fa-7c2703a374de
vp8_multi_resolution_encoder.DESCRIPTION = VP8 Multiple-resolution Encoding
@@ -115,9 +120,11 @@ vp8_multi_resolution_encoder.DESCRIPTION = VP8 Multiple-resolution Encoding
# when building for bare-metal targets
ifeq ($(CONFIG_OS_SUPPORT), yes)
CODEC_EXTRA_LIBS-$(CONFIG_VP8) += m
+CODEC_EXTRA_LIBS-$(CONFIG_VP9) += m
else
ifeq ($(CONFIG_GCC), yes)
CODEC_EXTRA_LIBS-$(CONFIG_VP8) += m
+ CODEC_EXTRA_LIBS-$(CONFIG_VP9) += m
endif
endif
#
@@ -136,6 +143,8 @@ else
LIB_PATH-yes += $(if $(BUILD_PFX),$(BUILD_PFX),.)
INC_PATH-$(CONFIG_VP8_DECODER) += $(SRC_PATH_BARE)/vp8
INC_PATH-$(CONFIG_VP8_ENCODER) += $(SRC_PATH_BARE)/vp8
+ INC_PATH-$(CONFIG_VP9_DECODER) += $(SRC_PATH_BARE)/vp9
+ INC_PATH-$(CONFIG_VP9_ENCODER) += $(SRC_PATH_BARE)/vp9
LIB_PATH := $(call enabled,LIB_PATH)
INC_PATH := $(call enabled,INC_PATH)
endif
@@ -179,7 +188,8 @@ BINS-$(NOT_MSVS) += $(addprefix $(BUILD_PFX),$(ALL_EXAMPLES:.c=$(EXE_S
# Instantiate linker template for all examples.
CODEC_LIB=$(if $(CONFIG_DEBUG_LIBS),vpx_g,vpx)
-CODEC_LIB_SUF=$(if $(CONFIG_SHARED),.so,.a)
+SHARED_LIB_SUF=$(if $(filter darwin%,$(TGT_OS)),.dylib,.so)
+CODEC_LIB_SUF=$(if $(CONFIG_SHARED),$(SHARED_LIB_SUF),.a)
$(foreach bin,$(BINS-yes),\
$(if $(BUILD_OBJS),$(eval $(bin):\
$(LIB_PATH)/lib$(CODEC_LIB)$(CODEC_LIB_SUF)))\
@@ -209,7 +219,7 @@ INSTALL_MAPS += % %
# Set up additional MSVS environment
ifeq ($(CONFIG_MSVS),yes)
-CODEC_LIB=$(if $(CONFIG_STATIC_MSVCRT),vpxmt,vpxmd)
+CODEC_LIB=$(if $(CONFIG_SHARED),vpx,$(if $(CONFIG_STATIC_MSVCRT),vpxmt,vpxmd))
# This variable uses deferred expansion intentionally, since the results of
# $(wildcard) may change during the course of the Make.
VS_PLATFORMS = $(foreach d,$(wildcard */Release/$(CODEC_LIB).lib),$(word 1,$(subst /, ,$(d))))
@@ -224,19 +234,19 @@ endif
# even though there is no real dependency there (the dependency is on
# the makefiles). We may want to revisit this.
define vcproj_template
-$(1): $($(1:.vcproj=).SRCS)
+$(1): $($(1:.$(VCPROJ_SFX)=).SRCS) vpx.$(VCPROJ_SFX)
@echo " [vcproj] $$@"
- $$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh\
+ $$(GEN_VCPROJ)\
--exe\
--target=$$(TOOLCHAIN)\
- --name=$$(@:.vcproj=)\
+ --name=$$(@:.$(VCPROJ_SFX)=)\
--ver=$$(CONFIG_VS_VERSION)\
- --proj-guid=$$($$(@:.vcproj=).GUID)\
+ --proj-guid=$$($$(@:.$(VCPROJ_SFX)=).GUID)\
$$(if $$(CONFIG_STATIC_MSVCRT),--static-crt) \
--out=$$@ $$(INTERNAL_CFLAGS) $$(CFLAGS) \
- $$(INTERNAL_LDFLAGS) $$(LDFLAGS) -l$$(CODEC_LIB) -lwinmm $$^
+ $$(INTERNAL_LDFLAGS) $$(LDFLAGS) -l$$(CODEC_LIB) $$^
endef
-PROJECTS-$(CONFIG_MSVS) += $(ALL_EXAMPLES:.c=.vcproj)
+PROJECTS-$(CONFIG_MSVS) += $(ALL_EXAMPLES:.c=.$(VCPROJ_SFX))
INSTALL-BINS-$(CONFIG_MSVS) += $(foreach p,$(VS_PLATFORMS),\
$(addprefix bin/$(p)/,$(ALL_EXAMPLES:.c=.exe)))
$(foreach proj,$(call enabled,PROJECTS),\
diff --git a/libvpx/examples/decoder_tmpl.c b/libvpx/examples/decoder_tmpl.c
index 8194f0a..597fea2 100644
--- a/libvpx/examples/decoder_tmpl.c
+++ b/libvpx/examples/decoder_tmpl.c
@@ -12,6 +12,7 @@
/*
@*INTRODUCTION
*/
+#include "vpx_config.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
diff --git a/libvpx/examples/decoder_tmpl.txt b/libvpx/examples/decoder_tmpl.txt
index e652a63..3d230a5 100644
--- a/libvpx/examples/decoder_tmpl.txt
+++ b/libvpx/examples/decoder_tmpl.txt
@@ -1,7 +1,7 @@
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEC_INCLUDES
#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_decoder.h"
-#include "vpx/vp8dx.h"
+#include "vpx/vp9dx.h"
#define interface (vpx_codec_vp8_dx())
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEC_INCLUDES
diff --git a/libvpx/examples/encoder_tmpl.txt b/libvpx/examples/encoder_tmpl.txt
index 1afbd8b..9f8f4af 100644
--- a/libvpx/examples/encoder_tmpl.txt
+++ b/libvpx/examples/encoder_tmpl.txt
@@ -1,7 +1,7 @@
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ENC_INCLUDES
#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_encoder.h"
-#include "vpx/vp8cx.h"
+#include "vpx/vp9cx.h"
#define interface (vpx_codec_vp8_cx())
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ENC_INCLUDES
diff --git a/libvpx/examples/postproc.txt b/libvpx/examples/postproc.txt
index 51b251a..e00bf59 100644
--- a/libvpx/examples/postproc.txt
+++ b/libvpx/examples/postproc.txt
@@ -51,7 +51,7 @@ Some codecs provide fine grained controls over their built-in
postprocessors. VP8 is one example. The following sample code toggles
postprocessing on and off every 15 frames.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PRE_DECODE
-#if CONFIG_VP8_DECODER
+#if CONFIG_VP9_DECODER
if(frame_cnt%30 == 1) {
vp8_postproc_cfg_t pp = {0, 0, 0};
diff --git a/libvpx/libmkv/EbmlBufferWriter.c b/libvpx/libmkv/EbmlBufferWriter.c
index d9b04a8..574e478 100644
--- a/libvpx/libmkv/EbmlBufferWriter.c
+++ b/libvpx/libmkv/EbmlBufferWriter.c
@@ -1,60 +1,54 @@
-//#include <strmif.h>
+// #include <strmif.h>
#include "EbmlBufferWriter.h"
#include "EbmlWriter.h"
-//#include <cassert>
-//#include <limits>
-//#include <malloc.h> //_alloca
+// #include <cassert>
+// #include <limits>
+// #include <malloc.h> //_alloca
#include <stdlib.h>
#include <wchar.h>
#include <string.h>
-void Ebml_Write(EbmlGlobal *glob, const void *buffer_in, unsigned long len)
-{
- unsigned char *src = glob->buf;
- src += glob->offset;
- memcpy(src, buffer_in, len);
- glob->offset += len;
+void Ebml_Write(EbmlGlobal *glob, const void *buffer_in, unsigned long len) {
+ unsigned char *src = glob->buf;
+ src += glob->offset;
+ memcpy(src, buffer_in, len);
+ glob->offset += len;
}
-static void _Serialize(EbmlGlobal *glob, const unsigned char *p, const unsigned char *q)
-{
- while (q != p)
- {
- --q;
+static void _Serialize(EbmlGlobal *glob, const unsigned char *p, const unsigned char *q) {
+ while (q != p) {
+ --q;
- unsigned long cbWritten;
- memcpy(&(glob->buf[glob->offset]), q, 1);
- glob->offset ++;
- }
+ unsigned long cbWritten;
+ memcpy(&(glob->buf[glob->offset]), q, 1);
+ glob->offset++;
+ }
}
-void Ebml_Serialize(EbmlGlobal *glob, const void *buffer_in, unsigned long len)
-{
- //assert(buf);
+void Ebml_Serialize(EbmlGlobal *glob, const void *buffer_in, unsigned long len) {
+ // assert(buf);
- const unsigned char *const p = (const unsigned char *)(buffer_in);
- const unsigned char *const q = p + len;
+ const unsigned char *const p = (const unsigned char *)(buffer_in);
+ const unsigned char *const q = p + len;
- _Serialize(glob, p, q);
+ _Serialize(glob, p, q);
}
-void Ebml_StartSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc, unsigned long class_id)
-{
- Ebml_WriteID(glob, class_id);
- ebmlLoc->offset = glob->offset;
- //todo this is always taking 8 bytes, this may need later optimization
- unsigned long long unknownLen = 0x01FFFFFFFFFFFFFFLLU;
- Ebml_Serialize(glob, (void *)&unknownLen, 8); //this is a key that says lenght unknown
+void Ebml_StartSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc, unsigned long class_id) {
+ Ebml_WriteID(glob, class_id);
+ ebmlLoc->offset = glob->offset;
+  // TODO: this always takes 8 bytes; it may need later optimization
+ unsigned long long unknownLen = 0x01FFFFFFFFFFFFFFLLU;
+ Ebml_Serialize(glob, (void *)&unknownLen, 8); // this is a key that says lenght unknown
}
-void Ebml_EndSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc)
-{
- unsigned long long size = glob->offset - ebmlLoc->offset - 8;
- unsigned long long curOffset = glob->offset;
- glob->offset = ebmlLoc->offset;
- size |= 0x0100000000000000LLU;
- Ebml_Serialize(glob, &size, 8);
- glob->offset = curOffset;
+void Ebml_EndSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc) {
+ unsigned long long size = glob->offset - ebmlLoc->offset - 8;
+ unsigned long long curOffset = glob->offset;
+ glob->offset = ebmlLoc->offset;
+ size |= 0x0100000000000000LLU;
+ Ebml_Serialize(glob, &size, 8);
+ glob->offset = curOffset;
}
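The Start/End pair above is the usual EBML streaming trick: Ebml_StartSubElement emits the element id followed by an 8-byte "size unknown" placeholder (0x01FF...FF), and Ebml_EndSubElement later rewinds and overwrites that placeholder with the real payload size, keeping the 0x01 top byte so the field still parses as an 8-byte length. A minimal usage sketch against the functions in this file (buffer size and element id are illustrative):

  #include "EbmlBufferWriter.h"
  #include "EbmlIDs.h"

  /* Sketch: write one (empty) master element into an in-memory buffer. */
  static void write_empty_segment(void) {
    unsigned char buf[1024];
    EbmlGlobal glob = {buf, sizeof(buf), 0};     /* buf, length, offset */
    EbmlLoc loc;
    Ebml_StartSubElement(&glob, &loc, Segment);  /* id + unknown-size marker */
    /* ... serialize child elements here ... */
    Ebml_EndSubElement(&glob, &loc);             /* patch in the real size */
  }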
diff --git a/libvpx/libmkv/EbmlBufferWriter.h b/libvpx/libmkv/EbmlBufferWriter.h
index ba0a9b3..acd5c2a 100644
--- a/libvpx/libmkv/EbmlBufferWriter.h
+++ b/libvpx/libmkv/EbmlBufferWriter.h
@@ -1,16 +1,14 @@
#ifndef EBMLBUFFERWRITER_HPP
#define EBMLBUFFERWRITER_HPP
-typedef struct
-{
- unsigned long long offset;
+typedef struct {
+ unsigned long long offset;
} EbmlLoc;
-typedef struct
-{
- unsigned char *buf;
- unsigned int length;
- unsigned int offset;
+typedef struct {
+ unsigned char *buf;
+ unsigned int length;
+ unsigned int offset;
} EbmlGlobal;
diff --git a/libvpx/libmkv/EbmlIDs.h b/libvpx/libmkv/EbmlIDs.h
index e3ce585..44d4385 100644
--- a/libvpx/libmkv/EbmlIDs.h
+++ b/libvpx/libmkv/EbmlIDs.h
@@ -12,35 +12,34 @@
/* Commenting out values not available in webm, but available in matroska */
-enum mkv
-{
- EBML = 0x1A45DFA3,
- EBMLVersion = 0x4286,
- EBMLReadVersion = 0x42F7,
- EBMLMaxIDLength = 0x42F2,
- EBMLMaxSizeLength = 0x42F3,
- DocType = 0x4282,
- DocTypeVersion = 0x4287,
- DocTypeReadVersion = 0x4285,
+enum mkv {
+ EBML = 0x1A45DFA3,
+ EBMLVersion = 0x4286,
+ EBMLReadVersion = 0x42F7,
+ EBMLMaxIDLength = 0x42F2,
+ EBMLMaxSizeLength = 0x42F3,
+ DocType = 0x4282,
+ DocTypeVersion = 0x4287,
+ DocTypeReadVersion = 0x4285,
/* CRC_32 = 0xBF, */
- Void = 0xEC,
- SignatureSlot = 0x1B538667,
- SignatureAlgo = 0x7E8A,
- SignatureHash = 0x7E9A,
- SignaturePublicKey = 0x7EA5,
- Signature = 0x7EB5,
- SignatureElements = 0x7E5B,
- SignatureElementList = 0x7E7B,
- SignedElement = 0x6532,
- /* segment */
- Segment = 0x18538067,
- /* Meta Seek Information */
- SeekHead = 0x114D9B74,
- Seek = 0x4DBB,
- SeekID = 0x53AB,
- SeekPosition = 0x53AC,
- /* Segment Information */
- Info = 0x1549A966,
+ Void = 0xEC,
+ SignatureSlot = 0x1B538667,
+ SignatureAlgo = 0x7E8A,
+ SignatureHash = 0x7E9A,
+ SignaturePublicKey = 0x7EA5,
+ Signature = 0x7EB5,
+ SignatureElements = 0x7E5B,
+ SignatureElementList = 0x7E7B,
+ SignedElement = 0x6532,
+ /* segment */
+ Segment = 0x18538067,
+ /* Meta Seek Information */
+ SeekHead = 0x114D9B74,
+ Seek = 0x4DBB,
+ SeekID = 0x53AB,
+ SeekPosition = 0x53AC,
+ /* Segment Information */
+ Info = 0x1549A966,
/* SegmentUID = 0x73A4, */
/* SegmentFilename = 0x7384, */
/* PrevUID = 0x3CB923, */
@@ -52,61 +51,61 @@ enum mkv
/* ChapterTranslateEditionUID = 0x69FC, */
/* ChapterTranslateCodec = 0x69BF, */
/* ChapterTranslateID = 0x69A5, */
- TimecodeScale = 0x2AD7B1,
- Segment_Duration = 0x4489,
- DateUTC = 0x4461,
+ TimecodeScale = 0x2AD7B1,
+ Segment_Duration = 0x4489,
+ DateUTC = 0x4461,
/* Title = 0x7BA9, */
- MuxingApp = 0x4D80,
- WritingApp = 0x5741,
- /* Cluster */
- Cluster = 0x1F43B675,
- Timecode = 0xE7,
+ MuxingApp = 0x4D80,
+ WritingApp = 0x5741,
+ /* Cluster */
+ Cluster = 0x1F43B675,
+ Timecode = 0xE7,
/* SilentTracks = 0x5854, */
/* SilentTrackNumber = 0x58D7, */
/* Position = 0xA7, */
- PrevSize = 0xAB,
- BlockGroup = 0xA0,
- Block = 0xA1,
+ PrevSize = 0xAB,
+ BlockGroup = 0xA0,
+ Block = 0xA1,
/* BlockVirtual = 0xA2, */
-/* BlockAdditions = 0x75A1, */
-/* BlockMore = 0xA6, */
-/* BlockAddID = 0xEE, */
-/* BlockAdditional = 0xA5, */
- BlockDuration = 0x9B,
+ BlockAdditions = 0x75A1,
+ BlockMore = 0xA6,
+ BlockAddID = 0xEE,
+ BlockAdditional = 0xA5,
+ BlockDuration = 0x9B,
/* ReferencePriority = 0xFA, */
- ReferenceBlock = 0xFB,
+ ReferenceBlock = 0xFB,
/* ReferenceVirtual = 0xFD, */
/* CodecState = 0xA4, */
/* Slices = 0x8E, */
/* TimeSlice = 0xE8, */
- LaceNumber = 0xCC,
+ LaceNumber = 0xCC,
/* FrameNumber = 0xCD, */
/* BlockAdditionID = 0xCB, */
/* MkvDelay = 0xCE, */
/* Cluster_Duration = 0xCF, */
- SimpleBlock = 0xA3,
+ SimpleBlock = 0xA3,
/* EncryptedBlock = 0xAF, */
- /* Track */
- Tracks = 0x1654AE6B,
- TrackEntry = 0xAE,
- TrackNumber = 0xD7,
- TrackUID = 0x73C5,
- TrackType = 0x83,
- FlagEnabled = 0xB9,
- FlagDefault = 0x88,
- FlagForced = 0x55AA,
- FlagLacing = 0x9C,
+ /* Track */
+ Tracks = 0x1654AE6B,
+ TrackEntry = 0xAE,
+ TrackNumber = 0xD7,
+ TrackUID = 0x73C5,
+ TrackType = 0x83,
+ FlagEnabled = 0xB9,
+ FlagDefault = 0x88,
+ FlagForced = 0x55AA,
+ FlagLacing = 0x9C,
/* MinCache = 0x6DE7, */
/* MaxCache = 0x6DF8, */
- DefaultDuration = 0x23E383,
+ DefaultDuration = 0x23E383,
/* TrackTimecodeScale = 0x23314F, */
/* TrackOffset = 0x537F, */
-/* MaxBlockAdditionID = 0x55EE, */
- Name = 0x536E,
- Language = 0x22B59C,
- CodecID = 0x86,
- CodecPrivate = 0x63A2,
- CodecName = 0x258688,
+ MaxBlockAdditionID = 0x55EE,
+ Name = 0x536E,
+ Language = 0x22B59C,
+ CodecID = 0x86,
+ CodecPrivate = 0x63A2,
+ CodecName = 0x258688,
/* AttachmentLink = 0x7446, */
/* CodecSettings = 0x3A9697, */
/* CodecInfoURL = 0x3B4040, */
@@ -117,33 +116,34 @@ enum mkv
/* TrackTranslateEditionUID = 0x66FC, */
/* TrackTranslateCodec = 0x66BF, */
/* TrackTranslateTrackID = 0x66A5, */
- /* video */
- Video = 0xE0,
- FlagInterlaced = 0x9A,
- StereoMode = 0x53B8,
- PixelWidth = 0xB0,
- PixelHeight = 0xBA,
- PixelCropBottom = 0x54AA,
- PixelCropTop = 0x54BB,
- PixelCropLeft = 0x54CC,
- PixelCropRight = 0x54DD,
- DisplayWidth = 0x54B0,
- DisplayHeight = 0x54BA,
- DisplayUnit = 0x54B2,
- AspectRatioType = 0x54B3,
+ /* video */
+ Video = 0xE0,
+ FlagInterlaced = 0x9A,
+ StereoMode = 0x53B8,
+ AlphaMode = 0x53C0,
+ PixelWidth = 0xB0,
+ PixelHeight = 0xBA,
+ PixelCropBottom = 0x54AA,
+ PixelCropTop = 0x54BB,
+ PixelCropLeft = 0x54CC,
+ PixelCropRight = 0x54DD,
+ DisplayWidth = 0x54B0,
+ DisplayHeight = 0x54BA,
+ DisplayUnit = 0x54B2,
+ AspectRatioType = 0x54B3,
/* ColourSpace = 0x2EB524, */
/* GammaValue = 0x2FB523, */
- FrameRate = 0x2383E3,
- /* end video */
- /* audio */
- Audio = 0xE1,
- SamplingFrequency = 0xB5,
- OutputSamplingFrequency = 0x78B5,
- Channels = 0x9F,
+ FrameRate = 0x2383E3,
+ /* end video */
+ /* audio */
+ Audio = 0xE1,
+ SamplingFrequency = 0xB5,
+ OutputSamplingFrequency = 0x78B5,
+ Channels = 0x9F,
/* ChannelPositions = 0x7D7B, */
- BitDepth = 0x6264,
- /* end audio */
- /* content encoding */
+ BitDepth = 0x6264,
+ /* end audio */
+ /* content encoding */
/* ContentEncodings = 0x6d80, */
/* ContentEncoding = 0x6240, */
/* ContentEncodingOrder = 0x5031, */
@@ -159,22 +159,22 @@ enum mkv
/* ContentSigKeyID = 0x47e4, */
/* ContentSigAlgo = 0x47e5, */
/* ContentSigHashAlgo = 0x47e6, */
- /* end content encoding */
- /* Cueing Data */
- Cues = 0x1C53BB6B,
- CuePoint = 0xBB,
- CueTime = 0xB3,
- CueTrackPositions = 0xB7,
- CueTrack = 0xF7,
- CueClusterPosition = 0xF1,
- CueBlockNumber = 0x5378
+ /* end content encoding */
+ /* Cueing Data */
+ Cues = 0x1C53BB6B,
+ CuePoint = 0xBB,
+ CueTime = 0xB3,
+ CueTrackPositions = 0xB7,
+ CueTrack = 0xF7,
+ CueClusterPosition = 0xF1,
+ CueBlockNumber = 0x5378
/* CueCodecState = 0xEA, */
/* CueReference = 0xDB, */
/* CueRefTime = 0x96, */
/* CueRefCluster = 0x97, */
/* CueRefNumber = 0x535F, */
/* CueRefCodecState = 0xEB, */
- /* Attachment */
+ /* Attachment */
/* Attachments = 0x1941A469, */
/* AttachedFile = 0x61A7, */
/* FileDescription = 0x467E, */
@@ -183,7 +183,7 @@ enum mkv
/* FileData = 0x465C, */
/* FileUID = 0x46AE, */
/* FileReferral = 0x4675, */
- /* Chapters */
+ /* Chapters */
/* Chapters = 0x1043A770, */
/* EditionEntry = 0x45B9, */
/* EditionUID = 0x45BC, */
@@ -211,7 +211,7 @@ enum mkv
/* ChapProcessCommand = 0x6911, */
/* ChapProcessTime = 0x6922, */
/* ChapProcessData = 0x6933, */
- /* Tagging */
+ /* Tagging */
/* Tags = 0x1254C367, */
/* Tag = 0x7373, */
/* Targets = 0x63C0, */
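One detail implicit in this table: an EBML id encodes its own byte length in its leading bits, which is why Ebml_WriteID in EbmlWriter.c below can recover the length from the id's magnitude alone. A sketch of that mapping (an illustrative helper, not part of libmkv):

  /* Encoded byte length of an EBML class id, mirroring the magnitude
     thresholds used by Ebml_WriteID. */
  static int ebml_id_length(unsigned long class_id) {
    if (class_id >= 0x01000000) return 4;  /* e.g. Segment = 0x18538067 */
    if (class_id >= 0x00010000) return 3;  /* e.g. TimecodeScale = 0x2AD7B1 */
    if (class_id >= 0x00000100) return 2;  /* e.g. TrackUID = 0x73C5 */
    return 1;                              /* e.g. SimpleBlock = 0xA3 */
  }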
diff --git a/libvpx/libmkv/EbmlWriter.c b/libvpx/libmkv/EbmlWriter.c
index d70f06e..5fc5ed2 100644
--- a/libvpx/libmkv/EbmlWriter.c
+++ b/libvpx/libmkv/EbmlWriter.c
@@ -18,158 +18,140 @@
#define LITERALU64(n) n##LLU
#endif
-void Ebml_WriteLen(EbmlGlobal *glob, int64_t val)
-{
- /* TODO check and make sure we are not > than 0x0100000000000000LLU */
- unsigned char size = 8; /* size in bytes to output */
+void Ebml_WriteLen(EbmlGlobal *glob, int64_t val) {
+ /* TODO check and make sure we are not > than 0x0100000000000000LLU */
+ unsigned char size = 8; /* size in bytes to output */
- /* mask to compare for byte size */
- int64_t minVal = 0xff;
+ /* mask to compare for byte size */
+ int64_t minVal = 0xff;
- for (size = 1; size < 8; size ++)
- {
- if (val < minVal)
- break;
+ for (size = 1; size < 8; size ++) {
+ if (val < minVal)
+ break;
- minVal = (minVal << 7);
- }
+ minVal = (minVal << 7);
+ }
- val |= (((uint64_t)0x80) << ((size - 1) * 7));
+ val |= (((uint64_t)0x80) << ((size - 1) * 7));
- Ebml_Serialize(glob, (void *) &val, sizeof(val), size);
+ Ebml_Serialize(glob, (void *) &val, sizeof(val), size);
}
-void Ebml_WriteString(EbmlGlobal *glob, const char *str)
-{
- const size_t size_ = strlen(str);
- const uint64_t size = size_;
- Ebml_WriteLen(glob, size);
- /* TODO: it's not clear from the spec whether the nul terminator
- * should be serialized too. For now we omit the null terminator.
- */
- Ebml_Write(glob, str, (unsigned long)size);
+void Ebml_WriteString(EbmlGlobal *glob, const char *str) {
+ const size_t size_ = strlen(str);
+ const uint64_t size = size_;
+ Ebml_WriteLen(glob, size);
+ /* TODO: it's not clear from the spec whether the nul terminator
+ * should be serialized too. For now we omit the null terminator.
+ */
+ Ebml_Write(glob, str, (unsigned long)size);
}
-void Ebml_WriteUTF8(EbmlGlobal *glob, const wchar_t *wstr)
-{
- const size_t strlen = wcslen(wstr);
+void Ebml_WriteUTF8(EbmlGlobal *glob, const wchar_t *wstr) {
+ const size_t strlen = wcslen(wstr);
- /* TODO: it's not clear from the spec whether the nul terminator
- * should be serialized too. For now we include it.
- */
- const uint64_t size = strlen;
+ /* TODO: it's not clear from the spec whether the nul terminator
+ * should be serialized too. For now we include it.
+ */
+ const uint64_t size = strlen;
- Ebml_WriteLen(glob, size);
- Ebml_Write(glob, wstr, (unsigned long)size);
+ Ebml_WriteLen(glob, size);
+ Ebml_Write(glob, wstr, (unsigned long)size);
}
-void Ebml_WriteID(EbmlGlobal *glob, unsigned long class_id)
-{
- int len;
+void Ebml_WriteID(EbmlGlobal *glob, unsigned long class_id) {
+ int len;
- if (class_id >= 0x01000000)
- len = 4;
- else if (class_id >= 0x00010000)
- len = 3;
- else if (class_id >= 0x00000100)
- len = 2;
- else
- len = 1;
+ if (class_id >= 0x01000000)
+ len = 4;
+ else if (class_id >= 0x00010000)
+ len = 3;
+ else if (class_id >= 0x00000100)
+ len = 2;
+ else
+ len = 1;
- Ebml_Serialize(glob, (void *)&class_id, sizeof(class_id), len);
+ Ebml_Serialize(glob, (void *)&class_id, sizeof(class_id), len);
}
-void Ebml_SerializeUnsigned64(EbmlGlobal *glob, unsigned long class_id, uint64_t ui)
-{
- unsigned char sizeSerialized = 8 | 0x80;
- Ebml_WriteID(glob, class_id);
- Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
- Ebml_Serialize(glob, &ui, sizeof(ui), 8);
+void Ebml_SerializeUnsigned64(EbmlGlobal *glob, unsigned long class_id, uint64_t ui) {
+ unsigned char sizeSerialized = 8 | 0x80;
+ Ebml_WriteID(glob, class_id);
+ Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
+ Ebml_Serialize(glob, &ui, sizeof(ui), 8);
}
-void Ebml_SerializeUnsigned(EbmlGlobal *glob, unsigned long class_id, unsigned long ui)
-{
- unsigned char size = 8; /* size in bytes to output */
- unsigned char sizeSerialized = 0;
- unsigned long minVal;
-
- Ebml_WriteID(glob, class_id);
- minVal = 0x7fLU; /* mask to compare for byte size */
+void Ebml_SerializeUnsigned(EbmlGlobal *glob, unsigned long class_id, unsigned long ui) {
+ unsigned char size = 8; /* size in bytes to output */
+ unsigned char sizeSerialized = 0;
+ unsigned long minVal;
- for (size = 1; size < 4; size ++)
- {
- if (ui < minVal)
- {
- break;
- }
+ Ebml_WriteID(glob, class_id);
+ minVal = 0x7fLU; /* mask to compare for byte size */
- minVal <<= 7;
+ for (size = 1; size < 4; size ++) {
+ if (ui < minVal) {
+ break;
}
- sizeSerialized = 0x80 | size;
- Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
- Ebml_Serialize(glob, &ui, sizeof(ui), size);
+ minVal <<= 7;
+ }
+
+ sizeSerialized = 0x80 | size;
+ Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
+ Ebml_Serialize(glob, &ui, sizeof(ui), size);
}
/* TODO: perhaps this is a poor name for this id serializer helper function */
-void Ebml_SerializeBinary(EbmlGlobal *glob, unsigned long class_id, unsigned long bin)
-{
- int size;
- for (size=4; size > 1; size--)
- {
- if (bin & 0x000000ff << ((size-1) * 8))
- break;
- }
- Ebml_WriteID(glob, class_id);
- Ebml_WriteLen(glob, size);
- Ebml_WriteID(glob, bin);
+void Ebml_SerializeBinary(EbmlGlobal *glob, unsigned long class_id, unsigned long bin) {
+ int size;
+ for (size = 4; size > 1; size--) {
+ if (bin & 0x000000ff << ((size - 1) * 8))
+ break;
+ }
+ Ebml_WriteID(glob, class_id);
+ Ebml_WriteLen(glob, size);
+ Ebml_WriteID(glob, bin);
}
-void Ebml_SerializeFloat(EbmlGlobal *glob, unsigned long class_id, double d)
-{
- unsigned char len = 0x88;
+void Ebml_SerializeFloat(EbmlGlobal *glob, unsigned long class_id, double d) {
+ unsigned char len = 0x88;
- Ebml_WriteID(glob, class_id);
- Ebml_Serialize(glob, &len, sizeof(len), 1);
- Ebml_Serialize(glob, &d, sizeof(d), 8);
+ Ebml_WriteID(glob, class_id);
+ Ebml_Serialize(glob, &len, sizeof(len), 1);
+ Ebml_Serialize(glob, &d, sizeof(d), 8);
}
-void Ebml_WriteSigned16(EbmlGlobal *glob, short val)
-{
- signed long out = ((val & 0x003FFFFF) | 0x00200000) << 8;
- Ebml_Serialize(glob, &out, sizeof(out), 3);
+void Ebml_WriteSigned16(EbmlGlobal *glob, short val) {
+ signed long out = ((val & 0x003FFFFF) | 0x00200000) << 8;
+ Ebml_Serialize(glob, &out, sizeof(out), 3);
}
-void Ebml_SerializeString(EbmlGlobal *glob, unsigned long class_id, const char *s)
-{
- Ebml_WriteID(glob, class_id);
- Ebml_WriteString(glob, s);
+void Ebml_SerializeString(EbmlGlobal *glob, unsigned long class_id, const char *s) {
+ Ebml_WriteID(glob, class_id);
+ Ebml_WriteString(glob, s);
}
-void Ebml_SerializeUTF8(EbmlGlobal *glob, unsigned long class_id, wchar_t *s)
-{
- Ebml_WriteID(glob, class_id);
- Ebml_WriteUTF8(glob, s);
+void Ebml_SerializeUTF8(EbmlGlobal *glob, unsigned long class_id, wchar_t *s) {
+ Ebml_WriteID(glob, class_id);
+ Ebml_WriteUTF8(glob, s);
}
-void Ebml_SerializeData(EbmlGlobal *glob, unsigned long class_id, unsigned char *data, unsigned long data_length)
-{
- Ebml_WriteID(glob, class_id);
- Ebml_WriteLen(glob, data_length);
- Ebml_Write(glob, data, data_length);
+void Ebml_SerializeData(EbmlGlobal *glob, unsigned long class_id, unsigned char *data, unsigned long data_length) {
+ Ebml_WriteID(glob, class_id);
+ Ebml_WriteLen(glob, data_length);
+ Ebml_Write(glob, data, data_length);
}
-void Ebml_WriteVoid(EbmlGlobal *glob, unsigned long vSize)
-{
- unsigned char tmp = 0;
- unsigned long i = 0;
+void Ebml_WriteVoid(EbmlGlobal *glob, unsigned long vSize) {
+ unsigned char tmp = 0;
+ unsigned long i = 0;
- Ebml_WriteID(glob, 0xEC);
- Ebml_WriteLen(glob, vSize);
+ Ebml_WriteID(glob, 0xEC);
+ Ebml_WriteLen(glob, vSize);
- for (i = 0; i < vSize; i++)
- {
- Ebml_Write(glob, &tmp, 1);
- }
+ for (i = 0; i < vSize; i++) {
+ Ebml_Write(glob, &tmp, 1);
+ }
}
/* TODO Serialize Date */
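To make Ebml_WriteLen concrete: the loop widens minVal by 7 bits per candidate byte until the value fits, then ORs the length-descriptor bit into position (size - 1) * 7 + 7 before serializing size bytes big-endian. A self-contained re-derivation for one value, mirroring the loop above:

  #include <assert.h>
  #include <stdint.h>

  static void vint_example(void) {
    int64_t val = 0x1234;  /* 4660 */
    int64_t minVal = 0xff;
    unsigned char size;
    for (size = 1; size < 8; size++) {
      if (val < minVal) break;  /* stops at size == 2: 4660 < 0x7f80 */
      minVal <<= 7;
    }
    val |= ((uint64_t)0x80) << ((size - 1) * 7);
    assert(size == 2 && val == 0x5234);  /* emitted big-endian: 0x52 0x34 */
  }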
diff --git a/libvpx/libmkv/WebMElement.c b/libvpx/libmkv/WebMElement.c
index 0ef5100..2f79a3c 100644
--- a/libvpx/libmkv/WebMElement.c
+++ b/libvpx/libmkv/WebMElement.c
@@ -14,106 +14,100 @@
#define kVorbisPrivateMaxSize 4000
-void writeHeader(EbmlGlobal *glob)
-{
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, EBML);
- Ebml_SerializeUnsigned(glob, EBMLVersion, 1);
- Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1); //EBML Read Version
- Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4); //EBML Max ID Length
- Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8); //EBML Max Size Length
- Ebml_SerializeString(glob, DocType, "webm"); //Doc Type
- Ebml_SerializeUnsigned(glob, DocTypeVersion, 2); //Doc Type Version
- Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2); //Doc Type Read Version
- Ebml_EndSubElement(glob, &start);
+void writeHeader(EbmlGlobal *glob) {
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, EBML);
+ Ebml_SerializeUnsigned(glob, EBMLVersion, 1);
+ Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1); // EBML Read Version
+ Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4); // EBML Max ID Length
+ Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8); // EBML Max Size Length
+ Ebml_SerializeString(glob, DocType, "webm"); // Doc Type
+ Ebml_SerializeUnsigned(glob, DocTypeVersion, 2); // Doc Type Version
+ Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2); // Doc Type Read Version
+ Ebml_EndSubElement(glob, &start);
}
void writeSimpleBlock(EbmlGlobal *glob, unsigned char trackNumber, short timeCode,
int isKeyframe, unsigned char lacingFlag, int discardable,
- unsigned char *data, unsigned long dataLength)
-{
- Ebml_WriteID(glob, SimpleBlock);
- unsigned long blockLength = 4 + dataLength;
- blockLength |= 0x10000000; //TODO check length < 0x0FFFFFFFF
- Ebml_Serialize(glob, &blockLength, sizeof(blockLength), 4);
- trackNumber |= 0x80; //TODO check track nubmer < 128
- Ebml_Write(glob, &trackNumber, 1);
- //Ebml_WriteSigned16(glob, timeCode,2); //this is 3 bytes
- Ebml_Serialize(glob, &timeCode, sizeof(timeCode), 2);
- unsigned char flags = 0x00 | (isKeyframe ? 0x80 : 0x00) | (lacingFlag << 1) | discardable;
- Ebml_Write(glob, &flags, 1);
- Ebml_Write(glob, data, dataLength);
-}
-
-static UInt64 generateTrackID(unsigned int trackNumber)
-{
- UInt64 t = time(NULL) * trackNumber;
- UInt64 r = rand();
- r = r << 32;
- r += rand();
- UInt64 rval = t ^ r;
- return rval;
+ unsigned char *data, unsigned long dataLength) {
+ Ebml_WriteID(glob, SimpleBlock);
+ unsigned long blockLength = 4 + dataLength;
+ blockLength |= 0x10000000; // TODO check length < 0x0FFFFFFFF
+ Ebml_Serialize(glob, &blockLength, sizeof(blockLength), 4);
+  trackNumber |= 0x80; // TODO check track number < 128
+ Ebml_Write(glob, &trackNumber, 1);
+ // Ebml_WriteSigned16(glob, timeCode,2); //this is 3 bytes
+ Ebml_Serialize(glob, &timeCode, sizeof(timeCode), 2);
+ unsigned char flags = 0x00 | (isKeyframe ? 0x80 : 0x00) | (lacingFlag << 1) | discardable;
+ Ebml_Write(glob, &flags, 1);
+ Ebml_Write(glob, data, dataLength);
+}
+
+static UInt64 generateTrackID(unsigned int trackNumber) {
+ UInt64 t = time(NULL) * trackNumber;
+ UInt64 r = rand();
+ r = r << 32;
+ r += rand();
+ UInt64 rval = t ^ r;
+ return rval;
}
void writeVideoTrack(EbmlGlobal *glob, unsigned int trackNumber, int flagLacing,
char *codecId, unsigned int pixelWidth, unsigned int pixelHeight,
- double frameRate)
-{
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, TrackEntry);
- Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
- UInt64 trackID = generateTrackID(trackNumber);
- Ebml_SerializeUnsigned(glob, TrackUID, trackID);
- Ebml_SerializeString(glob, CodecName, "VP8"); //TODO shouldn't be fixed
-
- Ebml_SerializeUnsigned(glob, TrackType, 1); //video is always 1
- Ebml_SerializeString(glob, CodecID, codecId);
- {
- EbmlLoc videoStart;
- Ebml_StartSubElement(glob, &videoStart, Video);
- Ebml_SerializeUnsigned(glob, PixelWidth, pixelWidth);
- Ebml_SerializeUnsigned(glob, PixelHeight, pixelHeight);
- Ebml_SerializeFloat(glob, FrameRate, frameRate);
- Ebml_EndSubElement(glob, &videoStart); //Video
- }
- Ebml_EndSubElement(glob, &start); //Track Entry
+ double frameRate) {
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, TrackEntry);
+ Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
+ UInt64 trackID = generateTrackID(trackNumber);
+ Ebml_SerializeUnsigned(glob, TrackUID, trackID);
+ Ebml_SerializeString(glob, CodecName, "VP8"); // TODO shouldn't be fixed
+
+ Ebml_SerializeUnsigned(glob, TrackType, 1); // video is always 1
+ Ebml_SerializeString(glob, CodecID, codecId);
+ {
+ EbmlLoc videoStart;
+ Ebml_StartSubElement(glob, &videoStart, Video);
+ Ebml_SerializeUnsigned(glob, PixelWidth, pixelWidth);
+ Ebml_SerializeUnsigned(glob, PixelHeight, pixelHeight);
+ Ebml_SerializeFloat(glob, FrameRate, frameRate);
+ Ebml_EndSubElement(glob, &videoStart); // Video
+ }
+ Ebml_EndSubElement(glob, &start); // Track Entry
}
void writeAudioTrack(EbmlGlobal *glob, unsigned int trackNumber, int flagLacing,
char *codecId, double samplingFrequency, unsigned int channels,
- unsigned char *private, unsigned long privateSize)
-{
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, TrackEntry);
- Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
- UInt64 trackID = generateTrackID(trackNumber);
- Ebml_SerializeUnsigned(glob, TrackUID, trackID);
- Ebml_SerializeUnsigned(glob, TrackType, 2); //audio is always 2
- //I am using defaults for thesed required fields
- /* Ebml_SerializeUnsigned(glob, FlagEnabled, 1);
- Ebml_SerializeUnsigned(glob, FlagDefault, 1);
- Ebml_SerializeUnsigned(glob, FlagForced, 1);
- Ebml_SerializeUnsigned(glob, FlagLacing, flagLacing);*/
- Ebml_SerializeString(glob, CodecID, codecId);
- Ebml_SerializeData(glob, CodecPrivate, private, privateSize);
-
- Ebml_SerializeString(glob, CodecName, "VORBIS"); //fixed for now
- {
- EbmlLoc AudioStart;
- Ebml_StartSubElement(glob, &AudioStart, Audio);
- Ebml_SerializeFloat(glob, SamplingFrequency, samplingFrequency);
- Ebml_SerializeUnsigned(glob, Channels, channels);
- Ebml_EndSubElement(glob, &AudioStart);
- }
- Ebml_EndSubElement(glob, &start);
-}
-void writeSegmentInformation(EbmlGlobal *ebml, EbmlLoc* startInfo, unsigned long timeCodeScale, double duration)
-{
- Ebml_StartSubElement(ebml, startInfo, Info);
- Ebml_SerializeUnsigned(ebml, TimecodeScale, timeCodeScale);
- Ebml_SerializeFloat(ebml, Segment_Duration, duration * 1000.0); //Currently fixed to using milliseconds
- Ebml_SerializeString(ebml, 0x4D80, "QTmuxingAppLibWebM-0.0.1");
- Ebml_SerializeString(ebml, 0x5741, "QTwritingAppLibWebM-0.0.1");
- Ebml_EndSubElement(ebml, startInfo);
+ unsigned char *private, unsigned long privateSize) {
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, TrackEntry);
+ Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
+ UInt64 trackID = generateTrackID(trackNumber);
+ Ebml_SerializeUnsigned(glob, TrackUID, trackID);
+ Ebml_SerializeUnsigned(glob, TrackType, 2); // audio is always 2
+  // I am using defaults for these required fields
+ /* Ebml_SerializeUnsigned(glob, FlagEnabled, 1);
+ Ebml_SerializeUnsigned(glob, FlagDefault, 1);
+ Ebml_SerializeUnsigned(glob, FlagForced, 1);
+ Ebml_SerializeUnsigned(glob, FlagLacing, flagLacing);*/
+ Ebml_SerializeString(glob, CodecID, codecId);
+ Ebml_SerializeData(glob, CodecPrivate, private, privateSize);
+
+ Ebml_SerializeString(glob, CodecName, "VORBIS"); // fixed for now
+ {
+ EbmlLoc AudioStart;
+ Ebml_StartSubElement(glob, &AudioStart, Audio);
+ Ebml_SerializeFloat(glob, SamplingFrequency, samplingFrequency);
+ Ebml_SerializeUnsigned(glob, Channels, channels);
+ Ebml_EndSubElement(glob, &AudioStart);
+ }
+ Ebml_EndSubElement(glob, &start);
+}
+void writeSegmentInformation(EbmlGlobal *ebml, EbmlLoc *startInfo, unsigned long timeCodeScale, double duration) {
+ Ebml_StartSubElement(ebml, startInfo, Info);
+ Ebml_SerializeUnsigned(ebml, TimecodeScale, timeCodeScale);
+ Ebml_SerializeFloat(ebml, Segment_Duration, duration * 1000.0); // Currently fixed to using milliseconds
+ Ebml_SerializeString(ebml, 0x4D80, "QTmuxingAppLibWebM-0.0.1");
+ Ebml_SerializeString(ebml, 0x5741, "QTwritingAppLibWebM-0.0.1");
+ Ebml_EndSubElement(ebml, startInfo);
}
/*
@@ -142,7 +136,7 @@ void Mkv_WriteSegmentInformation(Ebml& ebml_out, SegmentInformationStruct& segme
Ebml_SerializeString(ebml_out, 0x7384, segmentInformation.filename);
Ebml_SerializeUnsigned(ebml_out, 0x2AD7B1, segmentInformation.TimecodeScale);
Ebml_SerializeUnsigned(ebml_out, 0x4489, segmentInformation.Duration);
- //TODO date
+ // TODO date
Ebml_SerializeWString(ebml_out, 0x4D80, L"MKVMUX");
Ebml_SerializeWString(ebml_out, 0x5741, segmentInformation.WritingApp);
}
@@ -173,9 +167,9 @@ static void Mkv_WriteGenericTrackData(Ebml& ebml_out, TrackStruct& track)
void Mkv_WriteVideoTrack(Ebml& ebml_out, TrackStruct & track, VideoTrackStruct& video)
{
EbmlLoc trackHeadLoc, videoHeadLoc;
- Ebml_StartSubElement(ebml_out, trackHeadLoc, 0xAE); //start Track
+ Ebml_StartSubElement(ebml_out, trackHeadLoc, 0xAE); // start Track
Mkv_WriteGenericTrackData(ebml_out, track);
- Ebml_StartSubElement(ebml_out, videoHeadLoc, 0xE0); //start Video
+ Ebml_StartSubElement(ebml_out, videoHeadLoc, 0xE0); // start Video
Ebml_SerializeUnsigned(ebml_out, 0x9A, video.FlagInterlaced ? 1 :0);
Ebml_SerializeUnsigned(ebml_out, 0xB0, video.PixelWidth);
Ebml_SerializeUnsigned(ebml_out, 0xBA, video.PixelHeight);
@@ -193,7 +187,7 @@ void Mkv_WriteAudioTrack(Ebml& ebml_out, TrackStruct & track, AudioTrackStruct&
EbmlLoc trackHeadLoc, audioHeadLoc;
Ebml_StartSubElement(ebml_out, trackHeadLoc, 0xAE);
Mkv_WriteGenericTrackData(ebml_out, track);
- Ebml_StartSubElement(ebml_out, audioHeadLoc, 0xE0); //start Audio
+ Ebml_StartSubElement(ebml_out, audioHeadLoc, 0xE0); // start Audio
Ebml_SerializeFloat(ebml_out, 0xB5, video.SamplingFrequency);
Ebml_SerializeUnsigned(ebml_out, 0x9F, video.Channels);
Ebml_SerializeUnsigned(ebml_out, 0x6264, video.BitDepth);
@@ -213,7 +207,7 @@ void Mkv_WriteSimpleBlockHead(Ebml& ebml_out, EbmlLoc& ebmlLoc, SimpleBlockStru
Ebml_Write1UInt(ebml_out, block.TrackNumber);
Ebml_WriteSigned16(ebml_out,block.TimeCode);
unsigned char flags = 0x00 | (block.iskey ? 0x80:0x00) | (block.lacing << 1) | block.discardable;
- Ebml_Write1UInt(ebml_out, flags); //TODO this may be the wrong function
+ Ebml_Write1UInt(ebml_out, flags); // TODO this may be the wrong function
Ebml_Serialize(ebml_out, block.data, block.dataLength);
Ebml_EndSubElement(ebml_out,ebmlLoc);
}
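Reading writeSimpleBlock off into bytes: it emits the id, a 4-byte length carrying the 0x10000000 marker (the extra 4 covers the track, timecode, and flags bytes), then the block header and payload. A sketch with concrete values, assuming the 4-argument big-endian Ebml_Serialize backend these calls expect (buffer sizing illustrative):

  #include "EbmlBufferWriter.h"
  #include "WebMElement.h"

  /* Sketch: header bytes for track 1, timecode 0, keyframe, no lacing. */
  static void simple_block_example(void) {
    unsigned char buf[64];
    EbmlGlobal glob = {buf, sizeof(buf), 0};
    unsigned char payload[4] = {1, 2, 3, 4};
    writeSimpleBlock(&glob, 1, 0, 1, 0, 0, payload, 4);
    /* buf now holds:
       A3 | 10 00 00 08 | 81 | 00 00 | 80 | 01 02 03 04
       id   (4+4)|marker  trk  timecode flags  payload   */
  }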
diff --git a/libvpx/libmkv/WebMElement.h b/libvpx/libmkv/WebMElement.h
index b4208f2..d9ad0a0 100644
--- a/libvpx/libmkv/WebMElement.h
+++ b/libvpx/libmkv/WebMElement.h
@@ -17,8 +17,8 @@ void writeSimpleBock(EbmlGlobal *ebml, unsigned char trackNumber, unsigned short
// these are helper functions
void writeHeader(EbmlGlobal *ebml);
-void writeSegmentInformation(EbmlGlobal *ebml, EbmlLoc* startInfo , unsigned long timeCodeScale, double duration);
-//this function is a helper only, it assumes a lot of defaults
+void writeSegmentInformation(EbmlGlobal *ebml, EbmlLoc *startInfo, unsigned long timeCodeScale, double duration);
+// this function is a helper only, it assumes a lot of defaults
void writeVideoTrack(EbmlGlobal *ebml, unsigned int trackNumber, int flagLacing,
char *codecId, unsigned int pixelWidth, unsigned int pixelHeight,
double frameRate);
diff --git a/libvpx/libmkv/testlibmkv.c b/libvpx/libmkv/testlibmkv.c
index 7edfc43..97bcf95 100644
--- a/libvpx/libmkv/testlibmkv.c
+++ b/libvpx/libmkv/testlibmkv.c
@@ -13,51 +13,50 @@
#include "WebMElement.h"
#include <stdio.h>
-int main(int argc, char *argv[])
-{
- //init the datatype we're using for ebml output
- unsigned char data[8192];
- EbmlGlobal ebml;
- ebml.buf = data;
- ebml.offset = 0;
- ebml.length = 8192;
-
- writeHeader(&ebml);
+int main(int argc, char *argv[]) {
+ // init the datatype we're using for ebml output
+ unsigned char data[8192];
+ EbmlGlobal ebml;
+ ebml.buf = data;
+ ebml.offset = 0;
+ ebml.length = 8192;
+
+ writeHeader(&ebml);
+ {
+ EbmlLoc startSegment;
+ Ebml_StartSubElement(&ebml, &startSegment, Segment); // segment
{
- EbmlLoc startSegment;
- Ebml_StartSubElement(&ebml, &startSegment, Segment); //segment
- {
- //segment info
- EbmlLoc startInfo;
- Ebml_StartSubElement(&ebml, &startInfo, Info);
- Ebml_SerializeString(&ebml, 0x4D80, "muxingAppLibMkv");
- Ebml_SerializeString(&ebml, 0x5741, "writingAppLibMkv");
- Ebml_EndSubElement(&ebml, &startInfo);
- }
-
- {
- EbmlLoc trackStart;
- Ebml_StartSubElement(&ebml, &trackStart, Tracks);
- writeVideoTrack(&ebml, 1, 1, "V_MS/VFW/FOURCC", 320, 240, 29.97);
- //writeAudioTrack(&ebml,2,1, "A_VORBIS", 32000, 1, NULL, 0);
- Ebml_EndSubElement(&ebml, &trackStart);
- }
-
- {
- EbmlLoc clusterStart;
- Ebml_StartSubElement(&ebml, &clusterStart, Cluster); //cluster
- Ebml_SerializeUnsigned(&ebml, Timecode, 0);
-
- unsigned char someData[4] = {1, 2, 3, 4};
- writeSimpleBlock(&ebml, 1, 0, 1, 0, 0, someData, 4);
- Ebml_EndSubElement(&ebml, &clusterStart);
- } //end cluster
- Ebml_EndSubElement(&ebml, &startSegment);
+ // segment info
+ EbmlLoc startInfo;
+ Ebml_StartSubElement(&ebml, &startInfo, Info);
+ Ebml_SerializeString(&ebml, 0x4D80, "muxingAppLibMkv");
+ Ebml_SerializeString(&ebml, 0x5741, "writingAppLibMkv");
+ Ebml_EndSubElement(&ebml, &startInfo);
}
- //dump ebml stuff to the file
- FILE *file_out = fopen("test.mkv", "wb");
- size_t bytesWritten = fwrite(data, 1, ebml.offset, file_out);
- fclose(file_out);
- return 0;
+ {
+ EbmlLoc trackStart;
+ Ebml_StartSubElement(&ebml, &trackStart, Tracks);
+ writeVideoTrack(&ebml, 1, 1, "V_MS/VFW/FOURCC", 320, 240, 29.97);
+ // writeAudioTrack(&ebml,2,1, "A_VORBIS", 32000, 1, NULL, 0);
+ Ebml_EndSubElement(&ebml, &trackStart);
+ }
+
+ {
+ EbmlLoc clusterStart;
+ Ebml_StartSubElement(&ebml, &clusterStart, Cluster); // cluster
+ Ebml_SerializeUnsigned(&ebml, Timecode, 0);
+
+ unsigned char someData[4] = {1, 2, 3, 4};
+ writeSimpleBlock(&ebml, 1, 0, 1, 0, 0, someData, 4);
+ Ebml_EndSubElement(&ebml, &clusterStart);
+ } // end cluster
+ Ebml_EndSubElement(&ebml, &startSegment);
+ }
+
+ // dump ebml stuff to the file
+ FILE *file_out = fopen("test.mkv", "wb");
+ size_t bytesWritten = fwrite(data, 1, ebml.offset, file_out);
+ fclose(file_out);
+ return 0;
}
\ No newline at end of file
diff --git a/libvpx/libs.mk b/libvpx/libs.mk
index 4115dd8..2338631 100644
--- a/libvpx/libs.mk
+++ b/libvpx/libs.mk
@@ -12,11 +12,58 @@
# ARM assembly files are written in RVCT-style. We use some make magic to
# filter those files to allow GCC compilation
ifeq ($(ARCH_ARM),yes)
- ASM:=$(if $(filter yes,$(CONFIG_GCC)),.asm.s,.asm)
+ ASM:=$(if $(filter yes,$(CONFIG_GCC)$(CONFIG_MSVS)),.asm.s,.asm)
else
ASM:=.asm
endif
+#
+# Calculate platform- and compiler-specific offsets for hand coded assembly
+#
+ifeq ($(filter icc gcc,$(TGT_CC)), $(TGT_CC))
+OFFSET_PATTERN:='^[a-zA-Z0-9_]* EQU'
+define asm_offsets_template
+$$(BUILD_PFX)$(1): $$(BUILD_PFX)$(2).S
+ @echo " [CREATE] $$@"
+ $$(qexec)LC_ALL=C grep $$(OFFSET_PATTERN) $$< | tr -d '$$$$\#' $$(ADS2GAS) > $$@
+$$(BUILD_PFX)$(2).S: $(2)
+CLEAN-OBJS += $$(BUILD_PFX)$(1) $(2).S
+endef
+else
+ ifeq ($(filter rvct,$(TGT_CC)), $(TGT_CC))
+define asm_offsets_template
+$$(BUILD_PFX)$(1): obj_int_extract
+$$(BUILD_PFX)$(1): $$(BUILD_PFX)$(2).o
+ @echo " [CREATE] $$@"
+ $$(qexec)./obj_int_extract rvds $$< $$(ADS2GAS) > $$@
+OBJS-yes += $$(BUILD_PFX)$(2).o
+CLEAN-OBJS += $$(BUILD_PFX)$(1)
+$$(filter %$$(ASM).o,$$(OBJS-yes)): $$(BUILD_PFX)$(1)
+endef
+endif # rvct
+endif # !gcc
+
+#
+# Rule to generate runtime cpu detection files
+#
+define rtcd_h_template
+$$(BUILD_PFX)$(1).h: $$(SRC_PATH_BARE)/$(2)
+ @echo " [CREATE] $$@"
+ $$(qexec)$$(SRC_PATH_BARE)/build/make/rtcd.sh --arch=$$(TGT_ISA) \
+ --sym=$(1) \
+ --config=$$(CONFIG_DIR)$$(target)$$(if $$(FAT_ARCHS),,-$$(TOOLCHAIN)).mk \
+ $$(RTCD_OPTIONS) $$^ > $$@
+CLEAN-OBJS += $$(BUILD_PFX)$(1).h
+RTCD += $$(BUILD_PFX)$(1).h
+endef
+
+# x86inc.asm is not compatible with pic 32bit builds. Restrict
+# files which use it to 64bit builds or 32bit without pic
+USE_X86INC = no
+ifeq ($(CONFIG_USE_X86INC),yes)
+ USE_X86INC = yes
+endif
+
CODEC_SRCS-yes += CHANGELOG
CODEC_SRCS-yes += libs.mk
@@ -40,9 +87,12 @@ CODEC_SRCS-yes += $(addprefix vpx_scale/,$(call enabled,SCALE_SRCS))
include $(SRC_PATH_BARE)/vpx_ports/vpx_ports.mk
CODEC_SRCS-yes += $(addprefix vpx_ports/,$(call enabled,PORTS_SRCS))
+ifneq ($(CONFIG_VP8_ENCODER)$(CONFIG_VP8_DECODER),)
+ VP8_PREFIX=vp8/
+ include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8_common.mk
+endif
ifeq ($(CONFIG_VP8_ENCODER),yes)
- VP8_PREFIX=vp8/
include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8cx.mk
CODEC_SRCS-yes += $(addprefix $(VP8_PREFIX),$(call enabled,VP8_CX_SRCS))
CODEC_EXPORTS-yes += $(addprefix $(VP8_PREFIX),$(VP8_CX_EXPORTS))
@@ -52,7 +102,6 @@ ifeq ($(CONFIG_VP8_ENCODER),yes)
endif
ifeq ($(CONFIG_VP8_DECODER),yes)
- VP8_PREFIX=vp8/
include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8dx.mk
CODEC_SRCS-yes += $(addprefix $(VP8_PREFIX),$(call enabled,VP8_DX_SRCS))
CODEC_EXPORTS-yes += $(addprefix $(VP8_PREFIX),$(VP8_DX_EXPORTS))
@@ -61,6 +110,35 @@ ifeq ($(CONFIG_VP8_DECODER),yes)
CODEC_DOC_SECTIONS += vp8 vp8_decoder
endif
+ifneq ($(CONFIG_VP9_ENCODER)$(CONFIG_VP9_DECODER),)
+ VP9_PREFIX=vp9/
+ include $(SRC_PATH_BARE)/$(VP9_PREFIX)vp9_common.mk
+endif
+
+ifeq ($(CONFIG_VP9_ENCODER),yes)
+ VP9_PREFIX=vp9/
+ include $(SRC_PATH_BARE)/$(VP9_PREFIX)vp9cx.mk
+ CODEC_SRCS-yes += $(addprefix $(VP9_PREFIX),$(call enabled,VP9_CX_SRCS))
+ CODEC_EXPORTS-yes += $(addprefix $(VP9_PREFIX),$(VP9_CX_EXPORTS))
+ CODEC_SRCS-yes += $(VP9_PREFIX)vp9cx.mk vpx/vp8.h vpx/vp8cx.h
+ INSTALL-LIBS-yes += include/vpx/vp8.h include/vpx/vp8cx.h
+ INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP9_PREFIX)/%
+ CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8cx.h
+ CODEC_DOC_SECTIONS += vp9 vp9_encoder
+endif
+
+ifeq ($(CONFIG_VP9_DECODER),yes)
+ VP9_PREFIX=vp9/
+ include $(SRC_PATH_BARE)/$(VP9_PREFIX)vp9dx.mk
+ CODEC_SRCS-yes += $(addprefix $(VP9_PREFIX),$(call enabled,VP9_DX_SRCS))
+ CODEC_EXPORTS-yes += $(addprefix $(VP9_PREFIX),$(VP9_DX_EXPORTS))
+ CODEC_SRCS-yes += $(VP9_PREFIX)vp9dx.mk vpx/vp8.h vpx/vp8dx.h
+ INSTALL-LIBS-yes += include/vpx/vp8.h include/vpx/vp8dx.h
+ INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP9_PREFIX)/%
+ CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8dx.h
+ CODEC_DOC_SECTIONS += vp9 vp9_decoder
+endif
+
ifeq ($(CONFIG_ENCODERS),yes)
CODEC_DOC_SECTIONS += encoder
@@ -91,8 +169,13 @@ endif
CODEC_SRCS-$(BUILD_LIBVPX) += build/make/version.sh
CODEC_SRCS-$(BUILD_LIBVPX) += build/make/rtcd.sh
+CODEC_SRCS-$(BUILD_LIBVPX) += vpx_ports/emmintrin_compat.h
+CODEC_SRCS-$(BUILD_LIBVPX) += vpx_ports/vpx_once.h
CODEC_SRCS-$(BUILD_LIBVPX) += $(BUILD_PFX)vpx_config.c
INSTALL-SRCS-no += $(BUILD_PFX)vpx_config.c
+ifeq ($(ARCH_X86)$(ARCH_X86_64),yes)
+CODEC_SRCS-$(BUILD_LIBVPX) += third_party/x86inc/x86inc.asm
+endif
CODEC_EXPORTS-$(BUILD_LIBVPX) += vpx/exports_com
CODEC_EXPORTS-$(CONFIG_ENCODERS) += vpx/exports_enc
CODEC_EXPORTS-$(CONFIG_DECODERS) += vpx/exports_dec
@@ -116,7 +199,7 @@ INSTALL-LIBS-$(CONFIG_STATIC) += $(LIBSUBDIR)/libvpx.a
INSTALL-LIBS-$(CONFIG_DEBUG_LIBS) += $(LIBSUBDIR)/libvpx_g.a
endif
-CODEC_SRCS=$(filter-out %_test.cc,$(call enabled,CODEC_SRCS))
+CODEC_SRCS=$(call enabled,CODEC_SRCS)
INSTALL-SRCS-$(CONFIG_CODEC_SRCS) += $(CODEC_SRCS)
INSTALL-SRCS-$(CONFIG_CODEC_SRCS) += $(call enabled,CODEC_EXPORTS)
@@ -126,15 +209,16 @@ INSTALL-SRCS-$(CONFIG_CODEC_SRCS) += $(call enabled,CODEC_EXPORTS)
libvpx_srcs.txt:
@echo " [CREATE] $@"
@echo $(CODEC_SRCS) | xargs -n1 echo | sort -u > $@
+CLEAN-OBJS += libvpx_srcs.txt
ifeq ($(CONFIG_EXTERNAL_BUILD),yes)
ifeq ($(CONFIG_MSVS),yes)
-obj_int_extract.vcproj: $(SRC_PATH_BARE)/build/make/obj_int_extract.c
- @cp $(SRC_PATH_BARE)/build/x86-msvs/obj_int_extract.bat .
+obj_int_extract.$(VCPROJ_SFX): $(SRC_PATH_BARE)/build/make/obj_int_extract.c
+ @cp $(SRC_PATH_BARE)/build/$(MSVS_ARCH_DIR)/obj_int_extract.bat .
@echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh \
+ $(qexec)$(GEN_VCPROJ) \
--exe \
--target=$(TOOLCHAIN) \
--name=obj_int_extract \
@@ -145,8 +229,7 @@ obj_int_extract.vcproj: $(SRC_PATH_BARE)/build/make/obj_int_extract.c
-I. \
-I"$(SRC_PATH_BARE)" \
-PROJECTS-$(BUILD_LIBVPX) += obj_int_extract.vcproj
-PROJECTS-$(BUILD_LIBVPX) += obj_int_extract.bat
+PROJECTS-$(BUILD_LIBVPX) += obj_int_extract.$(VCPROJ_SFX)
vpx.def: $(call enabled,CODEC_EXPORTS)
@echo " [CREATE] $@"
@@ -155,11 +238,11 @@ vpx.def: $(call enabled,CODEC_EXPORTS)
--out=$@ $^
CLEAN-OBJS += vpx.def
-vpx.vcproj: $(CODEC_SRCS) vpx.def
+vpx.$(VCPROJ_SFX): $(CODEC_SRCS) vpx.def obj_int_extract.$(VCPROJ_SFX)
@echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh \
- --lib \
- --target=$(TOOLCHAIN) \
+ $(qexec)$(GEN_VCPROJ) \
+ $(if $(CONFIG_SHARED),--dll,--lib) \
+ --target=$(TOOLCHAIN) \
$(if $(CONFIG_STATIC_MSVCRT),--static-crt) \
--name=vpx \
--proj-guid=DCE19DAF-69AC-46DB-B14A-39F0FAA5DB74 \
@@ -168,10 +251,10 @@ vpx.vcproj: $(CODEC_SRCS) vpx.def
--out=$@ $(CFLAGS) $^ \
--src-path-bare="$(SRC_PATH_BARE)" \
-PROJECTS-$(BUILD_LIBVPX) += vpx.vcproj
+PROJECTS-$(BUILD_LIBVPX) += vpx.$(VCPROJ_SFX)
-vpx.vcproj: vpx_config.asm
-vpx.vcproj: vpx_rtcd.h
+vpx.$(VCPROJ_SFX): vpx_config.asm
+vpx.$(VCPROJ_SFX): $(RTCD)
endif
else
@@ -180,17 +263,29 @@ OBJS-$(BUILD_LIBVPX) += $(LIBVPX_OBJS)
LIBS-$(if $(BUILD_LIBVPX),$(CONFIG_STATIC)) += $(BUILD_PFX)libvpx.a $(BUILD_PFX)libvpx_g.a
$(BUILD_PFX)libvpx_g.a: $(LIBVPX_OBJS)
+
BUILD_LIBVPX_SO := $(if $(BUILD_LIBVPX),$(CONFIG_SHARED))
+
+ifeq ($(filter darwin%,$(TGT_OS)),$(TGT_OS))
+LIBVPX_SO := libvpx.$(VERSION_MAJOR).dylib
+EXPORT_FILE := libvpx.syms
+LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, \
+ libvpx.dylib )
+else
LIBVPX_SO := libvpx.so.$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH)
+EXPORT_FILE := libvpx.ver
+SYM_LINK := libvpx.so
+LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, \
+ libvpx.so libvpx.so.$(VERSION_MAJOR) \
+ libvpx.so.$(VERSION_MAJOR).$(VERSION_MINOR))
+endif
+
LIBS-$(BUILD_LIBVPX_SO) += $(BUILD_PFX)$(LIBVPX_SO)\
$(notdir $(LIBVPX_SO_SYMLINKS))
-$(BUILD_PFX)$(LIBVPX_SO): $(LIBVPX_OBJS) libvpx.ver
+$(BUILD_PFX)$(LIBVPX_SO): $(LIBVPX_OBJS) $(EXPORT_FILE)
$(BUILD_PFX)$(LIBVPX_SO): extralibs += -lm
$(BUILD_PFX)$(LIBVPX_SO): SONAME = libvpx.so.$(VERSION_MAJOR)
-$(BUILD_PFX)$(LIBVPX_SO): SO_VERSION_SCRIPT = libvpx.ver
-LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, \
- libvpx.so libvpx.so.$(VERSION_MAJOR) \
- libvpx.so.$(VERSION_MAJOR).$(VERSION_MINOR))
+$(BUILD_PFX)$(LIBVPX_SO): EXPORTS_FILE = $(EXPORT_FILE)
libvpx.ver: $(call enabled,CODEC_EXPORTS)
@echo " [CREATE] $@"
@@ -199,10 +294,16 @@ libvpx.ver: $(call enabled,CODEC_EXPORTS)
$(qexec)echo "local: *; };" >> $@
CLEAN-OBJS += libvpx.ver
+libvpx.syms: $(call enabled,CODEC_EXPORTS)
+ @echo " [CREATE] $@"
+ $(qexec)awk '{print "_"$$2}' $^ >$@
+CLEAN-OBJS += libvpx.syms
+
define libvpx_symlink_template
$(1): $(2)
- @echo " [LN] $$@"
- $(qexec)ln -sf $(LIBVPX_SO) $$@
+ @echo " [LN] $(2) $$@"
+ $(qexec)mkdir -p $$(dir $$@)
+ $(qexec)ln -sf $(2) $$@
endef
$(eval $(call libvpx_symlink_template,\
@@ -210,10 +311,12 @@ $(eval $(call libvpx_symlink_template,\
$(BUILD_PFX)$(LIBVPX_SO)))
$(eval $(call libvpx_symlink_template,\
$(addprefix $(DIST_DIR)/,$(LIBVPX_SO_SYMLINKS)),\
- $(DIST_DIR)/$(LIBSUBDIR)/$(LIBVPX_SO)))
+ $(LIBVPX_SO)))
+
+
+INSTALL-LIBS-$(BUILD_LIBVPX_SO) += $(LIBVPX_SO_SYMLINKS)
+INSTALL-LIBS-$(BUILD_LIBVPX_SO) += $(LIBSUBDIR)/$(LIBVPX_SO)
-INSTALL-LIBS-$(CONFIG_SHARED) += $(LIBVPX_SO_SYMLINKS)
-INSTALL-LIBS-$(CONFIG_SHARED) += $(LIBSUBDIR)/$(LIBVPX_SO)
LIBS-$(BUILD_LIBVPX) += vpx.pc
vpx.pc: config.mk libs.mk
@@ -229,8 +332,12 @@ vpx.pc: config.mk libs.mk
$(qexec)echo 'Version: $(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH)' >> $@
$(qexec)echo 'Requires:' >> $@
$(qexec)echo 'Conflicts:' >> $@
- $(qexec)echo 'Libs: -L$${libdir} -lvpx' >> $@
+ $(qexec)echo 'Libs: -L$${libdir} -lvpx -lm' >> $@
+ifeq ($(HAVE_PTHREAD_H),yes)
$(qexec)echo 'Libs.private: -lm -lpthread' >> $@
+else
+ $(qexec)echo 'Libs.private: -lm' >> $@
+endif
$(qexec)echo 'Cflags: -I$${includedir}' >> $@
INSTALL-LIBS-yes += $(LIBSUBDIR)/pkgconfig/vpx.pc
INSTALL_MAPS += $(LIBSUBDIR)/pkgconfig/%.pc %.pc
@@ -265,71 +372,10 @@ endif
$(filter %.s.o,$(OBJS-yes)): $(BUILD_PFX)vpx_config.asm
$(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)vpx_config.asm
-#
-# Calculate platform- and compiler-specific offsets for hand coded assembly
-#
-
-OFFSET_PATTERN:='^[a-zA-Z0-9_]* EQU'
-
-ifeq ($(filter icc gcc,$(TGT_CC)), $(TGT_CC))
- $(BUILD_PFX)asm_com_offsets.asm: $(BUILD_PFX)$(VP8_PREFIX)common/asm_com_offsets.c.S
- @echo " [CREATE] $@"
- $(qexec)LC_ALL=C grep $(OFFSET_PATTERN) $< | tr -d '$$\#' $(ADS2GAS) > $@
- $(BUILD_PFX)$(VP8_PREFIX)common/asm_com_offsets.c.S: $(VP8_PREFIX)common/asm_com_offsets.c
- CLEAN-OBJS += $(BUILD_PFX)asm_com_offsets.asm $(BUILD_PFX)$(VP8_PREFIX)common/asm_com_offsets.c.S
-
- $(BUILD_PFX)asm_enc_offsets.asm: $(BUILD_PFX)$(VP8_PREFIX)encoder/asm_enc_offsets.c.S
- @echo " [CREATE] $@"
- $(qexec)LC_ALL=C grep $(OFFSET_PATTERN) $< | tr -d '$$\#' $(ADS2GAS) > $@
- $(BUILD_PFX)$(VP8_PREFIX)encoder/asm_enc_offsets.c.S: $(VP8_PREFIX)encoder/asm_enc_offsets.c
- CLEAN-OBJS += $(BUILD_PFX)asm_enc_offsets.asm $(BUILD_PFX)$(VP8_PREFIX)encoder/asm_enc_offsets.c.S
-
- $(BUILD_PFX)asm_dec_offsets.asm: $(BUILD_PFX)$(VP8_PREFIX)decoder/asm_dec_offsets.c.S
- @echo " [CREATE] $@"
- $(qexec)LC_ALL=C grep $(OFFSET_PATTERN) $< | tr -d '$$\#' $(ADS2GAS) > $@
- $(BUILD_PFX)$(VP8_PREFIX)decoder/asm_dec_offsets.c.S: $(VP8_PREFIX)decoder/asm_dec_offsets.c
- CLEAN-OBJS += $(BUILD_PFX)asm_dec_offsets.asm $(BUILD_PFX)$(VP8_PREFIX)decoder/asm_dec_offsets.c.S
-else
- ifeq ($(filter rvct,$(TGT_CC)), $(TGT_CC))
- asm_com_offsets.asm: obj_int_extract
- asm_com_offsets.asm: $(VP8_PREFIX)common/asm_com_offsets.c.o
- @echo " [CREATE] $@"
- $(qexec)./obj_int_extract rvds $< $(ADS2GAS) > $@
- OBJS-yes += $(VP8_PREFIX)common/asm_com_offsets.c.o
- CLEAN-OBJS += asm_com_offsets.asm
- $(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)asm_com_offsets.asm
-
- asm_enc_offsets.asm: obj_int_extract
- asm_enc_offsets.asm: $(VP8_PREFIX)encoder/asm_enc_offsets.c.o
- @echo " [CREATE] $@"
- $(qexec)./obj_int_extract rvds $< $(ADS2GAS) > $@
- OBJS-yes += $(VP8_PREFIX)encoder/asm_enc_offsets.c.o
- CLEAN-OBJS += asm_enc_offsets.asm
- $(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)asm_enc_offsets.asm
-
- asm_dec_offsets.asm: obj_int_extract
- asm_dec_offsets.asm: $(VP8_PREFIX)decoder/asm_dec_offsets.c.o
- @echo " [CREATE] $@"
- $(qexec)./obj_int_extract rvds $< $(ADS2GAS) > $@
- OBJS-yes += $(VP8_PREFIX)decoder/asm_dec_offsets.c.o
- CLEAN-OBJS += asm_dec_offsets.asm
- $(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)asm_dec_offsets.asm
- endif
-endif
$(shell $(SRC_PATH_BARE)/build/make/version.sh "$(SRC_PATH_BARE)" $(BUILD_PFX)vpx_version.h)
CLEAN-OBJS += $(BUILD_PFX)vpx_version.h
-#
-# Rule to generate runtime cpu detection files
-#
-$(BUILD_PFX)vpx_rtcd.h: $(SRC_PATH_BARE)/$(sort $(filter %rtcd_defs.sh,$(CODEC_SRCS)))
- @echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/rtcd.sh --arch=$(TGT_ISA) \
- --sym=vpx_rtcd \
- --config=$(target)$(if $(FAT_ARCHS),,-$(TOOLCHAIN)).mk \
- $(RTCD_OPTIONS) $^ > $@
-CLEAN-OBJS += $(BUILD_PFX)vpx_rtcd.h
##
## libvpx test directives
@@ -339,11 +385,16 @@ LIBVPX_TEST_DATA_PATH ?= .
include $(SRC_PATH_BARE)/test/test.mk
LIBVPX_TEST_SRCS=$(addprefix test/,$(call enabled,LIBVPX_TEST_SRCS))
-LIBVPX_TEST_BINS=./test_libvpx
+LIBVPX_TEST_BINS=./test_libvpx$(EXE_SFX)
LIBVPX_TEST_DATA=$(addprefix $(LIBVPX_TEST_DATA_PATH)/,\
$(call enabled,LIBVPX_TEST_DATA))
libvpx_test_data_url=http://downloads.webmproject.org/test_data/libvpx/$(1)
+libvpx_test_srcs.txt:
+ @echo " [CREATE] $@"
+ @echo $(LIBVPX_TEST_SRCS) | xargs -n1 echo | sort -u > $@
+CLEAN-OBJS += libvpx_test_srcs.txt
+
$(LIBVPX_TEST_DATA):
@echo " [DOWNLOAD] $@"
$(qexec)trap 'rm -f $@' INT TERM &&\
@@ -365,9 +416,9 @@ testdata:: $(LIBVPX_TEST_DATA)
ifeq ($(CONFIG_EXTERNAL_BUILD),yes)
ifeq ($(CONFIG_MSVS),yes)
-gtest.vcproj: $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc
+gtest.$(VCPROJ_SFX): $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc
@echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh \
+ $(qexec)$(GEN_VCPROJ) \
--lib \
--target=$(TOOLCHAIN) \
$(if $(CONFIG_STATIC_MSVCRT),--static-crt) \
@@ -375,34 +426,39 @@ gtest.vcproj: $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc
--proj-guid=EC00E1EC-AF68-4D92-A255-181690D1C9B1 \
--ver=$(CONFIG_VS_VERSION) \
--src-path-bare="$(SRC_PATH_BARE)" \
- --out=gtest.vcproj $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc \
+ -D_VARIADIC_MAX=10 \
+ --out=gtest.$(VCPROJ_SFX) $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc \
-I. -I"$(SRC_PATH_BARE)/third_party/googletest/src/include" -I"$(SRC_PATH_BARE)/third_party/googletest/src"
-PROJECTS-$(CONFIG_MSVS) += gtest.vcproj
+PROJECTS-$(CONFIG_MSVS) += gtest.$(VCPROJ_SFX)
-test_libvpx.vcproj: $(LIBVPX_TEST_SRCS)
+test_libvpx.$(VCPROJ_SFX): $(LIBVPX_TEST_SRCS) vpx.$(VCPROJ_SFX) gtest.$(VCPROJ_SFX)
@echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh \
+ $(qexec)$(GEN_VCPROJ) \
--exe \
--target=$(TOOLCHAIN) \
--name=test_libvpx \
+ -D_VARIADIC_MAX=10 \
--proj-guid=CD837F5F-52D8-4314-A370-895D614166A7 \
--ver=$(CONFIG_VS_VERSION) \
$(if $(CONFIG_STATIC_MSVCRT),--static-crt) \
--out=$@ $(INTERNAL_CFLAGS) $(CFLAGS) \
-I. -I"$(SRC_PATH_BARE)/third_party/googletest/src/include" \
- -L. -l$(CODEC_LIB) -lwinmm -l$(GTEST_LIB) $^
+ -L. -l$(CODEC_LIB) -l$(GTEST_LIB) $^
-PROJECTS-$(CONFIG_MSVS) += test_libvpx.vcproj
+PROJECTS-$(CONFIG_MSVS) += test_libvpx.$(VCPROJ_SFX)
-test:: testdata
- @set -e; for t in $(addprefix Win32/Release/,$(notdir $(LIBVPX_TEST_BINS:.cc=.exe))); do $$t; done
+LIBVPX_TEST_BINS := $(addprefix $(TGT_OS:win64=x64)/Release/,$(notdir $(LIBVPX_TEST_BINS)))
endif
else
include $(SRC_PATH_BARE)/third_party/googletest/gtest.mk
GTEST_SRCS := $(addprefix third_party/googletest/src/,$(call enabled,GTEST_SRCS))
GTEST_OBJS=$(call objs,$(GTEST_SRCS))
+ifeq ($(filter win%,$(TGT_OS)),$(TGT_OS))
+# Disabling pthreads globally will cause issues on darwin and possibly elsewhere
+$(GTEST_OBJS) $(GTEST_OBJS:.o=.d): CXXFLAGS += -DGTEST_HAS_PTHREAD=0
+endif
$(GTEST_OBJS) $(GTEST_OBJS:.o=.d): CXXFLAGS += -I$(SRC_PATH_BARE)/third_party/googletest/src
$(GTEST_OBJS) $(GTEST_OBJS:.o=.d): CXXFLAGS += -I$(SRC_PATH_BARE)/third_party/googletest/src/include
OBJS-$(BUILD_LIBVPX) += $(GTEST_OBJS)
@@ -427,14 +483,28 @@ $(foreach bin,$(LIBVPX_TEST_BINS),\
lib$(CODEC_LIB)$(CODEC_LIB_SUF) libgtest.a ))\
$(if $(BUILD_LIBVPX),$(eval $(call linkerxx_template,$(bin),\
$(LIBVPX_TEST_OBJS) \
- -L. -lvpx -lgtest -lpthread -lm)\
+ -L. -lvpx -lgtest $(extralibs) -lm)\
)))\
$(if $(LIPO_LIBS),$(eval $(call lipo_bin_template,$(bin))))\
-test:: $(LIBVPX_TEST_BINS) testdata
- @set -e; for t in $(LIBVPX_TEST_BINS); do $$t; done
-
endif
+
+define test_shard_template
+test:: test_shard.$(1)
+test_shard.$(1): $(LIBVPX_TEST_BINS) testdata
+ @set -e; \
+ for t in $(LIBVPX_TEST_BINS); do \
+ export GTEST_SHARD_INDEX=$(1); \
+ export GTEST_TOTAL_SHARDS=$(2); \
+ $$$$t; \
+ done
+.PHONY: test_shard.$(1)
+endef
+
+NUM_SHARDS := 10
+SHARDS := 0 1 2 3 4 5 6 7 8 9
+$(foreach s,$(SHARDS),$(eval $(call test_shard_template,$(s),$(NUM_SHARDS))))
+
endif
##
@@ -450,5 +520,8 @@ libs.doxy: $(CODEC_DOC_SRCS)
@echo "INCLUDE_PATH += ." >> $@;
@echo "ENABLED_SECTIONS += $(sort $(CODEC_DOC_SECTIONS))" >> $@
-## Generate vpx_rtcd.h for all objects
-$(OBJS-yes:.o=.d): $(BUILD_PFX)vpx_rtcd.h
+## Generate rtcd.h for all objects
+$(OBJS-yes:.o=.d): $(RTCD)
+
+## Update the global src list
+SRCS += $(CODEC_SRCS) $(LIBVPX_TEST_SRCS) $(GTEST_SRCS)
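The new test_shard_template fans `make test` out into NUM_SHARDS sequential runs by exporting GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS before each invocation; googletest then runs only the tests whose ordinal lands on that shard. A hedged sketch of the selection rule as googletest documents it (not libvpx code):

  #include <stdlib.h>

  /* Shard selection as googletest applies it: test i runs on the shard
     whose index equals i modulo the shard count. Illustrative only. */
  static int runs_on_this_shard(int test_ordinal) {
    const char *idx = getenv("GTEST_SHARD_INDEX");
    const char *tot = getenv("GTEST_TOTAL_SHARDS");
    int shard_index  = idx ? atoi(idx) : 0;
    int total_shards = tot ? atoi(tot) : 1;
    return test_ordinal % total_shards == shard_index;
  }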
diff --git a/libvpx/md5_utils.c b/libvpx/md5_utils.c
index 9a584fa..8fb26e2 100644
--- a/libvpx/md5_utils.c
+++ b/libvpx/md5_utils.c
@@ -25,25 +25,22 @@
#include "md5_utils.h"
void
-byteSwap(UWORD32 *buf, unsigned words)
-{
- md5byte *p;
+byteSwap(UWORD32 *buf, unsigned words) {
+ md5byte *p;
- /* Only swap bytes for big endian machines */
- int i = 1;
+ /* Only swap bytes for big endian machines */
+ int i = 1;
- if (*(char *)&i == 1)
- return;
+ if (*(char *)&i == 1)
+ return;
- p = (md5byte *)buf;
+ p = (md5byte *)buf;
- do
- {
- *buf++ = (UWORD32)((unsigned)p[3] << 8 | p[2]) << 16 |
- ((unsigned)p[1] << 8 | p[0]);
- p += 4;
- }
- while (--words);
+ do {
+ *buf++ = (UWORD32)((unsigned)p[3] << 8 | p[2]) << 16 |
+ ((unsigned)p[1] << 8 | p[0]);
+ p += 4;
+ } while (--words);
}
/*
@@ -51,15 +48,14 @@ byteSwap(UWORD32 *buf, unsigned words)
* initialization constants.
*/
void
-MD5Init(struct MD5Context *ctx)
-{
- ctx->buf[0] = 0x67452301;
- ctx->buf[1] = 0xefcdab89;
- ctx->buf[2] = 0x98badcfe;
- ctx->buf[3] = 0x10325476;
-
- ctx->bytes[0] = 0;
- ctx->bytes[1] = 0;
+MD5Init(struct MD5Context *ctx) {
+ ctx->buf[0] = 0x67452301;
+ ctx->buf[1] = 0xefcdab89;
+ ctx->buf[2] = 0x98badcfe;
+ ctx->buf[3] = 0x10325476;
+
+ ctx->bytes[0] = 0;
+ ctx->bytes[1] = 0;
}
/*
@@ -67,44 +63,41 @@ MD5Init(struct MD5Context *ctx)
* of bytes.
*/
void
-MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len)
-{
- UWORD32 t;
+MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) {
+ UWORD32 t;
- /* Update byte count */
+ /* Update byte count */
- t = ctx->bytes[0];
+ t = ctx->bytes[0];
- if ((ctx->bytes[0] = t + len) < t)
- ctx->bytes[1]++; /* Carry from low to high */
+ if ((ctx->bytes[0] = t + len) < t)
+ ctx->bytes[1]++; /* Carry from low to high */
- t = 64 - (t & 0x3f); /* Space available in ctx->in (at least 1) */
+ t = 64 - (t & 0x3f); /* Space available in ctx->in (at least 1) */
- if (t > len)
- {
- memcpy((md5byte *)ctx->in + 64 - t, buf, len);
- return;
- }
+ if (t > len) {
+ memcpy((md5byte *)ctx->in + 64 - t, buf, len);
+ return;
+ }
- /* First chunk is an odd size */
- memcpy((md5byte *)ctx->in + 64 - t, buf, t);
+ /* First chunk is an odd size */
+ memcpy((md5byte *)ctx->in + 64 - t, buf, t);
+ byteSwap(ctx->in, 16);
+ MD5Transform(ctx->buf, ctx->in);
+ buf += t;
+ len -= t;
+
+ /* Process data in 64-byte chunks */
+ while (len >= 64) {
+ memcpy(ctx->in, buf, 64);
byteSwap(ctx->in, 16);
MD5Transform(ctx->buf, ctx->in);
- buf += t;
- len -= t;
-
- /* Process data in 64-byte chunks */
- while (len >= 64)
- {
- memcpy(ctx->in, buf, 64);
- byteSwap(ctx->in, 16);
- MD5Transform(ctx->buf, ctx->in);
- buf += 64;
- len -= 64;
- }
-
- /* Handle any remaining bytes of data. */
- memcpy(ctx->in, buf, len);
+ buf += 64;
+ len -= 64;
+ }
+
+ /* Handle any remaining bytes of data. */
+ memcpy(ctx->in, buf, len);
}
/*
@@ -112,37 +105,35 @@ MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len)
* 1 0* (64-bit count of bits processed, MSB-first)
*/
void
-MD5Final(md5byte digest[16], struct MD5Context *ctx)
-{
- int count = ctx->bytes[0] & 0x3f; /* Number of bytes in ctx->in */
- md5byte *p = (md5byte *)ctx->in + count;
-
- /* Set the first char of padding to 0x80. There is always room. */
- *p++ = 0x80;
-
- /* Bytes of padding needed to make 56 bytes (-8..55) */
- count = 56 - 1 - count;
-
- if (count < 0) /* Padding forces an extra block */
- {
- memset(p, 0, count + 8);
- byteSwap(ctx->in, 16);
- MD5Transform(ctx->buf, ctx->in);
- p = (md5byte *)ctx->in;
- count = 56;
- }
-
- memset(p, 0, count);
- byteSwap(ctx->in, 14);
-
- /* Append length in bits and transform */
- ctx->in[14] = ctx->bytes[0] << 3;
- ctx->in[15] = ctx->bytes[1] << 3 | ctx->bytes[0] >> 29;
+MD5Final(md5byte digest[16], struct MD5Context *ctx) {
+ int count = ctx->bytes[0] & 0x3f; /* Number of bytes in ctx->in */
+ md5byte *p = (md5byte *)ctx->in + count;
+
+ /* Set the first char of padding to 0x80. There is always room. */
+ *p++ = 0x80;
+
+ /* Bytes of padding needed to make 56 bytes (-8..55) */
+ count = 56 - 1 - count;
+
+ if (count < 0) { /* Padding forces an extra block */
+ memset(p, 0, count + 8);
+ byteSwap(ctx->in, 16);
MD5Transform(ctx->buf, ctx->in);
+ p = (md5byte *)ctx->in;
+ count = 56;
+ }
+
+ memset(p, 0, count);
+ byteSwap(ctx->in, 14);
+
+ /* Append length in bits and transform */
+ ctx->in[14] = ctx->bytes[0] << 3;
+ ctx->in[15] = ctx->bytes[1] << 3 | ctx->bytes[0] >> 29;
+ MD5Transform(ctx->buf, ctx->in);
- byteSwap(ctx->buf, 4);
- memcpy(digest, ctx->buf, 16);
- memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */
+ byteSwap(ctx->buf, 4);
+ memcpy(digest, ctx->buf, 16);
+ memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */
}
#ifndef ASM_MD5
@@ -157,7 +148,7 @@ MD5Final(md5byte digest[16], struct MD5Context *ctx)
/* This is the central step in the MD5 algorithm. */
#define MD5STEP(f,w,x,y,z,in,s) \
- (w += f(x,y,z) + in, w = (w<<s | w>>(32-s)) + x)
+ (w += f(x,y,z) + in, w = (w<<s | w>>(32-s)) + x)
/*
* The core of the MD5 algorithm, this alters an existing MD5 hash to
@@ -165,87 +156,86 @@ MD5Final(md5byte digest[16], struct MD5Context *ctx)
* the data and converts bytes into longwords for this routine.
*/
void
-MD5Transform(UWORD32 buf[4], UWORD32 const in[16])
-{
- register UWORD32 a, b, c, d;
-
- a = buf[0];
- b = buf[1];
- c = buf[2];
- d = buf[3];
-
- MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
- MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
- MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
- MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
- MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
- MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
- MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
- MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
- MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
- MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
- MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
- MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
- MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
- MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
- MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
- MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
- MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
- MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
- MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
- MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
- MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
- MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
- MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
- MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
- MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
- MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
- MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
- MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
- MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
- MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
- MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
- MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
- MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
- MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
- MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
- MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
- MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
- MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
- MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
- MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
- MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
- MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
- MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
- MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
- MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
- MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
- MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
- MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
- MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
- MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
- MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
- MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
- MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
- MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
- MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
- MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
- MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
- MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
- MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
- MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
- MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
- MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
- MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
- MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
- buf[0] += a;
- buf[1] += b;
- buf[2] += c;
- buf[3] += d;
+MD5Transform(UWORD32 buf[4], UWORD32 const in[16]) {
+ register UWORD32 a, b, c, d;
+
+ a = buf[0];
+ b = buf[1];
+ c = buf[2];
+ d = buf[3];
+
+ MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+ MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+ MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+ MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+ MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+ MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+ MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+ MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+ MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+ MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+ MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+ MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+ MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+ MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+ MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+ MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+ MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+ MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+ MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+ MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+ MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+ MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+ MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+ MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+ MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+ MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+ MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+ MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+ MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+ MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+ MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+ MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+ MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+ MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+ MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+ MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+ MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+ MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+ MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+ MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+ MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+ MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+ MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+ MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+ MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+ MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+ MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+ MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+ MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+ MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+ MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+ MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+ MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+ MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+ MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+ MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+ MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+ MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+ MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+ MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+ MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+ MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+ MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+ MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+ buf[0] += a;
+ buf[1] += b;
+ buf[2] += c;
+ buf[3] += d;
}
#endif
diff --git a/libvpx/md5_utils.h b/libvpx/md5_utils.h
index 5ca1b5f..81792c4 100644
--- a/libvpx/md5_utils.h
+++ b/libvpx/md5_utils.h
@@ -27,11 +27,10 @@
#define UWORD32 unsigned int
typedef struct MD5Context MD5Context;
-struct MD5Context
-{
- UWORD32 buf[4];
- UWORD32 bytes[2];
- UWORD32 in[16];
+struct MD5Context {
+ UWORD32 buf[4];
+ UWORD32 bytes[2];
+ UWORD32 in[16];
};
void MD5Init(struct MD5Context *context);
diff --git a/libvpx/nestegg/halloc/src/macros.h b/libvpx/nestegg/halloc/src/macros.h
index c36b516..1f84bc2 100644
--- a/libvpx/nestegg/halloc/src/macros.h
+++ b/libvpx/nestegg/halloc/src/macros.h
@@ -20,7 +20,7 @@
/*
restore pointer to the structure by a pointer to its field
*/
-#define structof(p,t,f) ((t*)(- offsetof(t,f) + (char*)(p)))
+#define structof(p,t,f) ((t*)(- (ptrdiff_t) offsetof(t,f) + (char*)(p)))
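+/* Note: offsetof() yields an unsigned size_t; the ptrdiff_t cast makes the
+   negation signed instead of relying on unsigned wraparound. */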
/*
* redefine for the target compiler
diff --git a/libvpx/nestegg/include/nestegg/nestegg.h b/libvpx/nestegg/include/nestegg/nestegg.h
index 7447d14..6510694 100644
--- a/libvpx/nestegg/include/nestegg/nestegg.h
+++ b/libvpx/nestegg/include/nestegg/nestegg.h
@@ -67,6 +67,7 @@ extern "C" {
#define NESTEGG_CODEC_VP8 0 /**< Track uses Google On2 VP8 codec. */
#define NESTEGG_CODEC_VORBIS 1 /**< Track uses Xiph Vorbis codec. */
+#define NESTEGG_CODEC_VP9 2 /**< Track uses Google On2 VP9 codec. */
#define NESTEGG_SEEK_SET 0 /**< Seek offset relative to beginning of stream. */
#define NESTEGG_SEEK_CUR 1 /**< Seek offset relative to current position in stream. */
diff --git a/libvpx/nestegg/src/nestegg.c b/libvpx/nestegg/src/nestegg.c
index cc87788..ae87e8f8 100644
--- a/libvpx/nestegg/src/nestegg.c
+++ b/libvpx/nestegg/src/nestegg.c
@@ -127,6 +127,7 @@ enum ebml_type_enum {
/* Track IDs */
#define TRACK_ID_VP8 "V_VP8"
+#define TRACK_ID_VP9 "V_VP9"
#define TRACK_ID_VORBIS "A_VORBIS"
enum vint_mask {
@@ -1669,6 +1670,9 @@ nestegg_track_codec_id(nestegg * ctx, unsigned int track)
if (strcmp(codec_id, TRACK_ID_VP8) == 0)
return NESTEGG_CODEC_VP8;
+ if (strcmp(codec_id, TRACK_ID_VP9) == 0)
+ return NESTEGG_CODEC_VP9;
+
if (strcmp(codec_id, TRACK_ID_VORBIS) == 0)
return NESTEGG_CODEC_VORBIS;
diff --git a/libvpx/solution.mk b/libvpx/solution.mk
index 948305f..2c8d29a 100644
--- a/libvpx/solution.mk
+++ b/libvpx/solution.mk
@@ -9,14 +9,14 @@
##
# libvpx reverse dependencies (targets that depend on libvpx)
-VPX_NONDEPS=$(addsuffix .vcproj,vpx gtest obj_int_extract)
+VPX_NONDEPS=$(addsuffix .$(VCPROJ_SFX),vpx gtest obj_int_extract)
VPX_RDEPS=$(foreach vcp,\
- $(filter-out $(VPX_NONDEPS),$^), --dep=$(vcp:.vcproj=):vpx)
+ $(filter-out $(VPX_NONDEPS),$^), --dep=$(vcp:.$(VCPROJ_SFX)=):vpx)
-vpx.sln: $(wildcard *.vcproj)
+vpx.sln: $(wildcard *.$(VCPROJ_SFX))
@echo " [CREATE] $@"
$(SRC_PATH_BARE)/build/make/gen_msvs_sln.sh \
- $(if $(filter vpx.vcproj,$^),$(VPX_RDEPS)) \
+ $(if $(filter vpx.$(VCPROJ_SFX),$^),$(VPX_RDEPS)) \
--dep=vpx:obj_int_extract \
--dep=test_libvpx:gtest \
--ver=$(CONFIG_VS_VERSION)\
diff --git a/libvpx/test/acm_random.h b/libvpx/test/acm_random.h
index 514894e..de94186 100644
--- a/libvpx/test/acm_random.h
+++ b/libvpx/test/acm_random.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef LIBVPX_TEST_ACM_RANDOM_H_
-#define LIBVPX_TEST_ACM_RANDOM_H_
+#ifndef TEST_ACM_RANDOM_H_
+#define TEST_ACM_RANDOM_H_
-#include <stdlib.h>
+#include "third_party/googletest/src/include/gtest/gtest.h"
#include "vpx/vpx_integer.h"
@@ -19,24 +19,30 @@ namespace libvpx_test {
class ACMRandom {
public:
- ACMRandom() {
- Reset(DeterministicSeed());
- }
+ ACMRandom() : random_(DeterministicSeed()) {}
- explicit ACMRandom(int seed) {
- Reset(seed);
- }
+ explicit ACMRandom(int seed) : random_(seed) {}
void Reset(int seed) {
- srand(seed);
+ random_.Reseed(seed);
}
uint8_t Rand8(void) {
- return (rand() >> 8) & 0xff;
+ const uint32_t value =
+ random_.Generate(testing::internal::Random::kMaxRange);
+ // There's a bit more entropy in the upper bits of this implementation.
+ return (value >> 24) & 0xff;
+ }
+
+ uint8_t Rand8Extremes(void) {
+ // Returns a random value near 0 or near 255, to better exercise
+ // saturation behavior.
+ const uint8_t r = Rand8();
+ return r < 128 ? r << 4 : r >> 4;
}
int PseudoUniform(int range) {
- return (rand() >> 8) % range;
+ return random_.Generate(range);
}
int operator()(int n) {
@@ -46,8 +52,11 @@ class ACMRandom {
static int DeterministicSeed(void) {
return 0xbaba;
}
+
+ private:
+ testing::internal::Random random_;
};
} // namespace libvpx_test
-#endif // LIBVPX_TEST_ACM_RANDOM_H_
+#endif // TEST_ACM_RANDOM_H_
diff --git a/libvpx/test/altref_test.cc b/libvpx/test/altref_test.cc
index ca05577..af25b72 100644
--- a/libvpx/test/altref_test.cc
+++ b/libvpx/test/altref_test.cc
@@ -8,19 +8,20 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
-
+#include "test/util.h"
namespace {
// lookahead range: [kLookAheadMin, kLookAheadMax).
const int kLookAheadMin = 5;
const int kLookAheadMax = 26;
-class AltRefTest : public libvpx_test::EncoderTest,
- public ::testing::TestWithParam<int> {
+class AltRefTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<int> {
protected:
- AltRefTest() : altref_count_(0) {}
+ AltRefTest() : EncoderTest(GET_PARAM(0)), altref_count_(0) {}
virtual ~AltRefTest() {}
virtual void SetUp() {
@@ -32,10 +33,6 @@ class AltRefTest : public libvpx_test::EncoderTest,
altref_count_ = 0;
}
- virtual bool Continue() const {
- return !HasFatalFailure() && !abort_;
- }
-
virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
@@ -58,7 +55,7 @@ TEST_P(AltRefTest, MonotonicTimestamps) {
const vpx_rational timebase = { 33333333, 1000000000 };
cfg_.g_timebase = timebase;
cfg_.rc_target_bitrate = 1000;
- cfg_.g_lag_in_frames = GetParam();
+ cfg_.g_lag_in_frames = GET_PARAM(1);
libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
timebase.den, timebase.num, 0, 30);
@@ -66,6 +63,7 @@ TEST_P(AltRefTest, MonotonicTimestamps) {
EXPECT_GE(altref_count(), 1);
}
-INSTANTIATE_TEST_CASE_P(NonZeroLag, AltRefTest,
- ::testing::Range(kLookAheadMin, kLookAheadMax));
+
+VP8_INSTANTIATE_TEST_CASE(AltRefTest,
+ ::testing::Range(kLookAheadMin, kLookAheadMax));
} // namespace
diff --git a/libvpx/test/borders_test.cc b/libvpx/test/borders_test.cc
new file mode 100644
index 0000000..dcdedcf
--- /dev/null
+++ b/libvpx/test/borders_test.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <climits>
+#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
+#include "test/util.h"
+
+namespace {
+
+class BordersTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ protected:
+ BordersTest() : EncoderTest(GET_PARAM(0)) {}
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(GET_PARAM(1));
+ }
+
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 1) {
+ encoder->Control(VP8E_SET_CPUUSED, 1);
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
+ encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
+ encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
+ encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+ }
+ }
+
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
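+      // Empty body: this test records nothing for keyframe packets.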
+ }
+ }
+};
+
+TEST_P(BordersTest, TestEncodeHighBitrate) {
+  // Validate that this clip, whose width is not a multiple of 64, encodes
+  // and decodes without a mismatch when passing in a very low max q. This
+  // pushes the encoder to produce lots of big partitions, which will likely
+  // extend into the border and exercise the border condition.
+  cfg_.g_lag_in_frames = 25;
+  cfg_.rc_2pass_vbr_minsection_pct = 5;
+  cfg_.rc_2pass_vbr_maxsection_pct = 2000;
+ cfg_.rc_target_bitrate = 2000;
+ cfg_.rc_max_quantizer = 10;
+
+ ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
+ 40);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+}
+TEST_P(BordersTest, TestLowBitrate) {
+  // Validate that this clip encodes and decodes without a mismatch when
+  // passing in a very high min q. This pushes the encoder to produce lots
+  // of small partitions, which exercises the opposite condition.
+
+  cfg_.g_lag_in_frames = 25;
+  cfg_.rc_2pass_vbr_minsection_pct = 5;
+  cfg_.rc_2pass_vbr_maxsection_pct = 2000;
+ cfg_.rc_target_bitrate = 200;
+ cfg_.rc_min_quantizer = 40;
+
+ ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
+ 40);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+}
+
+VP9_INSTANTIATE_TEST_CASE(BordersTest, ::testing::Values(
+ ::libvpx_test::kTwoPassGood));
+} // namespace
diff --git a/libvpx/test/clear_system_state.h b/libvpx/test/clear_system_state.h
new file mode 100644
index 0000000..8f08a4c
--- /dev/null
+++ b/libvpx/test/clear_system_state.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef TEST_CLEAR_SYSTEM_STATE_H_
+#define TEST_CLEAR_SYSTEM_STATE_H_
+
+#include "./vpx_config.h"
+extern "C" {
+#if ARCH_X86 || ARCH_X86_64
+# include "vpx_ports/x86.h"
+#endif
+}
+
+namespace libvpx_test {
+
+// Reset system to a known state. This function should be used for all non-API
+// test cases.
+inline void ClearSystemState() {
+#if ARCH_X86 || ARCH_X86_64
+ vpx_reset_mmx_state();
+#endif
+}
+
+} // namespace libvpx_test
+#endif // TEST_CLEAR_SYSTEM_STATE_H_
diff --git a/libvpx/test/codec_factory.h b/libvpx/test/codec_factory.h
new file mode 100644
index 0000000..cc7b53f
--- /dev/null
+++ b/libvpx/test/codec_factory.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef TEST_CODEC_FACTORY_H_
+#define TEST_CODEC_FACTORY_H_
+
+extern "C" {
+#include "./vpx_config.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_encoder.h"
+#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER
+#include "vpx/vp8cx.h"
+#endif
+#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER
+#include "vpx/vp8dx.h"
+#endif
+}
+
+#include "test/decode_test_driver.h"
+#include "test/encode_test_driver.h"
+namespace libvpx_test {
+
+class CodecFactory {
+ public:
+ CodecFactory() {}
+
+ virtual ~CodecFactory() {}
+
+ virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ unsigned long deadline) const = 0;
+
+ virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
+ unsigned long deadline,
+ const unsigned long init_flags,
+ TwopassStatsStore *stats) const = 0;
+
+ virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+ int usage) const = 0;
+};
+
+/* Provide CodecTestWith<n>Params classes for a variable number of parameters
+ * to avoid having to include a pointer to the CodecFactory in every test
+ * definition.
+ */
+template<class T1>
+class CodecTestWithParam : public ::testing::TestWithParam<
+ std::tr1::tuple< const libvpx_test::CodecFactory*, T1 > > {
+};
+
+template<class T1, class T2>
+class CodecTestWith2Params : public ::testing::TestWithParam<
+ std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2 > > {
+};
+
+template<class T1, class T2, class T3>
+class CodecTestWith3Params : public ::testing::TestWithParam<
+ std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2, T3 > > {
+};
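+
+/* For example (hypothetical), a fixture derives from one of these as:
+ *   class MyTest : public CodecTestWithParam<int> { ... };
+ * and reads GET_PARAM(0) (the CodecFactory*) and GET_PARAM(1) (the int).
+ */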
+
+/*
+ * VP8 Codec Definitions
+ */
+#if CONFIG_VP8
+class VP8Decoder : public Decoder {
+ public:
+ VP8Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline)
+ : Decoder(cfg, deadline) {}
+
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const {
+#if CONFIG_VP8_DECODER
+ return &vpx_codec_vp8_dx_algo;
+#else
+ return NULL;
+#endif
+ }
+};
+
+class VP8Encoder : public Encoder {
+ public:
+ VP8Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline,
+ const unsigned long init_flags, TwopassStatsStore *stats)
+ : Encoder(cfg, deadline, init_flags, stats) {}
+
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const {
+#if CONFIG_VP8_ENCODER
+ return &vpx_codec_vp8_cx_algo;
+#else
+ return NULL;
+#endif
+ }
+};
+
+class VP8CodecFactory : public CodecFactory {
+ public:
+ VP8CodecFactory() : CodecFactory() {}
+
+ virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ unsigned long deadline) const {
+#if CONFIG_VP8_DECODER
+ return new VP8Decoder(cfg, deadline);
+#else
+ return NULL;
+#endif
+ }
+
+ virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
+ unsigned long deadline,
+ const unsigned long init_flags,
+ TwopassStatsStore *stats) const {
+#if CONFIG_VP8_ENCODER
+ return new VP8Encoder(cfg, deadline, init_flags, stats);
+#else
+ return NULL;
+#endif
+ }
+
+ virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+ int usage) const {
+#if CONFIG_VP8_ENCODER
+ return vpx_codec_enc_config_default(&vpx_codec_vp8_cx_algo, cfg, usage);
+#else
+ return VPX_CODEC_INCAPABLE;
+#endif
+ }
+};
+
+const libvpx_test::VP8CodecFactory kVP8;
+
+#define VP8_INSTANTIATE_TEST_CASE(test, ...)\
+ INSTANTIATE_TEST_CASE_P(VP8, test, \
+ ::testing::Combine( \
+ ::testing::Values(static_cast<const libvpx_test::CodecFactory*>( \
+ &libvpx_test::kVP8)), \
+ __VA_ARGS__))
+#else
+#define VP8_INSTANTIATE_TEST_CASE(test, ...)
+#endif // CONFIG_VP8
+
+
+/*
+ * VP9 Codec Definitions
+ */
+#if CONFIG_VP9
+class VP9Decoder : public Decoder {
+ public:
+ VP9Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline)
+ : Decoder(cfg, deadline) {}
+
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const {
+#if CONFIG_VP9_DECODER
+ return &vpx_codec_vp9_dx_algo;
+#else
+ return NULL;
+#endif
+ }
+};
+
+class VP9Encoder : public Encoder {
+ public:
+ VP9Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline,
+ const unsigned long init_flags, TwopassStatsStore *stats)
+ : Encoder(cfg, deadline, init_flags, stats) {}
+
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const {
+#if CONFIG_VP9_ENCODER
+ return &vpx_codec_vp9_cx_algo;
+#else
+ return NULL;
+#endif
+ }
+};
+
+class VP9CodecFactory : public CodecFactory {
+ public:
+ VP9CodecFactory() : CodecFactory() {}
+
+ virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ unsigned long deadline) const {
+#if CONFIG_VP9_DECODER
+ return new VP9Decoder(cfg, deadline);
+#else
+ return NULL;
+#endif
+ }
+
+ virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
+ unsigned long deadline,
+ const unsigned long init_flags,
+ TwopassStatsStore *stats) const {
+#if CONFIG_VP9_ENCODER
+ return new VP9Encoder(cfg, deadline, init_flags, stats);
+#else
+ return NULL;
+#endif
+ }
+
+ virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+ int usage) const {
+#if CONFIG_VP9_ENCODER
+ return vpx_codec_enc_config_default(&vpx_codec_vp9_cx_algo, cfg, usage);
+#else
+ return VPX_CODEC_INCAPABLE;
+#endif
+ }
+};
+
+const libvpx_test::VP9CodecFactory kVP9;
+
+#define VP9_INSTANTIATE_TEST_CASE(test, ...)\
+ INSTANTIATE_TEST_CASE_P(VP9, test, \
+ ::testing::Combine( \
+ ::testing::Values(static_cast<const libvpx_test::CodecFactory*>( \
+ &libvpx_test::kVP9)), \
+ __VA_ARGS__))
+#else
+#define VP9_INSTANTIATE_TEST_CASE(test, ...)
+#endif // CONFIG_VP9
+
+
+} // namespace libvpx_test
+
+#endif // TEST_CODEC_FACTORY_H_
diff --git a/libvpx/test/config_test.cc b/libvpx/test/config_test.cc
index c4da46e..36c6330 100644
--- a/libvpx/test/config_test.cc
+++ b/libvpx/test/config_test.cc
@@ -8,20 +8,22 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
+#include "test/util.h"
#include "test/video_source.h"
namespace {
class ConfigTest : public ::libvpx_test::EncoderTest,
- public ::testing::TestWithParam<enum libvpx_test::TestMode> {
- public:
- ConfigTest() : frame_count_in_(0), frame_count_out_(0), frame_count_max_(0) {}
-
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
+ ConfigTest() : EncoderTest(GET_PARAM(0)),
+ frame_count_in_(0), frame_count_out_(0), frame_count_max_(0) {}
+
virtual void SetUp() {
InitializeConfig();
- SetMode(GetParam());
+ SetMode(GET_PARAM(1));
}
virtual void BeginPassHook(unsigned int /*pass*/) {
@@ -38,10 +40,6 @@ class ConfigTest : public ::libvpx_test::EncoderTest,
++frame_count_out_;
}
- virtual bool Continue() const {
- return !HasFatalFailure() && !abort_;
- }
-
unsigned int frame_count_in_;
unsigned int frame_count_out_;
unsigned int frame_count_max_;
@@ -57,5 +55,5 @@ TEST_P(ConfigTest, LagIsDisabled) {
EXPECT_EQ(frame_count_in_, frame_count_out_);
}
-INSTANTIATE_TEST_CASE_P(OnePassModes, ConfigTest, ONE_PASS_TEST_MODES);
+VP8_INSTANTIATE_TEST_CASE(ConfigTest, ONE_PASS_TEST_MODES);
} // namespace
diff --git a/libvpx/test/convolve_test.cc b/libvpx/test/convolve_test.cc
new file mode 100644
index 0000000..3100571
--- /dev/null
+++ b/libvpx/test/convolve_test.cc
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+#include "test/acm_random.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+extern "C" {
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_filter.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+}
+
+namespace {
+typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h);
+
+struct ConvolveFunctions {
+ ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
+ convolve_fn_t v8, convolve_fn_t v8_avg,
+ convolve_fn_t hv8, convolve_fn_t hv8_avg)
+ : h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
+ hv8_avg_(hv8_avg) {}
+
+ convolve_fn_t h8_;
+ convolve_fn_t v8_;
+ convolve_fn_t hv8_;
+ convolve_fn_t h8_avg_;
+ convolve_fn_t v8_avg_;
+ convolve_fn_t hv8_avg_;
+};
+
+// Reference 8-tap subpixel filter, slightly modified to fit into this test.
+#define VP9_FILTER_WEIGHT 128
+#define VP9_FILTER_SHIFT 7
+uint8_t clip_pixel(int x) {
+ return x < 0 ? 0 :
+ x > 255 ? 255 :
+ x;
+}
+
+void filter_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+ // Between passes, we use an intermediate buffer whose height is extended to
+ // have enough horizontally filtered values as input for the vertical pass.
+ // This buffer is allocated to be big enough for the largest block type we
+ // support.
+ const int kInterp_Extend = 4;
+ const unsigned int intermediate_height =
+ (kInterp_Extend - 1) + output_height + kInterp_Extend;
+
+  /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+   * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+   *                                 + kInterp_Extend
+   *                               = 3 + 64 + 4
+   *                               = 71
+   * and filter_max_width = 64
+   */
+ uint8_t intermediate_buffer[71 * 64];
+ const int intermediate_next_stride = 1 - intermediate_height * output_width;
+
+ // Horizontal pass (src -> transposed intermediate).
+ {
+ uint8_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ unsigned int i, j;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ ++src_ptr;
+ output_ptr += intermediate_height;
+ }
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
+ }
+ }
+
+ // Vertical pass (transposed intermediate -> dst).
+ {
+ uint8_t *src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ src_ptr += intermediate_height;
+ }
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
+ }
+ }
+}
+
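+// In-place rounded average of two blocks; this mirrors what the _avg
+// convolve variants add on top of plain filtering.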
+void block2d_average_c(uint8_t *src,
+ unsigned int src_stride,
+ uint8_t *output_ptr,
+ unsigned int output_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
+ }
+ output_ptr += output_stride;
+ }
+}
+
+void filter_average_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+ uint8_t tmp[64 * 64];
+
+ assert(output_width <= 64);
+ assert(output_height <= 64);
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
+ output_width, output_height);
+ block2d_average_c(tmp, 64, dst_ptr, dst_stride,
+ output_width, output_height);
+}
+
+class ConvolveTest : public PARAMS(int, int, const ConvolveFunctions*) {
+ public:
+ static void SetUpTestCase() {
+    // Force input_ to be unaligned and output_ to be 16-byte aligned.
+ input_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
+ output_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kOutputBufferSize));
+ }
+
+ static void TearDownTestCase() {
+ vpx_free(input_ - 1);
+ input_ = NULL;
+ vpx_free(output_);
+ output_ = NULL;
+ }
+
+ protected:
+ static const int kDataAlignment = 16;
+ static const int kOuterBlockSize = 256;
+ static const int kInputStride = kOuterBlockSize;
+ static const int kOutputStride = kOuterBlockSize;
+ static const int kMaxDimension = 64;
+ static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
+ static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;
+
+ int Width() const { return GET_PARAM(0); }
+ int Height() const { return GET_PARAM(1); }
+ int BorderLeft() const {
+ const int center = (kOuterBlockSize - Width()) / 2;
+ return (center + (kDataAlignment - 1)) & ~(kDataAlignment - 1);
+ }
+ int BorderTop() const { return (kOuterBlockSize - Height()) / 2; }
+
+ bool IsIndexInBorder(int i) {
+ return (i < BorderTop() * kOuterBlockSize ||
+ i >= (BorderTop() + Height()) * kOuterBlockSize ||
+ i % kOuterBlockSize < BorderLeft() ||
+ i % kOuterBlockSize >= (BorderLeft() + Width()));
+ }
+
+ virtual void SetUp() {
+ UUT_ = GET_PARAM(2);
+ /* Set up guard blocks for an inner block centered in the outer block */
+ for (int i = 0; i < kOutputBufferSize; ++i) {
+ if (IsIndexInBorder(i))
+ output_[i] = 255;
+ else
+ output_[i] = 0;
+ }
+
+ ::libvpx_test::ACMRandom prng;
+ for (int i = 0; i < kInputBufferSize; ++i)
+ input_[i] = prng.Rand8Extremes();
+ }
+
+ void SetConstantInput(int value) {
+ memset(input_, value, kInputBufferSize);
+ }
+
+ void CheckGuardBlocks() {
+ for (int i = 0; i < kOutputBufferSize; ++i) {
+ if (IsIndexInBorder(i))
+ EXPECT_EQ(255, output_[i]);
+ }
+ }
+
+ uint8_t* input() const {
+ return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ }
+
+ uint8_t* output() const {
+ return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ }
+
+ const ConvolveFunctions* UUT_;
+ static uint8_t* input_;
+ static uint8_t* output_;
+};
+uint8_t* ConvolveTest::input_ = NULL;
+uint8_t* ConvolveTest::output_ = NULL;
+
+TEST_P(ConvolveTest, GuardBlocks) {
+ CheckGuardBlocks();
+}
+
+TEST_P(ConvolveTest, CopyHoriz) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+
+ REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ << "(" << x << "," << y << ")";
+}
+
+TEST_P(ConvolveTest, CopyVert) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+
+ REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ << "(" << x << "," << y << ")";
+}
+
+TEST_P(ConvolveTest, Copy2D) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+
+ REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ << "(" << x << "," << y << ")";
+}
+
+const int16_t (*kTestFilterList[])[8] = {
+ vp9_bilinear_filters,
+ vp9_sub_pel_filters_8,
+ vp9_sub_pel_filters_8s,
+ vp9_sub_pel_filters_8lp
+};
+const int kNumFilterBanks = sizeof(kTestFilterList) /
+ sizeof(kTestFilterList[0]);
+const int kNumFilters = 16;
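+// Each bank holds 16 filters: one 8-tap filter per 1/16-pel position.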
+
+TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ for (int i = 0; i < kNumFilters; i++) {
+ const int p0 = filters[i][0] + filters[i][1];
+ const int p1 = filters[i][2] + filters[i][3];
+ const int p2 = filters[i][4] + filters[i][5];
+ const int p3 = filters[i][6] + filters[i][7];
+ EXPECT_LE(p0, 128);
+ EXPECT_LE(p1, 128);
+ EXPECT_LE(p2, 128);
+ EXPECT_LE(p3, 128);
+ EXPECT_LE(p0 + p3, 128);
+ EXPECT_LE(p0 + p3 + p1, 128);
+ EXPECT_LE(p0 + p3 + p1 + p2, 128);
+ EXPECT_EQ(p0 + p1 + p2 + p3, 128);
+ }
+ }
+}
+
+const int16_t kInvalidFilter[8] = { 0 };
+
+TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t ref[kOutputStride * kMaxDimension];
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+
+ if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
+ REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else
+ REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
+}
+
+TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t ref[kOutputStride * kMaxDimension];
+
+ // Populate ref and out with some random data
+ ::libvpx_test::ACMRandom prng;
+ for (int y = 0; y < Height(); ++y) {
+ for (int x = 0; x < Width(); ++x) {
+ const uint8_t r = prng.Rand8Extremes();
+
+ out[y * kOutputStride + x] = r;
+ ref[y * kOutputStride + x] = r;
+ }
+ }
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ filter_average_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+
+ if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
+ REGISTER_STATE_CHECK(
+ UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ REGISTER_STATE_CHECK(
+ UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else
+ REGISTER_STATE_CHECK(
+ UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
+}
+
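+// Each row below is zero-filled past its last initializer, so every row
+// contains exactly one 128 tap, stepping one position left per row.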
+DECLARE_ALIGNED(256, const int16_t, kChangeFilters[16][8]) = {
+ { 0, 0, 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 128},
+ { 0, 0, 0, 128},
+ { 0, 0, 128},
+ { 0, 128},
+ { 128},
+ { 0, 0, 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 128},
+ { 0, 0, 0, 128},
+ { 0, 0, 128},
+ { 0, 128},
+ { 128}
+};
+
+/* This test exercises the horizontal and vertical filter functions. */
+TEST_P(ConvolveTest, ChangeFilterWorks) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+
+ /* Assume that the first input sample is at the 8/16th position. */
+ const int kInitialSubPelOffset = 8;
+
+ /* Filters are 8-tap, so the first filter tap will be applied to the pixel
+ * at position -3 with respect to the current filtering position. Since
+ * kInitialSubPelOffset is set to 8, we first select sub-pixel filter 8,
+ * which is non-zero only in the last tap. So, applying the filter at the
+ * current input position will result in an output equal to the pixel at
+ * offset +4 (-3 + 7) with respect to the current filtering position.
+ */
+ const int kPixelSelected = 4;
+
+  /* Assume that each output pixel requires us to step forward by 17/16th of
+   * a pixel in the input.
+   */
+ const int kInputPixelStep = 17;
+
+  /* The filters are set up so that the expected output consists of sets of
+   * 8 identical samples. As the filter position moves to the next 1/16th
+   * pixel position, the only active (=128) filter tap moves one position to
+   * the left, so the same input pixel is replicated into the output for 8
+   * consecutive samples. After each set of 8 positions the filters select a
+   * different input pixel. kFilterPeriodAdjust below computes which input
+   * pixel is written to the output for a specified x or y position.
+ */
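+  /* For example, at x = 10: kFilterPeriodAdjust = (10 >> 3) << 3 = 8, so
+   * ref_x = 4 + ((8 + 8 * 17) >> 4) = 13, assuming SUBPEL_BITS == 4.
+   */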
+
+ /* Test the horizontal filter. */
+ REGISTER_STATE_CHECK(UUT_->h8_(in, kInputStride, out, kOutputStride,
+ kChangeFilters[kInitialSubPelOffset],
+ kInputPixelStep, NULL, 0, Width(), Height()));
+
+ for (int x = 0; x < Width(); ++x) {
+ const int kFilterPeriodAdjust = (x >> 3) << 3;
+ const int ref_x =
+ kPixelSelected + ((kInitialSubPelOffset
+ + kFilterPeriodAdjust * kInputPixelStep)
+ >> SUBPEL_BITS);
+    ASSERT_EQ(in[ref_x], out[x]) << "x == " << x << ", width = " << Width();
+ }
+
+ /* Test the vertical filter. */
+ REGISTER_STATE_CHECK(UUT_->v8_(in, kInputStride, out, kOutputStride,
+ NULL, 0, kChangeFilters[kInitialSubPelOffset],
+ kInputPixelStep, Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y) {
+ const int kFilterPeriodAdjust = (y >> 3) << 3;
+ const int ref_y =
+ kPixelSelected + ((kInitialSubPelOffset
+ + kFilterPeriodAdjust * kInputPixelStep)
+ >> SUBPEL_BITS);
+    ASSERT_EQ(in[ref_y * kInputStride], out[y * kOutputStride]) << "y == " << y;
+ }
+
+ /* Test the horizontal and vertical filters in combination. */
+ REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ kChangeFilters[kInitialSubPelOffset],
+ kInputPixelStep,
+ kChangeFilters[kInitialSubPelOffset],
+ kInputPixelStep,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y) {
+ const int kFilterPeriodAdjustY = (y >> 3) << 3;
+ const int ref_y =
+ kPixelSelected + ((kInitialSubPelOffset
+ + kFilterPeriodAdjustY * kInputPixelStep)
+ >> SUBPEL_BITS);
+ for (int x = 0; x < Width(); ++x) {
+ const int kFilterPeriodAdjustX = (x >> 3) << 3;
+ const int ref_x =
+ kPixelSelected + ((kInitialSubPelOffset
+ + kFilterPeriodAdjustX * kInputPixelStep)
+ >> SUBPEL_BITS);
+
+ ASSERT_EQ(in[ref_y * kInputStride + ref_x], out[y * kOutputStride + x])
+ << "x == " << x << ", y == " << y;
+ }
+ }
+}
+
+/* This test checks that enough rows and columns are filtered for every
+   possible initial fractional position and scaling step. */
+TEST_P(ConvolveTest, CheckScalingFiltering) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+
+ SetConstantInput(127);
+
+ for (int frac = 0; frac < 16; ++frac) {
+ for (int step = 1; step <= 32; ++step) {
+ /* Test the horizontal and vertical filters in combination. */
+ REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ vp9_sub_pel_filters_8[frac], step,
+ vp9_sub_pel_filters_8[frac], step,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y) {
+ for (int x = 0; x < Width(); ++x) {
+ ASSERT_EQ(in[y * kInputStride + x], out[y * kOutputStride + x])
+ << "x == " << x << ", y == " << y
+ << ", frac == " << frac << ", step == " << step;
+ }
+ }
+ }
+ }
+}
+
+using std::tr1::make_tuple;
+
+const ConvolveFunctions convolve8_c(
+ vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_c,
+ vp9_convolve8_vert_c, vp9_convolve8_avg_vert_c,
+ vp9_convolve8_c, vp9_convolve8_avg_c);
+
+INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_c),
+ make_tuple(8, 4, &convolve8_c),
+ make_tuple(4, 8, &convolve8_c),
+ make_tuple(8, 8, &convolve8_c),
+ make_tuple(16, 8, &convolve8_c),
+ make_tuple(8, 16, &convolve8_c),
+ make_tuple(16, 16, &convolve8_c),
+ make_tuple(32, 16, &convolve8_c),
+ make_tuple(16, 32, &convolve8_c),
+ make_tuple(32, 32, &convolve8_c),
+ make_tuple(64, 32, &convolve8_c),
+ make_tuple(32, 64, &convolve8_c),
+ make_tuple(64, 64, &convolve8_c)));
+
+#if HAVE_SSSE3
+const ConvolveFunctions convolve8_ssse3(
+ vp9_convolve8_horiz_ssse3, vp9_convolve8_avg_horiz_ssse3,
+ vp9_convolve8_vert_ssse3, vp9_convolve8_avg_vert_ssse3,
+ vp9_convolve8_ssse3, vp9_convolve8_avg_ssse3);
+
+INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_ssse3),
+ make_tuple(8, 4, &convolve8_ssse3),
+ make_tuple(4, 8, &convolve8_ssse3),
+ make_tuple(8, 8, &convolve8_ssse3),
+ make_tuple(16, 8, &convolve8_ssse3),
+ make_tuple(8, 16, &convolve8_ssse3),
+ make_tuple(16, 16, &convolve8_ssse3),
+ make_tuple(32, 16, &convolve8_ssse3),
+ make_tuple(16, 32, &convolve8_ssse3),
+ make_tuple(32, 32, &convolve8_ssse3),
+ make_tuple(64, 32, &convolve8_ssse3),
+ make_tuple(32, 64, &convolve8_ssse3),
+ make_tuple(64, 64, &convolve8_ssse3)));
+#endif
+
+#if HAVE_NEON
+const ConvolveFunctions convolve8_neon(
+ vp9_convolve8_horiz_neon, vp9_convolve8_avg_horiz_neon,
+ vp9_convolve8_vert_neon, vp9_convolve8_avg_vert_neon,
+ vp9_convolve8_neon, vp9_convolve8_avg_neon);
+
+INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_neon),
+ make_tuple(8, 4, &convolve8_neon),
+ make_tuple(4, 8, &convolve8_neon),
+ make_tuple(8, 8, &convolve8_neon),
+ make_tuple(16, 8, &convolve8_neon),
+ make_tuple(8, 16, &convolve8_neon),
+ make_tuple(16, 16, &convolve8_neon),
+ make_tuple(32, 16, &convolve8_neon),
+ make_tuple(16, 32, &convolve8_neon),
+ make_tuple(32, 32, &convolve8_neon),
+ make_tuple(64, 32, &convolve8_neon),
+ make_tuple(32, 64, &convolve8_neon),
+ make_tuple(64, 64, &convolve8_neon)));
+#endif
+} // namespace
diff --git a/libvpx/test/cpu_speed_test.cc b/libvpx/test/cpu_speed_test.cc
new file mode 100644
index 0000000..c92e723
--- /dev/null
+++ b/libvpx/test/cpu_speed_test.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <climits>
+#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
+#include "test/util.h"
+
+namespace {
+
+class CpuSpeedTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<
+ libvpx_test::TestMode, int> {
+ protected:
+ CpuSpeedTest() : EncoderTest(GET_PARAM(0)) {}
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(GET_PARAM(1));
+ set_cpu_used_ = GET_PARAM(2);
+ }
+
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 1) {
+ encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
+ encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
+ encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
+ encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+ }
+ }
+
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
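+      // Empty body: this test records nothing for keyframe packets.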
+ }
+ }
+ int set_cpu_used_;
+};
+
+TEST_P(CpuSpeedTest, TestQ0) {
+  // Validate that this clip, whose width is not a multiple of 64, encodes
+  // and decodes without a mismatch when passing in a very low max q. This
+  // pushes the encoder to produce lots of big partitions, which will likely
+  // extend into the border and exercise the border condition.
+  cfg_.g_lag_in_frames = 25;
+  cfg_.rc_2pass_vbr_minsection_pct = 5;
+  cfg_.rc_2pass_vbr_maxsection_pct = 2000;
+ cfg_.rc_target_bitrate = 400;
+ cfg_.rc_max_quantizer = 0;
+ cfg_.rc_min_quantizer = 0;
+
+ ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
+ 20);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+}
+
+
+TEST_P(CpuSpeedTest, TestEncodeHighBitrate) {
+  // Validate that this clip, whose width is not a multiple of 64, encodes
+  // and decodes without a mismatch when passing in a very low max q. This
+  // pushes the encoder to produce lots of big partitions, which will likely
+  // extend into the border and exercise the border condition.
+  cfg_.g_lag_in_frames = 25;
+  cfg_.rc_2pass_vbr_minsection_pct = 5;
+  cfg_.rc_2pass_vbr_maxsection_pct = 2000;
+ cfg_.rc_target_bitrate = 12000;
+ cfg_.rc_max_quantizer = 10;
+ cfg_.rc_min_quantizer = 0;
+
+ ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
+ 40);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+}
+TEST_P(CpuSpeedTest, TestLowBitrate) {
+  // Validate that this clip encodes and decodes without a mismatch when
+  // passing in a very high min q. This pushes the encoder to produce lots
+  // of small partitions, which exercises the opposite condition.
+
+  cfg_.g_lag_in_frames = 25;
+  cfg_.rc_2pass_vbr_minsection_pct = 5;
+  cfg_.rc_2pass_vbr_maxsection_pct = 2000;
+ cfg_.rc_target_bitrate = 200;
+ cfg_.rc_min_quantizer = 40;
+
+ ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
+ 40);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+}
+
+using std::tr1::make_tuple;
+
+#define VP9_FACTORY \
+ static_cast<const libvpx_test::CodecFactory*> (&libvpx_test::kVP9)
+
+VP9_INSTANTIATE_TEST_CASE(
+ CpuSpeedTest,
+ ::testing::Values(::libvpx_test::kTwoPassGood),
+ ::testing::Range(0, 5));
+} // namespace
diff --git a/libvpx/test/cq_test.cc b/libvpx/test/cq_test.cc
index 42ee2a2..a2c8291 100644
--- a/libvpx/test/cq_test.cc
+++ b/libvpx/test/cq_test.cc
@@ -9,8 +9,12 @@
*/
#include <cmath>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
+#include "test/util.h"
+
+namespace {
// CQ level range: [kCQLevelMin, kCQLevelMax).
const int kCQLevelMin = 4;
@@ -18,12 +22,13 @@ const int kCQLevelMax = 63;
const int kCQLevelStep = 8;
const int kCQTargetBitrate = 2000;
-namespace {
-
-class CQTest : public libvpx_test::EncoderTest,
- public ::testing::TestWithParam<int> {
+class CQTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<int> {
protected:
- CQTest() : cq_level_(GetParam()) { init_flags_ = VPX_CODEC_USE_PSNR; }
+ CQTest() : EncoderTest(GET_PARAM(0)), cq_level_(GET_PARAM(1)) {
+ init_flags_ = VPX_CODEC_USE_PSNR;
+ }
+
virtual ~CQTest() {}
virtual void SetUp() {
@@ -37,10 +42,6 @@ class CQTest : public libvpx_test::EncoderTest,
n_frames_ = 0;
}
- virtual bool Continue() const {
- return !HasFatalFailure() && !abort_;
- }
-
virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
@@ -100,7 +101,7 @@ TEST_P(CQTest, LinearPSNRIsHigherForCQLevel) {
EXPECT_GE(cq_psnr_lin, vbr_psnr_lin);
}
-INSTANTIATE_TEST_CASE_P(CQLevelRange, CQTest,
- ::testing::Range(kCQLevelMin, kCQLevelMax,
- kCQLevelStep));
+VP8_INSTANTIATE_TEST_CASE(CQTest,
+ ::testing::Range(kCQLevelMin, kCQLevelMax,
+ kCQLevelStep));
} // namespace
diff --git a/libvpx/test/datarate_test.cc b/libvpx/test/datarate_test.cc
index 6fbcb64..f020a99 100644
--- a/libvpx/test/datarate_test.cc
+++ b/libvpx/test/datarate_test.cc
@@ -7,17 +7,23 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/util.h"
+
namespace {
class DatarateTest : public ::libvpx_test::EncoderTest,
- public ::testing::TestWithParam<enum libvpx_test::TestMode> {
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ public:
+ DatarateTest() : EncoderTest(GET_PARAM(0)) {}
+
protected:
virtual void SetUp() {
InitializeConfig();
- SetMode(GetParam());
+ SetMode(GET_PARAM(1));
ResetModel();
}
@@ -30,10 +36,6 @@ class DatarateTest : public ::libvpx_test::EncoderTest,
duration_ = 0.0;
}
- virtual bool Continue() const {
- return !HasFatalFailure() && !abort_;
- }
-
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
const vpx_rational_t tb = video->timebase();
@@ -73,7 +75,7 @@ class DatarateTest : public ::libvpx_test::EncoderTest,
bits_in_buffer_model_ -= frame_size_in_bits;
// Update the running total of bits for end of test datarate checks.
- bits_total_ += frame_size_in_bits ;
+ bits_total_ += frame_size_in_bits;
// If first drop not set and we have a drop set it to this time.
if (!first_drop_ && duration > 1)
@@ -174,5 +176,6 @@ TEST_P(DatarateTest, ChangingDropFrameThresh) {
}
}
-INSTANTIATE_TEST_CASE_P(AllModes, DatarateTest, ALL_TEST_MODES);
+VP8_INSTANTIATE_TEST_CASE(DatarateTest, ALL_TEST_MODES);
+
} // namespace
diff --git a/libvpx/test/dct16x16_test.cc b/libvpx/test/dct16x16_test.cc
new file mode 100644
index 0000000..7d49c12
--- /dev/null
+++ b/libvpx/test/dct16x16_test.cc
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+
+extern "C" {
+#include "vp9/common/vp9_entropy.h"
+#include "./vp9_rtcd.h"
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *output, int pitch);
+}
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+
+#ifdef _MSC_VER
+static int round(double x) {
+ if (x < 0)
+ return static_cast<int>(ceil(x - 0.5));
+ else
+ return static_cast<int>(floor(x + 0.5));
+}
+#endif
+
+const int kNumCoeffs = 256;
+const double PI = 3.1415926535898;
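+// Reference inverse 2-D DCT: output[k * 16 + l] sums, over (i, j),
+// a_i * a_j * input[i * 16 + j] * cos(PI * j * (l + 0.5) / 16) *
+// cos(PI * i * (k + 0.5) / 16) / 256, with a_0 = 1 and a_n = sqrt(2) (n != 0).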
+void reference2_16x16_idct_2d(double *input, double *output) {
+ double x;
+ for (int l = 0; l < 16; ++l) {
+ for (int k = 0; k < 16; ++k) {
+ double s = 0;
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ x = cos(PI * j * (l + 0.5) / 16.0) *
+ cos(PI * i * (k + 0.5) / 16.0) *
+ input[i * 16 + j] / 256;
+ if (i != 0)
+ x *= sqrt(2.0);
+ if (j != 0)
+ x *= sqrt(2.0);
+ s += x;
+ }
+ }
+ output[k*16+l] = s;
+ }
+ }
+}
+
+
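+// Cn = cos(n * PI / 32), the 16-point DCT butterfly constants.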
+const double C1 = 0.995184726672197;
+const double C2 = 0.98078528040323;
+const double C3 = 0.956940335732209;
+const double C4 = 0.923879532511287;
+const double C5 = 0.881921264348355;
+const double C6 = 0.831469612302545;
+const double C7 = 0.773010453362737;
+const double C8 = 0.707106781186548;
+const double C9 = 0.634393284163646;
+const double C10 = 0.555570233019602;
+const double C11 = 0.471396736825998;
+const double C12 = 0.38268343236509;
+const double C13 = 0.290284677254462;
+const double C14 = 0.195090322016128;
+const double C15 = 0.098017140329561;
+
+void butterfly_16x16_dct_1d(double input[16], double output[16]) {
+ double step[16];
+ double intermediate[16];
+ double temp1, temp2;
+
+ // step 1
+ step[ 0] = input[0] + input[15];
+ step[ 1] = input[1] + input[14];
+ step[ 2] = input[2] + input[13];
+ step[ 3] = input[3] + input[12];
+ step[ 4] = input[4] + input[11];
+ step[ 5] = input[5] + input[10];
+ step[ 6] = input[6] + input[ 9];
+ step[ 7] = input[7] + input[ 8];
+ step[ 8] = input[7] - input[ 8];
+ step[ 9] = input[6] - input[ 9];
+ step[10] = input[5] - input[10];
+ step[11] = input[4] - input[11];
+ step[12] = input[3] - input[12];
+ step[13] = input[2] - input[13];
+ step[14] = input[1] - input[14];
+ step[15] = input[0] - input[15];
+
+ // step 2
+ output[0] = step[0] + step[7];
+ output[1] = step[1] + step[6];
+ output[2] = step[2] + step[5];
+ output[3] = step[3] + step[4];
+ output[4] = step[3] - step[4];
+ output[5] = step[2] - step[5];
+ output[6] = step[1] - step[6];
+ output[7] = step[0] - step[7];
+
+ temp1 = step[ 8] * C7;
+ temp2 = step[15] * C9;
+ output[ 8] = temp1 + temp2;
+
+ temp1 = step[ 9] * C11;
+ temp2 = step[14] * C5;
+ output[ 9] = temp1 - temp2;
+
+ temp1 = step[10] * C3;
+ temp2 = step[13] * C13;
+ output[10] = temp1 + temp2;
+
+ temp1 = step[11] * C15;
+ temp2 = step[12] * C1;
+ output[11] = temp1 - temp2;
+
+ temp1 = step[11] * C1;
+ temp2 = step[12] * C15;
+ output[12] = temp2 + temp1;
+
+ temp1 = step[10] * C13;
+ temp2 = step[13] * C3;
+ output[13] = temp2 - temp1;
+
+ temp1 = step[ 9] * C5;
+ temp2 = step[14] * C11;
+ output[14] = temp2 + temp1;
+
+ temp1 = step[ 8] * C9;
+ temp2 = step[15] * C7;
+ output[15] = temp2 - temp1;
+
+ // step 3
+ step[ 0] = output[0] + output[3];
+ step[ 1] = output[1] + output[2];
+ step[ 2] = output[1] - output[2];
+ step[ 3] = output[0] - output[3];
+
+ temp1 = output[4] * C14;
+ temp2 = output[7] * C2;
+ step[ 4] = temp1 + temp2;
+
+ temp1 = output[5] * C10;
+ temp2 = output[6] * C6;
+ step[ 5] = temp1 + temp2;
+
+ temp1 = output[5] * C6;
+ temp2 = output[6] * C10;
+ step[ 6] = temp2 - temp1;
+
+ temp1 = output[4] * C2;
+ temp2 = output[7] * C14;
+ step[ 7] = temp2 - temp1;
+
+ step[ 8] = output[ 8] + output[11];
+ step[ 9] = output[ 9] + output[10];
+ step[10] = output[ 9] - output[10];
+ step[11] = output[ 8] - output[11];
+
+ step[12] = output[12] + output[15];
+ step[13] = output[13] + output[14];
+ step[14] = output[13] - output[14];
+ step[15] = output[12] - output[15];
+
+ // step 4
+ output[ 0] = (step[ 0] + step[ 1]);
+ output[ 8] = (step[ 0] - step[ 1]);
+
+ temp1 = step[2] * C12;
+ temp2 = step[3] * C4;
+ temp1 = temp1 + temp2;
+ output[ 4] = 2*(temp1 * C8);
+
+ temp1 = step[2] * C4;
+ temp2 = step[3] * C12;
+ temp1 = temp2 - temp1;
+ output[12] = 2 * (temp1 * C8);
+
+ output[ 2] = 2 * ((step[4] + step[ 5]) * C8);
+ output[14] = 2 * ((step[7] - step[ 6]) * C8);
+
+ temp1 = step[4] - step[5];
+ temp2 = step[6] + step[7];
+ output[ 6] = (temp1 + temp2);
+ output[10] = (temp1 - temp2);
+
+ intermediate[8] = step[8] + step[14];
+ intermediate[9] = step[9] + step[15];
+
+ temp1 = intermediate[8] * C12;
+ temp2 = intermediate[9] * C4;
+ temp1 = temp1 - temp2;
+ output[3] = 2 * (temp1 * C8);
+
+ temp1 = intermediate[8] * C4;
+ temp2 = intermediate[9] * C12;
+ temp1 = temp2 + temp1;
+ output[13] = 2 * (temp1 * C8);
+
+ output[ 9] = 2 * ((step[10] + step[11]) * C8);
+
+ intermediate[11] = step[10] - step[11];
+ intermediate[12] = step[12] + step[13];
+ intermediate[13] = step[12] - step[13];
+ intermediate[14] = step[ 8] - step[14];
+ intermediate[15] = step[ 9] - step[15];
+
+ output[15] = (intermediate[11] + intermediate[12]);
+ output[ 1] = -(intermediate[11] - intermediate[12]);
+
+ output[ 7] = 2 * (intermediate[13] * C8);
+
+ temp1 = intermediate[14] * C12;
+ temp2 = intermediate[15] * C4;
+ temp1 = temp1 - temp2;
+ output[11] = -2 * (temp1 * C8);
+
+ temp1 = intermediate[14] * C4;
+ temp2 = intermediate[15] * C12;
+ temp1 = temp2 + temp1;
+ output[ 5] = 2 * (temp1 * C8);
+}
+
+void reference_16x16_dct_2d(int16_t input[256], double output[256]) {
+ // First transform columns
+ for (int i = 0; i < 16; ++i) {
+ double temp_in[16], temp_out[16];
+ for (int j = 0; j < 16; ++j)
+ temp_in[j] = input[j * 16 + i];
+ butterfly_16x16_dct_1d(temp_in, temp_out);
+ for (int j = 0; j < 16; ++j)
+ output[j * 16 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 16; ++i) {
+ double temp_in[16], temp_out[16];
+ for (int j = 0; j < 16; ++j)
+ temp_in[j] = output[j + i * 16];
+ butterfly_16x16_dct_1d(temp_in, temp_out);
+    // Scale down by 2 to normalize the 2-D transform's gain.
+ for (int j = 0; j < 16; ++j)
+ output[j + i * 16] = temp_out[j]/2;
+ }
+}
+
+typedef void (*fdct_t)(int16_t *in, int16_t *out, int stride);
+typedef void (*idct_t)(int16_t *in, uint8_t *out, int stride);
+typedef void (*fht_t) (int16_t *in, int16_t *out, int stride, int tx_type);
+typedef void (*iht_t) (int16_t *in, uint8_t *dst, int stride, int tx_type);
+
+void fdct16x16_ref(int16_t *in, int16_t *out, int stride, int tx_type) {
+ vp9_short_fdct16x16_c(in, out, stride);
+}
+
+void fht16x16_ref(int16_t *in, int16_t *out, int stride, int tx_type) {
+ vp9_short_fht16x16_c(in, out, stride, tx_type);
+}
+
+class Trans16x16TestBase {
+ public:
+ virtual ~Trans16x16TestBase() {}
+
+ protected:
+ virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
+
+ virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+
+ void RunAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ uint32_t max_error = 0;
+ int64_t total_error = 0;
+ const int count_test_block = 10000;
+ for (int i = 0; i < count_test_block; ++i) {
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+ }
+
+ REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
+ test_temp_block, pitch_));
+ REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const uint32_t diff = dst[j] - src[j];
+ const uint32_t error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+ }
+
+ EXPECT_GE(1u, max_error)
+ << "Error: 16x16 FHT/IHT has an individual round trip error > 1";
+
+    EXPECT_GE(count_test_block, total_error)
+ << "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
+ }
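+
+  // Note: with 10000 blocks of 256 pixels each, the bound
+  // total_error <= count_test_block caps the mean squared error at
+  // 1 per block, i.e. 1/256 per reconstructed pixel.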
+
+ void RunCoeffCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_block[j] = rnd.Rand8() - rnd.Rand8();
+
+ fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
+ REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
+
+ // The minimum quant value is 4.
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(output_block[j], output_ref_block[j]);
+ }
+ }
+
+ void RunMemCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = 255;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -255;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+ REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
+ output_block, pitch_));
+
+ // The minimum quant value is 4.
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ EXPECT_EQ(output_block[j], output_ref_block[j]);
+ EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+ }
+ }
+ }
+
+ void RunInvAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+ }
+
+ reference_16x16_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff[j] = round(out_r[j]);
+
+ const int pitch = 32;
+ REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch));
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const uint32_t diff = dst[j] - src[j];
+ const uint32_t error = diff * diff;
+ EXPECT_GE(1u, error)
+ << "Error: 16x16 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+  int pitch_;
+ int tx_type_;
+ fht_t fwd_txfm_ref;
+};
+
+class Trans16x16DCT : public Trans16x16TestBase,
+ public PARAMS(fdct_t, idct_t, int) {
+ public:
+ virtual ~Trans16x16DCT() {}
+
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 32;
+ fwd_txfm_ref = fdct16x16_ref;
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ fwd_txfm_(in, out, stride);
+ }
+ void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride >> 1);
+ }
+
+ fdct_t fwd_txfm_;
+ idct_t inv_txfm_;
+};
+
+TEST_P(Trans16x16DCT, AccuracyCheck) {
+ RunAccuracyCheck();
+}
+
+TEST_P(Trans16x16DCT, CoeffCheck) {
+ RunCoeffCheck();
+}
+
+TEST_P(Trans16x16DCT, MemCheck) {
+ RunMemCheck();
+}
+
+TEST_P(Trans16x16DCT, InvAccuracyCheck) {
+ RunInvAccuracyCheck();
+}
+
+class Trans16x16HT : public Trans16x16TestBase,
+ public PARAMS(fht_t, iht_t, int) {
+ public:
+ virtual ~Trans16x16HT() {}
+
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 16;
+ fwd_txfm_ref = fht16x16_ref;
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ fwd_txfm_(in, out, stride, tx_type_);
+ }
+ void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ inv_txfm_(out, dst, stride, tx_type_);
+ }
+
+ fht_t fwd_txfm_;
+ iht_t inv_txfm_;
+};
+
+TEST_P(Trans16x16HT, AccuracyCheck) {
+ RunAccuracyCheck();
+}
+
+TEST_P(Trans16x16HT, CoeffCheck) {
+ RunCoeffCheck();
+}
+
+TEST_P(Trans16x16HT, MemCheck) {
+ RunMemCheck();
+}
+
+using std::tr1::make_tuple;
+
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vp9_short_fdct16x16_c, &vp9_short_idct16x16_add_c, 0)));
+INSTANTIATE_TEST_CASE_P(
+ C, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_short_fht16x16_c, &vp9_short_iht16x16_add_c, 0),
+ make_tuple(&vp9_short_fht16x16_c, &vp9_short_iht16x16_add_c, 1),
+ make_tuple(&vp9_short_fht16x16_c, &vp9_short_iht16x16_add_c, 2),
+ make_tuple(&vp9_short_fht16x16_c, &vp9_short_iht16x16_add_c, 3)));
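+
+// Note (assumption): the last tuple element is the vp9 tx_type; values 0..3
+// are believed to map to DCT_DCT, ADST_DCT, DCT_ADST and ADST_ADST.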
+
+#if HAVE_SSE2
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vp9_short_fdct16x16_sse2, &vp9_short_idct16x16_add_c, 0)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_short_fht16x16_sse2, &vp9_short_iht16x16_add_sse2, 0),
+ make_tuple(&vp9_short_fht16x16_sse2, &vp9_short_iht16x16_add_sse2, 1),
+ make_tuple(&vp9_short_fht16x16_sse2, &vp9_short_iht16x16_add_sse2, 2),
+ make_tuple(&vp9_short_fht16x16_sse2, &vp9_short_iht16x16_add_sse2, 3)));
+#endif
+} // namespace
diff --git a/libvpx/test/dct32x32_test.cc b/libvpx/test/dct32x32_test.cc
new file mode 100644
index 0000000..f331886
--- /dev/null
+++ b/libvpx/test/dct32x32_test.cc
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+
+extern "C" {
+#include "./vpx_config.h"
+#include "vp9/common/vp9_entropy.h"
+#include "./vp9_rtcd.h"
+}
+
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+#ifdef _MSC_VER
+static int round(double x) {
+ if (x < 0)
+ return static_cast<int>(ceil(x - 0.5));
+ else
+ return static_cast<int>(floor(x + 0.5));
+}
+#endif
+
+const int kNumCoeffs = 1024;
+const double kPi = 3.141592653589793238462643383279502884;
+void reference_32x32_dct_1d(const double in[32], double out[32], int stride) {
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 32; k++) {
+ out[k] = 0.0;
+ for (int n = 0; n < 32; n++)
+ out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 64.0);
+ if (k == 0)
+ out[k] = out[k] * kInvSqrt2;
+ }
+}
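+
+// The loop above directly evaluates the (unnormalized) DCT-II:
+//   out[k] = sum_{n=0}^{31} in[n] * cos(pi * (2n + 1) * k / 64),
+// with the k == 0 (DC) term additionally scaled by 1/sqrt(2).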
+
+void reference_32x32_dct_2d(const int16_t input[kNumCoeffs],
+ double output[kNumCoeffs]) {
+ // First transform columns
+ for (int i = 0; i < 32; ++i) {
+ double temp_in[32], temp_out[32];
+ for (int j = 0; j < 32; ++j)
+ temp_in[j] = input[j*32 + i];
+ reference_32x32_dct_1d(temp_in, temp_out, 1);
+ for (int j = 0; j < 32; ++j)
+ output[j * 32 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 32; ++i) {
+ double temp_in[32], temp_out[32];
+ for (int j = 0; j < 32; ++j)
+ temp_in[j] = output[j + i*32];
+ reference_32x32_dct_1d(temp_in, temp_out, 1);
+    // Scale down by 4 so the reference output matches the vp9 coefficient scale.
+ for (int j = 0; j < 32; ++j)
+ output[j + i * 32] = temp_out[j] / 4;
+ }
+}
+
+typedef void (*fwd_txfm_t)(int16_t *in, int16_t *out, int stride);
+typedef void (*inv_txfm_t)(int16_t *in, uint8_t *dst, int stride);
+
+class Trans32x32Test : public PARAMS(fwd_txfm_t, inv_txfm_t, int) {
+ public:
+ virtual ~Trans32x32Test() {}
+ virtual void SetUp() {
+ fwd_txfm_ = GET_PARAM(0);
+ inv_txfm_ = GET_PARAM(1);
+ version_ = GET_PARAM(2); // 0: high precision forward transform
+ // 1: low precision version for rd loop
+ }
+
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ int version_;
+ fwd_txfm_t fwd_txfm_;
+ inv_txfm_t inv_txfm_;
+};
+
+TEST_P(Trans32x32Test, AccuracyCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ uint32_t max_error = 0;
+ int64_t total_error = 0;
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+ }
+
+ const int pitch = 64;
+ REGISTER_STATE_CHECK(fwd_txfm_(test_input_block, test_temp_block, pitch));
+ REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, dst, 32));
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const uint32_t diff = dst[j] - src[j];
+ const uint32_t error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+ }
+
+ if (version_ == 1) {
+ max_error /= 2;
+ total_error /= 45;
+ }
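+
+  // Note: version 1 is the lower-precision rd-loop transform (see SetUp);
+  // dividing the measured errors here is equivalent to granting it a
+  // proportionally larger error budget against the fixed bounds below.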
+
+ EXPECT_GE(1u, max_error)
+ << "Error: 32x32 FDCT/IDCT has an individual round-trip error > 1";
+
+ EXPECT_GE(count_test_block, total_error)
+ << "Error: 32x32 FDCT/IDCT has average round-trip error > 1 per block";
+}
+
+TEST_P(Trans32x32Test, CoeffCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+
+ DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_block[j] = rnd.Rand8() - rnd.Rand8();
+
+ const int pitch = 64;
+ vp9_short_fdct32x32_c(input_block, output_ref_block, pitch);
+ REGISTER_STATE_CHECK(fwd_txfm_(input_block, output_block, pitch));
+
+ if (version_ == 0) {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(output_block[j], output_ref_block[j])
+ << "Error: 32x32 FDCT versions have mismatched coefficients";
+ } else {
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_GE(6, abs(output_block[j] - output_ref_block[j]))
+ << "Error: 32x32 FDCT rd has mismatched coefficients";
+ }
+ }
+}
+
+TEST_P(Trans32x32Test, MemCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 2000;
+
+ DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_extreme_block[j] = rnd.Rand8() & 1 ? 255 : -255;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = 255;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -255;
+
+ const int pitch = 64;
+ vp9_short_fdct32x32_c(input_extreme_block, output_ref_block, pitch);
+ REGISTER_STATE_CHECK(fwd_txfm_(input_extreme_block, output_block, pitch));
+
+ // The minimum quant value is 4.
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (version_ == 0) {
+ EXPECT_EQ(output_block[j], output_ref_block[j])
+ << "Error: 32x32 FDCT versions have mismatched coefficients";
+ } else {
+ EXPECT_GE(6, abs(output_block[j] - output_ref_block[j]))
+ << "Error: 32x32 FDCT rd has mismatched coefficients";
+ }
+ EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_ref_block[j]))
+ << "Error: 32x32 FDCT C has coefficient larger than 4*DCT_MAX_VALUE";
+ EXPECT_GE(4 * DCT_MAX_VALUE, abs(output_block[j]))
+ << "Error: 32x32 FDCT has coefficient larger than "
+ << "4*DCT_MAX_VALUE";
+ }
+ }
+}
+
+TEST_P(Trans32x32Test, InverseAccuracy) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, coeff, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-255, 255]
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ in[j] = src[j] - dst[j];
+ }
+
+ reference_32x32_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff[j] = round(out_r[j]);
+ REGISTER_STATE_CHECK(inv_txfm_(coeff, dst, 32));
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+ EXPECT_GE(1, error)
+ << "Error: 32x32 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+}
+
+using std::tr1::make_tuple;
+
+INSTANTIATE_TEST_CASE_P(
+ C, Trans32x32Test,
+ ::testing::Values(
+ make_tuple(&vp9_short_fdct32x32_c, &vp9_short_idct32x32_add_c, 0),
+ make_tuple(&vp9_short_fdct32x32_rd_c, &vp9_short_idct32x32_add_c, 1)));
+
+#if HAVE_SSE2
+INSTANTIATE_TEST_CASE_P(
+ SSE2, Trans32x32Test,
+ ::testing::Values(
+ make_tuple(&vp9_short_fdct32x32_sse2,
+ &vp9_short_idct32x32_add_sse2, 0),
+ make_tuple(&vp9_short_fdct32x32_rd_sse2,
+ &vp9_short_idct32x32_add_sse2, 1)));
+#endif
+} // namespace
diff --git a/libvpx/test/decode_test_driver.cc b/libvpx/test/decode_test_driver.cc
index 3610f02..1f6d540 100644
--- a/libvpx/test/decode_test_driver.cc
+++ b/libvpx/test/decode_test_driver.cc
@@ -7,40 +7,41 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/register_state_check.h"
#include "test/video_source.h"
namespace libvpx_test {
-#if CONFIG_VP8_DECODER
-void Decoder::DecodeFrame(const uint8_t *cxdata, int size) {
- if (!decoder_.priv) {
- const vpx_codec_err_t res_init = vpx_codec_dec_init(&decoder_,
- &vpx_codec_vp8_dx_algo,
- &cfg_, 0);
- ASSERT_EQ(VPX_CODEC_OK, res_init) << DecodeError();
- }
- const vpx_codec_err_t res_dec = vpx_codec_decode(&decoder_,
- cxdata, size, NULL, 0);
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << DecodeError();
+vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, int size) {
+ vpx_codec_err_t res_dec;
+ InitOnce();
+ REGISTER_STATE_CHECK(res_dec = vpx_codec_decode(&decoder_,
+ cxdata, size, NULL, 0));
+ return res_dec;
}
void DecoderTest::RunLoop(CompressedVideoSource *video) {
vpx_codec_dec_cfg_t dec_cfg = {0};
- Decoder decoder(dec_cfg, 0);
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
+ ASSERT_TRUE(decoder != NULL);
// Decode frames.
for (video->Begin(); video->cxdata(); video->Next()) {
- decoder.DecodeFrame(video->cxdata(), video->frame_size());
+ vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
+ video->frame_size());
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
- DxDataIterator dec_iter = decoder.GetDxData();
+ DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img = NULL;
// Get decompressed data
while ((img = dec_iter.Next()))
DecompressedFrameHook(*img, video->frame_number());
}
+
+ delete decoder;
}
-#endif
} // namespace libvpx_test
diff --git a/libvpx/test/decode_test_driver.h b/libvpx/test/decode_test_driver.h
index 6408bee..055c45e 100644
--- a/libvpx/test/decode_test_driver.h
+++ b/libvpx/test/decode_test_driver.h
@@ -12,12 +12,12 @@
#define TEST_DECODE_TEST_DRIVER_H_
#include <cstring>
#include "third_party/googletest/src/include/gtest/gtest.h"
-#include "vpx_config.h"
+#include "./vpx_config.h"
#include "vpx/vpx_decoder.h"
-#include "vpx/vp8dx.h"
namespace libvpx_test {
+class CodecFactory;
class CompressedVideoSource;
// Provides an object to handle decoding output
@@ -36,21 +36,20 @@ class DxDataIterator {
};
// Provides a simplified interface to manage one video decoding.
-//
-// TODO: similar to Encoder class, the exact services should be
-// added as more tests are added.
+// Similar to Encoder class, the exact services should be added
+// as more tests are added.
class Decoder {
public:
Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline)
- : cfg_(cfg), deadline_(deadline) {
+ : cfg_(cfg), deadline_(deadline), init_done_(false) {
memset(&decoder_, 0, sizeof(decoder_));
}
- ~Decoder() {
+ virtual ~Decoder() {
vpx_codec_destroy(&decoder_);
}
- void DecodeFrame(const uint8_t *cxdata, int size);
+ vpx_codec_err_t DecodeFrame(const uint8_t *cxdata, int size);
DxDataIterator GetDxData() {
return DxDataIterator(&decoder_);
@@ -61,25 +60,45 @@ class Decoder {
}
void Control(int ctrl_id, int arg) {
+ InitOnce();
const vpx_codec_err_t res = vpx_codec_control_(&decoder_, ctrl_id, arg);
ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
}
- protected:
- const char *DecodeError() {
+ void Control(int ctrl_id, const void *arg) {
+ InitOnce();
+ const vpx_codec_err_t res = vpx_codec_control_(&decoder_, ctrl_id, arg);
+ ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
+ }
+
+  const char *DecodeError() {
const char *detail = vpx_codec_error_detail(&decoder_);
return detail ? detail : vpx_codec_error(&decoder_);
}
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const = 0;
+
+ void InitOnce() {
+ if (!init_done_) {
+ const vpx_codec_err_t res = vpx_codec_dec_init(&decoder_,
+ CodecInterface(),
+ &cfg_, 0);
+ ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
+ init_done_ = true;
+ }
+ }
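+
+  // Note: initialization is deferred to the first DecodeFrame()/Control()
+  // call because CodecInterface() is pure virtual -- the concrete codec is
+  // only known after the derived class has been constructed.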
+
vpx_codec_ctx_t decoder_;
vpx_codec_dec_cfg_t cfg_;
unsigned int deadline_;
+ bool init_done_;
};
// Common test functionality for all Decoder tests.
class DecoderTest {
public:
- // Main loop.
+ // Main decoding loop
virtual void RunLoop(CompressedVideoSource *video);
// Hook to be called on every decompressed frame.
@@ -87,9 +106,11 @@ class DecoderTest {
const unsigned int frame_number) {}
protected:
- DecoderTest() {}
+ explicit DecoderTest(const CodecFactory *codec) : codec_(codec) {}
virtual ~DecoderTest() {}
+
+ const CodecFactory *codec_;
};
} // namespace libvpx_test
diff --git a/libvpx/test/encode_test_driver.cc b/libvpx/test/encode_test_driver.cc
index ebb3959..709831e 100644
--- a/libvpx/test/encode_test_driver.cc
+++ b/libvpx/test/encode_test_driver.cc
@@ -7,11 +7,12 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "vpx_config.h"
+
+#include "./vpx_config.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
-#if CONFIG_VP8_DECODER
#include "test/decode_test_driver.h"
-#endif
+#include "test/register_state_check.h"
#include "test/video_source.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
@@ -44,7 +45,7 @@ void Encoder::EncodeFrameInternal(const VideoSource &video,
cfg_.g_h = img->d_h;
cfg_.g_timebase = video.timebase();
cfg_.rc_twopass_stats_in = stats_->buf();
- res = vpx_codec_enc_init(&encoder_, &vpx_codec_vp8_cx_algo, &cfg_,
+ res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
init_flags_);
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
@@ -58,9 +59,10 @@ void Encoder::EncodeFrameInternal(const VideoSource &video,
}
// Encode the frame
- res = vpx_codec_encode(&encoder_,
- video.img(), video.pts(), video.duration(),
- frame_flags, deadline_);
+ REGISTER_STATE_CHECK(
+ res = vpx_codec_encode(&encoder_,
+ video.img(), video.pts(), video.duration(),
+ frame_flags, deadline_));
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
@@ -70,6 +72,11 @@ void Encoder::Flush() {
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
+void EncoderTest::InitializeConfig() {
+ const vpx_codec_err_t res = codec_->DefaultEncoderConfig(&cfg_, 0);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+}
+
void EncoderTest::SetMode(TestMode mode) {
switch (mode) {
case kRealTime:
@@ -107,29 +114,33 @@ static bool compare_img(const vpx_image_t *img1,
const unsigned int height_y = img1->d_h;
unsigned int i;
for (i = 0; i < height_y; ++i)
- match = ( memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y],
- img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y],
- width_y) == 0) && match;
+ match = (memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y],
+ img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y],
+ width_y) == 0) && match;
const unsigned int width_uv = (img1->d_w + 1) >> 1;
const unsigned int height_uv = (img1->d_h + 1) >> 1;
for (i = 0; i < height_uv; ++i)
- match = ( memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U],
- img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U],
- width_uv) == 0) && match;
+ match = (memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U],
+ img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U],
+ width_uv) == 0) && match;
for (i = 0; i < height_uv; ++i)
- match = ( memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V],
- img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V],
- width_uv) == 0) && match;
+ match = (memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V],
+ img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V],
+ width_uv) == 0) && match;
return match;
}
+void EncoderTest::MismatchHook(const vpx_image_t *img1,
+ const vpx_image_t *img2) {
+ ASSERT_TRUE(0) << "Encode/Decode mismatch found";
+}
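+
+// Note: failing outright is the strict default; tests such as the error
+// resilience suite later in this patch override MismatchHook() to record a
+// mismatch PSNR instead of aborting.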
+
void EncoderTest::RunLoop(VideoSource *video) {
-#if CONFIG_VP8_DECODER
vpx_codec_dec_cfg_t dec_cfg = {0};
-#endif
stats_.Reset();
+ ASSERT_TRUE(passes_ == 1 || passes_ == 2);
for (unsigned int pass = 0; pass < passes_; pass++) {
last_pts_ = 0;
@@ -141,31 +152,34 @@ void EncoderTest::RunLoop(VideoSource *video) {
cfg_.g_pass = VPX_RC_LAST_PASS;
BeginPassHook(pass);
- Encoder encoder(cfg_, deadline_, init_flags_, &stats_);
-#if CONFIG_VP8_DECODER
- Decoder decoder(dec_cfg, 0);
- bool has_cxdata = false;
-#endif
+ Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_,
+ &stats_);
+ ASSERT_TRUE(encoder != NULL);
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
bool again;
for (again = true, video->Begin(); again; video->Next()) {
- again = video->img() != NULL;
+ again = (video->img() != NULL);
PreEncodeFrameHook(video);
- PreEncodeFrameHook(video, &encoder);
- encoder.EncodeFrame(video, frame_flags_);
+ PreEncodeFrameHook(video, encoder);
+ encoder->EncodeFrame(video, frame_flags_);
- CxDataIterator iter = encoder.GetCxData();
+ CxDataIterator iter = encoder->GetCxData();
+ bool has_cxdata = false;
+ bool has_dxdata = false;
while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) {
+ pkt = MutateEncoderOutputHook(pkt);
again = true;
-
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT:
-#if CONFIG_VP8_DECODER
has_cxdata = true;
- decoder.DecodeFrame((const uint8_t*)pkt->data.frame.buf,
- pkt->data.frame.sz);
-#endif
+ if (decoder && DoDecode()) {
+ vpx_codec_err_t res_dec = decoder->DecodeFrame(
+ (const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+ has_dxdata = true;
+ }
ASSERT_GE(pkt->data.frame.pts, last_pts_);
last_pts_ = pkt->data.frame.pts;
FramePktHook(pkt);
@@ -180,25 +194,32 @@ void EncoderTest::RunLoop(VideoSource *video) {
}
}
-#if CONFIG_VP8_DECODER
- if (has_cxdata) {
- const vpx_image_t *img_enc = encoder.GetPreviewFrame();
- DxDataIterator dec_iter = decoder.GetDxData();
+ if (has_dxdata && has_cxdata) {
+ const vpx_image_t *img_enc = encoder->GetPreviewFrame();
+ DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img_dec = dec_iter.Next();
- if(img_enc && img_dec) {
+ if (img_enc && img_dec) {
const bool res = compare_img(img_enc, img_dec);
- ASSERT_TRUE(res)<< "Encoder/Decoder mismatch found.";
+ if (!res) { // Mismatch
+ MismatchHook(img_enc, img_dec);
+ }
}
+ if (img_dec)
+ DecompressedFrameHook(*img_dec, video->pts());
}
-#endif
if (!Continue())
break;
}
EndPassHook();
+ if (decoder)
+ delete decoder;
+ delete encoder;
+
if (!Continue())
break;
}
}
+
} // namespace libvpx_test
diff --git a/libvpx/test/encode_test_driver.h b/libvpx/test/encode_test_driver.h
index 0141fa9..dbdc33c 100644
--- a/libvpx/test/encode_test_driver.h
+++ b/libvpx/test/encode_test_driver.h
@@ -9,14 +9,17 @@
*/
#ifndef TEST_ENCODE_TEST_DRIVER_H_
#define TEST_ENCODE_TEST_DRIVER_H_
+
#include <string>
#include <vector>
+
+#include "./vpx_config.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "vpx/vpx_encoder.h"
-#include "vpx/vp8cx.h"
namespace libvpx_test {
+class CodecFactory;
class VideoSource;
enum TestMode {
@@ -36,12 +39,15 @@ enum TestMode {
::libvpx_test::kOnePassGood, \
::libvpx_test::kOnePassBest)
+#define TWO_PASS_TEST_MODES ::testing::Values(::libvpx_test::kTwoPassGood, \
+ ::libvpx_test::kTwoPassBest)
+
// Provides an object to handle the libvpx get_cx_data() iteration pattern
class CxDataIterator {
public:
explicit CxDataIterator(vpx_codec_ctx_t *encoder)
- : encoder_(encoder), iter_(NULL) {}
+ : encoder_(encoder), iter_(NULL) {}
const vpx_codec_cx_pkt_t *Next() {
return vpx_codec_get_cx_data(encoder_, &iter_);
@@ -83,11 +89,11 @@ class Encoder {
public:
Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline,
const unsigned long init_flags, TwopassStatsStore *stats)
- : cfg_(cfg), deadline_(deadline), init_flags_(init_flags), stats_(stats) {
+ : cfg_(cfg), deadline_(deadline), init_flags_(init_flags), stats_(stats) {
memset(&encoder_, 0, sizeof(encoder_));
}
- ~Encoder() {
+ virtual ~Encoder() {
vpx_codec_destroy(&encoder_);
}
@@ -112,11 +118,18 @@ class Encoder {
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
+ void Control(int ctrl_id, struct vpx_scaling_mode *arg) {
+ const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ }
+
void set_deadline(unsigned long deadline) {
deadline_ = deadline;
}
protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const = 0;
+
const char *EncoderError() {
const char *detail = vpx_codec_error_detail(&encoder_);
return detail ? detail : vpx_codec_error(&encoder_);
@@ -145,22 +158,19 @@ class Encoder {
// classes directly, so that tests can be parameterized differently.
class EncoderTest {
protected:
- EncoderTest() : abort_(false), init_flags_(0), frame_flags_(0),
- last_pts_(0) {}
+ explicit EncoderTest(const CodecFactory *codec)
+ : codec_(codec), abort_(false), init_flags_(0), frame_flags_(0),
+ last_pts_(0) {}
virtual ~EncoderTest() {}
// Initialize the cfg_ member with the default configuration.
- void InitializeConfig() {
- const vpx_codec_err_t res = vpx_codec_enc_config_default(
- &vpx_codec_vp8_cx_algo, &cfg_, 0);
- ASSERT_EQ(VPX_CODEC_OK, res);
- }
+ void InitializeConfig();
// Map the TestMode enum to the deadline_ and passes_ variables.
void SetMode(TestMode mode);
- // Main loop.
+ // Main loop
virtual void RunLoop(VideoSource *video);
// Hook to be called at the beginning of a pass.
@@ -180,7 +190,27 @@ class EncoderTest {
virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {}
// Hook to determine whether the encode loop should continue.
- virtual bool Continue() const { return !abort_; }
+ virtual bool Continue() const {
+ return !(::testing::Test::HasFatalFailure() || abort_);
+ }
+
+ const CodecFactory *codec_;
+ // Hook to determine whether to decode frame after encoding
+  virtual bool DoDecode() const { return true; }
+
+ // Hook to handle encode/decode mismatch
+ virtual void MismatchHook(const vpx_image_t *img1,
+ const vpx_image_t *img2);
+
+ // Hook to be called on every decompressed frame.
+ virtual void DecompressedFrameHook(const vpx_image_t& img,
+ vpx_codec_pts_t pts) {}
+
+ // Hook that can modify the encoder's output data
+  virtual const vpx_codec_cx_pkt_t *MutateEncoderOutputHook(
+ const vpx_codec_cx_pkt_t *pkt) {
+ return pkt;
+ }
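+
+  // Note: a derived test can override the hook above to tamper with encoder
+  // output before it reaches the decoder, along these lines (hypothetical
+  // member and helper):
+  //   virtual const vpx_codec_cx_pkt_t *MutateEncoderOutputHook(
+  //       const vpx_codec_cx_pkt_t *pkt) {
+  //     return corrupt_ ? CorruptedCopy(pkt) : pkt;
+  //   }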
bool abort_;
vpx_codec_enc_cfg_t cfg_;
diff --git a/libvpx/test/error_resilience_test.cc b/libvpx/test/error_resilience_test.cc
index 25c6731..16d250c 100644
--- a/libvpx/test/error_resilience_test.cc
+++ b/libvpx/test/error_resilience_test.cc
@@ -7,22 +7,37 @@
in the file PATENTS. All contributing project authors may
be found in the AUTHORS file in the root of the source tree.
*/
+
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
+#include "test/util.h"
namespace {
-class ErrorResilienceTest : public libvpx_test::EncoderTest,
- public ::testing::TestWithParam<int> {
+const int kMaxErrorFrames = 8;
+const int kMaxDroppableFrames = 8;
+
+class ErrorResilienceTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
- ErrorResilienceTest() {
- psnr_ = 0.0;
- nframes_ = 0;
- encoding_mode_ = static_cast<libvpx_test::TestMode>(GetParam());
+ ErrorResilienceTest() : EncoderTest(GET_PARAM(0)),
+ psnr_(0.0),
+ nframes_(0),
+ mismatch_psnr_(0.0),
+ mismatch_nframes_(0),
+ encoding_mode_(GET_PARAM(1)) {
+ Reset();
}
+
virtual ~ErrorResilienceTest() {}
+ void Reset() {
+ error_nframes_ = 0;
+ droppable_nframes_ = 0;
+ }
+
virtual void SetUp() {
InitializeConfig();
SetMode(encoding_mode_);
@@ -31,10 +46,8 @@ class ErrorResilienceTest : public libvpx_test::EncoderTest,
virtual void BeginPassHook(unsigned int /*pass*/) {
psnr_ = 0.0;
nframes_ = 0;
- }
-
- virtual bool Continue() const {
- return !HasFatalFailure() && !abort_;
+ mismatch_psnr_ = 0.0;
+ mismatch_nframes_ = 0;
}
virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
@@ -42,15 +55,92 @@ class ErrorResilienceTest : public libvpx_test::EncoderTest,
nframes_++;
}
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video) {
+ frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF);
+ if (droppable_nframes_ > 0 &&
+ (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+ for (unsigned int i = 0; i < droppable_nframes_; ++i) {
+ if (droppable_frames_[i] == video->frame()) {
+ std::cout << " Encoding droppable frame: "
+ << droppable_frames_[i] << "\n";
+ frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF);
+ return;
+ }
+ }
+ }
+ }
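+
+  // Note: VP8_EFLAG_NO_UPD_LAST/GF/ARF prevent the frame from updating the
+  // last, golden and altref reference buffers, so a frame encoded with all
+  // three flags is droppable: skipping it cannot desynchronize the decoder's
+  // reference state from the encoder's.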
+
double GetAveragePsnr() const {
if (nframes_)
return psnr_ / nframes_;
return 0.0;
}
+ double GetAverageMismatchPsnr() const {
+ if (mismatch_nframes_)
+ return mismatch_psnr_ / mismatch_nframes_;
+ return 0.0;
+ }
+
+ virtual bool DoDecode() const {
+ if (error_nframes_ > 0 &&
+ (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+ for (unsigned int i = 0; i < error_nframes_; ++i) {
+ if (error_frames_[i] == nframes_ - 1) {
+ std::cout << " Skipping decoding frame: "
+ << error_frames_[i] << "\n";
+          return false;
+ }
+ }
+ }
+    return true;
+ }
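+
+  // Note: returning false above makes RunLoop() skip decoding that frame,
+  // simulating a frame lost in transmission.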
+
+ virtual void MismatchHook(const vpx_image_t *img1,
+ const vpx_image_t *img2) {
+ double mismatch_psnr = compute_psnr(img1, img2);
+ mismatch_psnr_ += mismatch_psnr;
+ ++mismatch_nframes_;
+ // std::cout << "Mismatch frame psnr: " << mismatch_psnr << "\n";
+ }
+
+ void SetErrorFrames(int num, unsigned int *list) {
+ if (num > kMaxErrorFrames)
+ num = kMaxErrorFrames;
+ else if (num < 0)
+ num = 0;
+ error_nframes_ = num;
+ for (unsigned int i = 0; i < error_nframes_; ++i)
+ error_frames_[i] = list[i];
+ }
+
+ void SetDroppableFrames(int num, unsigned int *list) {
+ if (num > kMaxDroppableFrames)
+ num = kMaxDroppableFrames;
+ else if (num < 0)
+ num = 0;
+ droppable_nframes_ = num;
+ for (unsigned int i = 0; i < droppable_nframes_; ++i)
+ droppable_frames_[i] = list[i];
+ }
+
+ unsigned int GetMismatchFrames() {
+ return mismatch_nframes_;
+ }
+
private:
double psnr_;
unsigned int nframes_;
+ unsigned int error_nframes_;
+ unsigned int droppable_nframes_;
+ double mismatch_psnr_;
+ unsigned int mismatch_nframes_;
+ unsigned int error_frames_[kMaxErrorFrames];
+ unsigned int droppable_frames_[kMaxDroppableFrames];
libvpx_test::TestMode encoding_mode_;
};
@@ -58,7 +148,7 @@ TEST_P(ErrorResilienceTest, OnVersusOff) {
const vpx_rational timebase = { 33333333, 1000000000 };
cfg_.g_timebase = timebase;
cfg_.rc_target_bitrate = 2000;
- cfg_.g_lag_in_frames = 25;
+ cfg_.g_lag_in_frames = 10;
init_flags_ = VPX_CODEC_USE_PSNR;
@@ -85,6 +175,59 @@ TEST_P(ErrorResilienceTest, OnVersusOff) {
}
}
-INSTANTIATE_TEST_CASE_P(OnOffTest, ErrorResilienceTest,
- ONE_PASS_TEST_MODES);
+TEST_P(ErrorResilienceTest, DropFramesWithoutRecovery) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 500;
+ // FIXME(debargha): Fix this to work for any lag.
+ // Currently this test only works for lag = 0
+ cfg_.g_lag_in_frames = 0;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ timebase.den, timebase.num, 0, 30);
+
+ // Error resilient mode ON.
+ cfg_.g_error_resilient = 1;
+
+  // Set an arbitrary set of error frames, identical to the droppable frames.
+ unsigned int num_droppable_frames = 2;
+ unsigned int droppable_frame_list[] = {5, 16};
+ SetDroppableFrames(num_droppable_frames, droppable_frame_list);
+ SetErrorFrames(num_droppable_frames, droppable_frame_list);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ // Test that no mismatches have been found
+ std::cout << " Mismatch frames: "
+ << GetMismatchFrames() << "\n";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+
+  // Reset the previously set error/droppable frames.
+ Reset();
+
+#if 0
+  // TODO(jkoleszar): This test is disabled for the time being as it is too
+  // sensitive. It's not clear how to set a reasonable threshold for
+  // this behavior.
+
+ // Now set an arbitrary set of error frames that are non-droppable
+ unsigned int num_error_frames = 3;
+ unsigned int error_frame_list[] = {3, 10, 20};
+ SetErrorFrames(num_error_frames, error_frame_list);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+
+ // Test that dropping an arbitrary set of inter frames does not hurt too much
+ // Note the Average Mismatch PSNR is the average of the PSNR between
+ // decoded frame and encoder's version of the same frame for all frames
+ // with mismatch.
+ const double psnr_resilience_mismatch = GetAverageMismatchPsnr();
+ std::cout << " Mismatch PSNR: "
+ << psnr_resilience_mismatch << "\n";
+ EXPECT_GT(psnr_resilience_mismatch, 20.0);
+#endif
+}
+
+VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTest, ONE_PASS_TEST_MODES);
+VP9_INSTANTIATE_TEST_CASE(ErrorResilienceTest, ONE_PASS_TEST_MODES);
+
} // namespace
diff --git a/libvpx/test/fdct4x4_test.cc b/libvpx/test/fdct4x4_test.cc
index 619b23d..ea40ca6 100644
--- a/libvpx/test/fdct4x4_test.cc
+++ b/libvpx/test/fdct4x4_test.cc
@@ -1,94 +1,94 @@
/*
-* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
-*
-* Use of this source code is governed by a BSD-style license
-* that can be found in the LICENSE file in the root of the source
-* tree. An additional intellectual property rights grant can be found
-* in the file PATENTS. All contributing project authors may
-* be found in the AUTHORS file in the root of the source tree.
-*/
-
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
#include <math.h>
-#include <stddef.h>
-#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/types.h>
+#include "third_party/googletest/src/include/gtest/gtest.h"
extern "C" {
-#include "vpx_rtcd.h"
+#include "./vp9_rtcd.h"
}
#include "test/acm_random.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+using libvpx_test::ACMRandom;
namespace {
+void fdct4x4(int16_t *in, int16_t *out, uint8_t* /*dst*/,
+ int stride, int /*tx_type*/) {
+ vp9_short_fdct4x4_c(in, out, stride);
+}
+void idct4x4_add(int16_t* /*in*/, int16_t *out, uint8_t *dst,
+ int stride, int /*tx_type*/) {
+ vp9_short_idct4x4_add_c(out, dst, stride >> 1);
+}
+void fht4x4(int16_t *in, int16_t *out, uint8_t* /*dst*/,
+ int stride, int tx_type) {
+ vp9_short_fht4x4_c(in, out, stride >> 1, tx_type);
+}
+void iht4x4_add(int16_t* /*in*/, int16_t *out, uint8_t *dst,
+ int stride, int tx_type) {
+ vp9_short_iht4x4_add_c(out, dst, stride >> 1, tx_type);
+}
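+
+// Note (assumption): the pitch handed to these wrappers is in bytes for the
+// legacy fdct interface, while the newer vp9 functions take a stride in
+// int16 elements -- hence the "stride >> 1" conversions above.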
-const int cospi8sqrt2minus1 = 20091;
-const int sinpi8sqrt2 = 35468;
-
-void reference_idct4x4(const int16_t *input, int16_t *output) {
- const int16_t *ip = input;
- int16_t *op = output;
-
- for (int i = 0; i < 4; ++i) {
- const int a1 = ip[0] + ip[8];
- const int b1 = ip[0] - ip[8];
- const int temp1 = (ip[4] * sinpi8sqrt2) >> 16;
- const int temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
- const int c1 = temp1 - temp2;
- const int temp3 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
- const int temp4 = (ip[12] * sinpi8sqrt2) >> 16;
- const int d1 = temp3 + temp4;
- op[0] = a1 + d1;
- op[12] = a1 - d1;
- op[4] = b1 + c1;
- op[8] = b1 - c1;
- ++ip;
- ++op;
+class FwdTrans4x4Test : public ::testing::TestWithParam<int> {
+ public:
+ virtual ~FwdTrans4x4Test() {}
+ virtual void SetUp() {
+ tx_type_ = GetParam();
+ if (tx_type_ == 0) {
+ fwd_txfm_ = fdct4x4;
+ inv_txfm_ = idct4x4_add;
+ } else {
+ fwd_txfm_ = fht4x4;
+ inv_txfm_ = iht4x4_add;
+ }
}
- ip = output;
- op = output;
- for (int i = 0; i < 4; ++i) {
- const int a1 = ip[0] + ip[2];
- const int b1 = ip[0] - ip[2];
- const int temp1 = (ip[1] * sinpi8sqrt2) >> 16;
- const int temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
- const int c1 = temp1 - temp2;
- const int temp3 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
- const int temp4 = (ip[3] * sinpi8sqrt2) >> 16;
- const int d1 = temp3 + temp4;
- op[0] = (a1 + d1 + 4) >> 3;
- op[3] = (a1 - d1 + 4) >> 3;
- op[1] = (b1 + c1 + 4) >> 3;
- op[2] = (b1 - c1 + 4) >> 3;
- ip += 4;
- op += 4;
+
+ protected:
+ void RunFwdTxfm(int16_t *in, int16_t *out, uint8_t *dst,
+ int stride, int tx_type) {
+ (*fwd_txfm_)(in, out, dst, stride, tx_type);
}
-}
-using libvpx_test::ACMRandom;
+ void RunInvTxfm(int16_t *in, int16_t *out, uint8_t *dst,
+ int stride, int tx_type) {
+ (*inv_txfm_)(in, out, dst, stride, tx_type);
+ }
-TEST(Vp8FdctTest, SignBiasCheck) {
+ int tx_type_;
+ void (*fwd_txfm_)(int16_t *in, int16_t *out, uint8_t *dst,
+ int stride, int tx_type);
+ void (*inv_txfm_)(int16_t *in, int16_t *out, uint8_t *dst,
+ int stride, int tx_type);
+};
+
+TEST_P(FwdTrans4x4Test, SignBiasCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- int16_t test_input_block[16];
- int16_t test_output_block[16];
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 16);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_output_block, 16);
const int pitch = 8;
int count_sign_block[16][2];
const int count_test_block = 1000000;
memset(count_sign_block, 0, sizeof(count_sign_block));
-
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 16; ++j)
test_input_block[j] = rnd.Rand8() - rnd.Rand8();
- vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch);
+ RunFwdTxfm(test_input_block, test_output_block, NULL, pitch, tx_type_);
for (int j = 0; j < 16; ++j) {
if (test_output_block[j] < 0)
@@ -98,22 +98,22 @@ TEST(Vp8FdctTest, SignBiasCheck) {
}
}
- bool bias_acceptable = true;
- for (int j = 0; j < 16; ++j)
- bias_acceptable = bias_acceptable &&
- (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 10000);
-
- EXPECT_EQ(true, bias_acceptable)
- << "Error: 4x4 FDCT has a sign bias > 1% for input range [-255, 255]";
+ for (int j = 0; j < 16; ++j) {
+ const bool bias_acceptable = (abs(count_sign_block[j][0] -
+ count_sign_block[j][1]) < 10000);
+ EXPECT_TRUE(bias_acceptable)
+ << "Error: 4x4 FDCT/FHT has a sign bias > 1%"
+ << " for input range [-255, 255] at index " << j
+ << " tx_type " << tx_type_;
+ }
memset(count_sign_block, 0, sizeof(count_sign_block));
-
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-15, 15].
for (int j = 0; j < 16; ++j)
test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
- vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch);
+ RunFwdTxfm(test_input_block, test_output_block, NULL, pitch, tx_type_);
for (int j = 0; j < 16; ++j) {
if (test_output_block[j] < 0)
@@ -123,47 +123,68 @@ TEST(Vp8FdctTest, SignBiasCheck) {
}
}
- bias_acceptable = true;
- for (int j = 0; j < 16; ++j)
- bias_acceptable = bias_acceptable &&
- (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 100000);
-
- EXPECT_EQ(true, bias_acceptable)
- << "Error: 4x4 FDCT has a sign bias > 10% for input range [-15, 15]";
-};
+ for (int j = 0; j < 16; ++j) {
+ const bool bias_acceptable = (abs(count_sign_block[j][0] -
+ count_sign_block[j][1]) < 100000);
+ EXPECT_TRUE(bias_acceptable)
+ << "Error: 4x4 FDCT/FHT has a sign bias > 10%"
+ << " for input range [-15, 15] at index " << j;
+ }
+}
-TEST(Vp8FdctTest, RoundTripErrorCheck) {
+TEST_P(FwdTrans4x4Test, RoundTripErrorCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
+
int max_error = 0;
- double total_error = 0;
+ int total_error = 0;
const int count_test_block = 1000000;
for (int i = 0; i < count_test_block; ++i) {
- int16_t test_input_block[16];
- int16_t test_temp_block[16];
- int16_t test_output_block[16];
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 16);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 16);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 16);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 16);
+ for (int j = 0; j < 16; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 16; ++j)
- test_input_block[j] = rnd.Rand8() - rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
const int pitch = 8;
- vp8_short_fdct4x4_c(test_input_block, test_temp_block, pitch);
- reference_idct4x4(test_temp_block, test_output_block);
+ RunFwdTxfm(test_input_block, test_temp_block, dst, pitch, tx_type_);
+
+ for (int j = 0; j < 16; ++j) {
+ if (test_temp_block[j] > 0) {
+ test_temp_block[j] += 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ } else {
+ test_temp_block[j] -= 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ }
+ }
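+
+    // Note: the branch above rounds each coefficient to the nearest multiple
+    // of 4, mimicking quantization at the minimum quantizer of 4. E.g.
+    // 5 -> (5 + 2) / 4 * 4 = 4 and, with C's truncating division,
+    // -5 -> (-5 - 2) / 4 * 4 = -4.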
+
+ // inverse transform and reconstruct the pixel block
+ RunInvTxfm(test_input_block, test_temp_block, dst, pitch, tx_type_);
for (int j = 0; j < 16; ++j) {
- const int diff = test_input_block[j] - test_output_block[j];
+ const int diff = dst[j] - src[j];
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
}
}
-
- EXPECT_GE(1, max_error )
- << "Error: FDCT/IDCT has an individual roundtrip error > 1";
+ EXPECT_GE(1, max_error)
+ << "Error: FDCT/IDCT or FHT/IHT has an individual roundtrip error > 1";
EXPECT_GE(count_test_block, total_error)
- << "Error: FDCT/IDCT has average roundtrip error > 1 per block";
-};
+ << "Error: FDCT/IDCT or FHT/IHT has average "
+ << "roundtrip error > 1 per block";
+}
+INSTANTIATE_TEST_CASE_P(VP9, FwdTrans4x4Test, ::testing::Range(0, 4));
} // namespace
diff --git a/libvpx/test/fdct8x8_test.cc b/libvpx/test/fdct8x8_test.cc
new file mode 100644
index 0000000..ee6c9f6
--- /dev/null
+++ b/libvpx/test/fdct8x8_test.cc
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "vpx_ports/mem.h"
+
+extern "C" {
+#include "./vp9_rtcd.h"
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *output, int pitch);
+}
+
+#include "test/acm_random.h"
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+void fdct8x8(int16_t *in, int16_t *out, uint8_t* /*dst*/,
+ int stride, int /*tx_type*/) {
+ vp9_short_fdct8x8_c(in, out, stride);
+}
+void idct8x8_add(int16_t* /*in*/, int16_t *out, uint8_t *dst,
+ int stride, int /*tx_type*/) {
+ vp9_short_idct8x8_add_c(out, dst, stride >> 1);
+}
+void fht8x8(int16_t *in, int16_t *out, uint8_t* /*dst*/,
+ int stride, int tx_type) {
+  // TODO(jingning): Refactor this to test both the _c and _sse2 functions,
+  // once all of the inverse dct functions have sse2 implementations.
+#if HAVE_SSE2
+ vp9_short_fht8x8_sse2(in, out, stride >> 1, tx_type);
+#else
+ vp9_short_fht8x8_c(in, out, stride >> 1, tx_type);
+#endif
+}
+void iht8x8_add(int16_t* /*in*/, int16_t *out, uint8_t *dst,
+ int stride, int tx_type) {
+ vp9_short_iht8x8_add_c(out, dst, stride >> 1, tx_type);
+}
+
+class FwdTrans8x8Test : public ::testing::TestWithParam<int> {
+ public:
+ virtual ~FwdTrans8x8Test() {}
+ virtual void SetUp() {
+ tx_type_ = GetParam();
+ if (tx_type_ == 0) {
+ fwd_txfm = fdct8x8;
+ inv_txfm = idct8x8_add;
+ } else {
+ fwd_txfm = fht8x8;
+ inv_txfm = iht8x8_add;
+ }
+ }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+ void RunFwdTxfm(int16_t *in, int16_t *out, uint8_t *dst,
+ int stride, int tx_type) {
+ (*fwd_txfm)(in, out, dst, stride, tx_type);
+ }
+ void RunInvTxfm(int16_t *in, int16_t *out, uint8_t *dst,
+ int stride, int tx_type) {
+ (*inv_txfm)(in, out, dst, stride, tx_type);
+ }
+
+ int tx_type_;
+ void (*fwd_txfm)(int16_t*, int16_t*, uint8_t*, int, int);
+ void (*inv_txfm)(int16_t*, int16_t*, uint8_t*, int, int);
+};
+
+TEST_P(FwdTrans8x8Test, SignBiasCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_output_block, 64);
+ const int pitch = 16;
+ int count_sign_block[64][2];
+ const int count_test_block = 100000;
+
+ memset(count_sign_block, 0, sizeof(count_sign_block));
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 64; ++j)
+ test_input_block[j] = rnd.Rand8() - rnd.Rand8();
+ REGISTER_STATE_CHECK(
+ RunFwdTxfm(test_input_block, test_output_block,
+ NULL, pitch, tx_type_));
+
+ for (int j = 0; j < 64; ++j) {
+ if (test_output_block[j] < 0)
+ ++count_sign_block[j][0];
+ else if (test_output_block[j] > 0)
+ ++count_sign_block[j][1];
+ }
+ }
+
+ for (int j = 0; j < 64; ++j) {
+ const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
+ const int max_diff = 1125;
+ EXPECT_LT(diff, max_diff)
+ << "Error: 8x8 FDCT/FHT has a sign bias > "
+ << 1. * max_diff / count_test_block * 100 << "%"
+ << " for input range [-255, 255] at index " << j
+ << " count0: " << count_sign_block[j][0]
+ << " count1: " << count_sign_block[j][1]
+ << " diff: " << diff;
+ }
+
+ memset(count_sign_block, 0, sizeof(count_sign_block));
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-15, 15].
+ for (int j = 0; j < 64; ++j)
+ test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
+ REGISTER_STATE_CHECK(
+ RunFwdTxfm(test_input_block, test_output_block,
+ NULL, pitch, tx_type_));
+
+ for (int j = 0; j < 64; ++j) {
+ if (test_output_block[j] < 0)
+ ++count_sign_block[j][0];
+ else if (test_output_block[j] > 0)
+ ++count_sign_block[j][1];
+ }
+ }
+
+ for (int j = 0; j < 64; ++j) {
+ const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
+ const int max_diff = 10000;
+ EXPECT_LT(diff, max_diff)
+ << "Error: 4x4 FDCT/FHT has a sign bias > "
+ << 1. * max_diff / count_test_block * 100 << "%"
+ << " for input range [-15, 15] at index " << j
+ << " count0: " << count_sign_block[j][0]
+ << " count1: " << count_sign_block[j][1]
+ << " diff: " << diff;
+ }
+}
+
+TEST_P(FwdTrans8x8Test, RoundTripErrorCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int max_error = 0;
+ int total_error = 0;
+ const int count_test_block = 100000;
+ for (int i = 0; i < count_test_block; ++i) {
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+
+ for (int j = 0; j < 64; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 64; ++j)
+ test_input_block[j] = src[j] - dst[j];
+
+ const int pitch = 16;
+ REGISTER_STATE_CHECK(
+ RunFwdTxfm(test_input_block, test_temp_block,
+ dst, pitch, tx_type_));
+ for (int j = 0; j < 64; ++j) {
+ if (test_temp_block[j] > 0) {
+ test_temp_block[j] += 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ } else {
+ test_temp_block[j] -= 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ }
+ }
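+
+    // Note: same nearest-multiple-of-4 rounding as in the 4x4 test, mimicking
+    // quantization at the minimum quantizer.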
+ REGISTER_STATE_CHECK(
+ RunInvTxfm(test_input_block, test_temp_block,
+ dst, pitch, tx_type_));
+
+ for (int j = 0; j < 64; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+ }
+
+ EXPECT_GE(1, max_error)
+ << "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual roundtrip error > 1";
+
+  EXPECT_GE(count_test_block / 5, total_error)
+ << "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
+ "error > 1/5 per block";
+}
+
+TEST_P(FwdTrans8x8Test, ExtremalCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int max_error = 0;
+ int total_error = 0;
+ const int count_test_block = 100000;
+ for (int i = 0; i < count_test_block; ++i) {
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+
+ for (int j = 0; j < 64; ++j) {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = src[j] > 0 ? 0 : 255;
+ }
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 64; ++j)
+ test_input_block[j] = src[j] - dst[j];
+
+ const int pitch = 16;
+ REGISTER_STATE_CHECK(
+ RunFwdTxfm(test_input_block, test_temp_block,
+ dst, pitch, tx_type_));
+ REGISTER_STATE_CHECK(
+ RunInvTxfm(test_input_block, test_temp_block,
+ dst, pitch, tx_type_));
+
+ for (int j = 0; j < 64; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+
+ EXPECT_GE(1, max_error)
+ << "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has an"
+ << " individual roundtrip error > 1";
+
+    EXPECT_GE(count_test_block / 5, total_error)
+ << "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
+ << " roundtrip error > 1/5 per block";
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(VP9, FwdTrans8x8Test, ::testing::Range(0, 4));
+} // namespace
diff --git a/libvpx/test/i420_video_source.h b/libvpx/test/i420_video_source.h
index 219bd33..2bf2a03 100644
--- a/libvpx/test/i420_video_source.h
+++ b/libvpx/test/i420_video_source.h
@@ -11,6 +11,7 @@
#define TEST_I420_VIDEO_SOURCE_H_
#include <cstdio>
#include <cstdlib>
+#include <string>
#include "test/video_source.h"
@@ -34,7 +35,6 @@ class I420VideoSource : public VideoSource {
height_(0),
framerate_numerator_(rate_numerator),
framerate_denominator_(rate_denominator) {
-
// This initializes raw_sz_, width_, height_ and allocates an img.
SetSize(width, height);
}
@@ -49,7 +49,7 @@ class I420VideoSource : public VideoSource {
if (input_file_)
fclose(input_file_);
input_file_ = OpenTestDataFile(file_name_);
- ASSERT_TRUE(input_file_) << "Input file open failed. Filename: "
+ ASSERT_TRUE(input_file_ != NULL) << "Input file open failed. Filename: "
<< file_name_;
if (start_) {
fseek(input_file_, raw_sz_ * start_, SEEK_SET);
@@ -83,7 +83,7 @@ class I420VideoSource : public VideoSource {
void SetSize(unsigned int width, unsigned int height) {
if (width != width_ || height != height_) {
vpx_img_free(img_);
- img_ = vpx_img_alloc(NULL, VPX_IMG_FMT_VPXI420, width, height, 1);
+ img_ = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, width, height, 1);
ASSERT_TRUE(img_ != NULL);
width_ = width;
height_ = height;
@@ -92,6 +92,7 @@ class I420VideoSource : public VideoSource {
}
virtual void FillFrame() {
+ ASSERT_TRUE(input_file_ != NULL);
// Read a frame from input_file.
if (fread(img_->img_data, raw_sz_, 1, input_file_) == 0) {
limit_ = frame_;
@@ -108,8 +109,8 @@ class I420VideoSource : public VideoSource {
unsigned int frame_;
unsigned int width_;
unsigned int height_;
- unsigned int framerate_numerator_;
- unsigned int framerate_denominator_;
+ int framerate_numerator_;
+ int framerate_denominator_;
};
} // namespace libvpx_test
diff --git a/libvpx/test/idct8x8_test.cc b/libvpx/test/idct8x8_test.cc
new file mode 100644
index 0000000..fc8129e
--- /dev/null
+++ b/libvpx/test/idct8x8_test.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+extern "C" {
+#include "./vp9_rtcd.h"
+}
+
+#include "test/acm_random.h"
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+
+#ifdef _MSC_VER
+static int round(double x) {
+ if (x < 0)
+ return static_cast<int>(ceil(x - 0.5));
+ else
+ return static_cast<int>(floor(x + 0.5));
+}
+#endif
+
+void reference_dct_1d(double input[8], double output[8]) {
+ const double kPi = 3.141592653589793238462643383279502884;
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 8; k++) {
+ output[k] = 0.0;
+ for (int n = 0; n < 8; n++)
+ output[k] += input[n]*cos(kPi*(2*n+1)*k/16.0);
+ if (k == 0)
+ output[k] = output[k]*kInvSqrt2;
+ }
+}
+
+void reference_dct_2d(int16_t input[64], double output[64]) {
+ // First transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j*8 + i];
+ reference_dct_1d(temp_in, temp_out);
+ for (int j = 0; j < 8; ++j)
+ output[j*8 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = output[j + i*8];
+ reference_dct_1d(temp_in, temp_out);
+ for (int j = 0; j < 8; ++j)
+ output[j + i*8] = temp_out[j];
+ }
+  // Scale up by 2 so the reference output matches the vp9 coefficient scale.
+ for (int i = 0; i < 64; ++i)
+ output[i] *= 2;
+}
+
+void reference_idct_1d(double input[8], double output[8]) {
+ const double kPi = 3.141592653589793238462643383279502884;
+ const double kSqrt2 = 1.414213562373095048801688724209698;
+ for (int k = 0; k < 8; k++) {
+ output[k] = 0.0;
+ for (int n = 0; n < 8; n++) {
+ output[k] += input[n]*cos(kPi*(2*k+1)*n/16.0);
+ if (n == 0)
+ output[k] = output[k]/kSqrt2;
+ }
+ }
+}
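+
+// The loop above evaluates the matching inverse (DCT-III):
+//   out[k] = in[0] / sqrt(2) + sum_{n=1}^{7} in[n] * cos(pi * (2k + 1) * n / 16)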
+
+void reference_idct_2d(double input[64], int16_t output[64]) {
+ double out[64], out2[64];
+ // First transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j + i*8];
+ reference_idct_1d(temp_in, temp_out);
+ for (int j = 0; j < 8; ++j)
+ out[j + i*8] = temp_out[j];
+ }
+ // Then transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = out[j*8 + i];
+ reference_idct_1d(temp_in, temp_out);
+ for (int j = 0; j < 8; ++j)
+ out2[j*8 + i] = temp_out[j];
+ }
+ for (int i = 0; i < 64; ++i)
+ output[i] = round(out2[i]/32);
+}
+
+TEST(VP9Idct8x8Test, AccuracyCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t input[64], coeff[64];
+ double output_r[64];
+ uint8_t dst[64], src[64];
+
+ for (int j = 0; j < 64; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 64; ++j)
+ input[j] = src[j] - dst[j];
+
+ reference_dct_2d(input, output_r);
+ for (int j = 0; j < 64; ++j)
+ coeff[j] = round(output_r[j]);
+ vp9_short_idct8x8_add_c(coeff, dst, 8);
+ for (int j = 0; j < 64; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+ EXPECT_GE(1, error)
+ << "Error: 8x8 FDCT/IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+}
+
+} // namespace
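
To see why the *2 / /32 scaling above round-trips to unity: the reference pair is the unnormalized DCT-II/DCT-III, which gains a factor of 4 per 1-D pass (16 in 2-D), and 16 * 2 / 32 = 1. A minimal standalone sketch of the 1-D round trip, using local copies of the reference formulas (illustrative, not part of the patch):

    #include <cmath>
    #include <cstdio>

    static const double kPi = 3.141592653589793238462643383279502884;

    // Same formula as reference_dct_1d above.
    static void ref_dct_1d(const double in[8], double out[8]) {
      for (int k = 0; k < 8; k++) {
        out[k] = 0.0;
        for (int n = 0; n < 8; n++)
          out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
        if (k == 0) out[k] *= 0.707106781186547524400844362104;  // 1/sqrt(2)
      }
    }

    // Same formula as reference_idct_1d above.
    static void ref_idct_1d(const double in[8], double out[8]) {
      for (int k = 0; k < 8; k++) {
        out[k] = 0.0;
        for (int n = 0; n < 8; n++) {
          out[k] += in[n] * cos(kPi * (2 * k + 1) * n / 16.0);
          if (n == 0) out[k] /= 1.414213562373095048801688724209698;  // sqrt(2)
        }
      }
    }

    int main() {
      const double x[8] = { 1, -2, 3, 5, 0, -7, 4, 2 };
      double coeff[8], y[8];
      ref_dct_1d(x, coeff);
      ref_idct_1d(coeff, y);
      for (int i = 0; i < 8; i++)  // y[i] / 4 should reproduce x[i]
        printf("%+.6f %+.6f\n", x[i], y[i] / 4.0);
      return 0;
    }
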
diff --git a/libvpx/test/idct_test.cc b/libvpx/test/idct_test.cc
new file mode 100644
index 0000000..2c7fa0e
--- /dev/null
+++ b/libvpx/test/idct_test.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+extern "C" {
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
+}
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "vpx/vpx_integer.h"
+
+typedef void (*idct_fn_t)(int16_t *input, unsigned char *pred_ptr,
+ int pred_stride, unsigned char *dst_ptr,
+ int dst_stride);
+namespace {
+class IDCTTest : public ::testing::TestWithParam<idct_fn_t> {
+ protected:
+ virtual void SetUp() {
+ int i;
+
+ UUT = GetParam();
+ memset(input, 0, sizeof(input));
+ /* Set up guard blocks */
+ for (i = 0; i < 256; i++) output[i] = ((i & 0xF) < 4 && (i < 64)) ? 0 : -1;
+ }
+
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ idct_fn_t UUT;
+ int16_t input[16];
+ unsigned char output[256];
+ unsigned char predict[256];
+};
+
+TEST_P(IDCTTest, TestGuardBlocks) {
+ int i;
+
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) < 4 && i < 64)
+ EXPECT_EQ(0, output[i]) << i;
+ else
+ EXPECT_EQ(255, output[i]);
+}
+
+TEST_P(IDCTTest, TestAllZeros) {
+ int i;
+
+ REGISTER_STATE_CHECK(UUT(input, output, 16, output, 16));
+
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) < 4 && i < 64)
+ EXPECT_EQ(0, output[i]) << "i==" << i;
+ else
+ EXPECT_EQ(255, output[i]) << "i==" << i;
+}
+
+TEST_P(IDCTTest, TestAllOnes) {
+ int i;
+
+ input[0] = 4;
+ REGISTER_STATE_CHECK(UUT(input, output, 16, output, 16));
+
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) < 4 && i < 64)
+ EXPECT_EQ(1, output[i]) << "i==" << i;
+ else
+ EXPECT_EQ(255, output[i]) << "i==" << i;
+}
+
+TEST_P(IDCTTest, TestAddOne) {
+ int i;
+
+ for (i = 0; i < 256; i++) predict[i] = i;
+ input[0] = 4;
+ REGISTER_STATE_CHECK(UUT(input, predict, 16, output, 16));
+
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) < 4 && i < 64)
+ EXPECT_EQ(i + 1, output[i]) << "i==" << i;
+ else
+ EXPECT_EQ(255, output[i]) << "i==" << i;
+}
+
+TEST_P(IDCTTest, TestWithData) {
+ int i;
+
+ for (i = 0; i < 16; i++) input[i] = i;
+
+ REGISTER_STATE_CHECK(UUT(input, output, 16, output, 16));
+
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) > 3 || i > 63)
+ EXPECT_EQ(255, output[i]) << "i==" << i;
+ else if (i == 0)
+ EXPECT_EQ(11, output[i]) << "i==" << i;
+ else if (i == 34)
+ EXPECT_EQ(1, output[i]) << "i==" << i;
+ else if (i == 2 || i == 17 || i == 32)
+ EXPECT_EQ(3, output[i]) << "i==" << i;
+ else
+ EXPECT_EQ(0, output[i]) << "i==" << i;
+}
+
+INSTANTIATE_TEST_CASE_P(C, IDCTTest, ::testing::Values(vp8_short_idct4x4llm_c));
+#if HAVE_MMX
+INSTANTIATE_TEST_CASE_P(MMX, IDCTTest,
+ ::testing::Values(vp8_short_idct4x4llm_mmx));
+#endif
+}
diff --git a/libvpx/test/idctllm_test.cc b/libvpx/test/idctllm_test.cc
deleted file mode 100644
index dd42e22..0000000
--- a/libvpx/test/idctllm_test.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-extern "C" {
-#include "vpx_config.h"
-#include "vpx_rtcd.h"
-}
-#include "third_party/googletest/src/include/gtest/gtest.h"
-
-typedef void (*idct_fn_t)(short *input, unsigned char *pred_ptr,
- int pred_stride, unsigned char *dst_ptr,
- int dst_stride);
-namespace {
-class IDCTTest : public ::testing::TestWithParam<idct_fn_t>
-{
- protected:
- virtual void SetUp()
- {
- int i;
-
- UUT = GetParam();
- memset(input, 0, sizeof(input));
- /* Set up guard blocks */
- for(i=0; i<256; i++)
- output[i] = ((i&0xF)<4&&(i<64))?0:-1;
- }
-
- idct_fn_t UUT;
- short input[16];
- unsigned char output[256];
- unsigned char predict[256];
-};
-
-TEST_P(IDCTTest, TestGuardBlocks)
-{
- int i;
-
- for(i=0; i<256; i++)
- if((i&0xF) < 4 && i<64)
- EXPECT_EQ(0, output[i]) << i;
- else
- EXPECT_EQ(255, output[i]);
-}
-
-TEST_P(IDCTTest, TestAllZeros)
-{
- int i;
-
- UUT(input, output, 16, output, 16);
-
- for(i=0; i<256; i++)
- if((i&0xF) < 4 && i<64)
- EXPECT_EQ(0, output[i]) << "i==" << i;
- else
- EXPECT_EQ(255, output[i]) << "i==" << i;
-}
-
-TEST_P(IDCTTest, TestAllOnes)
-{
- int i;
-
- input[0] = 4;
- UUT(input, output, 16, output, 16);
-
- for(i=0; i<256; i++)
- if((i&0xF) < 4 && i<64)
- EXPECT_EQ(1, output[i]) << "i==" << i;
- else
- EXPECT_EQ(255, output[i]) << "i==" << i;
-}
-
-TEST_P(IDCTTest, TestAddOne)
-{
- int i;
-
- for(i=0; i<256; i++)
- predict[i] = i;
-
- input[0] = 4;
- UUT(input, predict, 16, output, 16);
-
- for(i=0; i<256; i++)
- if((i&0xF) < 4 && i<64)
- EXPECT_EQ(i+1, output[i]) << "i==" << i;
- else
- EXPECT_EQ(255, output[i]) << "i==" << i;
-}
-
-TEST_P(IDCTTest, TestWithData)
-{
- int i;
-
- for(i=0; i<16; i++)
- input[i] = i;
-
- UUT(input, output, 16, output, 16);
-
- for(i=0; i<256; i++)
- if((i&0xF) > 3 || i>63)
- EXPECT_EQ(255, output[i]) << "i==" << i;
- else if(i == 0)
- EXPECT_EQ(11, output[i]) << "i==" << i;
- else if(i == 34)
- EXPECT_EQ(1, output[i]) << "i==" << i;
- else if(i == 2 || i == 17 || i == 32)
- EXPECT_EQ(3, output[i]) << "i==" << i;
- else
- EXPECT_EQ(0, output[i]) << "i==" << i;
-}
-
-INSTANTIATE_TEST_CASE_P(C, IDCTTest,
- ::testing::Values(vp8_short_idct4x4llm_c));
-#if HAVE_MMX
-INSTANTIATE_TEST_CASE_P(MMX, IDCTTest,
- ::testing::Values(vp8_short_idct4x4llm_mmx));
-#endif
-}
diff --git a/libvpx/test/intrapred_test.cc b/libvpx/test/intrapred_test.cc
index d2e0d61..f5f6d5b 100644
--- a/libvpx/test/intrapred_test.cc
+++ b/libvpx/test/intrapred_test.cc
@@ -11,10 +11,12 @@
#include <string.h>
#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
extern "C" {
-#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"
}
@@ -24,6 +26,13 @@ namespace {
using libvpx_test::ACMRandom;
class IntraPredBase {
+ public:
+ virtual ~IntraPredBase() {}
+
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+
protected:
void SetupMacroblock(uint8_t *data, int block_size, int stride,
int num_planes) {
@@ -97,9 +106,9 @@ class IntraPredBase {
for (int y = 0; y < block_size_; y++)
sum += data_ptr_[p][y * stride_ - 1];
expected = (sum + (1 << (shift - 1))) >> shift;
- } else
+ } else {
expected = 0x80;
-
+ }
// check that all subsequent lines are equal to the first
for (int y = 1; y < block_size_; ++y)
ASSERT_EQ(0, memcmp(data_ptr_[p], &data_ptr_[p][y * stride_],
@@ -246,8 +255,10 @@ class IntraPredYTest : public ::testing::TestWithParam<intra_pred_y_fn_t>,
virtual void Predict(MB_PREDICTION_MODE mode) {
mb_.mode_info_context->mbmi.mode = mode;
- pred_fn_(&mb_, data_ptr_[0] - kStride, data_ptr_[0] - 1, kStride,
- data_ptr_[0], kStride);
+ REGISTER_STATE_CHECK(pred_fn_(&mb_,
+ data_ptr_[0] - kStride,
+ data_ptr_[0] - 1, kStride,
+ data_ptr_[0], kStride));
}
intra_pred_y_fn_t pred_fn_;
diff --git a/libvpx/test/ivf_video_source.h b/libvpx/test/ivf_video_source.h
index 48c3a7d..3fbafbd 100644
--- a/libvpx/test/ivf_video_source.h
+++ b/libvpx/test/ivf_video_source.h
@@ -28,7 +28,7 @@ static unsigned int MemGetLe32(const uint8_t *mem) {
// so that we can do actual file decodes.
class IVFVideoSource : public CompressedVideoSource {
public:
- IVFVideoSource(const std::string &file_name)
+ explicit IVFVideoSource(const std::string &file_name)
: file_name_(file_name),
input_file_(NULL),
compressed_frame_buf_(NULL),
@@ -47,12 +47,13 @@ class IVFVideoSource : public CompressedVideoSource {
virtual void Init() {
    // Allocate a buffer for reading in the compressed video frame.
compressed_frame_buf_ = new uint8_t[libvpx_test::kCodeBufferSize];
- ASSERT_TRUE(compressed_frame_buf_) << "Allocate frame buffer failed";
+ ASSERT_TRUE(compressed_frame_buf_ != NULL)
+        << "Allocating the frame buffer failed";
}
virtual void Begin() {
input_file_ = OpenTestDataFile(file_name_);
- ASSERT_TRUE(input_file_) << "Input file open failed. Filename: "
+ ASSERT_TRUE(input_file_ != NULL) << "Input file open failed. Filename: "
<< file_name_;
// Read file header
@@ -72,6 +73,7 @@ class IVFVideoSource : public CompressedVideoSource {
}
void FillFrame() {
+ ASSERT_TRUE(input_file_ != NULL);
uint8_t frame_hdr[kIvfFrameHdrSize];
// Check frame header and read a frame from input_file.
if (fread(frame_hdr, 1, kIvfFrameHdrSize, input_file_)
diff --git a/libvpx/test/keyframe_test.cc b/libvpx/test/keyframe_test.cc
index d0c81df..7ee2898 100644
--- a/libvpx/test/keyframe_test.cc
+++ b/libvpx/test/keyframe_test.cc
@@ -9,28 +9,28 @@
*/
#include <climits>
#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/util.h"
namespace {
class KeyframeTest : public ::libvpx_test::EncoderTest,
- public ::testing::TestWithParam<enum libvpx_test::TestMode> {
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
+ KeyframeTest() : EncoderTest(GET_PARAM(0)) {}
+
virtual void SetUp() {
InitializeConfig();
- SetMode(GetParam());
+ SetMode(GET_PARAM(1));
kf_count_ = 0;
kf_count_max_ = INT_MAX;
kf_do_force_kf_ = false;
set_cpu_used_ = 0;
}
- virtual bool Continue() const {
- return !HasFatalFailure() && !abort_;
- }
-
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (kf_do_force_kf_)
@@ -64,7 +64,7 @@ TEST_P(KeyframeTest, TestRandomVideoSource) {
// In realtime mode - auto placed keyframes are exceedingly rare, don't
  // bother with this check.
- if(GetParam() > 0)
+ if (GET_PARAM(1) > 0)
EXPECT_GT(kf_count_, 1);
}
@@ -126,13 +126,12 @@ TEST_P(KeyframeTest, TestAutoKeyframe) {
// In realtime mode - auto placed keyframes are exceedingly rare, don't
// bother with this check
- if(GetParam() > 0)
+ if (GET_PARAM(1) > 0)
EXPECT_EQ(2u, kf_pts_list_.size()) << " Not the right number of keyframes ";
  // Verify that the keyframes match those in the file.
for (std::vector<vpx_codec_pts_t>::const_iterator iter = kf_pts_list_.begin();
iter != kf_pts_list_.end(); ++iter) {
-
if (deadline_ == VPX_DL_REALTIME && *iter > 0)
EXPECT_EQ(0, (*iter - 1) % 30) << "Unexpected keyframe at frame "
<< *iter;
@@ -141,5 +140,5 @@ TEST_P(KeyframeTest, TestAutoKeyframe) {
}
}
-INSTANTIATE_TEST_CASE_P(AllModes, KeyframeTest, ALL_TEST_MODES);
+VP8_INSTANTIATE_TEST_CASE(KeyframeTest, ALL_TEST_MODES);
} // namespace
diff --git a/libvpx/test/md5_helper.h b/libvpx/test/md5_helper.h
new file mode 100644
index 0000000..289f608
--- /dev/null
+++ b/libvpx/test/md5_helper.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef TEST_MD5_HELPER_H_
+#define TEST_MD5_HELPER_H_
+
+extern "C" {
+#include "./md5_utils.h"
+#include "vpx/vpx_decoder.h"
+}
+
+namespace libvpx_test {
+class MD5 {
+ public:
+ MD5() {
+ MD5Init(&md5_);
+ }
+
+ void Add(const vpx_image_t *img) {
+ for (int plane = 0; plane < 3; ++plane) {
+ const uint8_t *buf = img->planes[plane];
+ // Calculate the width and height to do the md5 check. For the chroma
+ // plane, we never want to round down and thus skip a pixel so if
+ // we are shifting by 1 (chroma_shift) we add 1 before doing the shift.
+ // This works only for chroma_shift of 0 and 1.
+ const int h = plane ? (img->d_h + img->y_chroma_shift) >>
+ img->y_chroma_shift : img->d_h;
+ const int w = plane ? (img->d_w + img->x_chroma_shift) >>
+ img->x_chroma_shift : img->d_w;
+
+ for (int y = 0; y < h; ++y) {
+ MD5Update(&md5_, buf, w);
+ buf += img->stride[plane];
+ }
+ }
+ }
+
+ const char *Get(void) {
+ static const char hex[16] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
+ };
+ uint8_t tmp[16];
+ MD5Context ctx_tmp = md5_;
+
+ MD5Final(tmp, &ctx_tmp);
+ for (int i = 0; i < 16; i++) {
+ res_[i * 2 + 0] = hex[tmp[i] >> 4];
+ res_[i * 2 + 1] = hex[tmp[i] & 0xf];
+ }
+ res_[32] = 0;
+
+ return res_;
+ }
+
+ protected:
+ char res_[33];
+ MD5Context md5_;
+};
+
+} // namespace libvpx_test
+
+#endif // TEST_MD5_HELPER_H_
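
The rounding in MD5::Add() deserves a worked example: (d + chroma_shift) >> chroma_shift rounds the subsampled dimension up rather than down, so an odd-sized 4:2:0 image still hashes its last chroma row and column. A standalone sketch of the arithmetic (illustrative values, not part of the patch):

    #include <cstdio>

    int main() {
      // (d + shift) >> shift rounds up for shift 0 or 1, matching MD5::Add():
      // e.g. 5 luma rows at 4:2:0 (shift 1) -> 3 chroma rows, not 2.
      for (int d = 4; d <= 7; d++)
        for (int shift = 0; shift <= 1; shift++)
          printf("d=%d shift=%d -> %d\n", d, shift, (d + shift) >> shift);
      return 0;
    }
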
diff --git a/libvpx/test/pp_filter_test.cc b/libvpx/test/pp_filter_test.cc
index af2f3bd..e5ac9db 100644
--- a/libvpx/test/pp_filter_test.cc
+++ b/libvpx/test/pp_filter_test.cc
@@ -7,10 +7,12 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
extern "C" {
-#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
}
@@ -26,7 +28,12 @@ typedef void (*post_proc_func_t)(unsigned char *src_ptr,
namespace {
class Vp8PostProcessingFilterTest
- : public ::testing::TestWithParam<post_proc_func_t> {};
+ : public ::testing::TestWithParam<post_proc_func_t> {
+ public:
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+};
// Test routine for the VP8 post-processing function
// vp8_post_proc_down_and_across_mb_row_c.
@@ -56,7 +63,8 @@ TEST_P(Vp8PostProcessingFilterTest, FilterOutputCheck) {
// Pointers to top-left pixel of block in the input and output images.
uint8_t *const src_image_ptr = src_image + (input_stride << 1);
uint8_t *const dst_image_ptr = dst_image + 8;
- uint8_t *const flimits = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_width));
+ uint8_t *const flimits =
+ reinterpret_cast<uint8_t *>(vpx_memalign(16, block_width));
(void)vpx_memset(flimits, 255, block_width);
// Initialize pixels in the input:
@@ -74,8 +82,8 @@ TEST_P(Vp8PostProcessingFilterTest, FilterOutputCheck) {
// Initialize pixels in the output to 99.
(void)vpx_memset(dst_image, 99, output_size);
- GetParam()(src_image_ptr, dst_image_ptr, input_stride,
- output_stride, block_width, flimits, 16);
+ REGISTER_STATE_CHECK(GetParam()(src_image_ptr, dst_image_ptr, input_stride,
+ output_stride, block_width, flimits, 16));
static const uint8_t expected_data[block_height] = {
4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4
diff --git a/libvpx/test/register_state_check.h b/libvpx/test/register_state_check.h
new file mode 100644
index 0000000..479a42d
--- /dev/null
+++ b/libvpx/test/register_state_check.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef TEST_REGISTER_STATE_CHECK_H_
+#define TEST_REGISTER_STATE_CHECK_H_
+
+#ifdef _WIN64
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <winnt.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+namespace testing {
+namespace internal {
+
+inline bool operator==(const M128A& lhs, const M128A& rhs) {
+ return (lhs.Low == rhs.Low && lhs.High == rhs.High);
+}
+
+} // namespace internal
+} // namespace testing
+
+namespace libvpx_test {
+
+// Compares the state of xmm[6-15] at construction with their state at
+// destruction. These registers should be preserved by the callee on
+// Windows x64.
+// Usage:
+// {
+// RegisterStateCheck reg_check;
+// FunctionToVerify();
+// }
+class RegisterStateCheck {
+ public:
+ RegisterStateCheck() { initialized_ = StoreRegisters(&pre_context_); }
+ ~RegisterStateCheck() { EXPECT_TRUE(Check()); }
+
+ private:
+ static bool StoreRegisters(CONTEXT* const context) {
+ const HANDLE this_thread = GetCurrentThread();
+ EXPECT_TRUE(this_thread != NULL);
+ context->ContextFlags = CONTEXT_FLOATING_POINT;
+ const bool context_saved = GetThreadContext(this_thread, context) == TRUE;
+ EXPECT_TRUE(context_saved) << "GetLastError: " << GetLastError();
+ return context_saved;
+ }
+
+ // Compares the register state. Returns true if the states match.
+ bool Check() const {
+ if (!initialized_) return false;
+ CONTEXT post_context;
+ if (!StoreRegisters(&post_context)) return false;
+
+ const M128A* xmm_pre = &pre_context_.Xmm6;
+ const M128A* xmm_post = &post_context.Xmm6;
+ for (int i = 6; i <= 15; ++i) {
+ EXPECT_EQ(*xmm_pre, *xmm_post) << "xmm" << i << " has been modified!";
+ ++xmm_pre;
+ ++xmm_post;
+ }
+ return !testing::Test::HasNonfatalFailure();
+ }
+
+ bool initialized_;
+ CONTEXT pre_context_;
+};
+
+#define REGISTER_STATE_CHECK(statement) do { \
+ libvpx_test::RegisterStateCheck reg_check; \
+ statement; \
+} while (false)
+
+} // namespace libvpx_test
+
+#else // !_WIN64
+
+namespace libvpx_test {
+
+class RegisterStateCheck {};
+#define REGISTER_STATE_CHECK(statement) statement
+
+} // namespace libvpx_test
+
+#endif // _WIN64
+
+#endif // TEST_REGISTER_STATE_CHECK_H_
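
A hypothetical call site for REGISTER_STATE_CHECK (SomeSimdFunction and its signature are illustrative, not from the patch): on Win64 the wrapper verifies xmm6-xmm15 survive the call, and on every other target it expands to the bare statement, so tests can wrap calls unconditionally:

    #include "test/register_state_check.h"
    #include "third_party/googletest/src/include/gtest/gtest.h"

    // Hypothetical SIMD routine under test; name and signature are illustrative.
    extern "C" unsigned int SomeSimdFunction(const unsigned char *src, int stride);

    TEST(RegisterStateCheckExample, WrapsACall) {
      const unsigned char src[64] = { 0 };
      unsigned int ret = 0;
      // Fails the test on Win64 if the callee clobbered xmm6-xmm15.
      REGISTER_STATE_CHECK(ret = SomeSimdFunction(src, 8));
      (void)ret;
    }
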
diff --git a/libvpx/test/resize_test.cc b/libvpx/test/resize_test.cc
index c846157..77d3f5c 100644
--- a/libvpx/test/resize_test.cc
+++ b/libvpx/test/resize_test.cc
@@ -9,9 +9,12 @@
*/
#include <climits>
#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
#include "test/video_source.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/util.h"
namespace {
@@ -49,8 +52,10 @@ class ResizingVideoSource : public ::libvpx_test::DummyVideoSource {
};
class ResizeTest : public ::libvpx_test::EncoderTest,
- public ::testing::TestWithParam<enum libvpx_test::TestMode> {
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
+ ResizeTest() : EncoderTest(GET_PARAM(0)) {}
+
struct FrameInfo {
FrameInfo(vpx_codec_pts_t _pts, unsigned int _w, unsigned int _h)
: pts(_pts), w(_w), h(_h) {}
@@ -62,22 +67,12 @@ class ResizeTest : public ::libvpx_test::EncoderTest,
virtual void SetUp() {
InitializeConfig();
- SetMode(GetParam());
+ SetMode(GET_PARAM(1));
}
- virtual bool Continue() const {
- return !HasFatalFailure() && !abort_;
- }
-
- virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- const unsigned char *buf =
- reinterpret_cast<const unsigned char *>(pkt->data.frame.buf);
- const unsigned int w = (buf[6] | (buf[7] << 8)) & 0x3fff;
- const unsigned int h = (buf[8] | (buf[9] << 8)) & 0x3fff;
-
- frame_info_list_.push_back(FrameInfo(pkt->data.frame.pts, w, h));
- }
+ virtual void DecompressedFrameHook(const vpx_image_t &img,
+ vpx_codec_pts_t pts) {
+ frame_info_list_.push_back(FrameInfo(pts, img.d_w, img.d_h));
}
std::vector< FrameInfo > frame_info_list_;
@@ -100,5 +95,60 @@ TEST_P(ResizeTest, TestExternalResizeWorks) {
}
}
-INSTANTIATE_TEST_CASE_P(OnePass, ResizeTest, ONE_PASS_TEST_MODES);
+class ResizeInternalTest : public ResizeTest {
+ protected:
+ ResizeInternalTest() : ResizeTest(), frame0_psnr_(0.0) {}
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (video->frame() == 3) {
+ struct vpx_scaling_mode mode = {VP8E_FOURFIVE, VP8E_THREEFIVE};
+ encoder->Control(VP8E_SET_SCALEMODE, &mode);
+ }
+ if (video->frame() == 6) {
+ struct vpx_scaling_mode mode = {VP8E_NORMAL, VP8E_NORMAL};
+ encoder->Control(VP8E_SET_SCALEMODE, &mode);
+ }
+ }
+
+ virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (!frame0_psnr_)
+ frame0_psnr_ = pkt->data.psnr.psnr[0];
+ EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 1.0);
+ }
+
+ double frame0_psnr_;
+};
+
+TEST_P(ResizeInternalTest, TestInternalResizeWorks) {
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 10);
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+  // If the number of frames being encoded is smaller than g_lag_in_frames,
+ // the encoded frame is unavailable using the current API. Comparing
+ // frames to detect mismatch would then not be possible. Set
+ // g_lag_in_frames = 0 to get around this.
+ cfg_.g_lag_in_frames = 0;
+
+ // q picked such that initial keyframe on this clip is ~30dB PSNR
+ cfg_.rc_min_quantizer = cfg_.rc_max_quantizer = 48;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+
+ for (std::vector<FrameInfo>::iterator info = frame_info_list_.begin();
+ info != frame_info_list_.end(); ++info) {
+ const vpx_codec_pts_t pts = info->pts;
+ if (pts >= 3 && pts < 6) {
+ ASSERT_EQ(282U, info->w) << "Frame " << pts << " had unexpected width";
+ ASSERT_EQ(173U, info->h) << "Frame " << pts << " had unexpected height";
+ } else {
+ EXPECT_EQ(352U, info->w) << "Frame " << pts << " had unexpected width";
+ EXPECT_EQ(288U, info->h) << "Frame " << pts << " had unexpected height";
+ }
+ }
+}
+
+VP8_INSTANTIATE_TEST_CASE(ResizeTest, ONE_PASS_TEST_MODES);
+VP9_INSTANTIATE_TEST_CASE(ResizeInternalTest,
+ ::testing::Values(::libvpx_test::kOnePassBest));
} // namespace
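
Where the 282x173 expectation comes from: VP8E_FOURFIVE scales the width by 4/5 and VP8E_THREEFIVE scales the height by 3/5, and the assertions imply rounding to the nearest integer. A quick standalone check of the arithmetic (illustrative, not part of the patch):

    #include <cmath>
    #include <cstdio>

    int main() {
      // 352 * 4/5 = 281.6 and 288 * 3/5 = 172.8; rounded, these give the
      // 282x173 dimensions asserted for frames 3..5 above.
      printf("%.0f x %.0f\n", round(352 * 4.0 / 5.0), round(288 * 3.0 / 5.0));
      return 0;
    }
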
diff --git a/libvpx/test/sad_test.cc b/libvpx/test/sad_test.cc
index 2b562e6..453b3a8 100644
--- a/libvpx/test/sad_test.cc
+++ b/libvpx/test/sad_test.cc
@@ -15,12 +15,18 @@
extern "C" {
#include "./vpx_config.h"
-#include "./vpx_rtcd.h"
-#include "vp8/common/blockd.h"
+#if CONFIG_VP8_ENCODER
+#include "./vp8_rtcd.h"
+#endif
+#if CONFIG_VP9_ENCODER
+#include "./vp9_rtcd.h"
+#endif
#include "vpx_mem/vpx_mem.h"
}
#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "test/util.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
@@ -30,15 +36,26 @@ typedef unsigned int (*sad_m_by_n_fn_t)(const unsigned char *source_ptr,
const unsigned char *reference_ptr,
int reference_stride,
unsigned int max_sad);
+typedef std::tr1::tuple<int, int, sad_m_by_n_fn_t> sad_m_by_n_test_param_t;
+
+typedef void (*sad_n_by_n_by_4_fn_t)(const uint8_t *src_ptr,
+ int src_stride,
+ const unsigned char * const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array);
+typedef std::tr1::tuple<int, int, sad_n_by_n_by_4_fn_t>
+ sad_n_by_n_by_4_test_param_t;
using libvpx_test::ACMRandom;
namespace {
-class SADTest : public PARAMS(int, int, sad_m_by_n_fn_t) {
+class SADTestBase : public ::testing::Test {
public:
+ SADTestBase(int width, int height) : width_(width), height_(height) {}
+
static void SetUpTestCase() {
source_data_ = reinterpret_cast<uint8_t*>(
- vpx_memalign(kDataAlignment, kDataBufferSize));
+ vpx_memalign(kDataAlignment, kDataBlockSize));
reference_data_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
}
@@ -50,35 +67,36 @@ class SADTest : public PARAMS(int, int, sad_m_by_n_fn_t) {
reference_data_ = NULL;
}
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+
protected:
+  // Handle up to four 64x64 blocks, with stride up to 128.
static const int kDataAlignment = 16;
- static const int kDataBufferSize = 16 * 32;
+ static const int kDataBlockSize = 64 * 128;
+ static const int kDataBufferSize = 4 * kDataBlockSize;
virtual void SetUp() {
- sad_fn_ = GET_PARAM(2);
- height_ = GET_PARAM(1);
- width_ = GET_PARAM(0);
- source_stride_ = width_ * 2;
+ source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
- sad_m_by_n_fn_t sad_fn_;
- virtual unsigned int SAD(unsigned int max_sad) {
- return sad_fn_(source_data_, source_stride_,
- reference_data_, reference_stride_,
- max_sad);
+ virtual uint8_t* GetReference(int block_idx) {
+ return reference_data_ + block_idx * kDataBlockSize;
}
  // Sum of Absolute Differences. Given two blocks, calculate the absolute
  // difference between pixels at the same relative location; accumulate.
- unsigned int ReferenceSAD(unsigned int max_sad) {
+ unsigned int ReferenceSAD(unsigned int max_sad, int block_idx = 0) {
unsigned int sad = 0;
+ const uint8_t* const reference = GetReference(block_idx);
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
sad += abs(source_data_[h * source_stride_ + w]
- - reference_data_[h * reference_stride_ + w]);
+ - reference[h * reference_stride_ + w]);
}
if (sad > max_sad) {
break;
@@ -103,6 +121,31 @@ class SADTest : public PARAMS(int, int, sad_m_by_n_fn_t) {
}
}
+ int width_, height_;
+ static uint8_t* source_data_;
+ int source_stride_;
+ static uint8_t* reference_data_;
+ int reference_stride_;
+
+ ACMRandom rnd_;
+};
+
+class SADTest : public SADTestBase,
+ public ::testing::WithParamInterface<sad_m_by_n_test_param_t> {
+ public:
+ SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
+
+ protected:
+ unsigned int SAD(unsigned int max_sad, int block_idx = 0) {
+ unsigned int ret;
+ const uint8_t* const reference = GetReference(block_idx);
+
+ REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_,
+ max_sad));
+ return ret;
+ }
+
void CheckSad(unsigned int max_sad) {
unsigned int reference_sad, exp_sad;
@@ -116,19 +159,37 @@ class SADTest : public PARAMS(int, int, sad_m_by_n_fn_t) {
ASSERT_GE(exp_sad, reference_sad);
}
}
+};
- // Handle blocks up to 16x16 with stride up to 32
- int height_, width_;
- static uint8_t* source_data_;
- int source_stride_;
- static uint8_t* reference_data_;
- int reference_stride_;
+class SADx4Test : public SADTestBase,
+ public ::testing::WithParamInterface<sad_n_by_n_by_4_test_param_t> {
+ public:
+ SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
- ACMRandom rnd_;
+ protected:
+ void SADs(unsigned int *results) {
+ const uint8_t* refs[] = {GetReference(0), GetReference(1),
+ GetReference(2), GetReference(3)};
+
+ REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
+ refs, reference_stride_,
+ results));
+ }
+
+ void CheckSADs() {
+ unsigned int reference_sad, exp_sad[4];
+
+ SADs(exp_sad);
+ for (int block = 0; block < 4; block++) {
+ reference_sad = ReferenceSAD(UINT_MAX, block);
+
+ EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
+ }
+ }
};
-uint8_t* SADTest::source_data_ = NULL;
-uint8_t* SADTest::reference_data_ = NULL;
+uint8_t* SADTestBase::source_data_ = NULL;
+uint8_t* SADTestBase::reference_data_ = NULL;
TEST_P(SADTest, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
@@ -136,12 +197,30 @@ TEST_P(SADTest, MaxRef) {
CheckSad(UINT_MAX);
}
+TEST_P(SADx4Test, MaxRef) {
+ FillConstant(source_data_, source_stride_, 0);
+ FillConstant(GetReference(0), reference_stride_, 255);
+ FillConstant(GetReference(1), reference_stride_, 255);
+ FillConstant(GetReference(2), reference_stride_, 255);
+ FillConstant(GetReference(3), reference_stride_, 255);
+ CheckSADs();
+}
+
TEST_P(SADTest, MaxSrc) {
FillConstant(source_data_, source_stride_, 255);
FillConstant(reference_data_, reference_stride_, 0);
CheckSad(UINT_MAX);
}
+TEST_P(SADx4Test, MaxSrc) {
+ FillConstant(source_data_, source_stride_, 255);
+ FillConstant(GetReference(0), reference_stride_, 0);
+ FillConstant(GetReference(1), reference_stride_, 0);
+ FillConstant(GetReference(2), reference_stride_, 0);
+ FillConstant(GetReference(3), reference_stride_, 0);
+ CheckSADs();
+}
+
TEST_P(SADTest, ShortRef) {
int tmp_stride = reference_stride_;
reference_stride_ >>= 1;
@@ -151,6 +230,18 @@ TEST_P(SADTest, ShortRef) {
reference_stride_ = tmp_stride;
}
+TEST_P(SADx4Test, ShortRef) {
+ int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ reference_stride_ = tmp_stride;
+}
+
TEST_P(SADTest, UnalignedRef) {
// The reference frame, but not the source frame, may be unaligned for
// certain types of searches.
@@ -162,6 +253,20 @@ TEST_P(SADTest, UnalignedRef) {
reference_stride_ = tmp_stride;
}
+TEST_P(SADx4Test, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ reference_stride_ = tmp_stride;
+}
+
TEST_P(SADTest, ShortSrc) {
int tmp_stride = source_stride_;
source_stride_ >>= 1;
@@ -171,6 +276,18 @@ TEST_P(SADTest, ShortSrc) {
source_stride_ = tmp_stride;
}
+TEST_P(SADx4Test, ShortSrc) {
+ int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ source_stride_ = tmp_stride;
+}
+
TEST_P(SADTest, MaxSAD) {
// Verify that, when max_sad is set, the implementation does not return a
// value lower than the reference.
@@ -181,17 +298,75 @@ TEST_P(SADTest, MaxSAD) {
using std::tr1::make_tuple;
+#if CONFIG_VP8_ENCODER
const sad_m_by_n_fn_t sad_16x16_c = vp8_sad16x16_c;
const sad_m_by_n_fn_t sad_8x16_c = vp8_sad8x16_c;
const sad_m_by_n_fn_t sad_16x8_c = vp8_sad16x8_c;
const sad_m_by_n_fn_t sad_8x8_c = vp8_sad8x8_c;
const sad_m_by_n_fn_t sad_4x4_c = vp8_sad4x4_c;
-INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_c),
- make_tuple(8, 16, sad_8x16_c),
- make_tuple(16, 8, sad_16x8_c),
- make_tuple(8, 8, sad_8x8_c),
- make_tuple(4, 4, sad_4x4_c)));
+#endif
+#if CONFIG_VP9_ENCODER
+const sad_m_by_n_fn_t sad_64x64_c_vp9 = vp9_sad64x64_c;
+const sad_m_by_n_fn_t sad_32x32_c_vp9 = vp9_sad32x32_c;
+const sad_m_by_n_fn_t sad_16x16_c_vp9 = vp9_sad16x16_c;
+const sad_m_by_n_fn_t sad_8x16_c_vp9 = vp9_sad8x16_c;
+const sad_m_by_n_fn_t sad_16x8_c_vp9 = vp9_sad16x8_c;
+const sad_m_by_n_fn_t sad_8x8_c_vp9 = vp9_sad8x8_c;
+const sad_m_by_n_fn_t sad_8x4_c_vp9 = vp9_sad8x4_c;
+const sad_m_by_n_fn_t sad_4x8_c_vp9 = vp9_sad4x8_c;
+const sad_m_by_n_fn_t sad_4x4_c_vp9 = vp9_sad4x4_c;
+#endif
+const sad_m_by_n_test_param_t c_tests[] = {
+#if CONFIG_VP8_ENCODER
+ make_tuple(16, 16, sad_16x16_c),
+ make_tuple(8, 16, sad_8x16_c),
+ make_tuple(16, 8, sad_16x8_c),
+ make_tuple(8, 8, sad_8x8_c),
+ make_tuple(4, 4, sad_4x4_c),
+#endif
+#if CONFIG_VP9_ENCODER
+ make_tuple(64, 64, sad_64x64_c_vp9),
+ make_tuple(32, 32, sad_32x32_c_vp9),
+ make_tuple(16, 16, sad_16x16_c_vp9),
+ make_tuple(8, 16, sad_8x16_c_vp9),
+ make_tuple(16, 8, sad_16x8_c_vp9),
+ make_tuple(8, 8, sad_8x8_c_vp9),
+ make_tuple(8, 4, sad_8x4_c_vp9),
+ make_tuple(4, 8, sad_4x8_c_vp9),
+ make_tuple(4, 4, sad_4x4_c_vp9),
+#endif
+};
+INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
+
+#if CONFIG_VP9_ENCODER
+const sad_n_by_n_by_4_fn_t sad_64x64x4d_c = vp9_sad64x64x4d_c;
+const sad_n_by_n_by_4_fn_t sad_64x32x4d_c = vp9_sad64x32x4d_c;
+const sad_n_by_n_by_4_fn_t sad_32x64x4d_c = vp9_sad32x64x4d_c;
+const sad_n_by_n_by_4_fn_t sad_32x32x4d_c = vp9_sad32x32x4d_c;
+const sad_n_by_n_by_4_fn_t sad_32x16x4d_c = vp9_sad32x16x4d_c;
+const sad_n_by_n_by_4_fn_t sad_16x32x4d_c = vp9_sad16x32x4d_c;
+const sad_n_by_n_by_4_fn_t sad_16x16x4d_c = vp9_sad16x16x4d_c;
+const sad_n_by_n_by_4_fn_t sad_16x8x4d_c = vp9_sad16x8x4d_c;
+const sad_n_by_n_by_4_fn_t sad_8x16x4d_c = vp9_sad8x16x4d_c;
+const sad_n_by_n_by_4_fn_t sad_8x8x4d_c = vp9_sad8x8x4d_c;
+const sad_n_by_n_by_4_fn_t sad_8x4x4d_c = vp9_sad8x4x4d_c;
+const sad_n_by_n_by_4_fn_t sad_4x8x4d_c = vp9_sad4x8x4d_c;
+const sad_n_by_n_by_4_fn_t sad_4x4x4d_c = vp9_sad4x4x4d_c;
+INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::Values(
+ make_tuple(64, 64, sad_64x64x4d_c),
+ make_tuple(64, 32, sad_64x32x4d_c),
+ make_tuple(32, 64, sad_32x64x4d_c),
+ make_tuple(32, 32, sad_32x32x4d_c),
+ make_tuple(32, 16, sad_32x16x4d_c),
+ make_tuple(16, 32, sad_16x32x4d_c),
+ make_tuple(16, 16, sad_16x16x4d_c),
+ make_tuple(16, 8, sad_16x8x4d_c),
+ make_tuple(8, 16, sad_8x16x4d_c),
+ make_tuple(8, 8, sad_8x8x4d_c),
+ make_tuple(8, 4, sad_8x4x4d_c),
+ make_tuple(4, 8, sad_4x8x4d_c),
+ make_tuple(4, 4, sad_4x4x4d_c)));
+#endif
// ARM tests
#if HAVE_MEDIA
@@ -216,35 +391,158 @@ INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values(
// X86 tests
#if HAVE_MMX
+#if CONFIG_VP8_ENCODER
const sad_m_by_n_fn_t sad_16x16_mmx = vp8_sad16x16_mmx;
const sad_m_by_n_fn_t sad_8x16_mmx = vp8_sad8x16_mmx;
const sad_m_by_n_fn_t sad_16x8_mmx = vp8_sad16x8_mmx;
const sad_m_by_n_fn_t sad_8x8_mmx = vp8_sad8x8_mmx;
const sad_m_by_n_fn_t sad_4x4_mmx = vp8_sad4x4_mmx;
-INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_mmx),
- make_tuple(8, 16, sad_8x16_mmx),
- make_tuple(16, 8, sad_16x8_mmx),
- make_tuple(8, 8, sad_8x8_mmx),
- make_tuple(4, 4, sad_4x4_mmx)));
#endif
+#if CONFIG_VP9_ENCODER
+const sad_m_by_n_fn_t sad_16x16_mmx_vp9 = vp9_sad16x16_mmx;
+const sad_m_by_n_fn_t sad_8x16_mmx_vp9 = vp9_sad8x16_mmx;
+const sad_m_by_n_fn_t sad_16x8_mmx_vp9 = vp9_sad16x8_mmx;
+const sad_m_by_n_fn_t sad_8x8_mmx_vp9 = vp9_sad8x8_mmx;
+const sad_m_by_n_fn_t sad_4x4_mmx_vp9 = vp9_sad4x4_mmx;
+#endif
+
+const sad_m_by_n_test_param_t mmx_tests[] = {
+#if CONFIG_VP8_ENCODER
+ make_tuple(16, 16, sad_16x16_mmx),
+ make_tuple(8, 16, sad_8x16_mmx),
+ make_tuple(16, 8, sad_16x8_mmx),
+ make_tuple(8, 8, sad_8x8_mmx),
+ make_tuple(4, 4, sad_4x4_mmx),
+#endif
+#if CONFIG_VP9_ENCODER
+ make_tuple(16, 16, sad_16x16_mmx_vp9),
+ make_tuple(8, 16, sad_8x16_mmx_vp9),
+ make_tuple(16, 8, sad_16x8_mmx_vp9),
+ make_tuple(8, 8, sad_8x8_mmx_vp9),
+ make_tuple(4, 4, sad_4x4_mmx_vp9),
+#endif
+};
+INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests));
+#endif
+
+#if HAVE_SSE
+#if CONFIG_VP9_ENCODER
+#if CONFIG_USE_X86INC
+const sad_m_by_n_fn_t sad_4x4_sse_vp9 = vp9_sad4x4_sse;
+const sad_m_by_n_fn_t sad_4x8_sse_vp9 = vp9_sad4x8_sse;
+INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::Values(
+ make_tuple(4, 4, sad_4x4_sse_vp9),
+ make_tuple(4, 8, sad_4x8_sse_vp9)));
+
+const sad_n_by_n_by_4_fn_t sad_4x8x4d_sse = vp9_sad4x8x4d_sse;
+const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse = vp9_sad4x4x4d_sse;
+INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::Values(
+ make_tuple(4, 8, sad_4x8x4d_sse),
+ make_tuple(4, 4, sad_4x4x4d_sse)));
+#endif
+#endif
+#endif
+
#if HAVE_SSE2
+#if CONFIG_VP8_ENCODER
const sad_m_by_n_fn_t sad_16x16_wmt = vp8_sad16x16_wmt;
const sad_m_by_n_fn_t sad_8x16_wmt = vp8_sad8x16_wmt;
const sad_m_by_n_fn_t sad_16x8_wmt = vp8_sad16x8_wmt;
const sad_m_by_n_fn_t sad_8x8_wmt = vp8_sad8x8_wmt;
const sad_m_by_n_fn_t sad_4x4_wmt = vp8_sad4x4_wmt;
-INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_wmt),
- make_tuple(8, 16, sad_8x16_wmt),
- make_tuple(16, 8, sad_16x8_wmt),
- make_tuple(8, 8, sad_8x8_wmt),
- make_tuple(4, 4, sad_4x4_wmt)));
#endif
+#if CONFIG_VP9_ENCODER
+#if CONFIG_USE_X86INC
+const sad_m_by_n_fn_t sad_64x64_sse2_vp9 = vp9_sad64x64_sse2;
+const sad_m_by_n_fn_t sad_64x32_sse2_vp9 = vp9_sad64x32_sse2;
+const sad_m_by_n_fn_t sad_32x64_sse2_vp9 = vp9_sad32x64_sse2;
+const sad_m_by_n_fn_t sad_32x32_sse2_vp9 = vp9_sad32x32_sse2;
+const sad_m_by_n_fn_t sad_32x16_sse2_vp9 = vp9_sad32x16_sse2;
+const sad_m_by_n_fn_t sad_16x32_sse2_vp9 = vp9_sad16x32_sse2;
+const sad_m_by_n_fn_t sad_16x16_sse2_vp9 = vp9_sad16x16_sse2;
+const sad_m_by_n_fn_t sad_16x8_sse2_vp9 = vp9_sad16x8_sse2;
+const sad_m_by_n_fn_t sad_8x16_sse2_vp9 = vp9_sad8x16_sse2;
+const sad_m_by_n_fn_t sad_8x8_sse2_vp9 = vp9_sad8x8_sse2;
+const sad_m_by_n_fn_t sad_8x4_sse2_vp9 = vp9_sad8x4_sse2;
+#endif
+#endif
+const sad_m_by_n_test_param_t sse2_tests[] = {
+#if CONFIG_VP8_ENCODER
+ make_tuple(16, 16, sad_16x16_wmt),
+ make_tuple(8, 16, sad_8x16_wmt),
+ make_tuple(16, 8, sad_16x8_wmt),
+ make_tuple(8, 8, sad_8x8_wmt),
+ make_tuple(4, 4, sad_4x4_wmt),
+#endif
+#if CONFIG_VP9_ENCODER
+#if CONFIG_USE_X86INC
+ make_tuple(64, 64, sad_64x64_sse2_vp9),
+ make_tuple(64, 32, sad_64x32_sse2_vp9),
+ make_tuple(32, 64, sad_32x64_sse2_vp9),
+ make_tuple(32, 32, sad_32x32_sse2_vp9),
+ make_tuple(32, 16, sad_32x16_sse2_vp9),
+ make_tuple(16, 32, sad_16x32_sse2_vp9),
+ make_tuple(16, 16, sad_16x16_sse2_vp9),
+ make_tuple(16, 8, sad_16x8_sse2_vp9),
+ make_tuple(8, 16, sad_8x16_sse2_vp9),
+ make_tuple(8, 8, sad_8x8_sse2_vp9),
+ make_tuple(8, 4, sad_8x4_sse2_vp9),
+#endif
+#endif
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
+
+#if CONFIG_VP9_ENCODER
+#if CONFIG_USE_X86INC
+const sad_n_by_n_by_4_fn_t sad_64x64x4d_sse2 = vp9_sad64x64x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_64x32x4d_sse2 = vp9_sad64x32x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_32x64x4d_sse2 = vp9_sad32x64x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_32x32x4d_sse2 = vp9_sad32x32x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_32x16x4d_sse2 = vp9_sad32x16x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_16x32x4d_sse2 = vp9_sad16x32x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse2 = vp9_sad16x16x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse2 = vp9_sad16x8x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse2 = vp9_sad8x16x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse2 = vp9_sad8x8x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_8x4x4d_sse2 = vp9_sad8x4x4d_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::Values(
+ make_tuple(64, 64, sad_64x64x4d_sse2),
+ make_tuple(64, 32, sad_64x32x4d_sse2),
+ make_tuple(32, 64, sad_32x64x4d_sse2),
+ make_tuple(32, 32, sad_32x32x4d_sse2),
+ make_tuple(32, 16, sad_32x16x4d_sse2),
+ make_tuple(16, 32, sad_16x32x4d_sse2),
+ make_tuple(16, 16, sad_16x16x4d_sse2),
+ make_tuple(16, 8, sad_16x8x4d_sse2),
+ make_tuple(8, 16, sad_8x16x4d_sse2),
+ make_tuple(8, 8, sad_8x8x4d_sse2),
+ make_tuple(8, 4, sad_8x4x4d_sse2)));
+#endif
+#endif
+#endif
+
+#if HAVE_SSE3
+#if CONFIG_VP8_ENCODER
+const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse3 = vp8_sad16x16x4d_sse3;
+const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse3 = vp8_sad16x8x4d_sse3;
+const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse3 = vp8_sad8x16x4d_sse3;
+const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse3 = vp8_sad8x8x4d_sse3;
+const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse3 = vp8_sad4x4x4d_sse3;
+INSTANTIATE_TEST_CASE_P(SSE3, SADx4Test, ::testing::Values(
+ make_tuple(16, 16, sad_16x16x4d_sse3),
+ make_tuple(16, 8, sad_16x8x4d_sse3),
+ make_tuple(8, 16, sad_8x16x4d_sse3),
+ make_tuple(8, 8, sad_8x8x4d_sse3),
+ make_tuple(4, 4, sad_4x4x4d_sse3)));
+#endif
+#endif
+
#if HAVE_SSSE3
+#if CONFIG_USE_X86INC
const sad_m_by_n_fn_t sad_16x16_sse3 = vp8_sad16x16_sse3;
INSTANTIATE_TEST_CASE_P(SSE3, SADTest, ::testing::Values(
make_tuple(16, 16, sad_16x16_sse3)));
#endif
+#endif
} // namespace
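
For reference, the scalar loop the SAD tests compare against, restated as a standalone sketch (illustrative, not part of the patch); the MaxRef/MaxSrc cases above drive it to its per-block maximum of width * height * 255:

    #include <cstdio>
    #include <cstdlib>

    // Free-function restatement of ReferenceSAD above: accumulate |src - ref|
    // over a width x height block, honoring separate strides.
    static unsigned int ScalarSad(const unsigned char *src, int src_stride,
                                  const unsigned char *ref, int ref_stride,
                                  int width, int height) {
      unsigned int sad = 0;
      for (int h = 0; h < height; ++h)
        for (int w = 0; w < width; ++w)
          sad += abs(src[h * src_stride + w] - ref[h * ref_stride + w]);
      return sad;
    }

    int main() {
      unsigned char src[4 * 4], ref[4 * 4];
      for (int i = 0; i < 16; ++i) { src[i] = 0; ref[i] = 255; }
      // An all-zero 4x4 source against an all-255 reference: 16 * 255 = 4080,
      // the extreme the MaxRef/MaxSrc tests exercise at each block size.
      printf("%u\n", ScalarSad(src, 4, ref, 4, 4, 4));
      return 0;
    }
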
diff --git a/libvpx/test/set_roi.cc b/libvpx/test/set_roi.cc
index 3b6112e..9d2e771 100644
--- a/libvpx/test/set_roi.cc
+++ b/libvpx/test/set_roi.cc
@@ -17,15 +17,19 @@
#include <sys/types.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/acm_random.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
extern "C" {
#include "vp8/encoder/onyx_int.h"
}
+using libvpx_test::ACMRandom;
+
namespace {
TEST(Vp8RoiMapTest, ParameterCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
int delta_q[MAX_MB_SEGMENTS] = { -2, -25, 0, 31 };
int delta_lf[MAX_MB_SEGMENTS] = { -2, -25, 0, 31 };
unsigned int threshold[MAX_MB_SEGMENTS] = { 0, 100, 200, 300 };
@@ -121,10 +125,10 @@ TEST(Vp8RoiMapTest, ParameterCheck) {
for (int i = 0; i < 1000; ++i) {
int rand_deltas[4];
int deltas_valid;
- rand_deltas[0] = (rand() % 160) - 80;
- rand_deltas[1] = (rand() % 160) - 80;
- rand_deltas[2] = (rand() % 160) - 80;
- rand_deltas[3] = (rand() % 160) - 80;
+ rand_deltas[0] = rnd(160) - 80;
+ rand_deltas[1] = rnd(160) - 80;
+ rand_deltas[2] = rnd(160) - 80;
+ rand_deltas[3] = rnd(160) - 80;
deltas_valid = ((abs(rand_deltas[0]) <= 63) &&
(abs(rand_deltas[1]) <= 63) &&
diff --git a/libvpx/test/sixtap_predict_test.cc b/libvpx/test/sixtap_predict_test.cc
index 06f14a1..ee4faac 100644
--- a/libvpx/test/sixtap_predict_test.cc
+++ b/libvpx/test/sixtap_predict_test.cc
@@ -12,11 +12,13 @@
#include <stdlib.h>
#include <string.h>
#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "test/util.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
extern "C" {
#include "./vpx_config.h"
-#include "./vpx_rtcd.h"
+#include "./vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
}
@@ -47,6 +49,10 @@ class SixtapPredictTest : public PARAMS(int, int, sixtap_predict_fn_t) {
dst_c_ = NULL;
}
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+
protected:
// Make test arrays big enough for 16x16 functions. Six-tap filters
// need 5 extra pixels outside of the macroblock.
@@ -60,9 +66,9 @@ class SixtapPredictTest : public PARAMS(int, int, sixtap_predict_fn_t) {
width_ = GET_PARAM(0);
height_ = GET_PARAM(1);
sixtap_predict_ = GET_PARAM(2);
- memset(src_, 0, sizeof(src_));
- memset(dst_, 0, sizeof(dst_));
- memset(dst_c_, 0, sizeof(dst_c_));
+ memset(src_, 0, kSrcSize);
+ memset(dst_, 0, kDstSize);
+ memset(dst_c_, 0, kDstSize);
}
int width_;
@@ -136,8 +142,8 @@ TEST_P(SixtapPredictTest, TestWithPresetData) {
uint8_t *src = const_cast<uint8_t*>(test_data);
- sixtap_predict_(&src[kSrcStride * 2 + 2 + 1], kSrcStride,
- 2, 2, dst_, kDstStride);
+ REGISTER_STATE_CHECK(sixtap_predict_(&src[kSrcStride * 2 + 2 + 1], kSrcStride,
+ 2, 2, dst_, kDstStride));
for (int i = 0; i < height_; ++i)
for (int j = 0; j < width_; ++j)
@@ -162,8 +168,9 @@ TEST_P(SixtapPredictTest, TestWithRandomData) {
xoffset, yoffset, dst_c_, kDstStride);
// Run test.
- sixtap_predict_(&src_[kSrcStride * 2 + 2 + 1], kSrcStride,
- xoffset, yoffset, dst_, kDstStride);
+ REGISTER_STATE_CHECK(
+ sixtap_predict_(&src_[kSrcStride * 2 + 2 + 1], kSrcStride,
+ xoffset, yoffset, dst_, kDstStride));
for (int i = 0; i < height_; ++i)
for (int j = 0; j < width_; ++j)
diff --git a/libvpx/test/subtract_test.cc b/libvpx/test/subtract_test.cc
index 99363de..d1f2729 100644
--- a/libvpx/test/subtract_test.cc
+++ b/libvpx/test/subtract_test.cc
@@ -10,9 +10,11 @@
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
extern "C" {
-#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vp8/encoder/block.h"
#include "vpx_mem/vpx_mem.h"
@@ -22,7 +24,12 @@ typedef void (*subtract_b_fn_t)(BLOCK *be, BLOCKD *bd, int pitch);
namespace {
-class SubtractBlockTest : public ::testing::TestWithParam<subtract_b_fn_t> {};
+class SubtractBlockTest : public ::testing::TestWithParam<subtract_b_fn_t> {
+ public:
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+};
using libvpx_test::ACMRandom;
@@ -44,7 +51,7 @@ TEST_P(SubtractBlockTest, SimpleSubtract) {
bd.predictor = reinterpret_cast<unsigned char*>(
vpx_memalign(16, kBlockHeight * kDiffPredStride * sizeof(*bd.predictor)));
- for(int i = 0; kSrcStride[i] > 0; ++i) {
+ for (int i = 0; kSrcStride[i] > 0; ++i) {
// start at block0
be.src = 0;
be.base_src = &source;
@@ -54,7 +61,7 @@ TEST_P(SubtractBlockTest, SimpleSubtract) {
int16_t *src_diff = be.src_diff;
for (int r = 0; r < kBlockHeight; ++r) {
for (int c = 0; c < kBlockWidth; ++c) {
- src_diff[c] = 0xa5a5;
+ src_diff[c] = static_cast<int16_t>(0xa5a5);
}
src_diff += kDiffPredStride;
}
@@ -77,7 +84,7 @@ TEST_P(SubtractBlockTest, SimpleSubtract) {
predictor += kDiffPredStride;
}
- GetParam()(&be, &bd, kDiffPredStride);
+ REGISTER_STATE_CHECK(GetParam()(&be, &bd, kDiffPredStride));
base_src = *be.base_src;
src_diff = be.src_diff;
diff --git a/libvpx/test/superframe_test.cc b/libvpx/test/superframe_test.cc
new file mode 100644
index 0000000..d91e7b1
--- /dev/null
+++ b/libvpx/test/superframe_test.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <climits>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
+#include "test/util.h"
+
+namespace {
+
+class SuperframeTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ protected:
+ SuperframeTest() : EncoderTest(GET_PARAM(0)), modified_buf_(NULL),
+ last_sf_pts_(0) {}
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(GET_PARAM(1));
+ sf_count_ = 0;
+ sf_count_max_ = INT_MAX;
+ }
+
+ virtual void TearDown() {
+ delete[] modified_buf_;
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (video->frame() == 1) {
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
+ }
+ }
+
+ virtual const vpx_codec_cx_pkt_t * MutateEncoderOutputHook(
+ const vpx_codec_cx_pkt_t *pkt) {
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
+ return pkt;
+
+ const uint8_t *buffer = reinterpret_cast<uint8_t*>(pkt->data.frame.buf);
+ const uint8_t marker = buffer[pkt->data.frame.sz - 1];
+ const int frames = (marker & 0x7) + 1;
+ const int mag = ((marker >> 3) & 3) + 1;
+ const unsigned int index_sz = 2 + mag * frames;
+ if ((marker & 0xe0) == 0xc0 &&
+ pkt->data.frame.sz >= index_sz &&
+ buffer[pkt->data.frame.sz - index_sz] == marker) {
+      // Frame is a superframe; strip off the index.
+ if (modified_buf_)
+ delete[] modified_buf_;
+ modified_buf_ = new uint8_t[pkt->data.frame.sz - index_sz];
+ memcpy(modified_buf_, pkt->data.frame.buf,
+ pkt->data.frame.sz - index_sz);
+ modified_pkt_ = *pkt;
+ modified_pkt_.data.frame.buf = modified_buf_;
+ modified_pkt_.data.frame.sz -= index_sz;
+
+ sf_count_++;
+ last_sf_pts_ = pkt->data.frame.pts;
+ return &modified_pkt_;
+ }
+
+    // Make sure we encode a few more frames after the last superframe.
+ abort_ |= sf_count_ > sf_count_max_ &&
+ pkt->data.frame.pts - last_sf_pts_ >= 5;
+ return pkt;
+ }
+
+ int sf_count_;
+ int sf_count_max_;
+ vpx_codec_cx_pkt_t modified_pkt_;
+ uint8_t *modified_buf_;
+ vpx_codec_pts_t last_sf_pts_;
+};
+
+TEST_P(SuperframeTest, TestSuperframeIndexIsOptional) {
+ sf_count_max_ = 0; // early exit on successful test.
+ cfg_.g_lag_in_frames = 25;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 40);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ EXPECT_EQ(sf_count_, 1);
+}
+
+VP9_INSTANTIATE_TEST_CASE(SuperframeTest, ::testing::Values(
+ ::libvpx_test::kTwoPassGood));
+} // namespace
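
The superframe index parsing in MutateEncoderOutputHook, worked through on an illustrative marker byte (0xc9 is made up, not from a real bitstream): the top three bits must be 110, the low three bits encode the frame count minus one, and bits 3-4 encode the per-frame size field width minus one:

    #include <cstdio>

    int main() {
      const unsigned char marker = 0xc9;  // illustrative value, 0b11001001
      if ((marker & 0xe0) == 0xc0) {      // superframe marker: top bits 110
        const int frames = (marker & 0x7) + 1;    // low 3 bits: frames - 1
        const int mag = ((marker >> 3) & 3) + 1;  // bits 3-4: size bytes - 1
        const unsigned int index_sz = 2 + mag * frames;
        // -> frames=2, mag=2, index_sz=6; the hook also requires the same
        // marker byte to appear index_sz bytes from the end of the packet.
        printf("frames=%d mag=%d index_sz=%u\n", frames, mag, index_sz);
      }
      return 0;
    }
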
diff --git a/libvpx/test/test-data.sha1 b/libvpx/test/test-data.sha1
index c1b6a83..370ffc1 100644
--- a/libvpx/test/test-data.sha1
+++ b/libvpx/test/test-data.sha1
@@ -1,4 +1,5 @@
d5dfb0151c9051f8c85999255645d7a23916d3c0 hantro_collage_w352h288.yuv
+b87815bf86020c592ccc7a846ba2e28ec8043902 hantro_odd.yuv
5184c46ddca8b1fadd16742e8500115bc8f749da vp80-00-comprehensive-001.ivf
65bf1bbbced81b97bd030f376d1b7f61a224793f vp80-00-comprehensive-002.ivf
906b4c1e99eb734504c504b3f1ad8052137ce672 vp80-00-comprehensive-003.ivf
@@ -120,4 +121,406 @@ f95eb6214571434f1f73ab7833b9ccdf47588020 vp80-03-segmentation-1437.ivf.md5
41d70bb5fa45bc88da1604a0af466930b8dd77b5 vp80-05-sharpness-1438.ivf.md5
086c56378df81b6cee264d7540a7b8f2b405c7a4 vp80-05-sharpness-1439.ivf.md5
d32dc2c4165eb266ea4c23c14a45459b363def32 vp80-05-sharpness-1440.ivf.md5
-8c69dc3d8e563f56ffab5ad1e400d9e689dd23df vp80-05-sharpness-1443.ivf.md5 \ No newline at end of file
+8c69dc3d8e563f56ffab5ad1e400d9e689dd23df vp80-05-sharpness-1443.ivf.md5
+ce881e567fe1d0fbcb2d3e9e6281a1a8d74d82e0 vp90-2-00-quantizer-00.webm
+ac5eda33407d0521c7afca43a63fd305c0cd9d13 vp90-2-00-quantizer-00.webm.md5
+2ca0463f2cfb93d25d7dded174db70b7cb87cb48 vp90-2-00-quantizer-01.webm
+10d98884fc6d9a5f47a2057922b8e25dd48d7786 vp90-2-00-quantizer-01.webm.md5
+d80a2920a5e0819d69dcba8fe260c01f820f8982 vp90-2-00-quantizer-02.webm
+c964c8e5e04165fabbf1c6ee8ee5121d35921965 vp90-2-00-quantizer-02.webm.md5
+fdef046777b5b75c962b715d809dbe2ea331afb9 vp90-2-00-quantizer-03.webm
+f270bee0b0c7aa2bf4c5afe098556b4f3f890faf vp90-2-00-quantizer-03.webm.md5
+66d98609e809394a6ac730787e6724e3badc075a vp90-2-00-quantizer-04.webm
+427433bfe121c4aea1095ec3124fdc174d200e3a vp90-2-00-quantizer-04.webm.md5
+e6e42626d8cadf0b5be16313f69212981b96fee5 vp90-2-00-quantizer-05.webm
+c98f6a9a1af4cfd71416792827304266aad4bd46 vp90-2-00-quantizer-05.webm.md5
+413ef09b721f5dcec1a96e937a97e5873c2e6db6 vp90-2-00-quantizer-06.webm
+5080e940a23805c82e578e21b57fc2c511e76376 vp90-2-00-quantizer-06.webm.md5
+4a50a5f4ac717c30dfaae8bb46702e3542e867de vp90-2-00-quantizer-07.webm
+76c429a02b56762e10ee4db88729d8834b3a70f4 vp90-2-00-quantizer-07.webm.md5
+d2f4e464780bf8b7e647efa18ac777a930e62bc0 vp90-2-00-quantizer-08.webm
+ab94aabf9316111b52d7c531962ed4123313b6ba vp90-2-00-quantizer-08.webm.md5
+174bc58433936dd79550398d744f1072ce7f5693 vp90-2-00-quantizer-09.webm
+e1f7690cd83ccc56d045e17cce552544a5f03810 vp90-2-00-quantizer-09.webm.md5
+52bc1dfd3a97b24d922eb8a31d07527891561f2a vp90-2-00-quantizer-10.webm
+9b37bed893b5f6a4e12f2aa40f02dd40f944d0f8 vp90-2-00-quantizer-10.webm.md5
+10031eecafde1e1d8e6323fe2b2a1d7e77a66869 vp90-2-00-quantizer-11.webm
+fe4620a4bb0e4f5cb9bbfedc4039a22b81b0f5c0 vp90-2-00-quantizer-11.webm.md5
+78e9f7bb77e8e348155bbdfa12790789d1d50c34 vp90-2-00-quantizer-12.webm
+0961d060cc8dd469c6dac8d7d75f927c0bb971b8 vp90-2-00-quantizer-12.webm.md5
+133b77a3bbcef652552d74ffc46afbfe3b8a1cba vp90-2-00-quantizer-13.webm
+df29e5e0f95772af482f540d776f6b9dea4bfa29 vp90-2-00-quantizer-13.webm.md5
+27323afdaf8987e025c27129c74c86502315a206 vp90-2-00-quantizer-14.webm
+ce96a2cc312942f0427a463f15a392870dd69764 vp90-2-00-quantizer-14.webm.md5
+ab58d0b41037829f6bc993910999f4af0212aafd vp90-2-00-quantizer-15.webm
+40f700db606501aa7cb49049624cbdde6409b122 vp90-2-00-quantizer-15.webm.md5
+cd948e66448aafb65998815ce37241f95d7c9ee7 vp90-2-00-quantizer-16.webm
+039b742d149c945ed79c7b9a6384352852a1c116 vp90-2-00-quantizer-16.webm.md5
+62f56e663e13c576764e491cf08f19bd46a71999 vp90-2-00-quantizer-17.webm
+90c5a39bf76e6b3e0a1c0d3e9b68a9fd78be963e vp90-2-00-quantizer-17.webm.md5
+f26ecad7263cd66a614e53ba5d7c00df181affeb vp90-2-00-quantizer-18.webm
+cda0a1c0fca2ec2976ae55124a8a67305508bae6 vp90-2-00-quantizer-18.webm.md5
+94bfc4c04fcfe139a63b98c569e8c14ba98c401f vp90-2-00-quantizer-19.webm
+5b8ec169ccf67d8a0a8e46a62eb173f5a1dbaf4f vp90-2-00-quantizer-19.webm.md5
+0ee88e9318985e1e245de78c2c4a665885ab76a7 vp90-2-00-quantizer-20.webm
+4b26f7edb4fcd3a1b4cce9ba3cb8650e3ee6e063 vp90-2-00-quantizer-20.webm.md5
+6a995cb2b1db33da8087321df1e646f95c3e32d1 vp90-2-00-quantizer-21.webm
+e216b4a1eceac03efcc433759be54ab8ea87b24b vp90-2-00-quantizer-21.webm.md5
+aa7722fc427e7180115f3c9cd96bb6b2768e7296 vp90-2-00-quantizer-22.webm
+1aa813bd45ae831bf5e79ace4d73dfd25989a07d vp90-2-00-quantizer-22.webm.md5
+7677e5b929ed6d142041f19b8a9cd5822ee1504a vp90-2-00-quantizer-23.webm
+0de0af34abd843d5b37e58baf3ed96a6104b64c3 vp90-2-00-quantizer-23.webm.md5
+b2995cbe1128b2d4926f1b28d01c501ecb6be8c8 vp90-2-00-quantizer-24.webm
+db6033af2ba2f2bca62468fb4b8808e474f93923 vp90-2-00-quantizer-24.webm.md5
+8135ba35587fd92cd4667be7896323d9b634401c vp90-2-00-quantizer-25.webm
+3499e00c2cc15876f61f07e3d3cfca54ebcd98fd vp90-2-00-quantizer-25.webm.md5
+af0fa2907746db82d345f6d831fcc1b2862a29fb vp90-2-00-quantizer-26.webm
+cd6fe3d14dab48886ebf65be00e6ed9616ebe5a7 vp90-2-00-quantizer-26.webm.md5
+bd0002e91323776beb5ff11e06edcf19fc08e9b9 vp90-2-00-quantizer-27.webm
+fe72154ef196067d6c272521012dd79706496cac vp90-2-00-quantizer-27.webm.md5
+fc15eb606f81455ff03df16bf3432296b002c43c vp90-2-00-quantizer-28.webm
+40b2e24b542206a6bfd746ef199e49ccea07678a vp90-2-00-quantizer-28.webm.md5
+3090bbf913cad0b2eddca7228f5ed51a58378b8d vp90-2-00-quantizer-29.webm
+eb59745e0912d8ed6c928268bcf265237c9ba93f vp90-2-00-quantizer-29.webm.md5
+c615abdca9c25e1cb110d908edbedfb3b7c92b91 vp90-2-00-quantizer-30.webm
+ad0f4fe6733e4e7cdfe8ef8722bb341dcc7538c0 vp90-2-00-quantizer-30.webm.md5
+037d9f242086cfb085518f6416259defa82d5fc2 vp90-2-00-quantizer-31.webm
+4654b40792572f0a790874c6347ef9196d86c1a7 vp90-2-00-quantizer-31.webm.md5
+505899f3f3515044c5c8b3213d9b9d16f614619d vp90-2-00-quantizer-32.webm
+659a2e6dd02df323f62600626859006640b445df vp90-2-00-quantizer-32.webm.md5
+8b32ec9c3b7e5ca8ddc6b8aea1c1cb7ca996bccc vp90-2-00-quantizer-33.webm
+5b175ef1120ddeba4feae1247bf381bbc4e816ce vp90-2-00-quantizer-33.webm.md5
+4d283755d17e287b1d099a80604398f60d7fb6ea vp90-2-00-quantizer-34.webm
+22a739de95acfeb27524e3700b8f678a9ad744d8 vp90-2-00-quantizer-34.webm.md5
+4296f56a892a412d3d4f64824718dd566c4e6459 vp90-2-00-quantizer-35.webm
+c532c9c8dc7b3506fc6a51e5c20c17ef0ac039e7 vp90-2-00-quantizer-35.webm.md5
+6f54e11da461e4410dd9075b015e2d9bc1d07dfb vp90-2-00-quantizer-36.webm
+0b3573f5addea4e3eb11a0b85f068299d5bdad78 vp90-2-00-quantizer-36.webm.md5
+210581682a26c2c4375efc785c36e07539888bc2 vp90-2-00-quantizer-37.webm
+2b4fb6f8ba975237858e61cc8f560bcfc87cb38e vp90-2-00-quantizer-37.webm.md5
+a15ef31283dfc4860f837fe200eb32a445f59629 vp90-2-00-quantizer-38.webm
+fb76771f3a795054b9936f70da7505c3ac585284 vp90-2-00-quantizer-38.webm.md5
+1df8433a441412831daae6726df89fa70d21b14d vp90-2-00-quantizer-39.webm
+39e162c09a20e7e684868097766347014371fee6 vp90-2-00-quantizer-39.webm.md5
+5330e4788ab9129dbb25a7a7d5411104521248b6 vp90-2-00-quantizer-40.webm
+872cc0f2cc9dbf000f89eadb4d8f9940e48e00b1 vp90-2-00-quantizer-40.webm.md5
+d88d03b982889e399a78d7a06eeb1cf30e6c2da2 vp90-2-00-quantizer-41.webm
+5b4f7217e57fa2a221011d0b32f8d0409496b7b6 vp90-2-00-quantizer-41.webm.md5
+9e16406e3e26955a6e17d455ef1ef64bbfa26e53 vp90-2-00-quantizer-42.webm
+0219d090cf37daabe19256ba8e932ba4874b92e4 vp90-2-00-quantizer-42.webm.md5
+a9b15843486fb05f8cd15437ef279782a42b75db vp90-2-00-quantizer-43.webm
+3c9b0b4c607f9579a31726bfcf56729334ddc686 vp90-2-00-quantizer-43.webm.md5
+1dbc931ac446c91eabe7213efff55b596cccf07c vp90-2-00-quantizer-44.webm
+73bc8f675103abaef3d9f73a2742b3bffd726d23 vp90-2-00-quantizer-44.webm.md5
+7c6c1be15beb9d6201204b018966c8c4f9777efc vp90-2-00-quantizer-45.webm
+c907b29da821f790c6748de61f592689312e4e36 vp90-2-00-quantizer-45.webm.md5
+07b434da1a467580f73b32177ee11b3e00f65a0d vp90-2-00-quantizer-46.webm
+7b2b7ce60c50bc970bc0ada46d7a7ce440148da3 vp90-2-00-quantizer-46.webm.md5
+233d0465fb1a6fa36e9f89bd2193ac79bd4d2809 vp90-2-00-quantizer-47.webm
+527e0a9fb932efe915027ffe077f9e8d3a4fb139 vp90-2-00-quantizer-47.webm.md5
+719613df7307e205c3fdb6acfb373849c5ab23c7 vp90-2-00-quantizer-48.webm
+65ab6c9d1b682c183b201c7ff42b90343ce3e304 vp90-2-00-quantizer-48.webm.md5
+3bf04a598325ed0eabae1598ec7f718f715ec672 vp90-2-00-quantizer-49.webm
+ac68c4387ce11fcc998d8ba455ab9b2bb361d240 vp90-2-00-quantizer-49.webm.md5
+d59238fb3a654931c9b65a11e7321b40d1f702e9 vp90-2-00-quantizer-50.webm
+d0576bfede46fd55659f028f2fd28554ceb3e6cc vp90-2-00-quantizer-50.webm.md5
+3f579785101d4209360dd96f8c2ffe9beddf3bee vp90-2-00-quantizer-51.webm
+89fcfe04f4457a7f02ab4a2f94aacbb88aee5789 vp90-2-00-quantizer-51.webm.md5
+28be5836e2fedefe4babf12fc9b79e460ab0a0f4 vp90-2-00-quantizer-52.webm
+f3dd52b70c18345fee740220f35da9c4def2017a vp90-2-00-quantizer-52.webm.md5
+488ad4058c17170665b6acd1021fade9a02771e4 vp90-2-00-quantizer-53.webm
+1cdcb1d4f3a37cf83ad235eb27ec62ed2a01afc7 vp90-2-00-quantizer-53.webm.md5
+682978289cb28cc8c9d39bc797300e45d6039de7 vp90-2-00-quantizer-54.webm
+36c35353f2c03cb099bd710d9994de7d9ed88834 vp90-2-00-quantizer-54.webm.md5
+c398ce49af762a48f10cc4da9fae0769aae5f226 vp90-2-00-quantizer-55.webm
+2cf3570542d984f167ab087f59493c7fb47e0ed2 vp90-2-00-quantizer-55.webm.md5
+3071f18b2fce261aa82d61f81a7ae4ca9a75d0e3 vp90-2-00-quantizer-56.webm
+d3f93f8272b6de31cffb011a26f11abb514efb12 vp90-2-00-quantizer-56.webm.md5
+f4e8e14b1f278801a7eb6f11734780a01b1668e9 vp90-2-00-quantizer-57.webm
+6478fdf1d7faf6db5f19dffc5e1363af358699ee vp90-2-00-quantizer-57.webm.md5
+307dc264f57cc618fff211fa44d7f52767ed9660 vp90-2-00-quantizer-58.webm
+cf231d4a52d492fa692ea4194ec5eb7511fec54e vp90-2-00-quantizer-58.webm.md5
+1fd7cd596170afce2de0b1441b7674bda5723440 vp90-2-00-quantizer-59.webm
+4681f7ef96f63e085c41bb1a964b0df7e67e0b38 vp90-2-00-quantizer-59.webm.md5
+34cdcc81c0ba7085aefbb22d7b4aa9bca3dd7c62 vp90-2-00-quantizer-60.webm
+58691ef53b6b623810e2c57ded374c77535df935 vp90-2-00-quantizer-60.webm.md5
+e6e812406aab81021bb16e772c1db03f75906cb6 vp90-2-00-quantizer-61.webm
+76436eace62f08ff92b61a0845e66667a027db1b vp90-2-00-quantizer-61.webm.md5
+84d811bceed70c950a6a08e572a6e274866e72b1 vp90-2-00-quantizer-62.webm
+2d937cc011eeddd95222b960982da5cd18db580f vp90-2-00-quantizer-62.webm.md5
+0912b295ba0ea09359315315ffd67d22d046f883 vp90-2-00-quantizer-63.webm
+5a829031055d70565f57dbcd47a6ac33619952b3 vp90-2-00-quantizer-63.webm.md5
+0cf9e5ebe0112bdb47b5887ee5d58eb9d4727c00 vp90-2-01-sharpness-1.webm
+5a0476be4448bae8f8ca17ea236c98793a755948 vp90-2-01-sharpness-1.webm.md5
+51e02d7911810cdf5be8b68ac40aedab479a3179 vp90-2-01-sharpness-2.webm
+a0ca5bc87a5ed7c7051f59078daa0d03be1b45b6 vp90-2-01-sharpness-2.webm.md5
+0603f8ad239c07a531d948187f4dafcaf51eda8d vp90-2-01-sharpness-3.webm
+3af8000a69c72fe77881e3176f026c2affb78cc7 vp90-2-01-sharpness-3.webm.md5
+4ca4839f48146252fb261ed88838d80211804841 vp90-2-01-sharpness-4.webm
+08832a1494f84fa9edd40e080bcf2c0e80100c76 vp90-2-01-sharpness-4.webm.md5
+95099dc8f9cbaf9b9a7dd65311923e441ff70731 vp90-2-01-sharpness-5.webm
+93ceee30c140f0b406726c0d896b9db6031c4c7f vp90-2-01-sharpness-5.webm.md5
+ceb4116fb7b078d266d153233b6d62a255a34e4c vp90-2-01-sharpness-6.webm
+da83efe59e537ce538e8b03a6eac63cf25849c9a vp90-2-01-sharpness-6.webm.md5
+b5f7cd19aece3880f9d616a778e5cc24c6b9b505 vp90-2-01-sharpness-7.webm
+2957408d20deac8633941a2169f801bae6f086e1 vp90-2-01-sharpness-7.webm.md5
+ffc096c2ce1050450ad462b5fabd2a5220846319 vp90-2-02-size-08x08.webm
+e36d2ed6fa2746347710b750586aafa6a01ff3ae vp90-2-02-size-08x08.webm.md5
+895b986f9fd55cd879472b31c6a06b82094418c8 vp90-2-02-size-08x10.webm
+079157a19137ccaebba606f2871f45a397347150 vp90-2-02-size-08x10.webm.md5
+1c5992203e62a2b83040ccbecd748b604e19f4c0 vp90-2-02-size-08x16.webm
+9aa45ffdf2078f883bbed01450031b691819c144 vp90-2-02-size-08x16.webm.md5
+d0a8953da1f85f484487408fee5da9e2a8391901 vp90-2-02-size-08x18.webm
+59a5cc17d354c6a23e5e959d666b1456a5d49c56 vp90-2-02-size-08x18.webm.md5
+1b13461a9fc65cb041bacfe4ea6f02d363397d61 vp90-2-02-size-08x32.webm
+2bdddd6878f05d37d84cde056a3f5e7f926ba3d6 vp90-2-02-size-08x32.webm.md5
+2861f0a0daadb62295b0504a1fbe5b50c79a8f59 vp90-2-02-size-08x34.webm
+6b5812cfb8a82d378ea2913bf009e93668020147 vp90-2-02-size-08x34.webm.md5
+02f948216d4246579dc53c47fe55d8fb264ba251 vp90-2-02-size-08x64.webm
+84b55fdee6d9aa820c7a8c62822446184b191767 vp90-2-02-size-08x64.webm.md5
+4b011242cbf42516efd2b197baebb61dd34562c9 vp90-2-02-size-08x66.webm
+6b1fa0a885947b3cc0fe58f75f838e662bd9bb8b vp90-2-02-size-08x66.webm.md5
+4057796be9dd12df48ab607f502ae6aa70eeeab6 vp90-2-02-size-10x08.webm
+71c752c51aec9f48de286b93f4c20e9c11cad7d0 vp90-2-02-size-10x08.webm.md5
+6583c853fa43fc53d51743eac5f3a43a359d45d0 vp90-2-02-size-10x10.webm
+1da524d24af1944b671d4d3f2b398d6e336584c3 vp90-2-02-size-10x10.webm.md5
+ba442fc03ccd3a705c64c83b36f5ada67d198874 vp90-2-02-size-10x16.webm
+7cfd960f232c34c641a4a2a9411b6fd0efb2fc50 vp90-2-02-size-10x16.webm.md5
+cc92ed40eef14f52e4d080cb2c57939dd8326374 vp90-2-02-size-10x18.webm
+db5626275cc55ce970b91c995e74f6838d943aca vp90-2-02-size-10x18.webm.md5
+3a93d501d22325e9fd4c9d8b82e2a432de33c351 vp90-2-02-size-10x32.webm
+5cae51b0c71cfc131651f345f87583eb2903afaf vp90-2-02-size-10x32.webm.md5
+50d2f2b15a9a5178153db44a9e03aaf32b227f67 vp90-2-02-size-10x34.webm
+bb0efe058122641e7f73e94497dda2b9e6c21efd vp90-2-02-size-10x34.webm.md5
+01624ec173e533e0b33fd9bdb91eb7360c7c9175 vp90-2-02-size-10x64.webm
+b9c0e3b054463546356acf5157f9be92fd34732f vp90-2-02-size-10x64.webm.md5
+2942879baf1c09e96b14d0fc84806abfe129c706 vp90-2-02-size-10x66.webm
+bab5f539c2f91952e187456b4beafbb4c01e25ee vp90-2-02-size-10x66.webm.md5
+88d2b63ca5e9ee163d8f20e8886f3df3ff301a66 vp90-2-02-size-16x08.webm
+7f48a0fcf8c25963f3057d7f6669c5f2415834b8 vp90-2-02-size-16x08.webm.md5
+59261eb34c15ea9b5ddd2d416215c1a8b9e6dc1f vp90-2-02-size-16x10.webm
+73a7c209a46dd051c9f7339b6e02ccd5b3b9fc81 vp90-2-02-size-16x10.webm.md5
+066834fef9cf5b9a72932cf4dea5f253e14a976d vp90-2-02-size-16x16.webm
+faec542f52f37601cb9c480d887ae9355be99372 vp90-2-02-size-16x16.webm.md5
+195307b4eb3192271ee4a935b0e48deef0c54cc2 vp90-2-02-size-16x18.webm
+5a92e19e624c0376321d4d0e22c0c91995bc23e1 vp90-2-02-size-16x18.webm.md5
+14f3f884216d7ae16ec521f024a2f2d31bbf9c1a vp90-2-02-size-16x32.webm
+ea622d1c817dd174556f7ee7ccfe4942b34d4845 vp90-2-02-size-16x32.webm.md5
+2e0501100578a5da9dd47e4beea160f945bdd1ba vp90-2-02-size-16x34.webm
+1b8645ef64239334921c5f56b24ce815e6070b05 vp90-2-02-size-16x34.webm.md5
+89a6797fbebebe93215f367229a9152277f5dcfe vp90-2-02-size-16x64.webm
+a03d8c1179ca626a8856fb416d635dbf377979cd vp90-2-02-size-16x64.webm.md5
+0f3a182e0750fcbae0b9eae80c7a53aabafdd18d vp90-2-02-size-16x66.webm
+8cb6736dc2d897c1283919a32068af377d66c59c vp90-2-02-size-16x66.webm.md5
+68fe70dc7914cc1d8d6dcd97388b79196ba3e7f1 vp90-2-02-size-18x08.webm
+874c7fb505be9db3160c57cb405c4dbd5b990dc2 vp90-2-02-size-18x08.webm.md5
+0546352dd78496d4dd86c3727ac2ff36c9e72032 vp90-2-02-size-18x10.webm
+1d80eb36557ea5f25a386495a36f93da0f25316b vp90-2-02-size-18x10.webm.md5
+60fe99e5f5cc99706efa3e0b894e45cbcf0d6330 vp90-2-02-size-18x16.webm
+1ab6cdd89a53662995d103546e6611c84f9292ab vp90-2-02-size-18x16.webm.md5
+f9a8f5fb749d69fd555db6ca093b7f77800c7b4f vp90-2-02-size-18x18.webm
+ace8a66328f7802b15f9989c2720c029c6abd279 vp90-2-02-size-18x18.webm.md5
+a197123a527ec25913a9bf52dc8c347749e00045 vp90-2-02-size-18x32.webm
+34fbd7036752232d1663e70d7f7cdc93f7129202 vp90-2-02-size-18x32.webm.md5
+f219655a639a774a2c9c0a9f45c28dc0b5e75e24 vp90-2-02-size-18x34.webm
+2c4d622a9ea548791c1a07903d3702e9774388bb vp90-2-02-size-18x34.webm.md5
+5308578da48c677d477a5404e19391d1303033c9 vp90-2-02-size-18x64.webm
+e7fd4462527bac38559518ba80e41847db880f15 vp90-2-02-size-18x64.webm.md5
+e109a7e013bd179f97e378542e1e81689ed06802 vp90-2-02-size-18x66.webm
+45c04e422fb383c1f3be04beefaa4490e83bdb1a vp90-2-02-size-18x66.webm.md5
+38844cae5d99caf445f7de33c3ae78494ce36c01 vp90-2-02-size-32x08.webm
+ad018be39e493ca2405225034b1a5b7a42af6f3a vp90-2-02-size-32x08.webm.md5
+7b57eaad55906f9de9903c8657a3fcb2aaf792ea vp90-2-02-size-32x10.webm
+2294425d4e55d275af5e25a0beac9738a1b4ee73 vp90-2-02-size-32x10.webm.md5
+f47ca2ced0d47f761bb0a5fdcd911d3f450fdcc1 vp90-2-02-size-32x16.webm
+ae10981d93913f0ab1f28c1146255e01769aa8c0 vp90-2-02-size-32x16.webm.md5
+08b23ad838b6cf1fbfe3ad7e7775d95573e815fc vp90-2-02-size-32x18.webm
+1ba76f4c4a4ac7aabfa3ce195c1b473535eb7cc8 vp90-2-02-size-32x18.webm.md5
+d5b88ae6c8c25c53dee74d9f1e6ca64244349a57 vp90-2-02-size-32x32.webm
+e39c067a8ee2da52a51641eb1cb7f8eba935eb6b vp90-2-02-size-32x32.webm.md5
+529429920dc36bd899059fa75a767f02c8c60874 vp90-2-02-size-32x34.webm
+56888e7834f52b106e8911e3a7fc0f473b609995 vp90-2-02-size-32x34.webm.md5
+38e848e160391c2b1a55040aadde613b9f4bf15e vp90-2-02-size-32x64.webm
+8950485fb3f68b0e8be234db860e4ec5f5490fd0 vp90-2-02-size-32x64.webm.md5
+5e8670f0b8ec9cefa8795b8959ffbe1a8e1aea94 vp90-2-02-size-32x66.webm
+225df9d7d72ec711b0b60f4aeb65311c97db054a vp90-2-02-size-32x66.webm.md5
+695f929e2ce6fb11a1f180322d46c5cb1c97fa61 vp90-2-02-size-34x08.webm
+5bb4262030018dd01883965c6aa6070185924ef6 vp90-2-02-size-34x08.webm.md5
+5adf74ec906d2ad3f7526e06bd29f5ad7d966a90 vp90-2-02-size-34x10.webm
+71c100b437d3e8701632ae8d65c3555339b1c68f vp90-2-02-size-34x10.webm.md5
+d0918923c987fba2d00193d83797b21289fe54aa vp90-2-02-size-34x16.webm
+5d5a52f3535b4d2698dd3d87f4a13fdc9b57163d vp90-2-02-size-34x16.webm.md5
+553ab0042cf87f5e668ec31b2e4b2a4b6ec196fd vp90-2-02-size-34x18.webm
+a164c7f3c424987df2340496e6a8cf76e973f0f1 vp90-2-02-size-34x18.webm.md5
+baf3e233634f150de81c18ba5d8848068e1c3c54 vp90-2-02-size-34x32.webm
+22a79d3bd1c9b85dfe8c70bb2e19f08a92a8be03 vp90-2-02-size-34x32.webm.md5
+6d50a533774a7167350e4a7ef43c94a5622179a2 vp90-2-02-size-34x34.webm
+0c099638e79c273546523e06704553e42eb00b00 vp90-2-02-size-34x34.webm.md5
+698cdd0a5e895cc202c488675e682a8c537ede4f vp90-2-02-size-34x64.webm
+9317b63987cddab8389510a27b86f9f3d46e3fa5 vp90-2-02-size-34x64.webm.md5
+4b5335ca06f082b6b69f584eb8e7886bdcafefd3 vp90-2-02-size-34x66.webm
+e18d68b35428f46a84a947c646804a51ef1d7cec vp90-2-02-size-34x66.webm.md5
+a54ae7b494906ec928a876e8290e5574f2f9f6a2 vp90-2-02-size-64x08.webm
+87f9f7087b6489d45e9e4b38ede2c5aef4a4928f vp90-2-02-size-64x08.webm.md5
+24522c70804a3c23d937df2d829ae63965b23f38 vp90-2-02-size-64x10.webm
+447ce03938ab53bffcb4a841ee0bfaa90462dcb9 vp90-2-02-size-64x10.webm.md5
+2a5035d035d214ae614af8051930690ef623989b vp90-2-02-size-64x16.webm
+84e355761dd2e0361b904c84c52a0dd0384d89cf vp90-2-02-size-64x16.webm.md5
+3a293ef4e270a19438e59b817fbe5f43eed4d36b vp90-2-02-size-64x18.webm
+666824e5ba746779eb46079e0631853dcc86d48b vp90-2-02-size-64x18.webm.md5
+ed32fae837095c9e8fc95d223ec68101812932c2 vp90-2-02-size-64x32.webm
+97086eadedce1d0d9c072b585ba7b49aec69b1e7 vp90-2-02-size-64x32.webm.md5
+696c7a7250bdfff594f4dfd88af34239092ecd00 vp90-2-02-size-64x34.webm
+253a1d38d452e7826b086846c6f872f829c276bb vp90-2-02-size-64x34.webm.md5
+fc508e0e3c2e6872c60919a60b812c5232e9c2b0 vp90-2-02-size-64x64.webm
+2cd6ebeca0f82e9f505616825c07950371b905ab vp90-2-02-size-64x64.webm.md5
+0f8a4fc1d6521187660425c283f08dff8c66e476 vp90-2-02-size-64x66.webm
+5806be11a1d346be235f88d3683e69f73746166c vp90-2-02-size-64x66.webm.md5
+273b0c36e3658685cde250408a478116d7ae92f1 vp90-2-02-size-66x08.webm
+23c3cd0dca20a2f71f036e77ea92025ff4e7a298 vp90-2-02-size-66x08.webm.md5
+4844c59c3306d1e671bb0568f00e344bf797e66e vp90-2-02-size-66x10.webm
+e041eaf6841d775f8fde8bbb4949d2733fdaab7f vp90-2-02-size-66x10.webm.md5
+bdf3f1582b234fcd2805ffec59f9d716a2345302 vp90-2-02-size-66x16.webm
+2ec85ee18119e6798968571ea6e1b93ca386e3af vp90-2-02-size-66x16.webm.md5
+0acce9af12b13b025d5274013da7ef6f568f075f vp90-2-02-size-66x18.webm
+77c4d53e2a5c96b70af9d575fe6811e0f5ee627b vp90-2-02-size-66x18.webm.md5
+682b36a25774bbdedcd603f504d18eb63f0167d4 vp90-2-02-size-66x32.webm
+53728fae2a428f16d376a29f341a64ddca97996a vp90-2-02-size-66x32.webm.md5
+e71b70e901e29eaa6672a6aa4f37f6f5faa02bd6 vp90-2-02-size-66x34.webm
+f69a6a555e3f614b0a35f9bfc313d8ebb35bc725 vp90-2-02-size-66x34.webm.md5
+4151b8c29452d5c2266397a7b9bf688899a2937b vp90-2-02-size-66x64.webm
+69486e7fd9e380b6c97a03d3e167affc79f73840 vp90-2-02-size-66x64.webm.md5
+68784a1ecac776fe2a3f230345af32f06f123536 vp90-2-02-size-66x66.webm
+7f008c7f48d55e652fbd6bac405b51e0015c94f2 vp90-2-02-size-66x66.webm.md5
+7e1bc449231ac1c5c2a11c9a6333b3e828763798 vp90-2-03-size-196x196.webm
+6788a561466dace32d500194bf042e19cccc35e1 vp90-2-03-size-196x196.webm.md5
+a170c9a88ec1dd854c7a471ff55fb2a97ac31870 vp90-2-03-size-196x198.webm
+6bf9d6a8e2bdc5bf4f8a78071a3fed5ca02ad6f2 vp90-2-03-size-196x198.webm.md5
+68f861d21c4c8b03d572c3d3fcd9f4fbf1f4503f vp90-2-03-size-196x200.webm
+bbfc260b2bfd872cc6054272bb6b7f959a9e1c6e vp90-2-03-size-196x200.webm.md5
+fc34889feeca2b7e5b27b4f1ce22d2e2b8e3e4b1 vp90-2-03-size-196x202.webm
+158ee72af578f39aad0c3b8f4cbed2fc78b57e0f vp90-2-03-size-196x202.webm.md5
+dd28fb7247af534bdf5e6795a3ac429610489a0b vp90-2-03-size-196x208.webm
+7546be847efce2d1c0a23f807bfb03f91b764e1e vp90-2-03-size-196x208.webm.md5
+41d5cf5ed65b722a1b6dc035e67f978ea8ffecf8 vp90-2-03-size-196x210.webm
+9444fdf632d6a1b6143f4cb10fed8f63c1d67ec1 vp90-2-03-size-196x210.webm.md5
+5007bc618143437c009d6dde5fc2e86f72d37dc2 vp90-2-03-size-196x224.webm
+858361d8f79b44df5545feabbc9754ec9ede632f vp90-2-03-size-196x224.webm.md5
+0bcbe357fbc776c3fa68e7117179574ed7564a44 vp90-2-03-size-196x226.webm
+72006a5f42031a43d70a2cd9fc1958962a86628f vp90-2-03-size-196x226.webm.md5
+000239f048cceaac055558e97ef07078ebf65502 vp90-2-03-size-198x196.webm
+2d6841901b72000c5340f30be602853438c1b787 vp90-2-03-size-198x196.webm.md5
+ae75b766306a6404c3b3b35a6b6d53633c14fbdb vp90-2-03-size-198x198.webm
+3f2544b4f3b4b643a98f2c3b15ea5826fc702fa1 vp90-2-03-size-198x198.webm.md5
+95ffd573fa84ccef1cd59e1583e6054f56a5c83d vp90-2-03-size-198x200.webm
+5d537e3c9b9c54418c79677543454c4cda3de1af vp90-2-03-size-198x200.webm.md5
+ecc845bf574375f469bc91bf5c75c79dc00073d6 vp90-2-03-size-198x202.webm
+1b59f5e111265615a7a459eeda8cc9045178d228 vp90-2-03-size-198x202.webm.md5
+432fb27144fe421b9f51cf44d2750a26133ed585 vp90-2-03-size-198x208.webm
+a58a67f4fb357c73ca078aeecbc0f782975630b1 vp90-2-03-size-198x208.webm.md5
+ff5058e7e6a47435046612afc8536f2040989e6f vp90-2-03-size-198x210.webm
+18d3be7935e52217e2e9400b6f2c681a9e45dc89 vp90-2-03-size-198x210.webm.md5
+a0d55263c1ed2c03817454dd4ec4090d36dbc864 vp90-2-03-size-198x224.webm
+efa366a299817e2da51c00623b165aab9fbb8d91 vp90-2-03-size-198x224.webm.md5
+ccd142fa2920fc85bb753f049160c1c353ad1574 vp90-2-03-size-198x226.webm
+534524a0b2dbff852e0b92ef09939db072f83243 vp90-2-03-size-198x226.webm.md5
+0d483b94ed40abc8ab6e49f960432ee54ad9c7f1 vp90-2-03-size-200x196.webm
+41795f548181717906e7a504ba551f06c32102ae vp90-2-03-size-200x196.webm.md5
+f6c2dc54e0989d50f01333fe40c91661fcbf849a vp90-2-03-size-200x198.webm
+43df5d8c46a40089441392e6d096c588c1079a68 vp90-2-03-size-200x198.webm.md5
+2f6e9df82e44fc145f0d9212dcccbed3de605e23 vp90-2-03-size-200x200.webm
+757b2ef96b82093255725bab9690bbafe27f3caf vp90-2-03-size-200x200.webm.md5
+40c5ea60415642a4a2e75c0d127b06309baadfab vp90-2-03-size-200x202.webm
+3022c4a1c625b5dc04fdb1052d17d45b4171cfba vp90-2-03-size-200x202.webm.md5
+6942ed5b27476bb8506d10e600d6ff60887780ca vp90-2-03-size-200x208.webm
+c4ab8c66f3cf2dc8e8dd7abae9ac21f4d32cd6be vp90-2-03-size-200x208.webm.md5
+71dbc99b83c49d1da45589b91eabb98e2f4a7b1e vp90-2-03-size-200x210.webm
+3f0b40da7eef7974b9bc326562f251feb67d9c7c vp90-2-03-size-200x210.webm.md5
+6b6b8489081cfefb377cc5f18eb754ec2383f655 vp90-2-03-size-200x224.webm
+a259df2ac0e294492e3f9d4315baa34cab044f04 vp90-2-03-size-200x224.webm.md5
+c9adc1c9bb07559349a0b054df4af56f7a6edbb9 vp90-2-03-size-200x226.webm
+714cec61e3575581e4f1a0e3921f4dfdbbd316c5 vp90-2-03-size-200x226.webm.md5
+f9bdc936bdf53f8be9ce78fecd41a21d31ff3943 vp90-2-03-size-202x196.webm
+5b8e2e50fcea2c43b12fc067b8a9cc117af77bda vp90-2-03-size-202x196.webm.md5
+c7b66ea3da87613deb47ff24a111247d3c384fec vp90-2-03-size-202x198.webm
+517e91204b25586da943556f4adc5951c9be8bee vp90-2-03-size-202x198.webm.md5
+935ef56b01cfdb4265a7e24696645209ccb20970 vp90-2-03-size-202x200.webm
+55b8ec4a2513183144a8e27564596c06c7576fce vp90-2-03-size-202x200.webm.md5
+849acf75e4f1d8d90046704e1103a18c64f30e35 vp90-2-03-size-202x202.webm
+c79afc6660df2824e7df314e5bfd71f0d8acf76b vp90-2-03-size-202x202.webm.md5
+17b3a4d55576b770626ccb856b9f1a6c8f6ae476 vp90-2-03-size-202x208.webm
+0b887ff30409c58f2ccdc3bfacd6be7c69f8997a vp90-2-03-size-202x208.webm.md5
+032d0ade4230fb2eef6d19915a7a1c9aa4a52617 vp90-2-03-size-202x210.webm
+f78f8e79533c0c88dd2bfdcec9b1c07848568ece vp90-2-03-size-202x210.webm.md5
+915a38c31fe425d5b93c837121cfa8082f5ea5bc vp90-2-03-size-202x224.webm
+bf52a104074d0c5942aa7a5b31e11db47e43d48e vp90-2-03-size-202x224.webm.md5
+be5cfde35666fa435e47d544d9258215beb1cf29 vp90-2-03-size-202x226.webm
+2fa2f87502fda756b319389c8975204e130a2e3f vp90-2-03-size-202x226.webm.md5
+15d908e97862b5b4bf295610df011fb9aa09909b vp90-2-03-size-208x196.webm
+50c60792305d6a99be376dd596a6ff979325e6cc vp90-2-03-size-208x196.webm.md5
+a367c7bc9fde56d6f4848cc573c7d4c1ce75e348 vp90-2-03-size-208x198.webm
+be85fb2c8d435a75484231356f07d06ebddd13cd vp90-2-03-size-208x198.webm.md5
+05fd46deb7288e7253742091f56e54a9a441a187 vp90-2-03-size-208x200.webm
+74f8ec3b3a2fe81767ed1ab36a47bc0062d6223c vp90-2-03-size-208x200.webm.md5
+d8985c4b386513a7385a4b3639bf91e469f1378b vp90-2-03-size-208x202.webm
+0614a1e8d92048852adcf605a51333f5fabc7f03 vp90-2-03-size-208x202.webm.md5
+28b002242238479165ba4fb87ee6b442c64b32e4 vp90-2-03-size-208x208.webm
+37de5aca59bb900228400b0e115d3229edb9dcc0 vp90-2-03-size-208x208.webm.md5
+c545be0050c2fad7c68427dbf86c62a739e94ab3 vp90-2-03-size-208x210.webm
+d646eccb3cd578f94b54777e32b88898bef6e17a vp90-2-03-size-208x210.webm.md5
+63a0cfe295b661026dd7b1bebb67acace1db766f vp90-2-03-size-208x224.webm
+85c0361d93bf85a335248fef2767ff43eeef23db vp90-2-03-size-208x224.webm.md5
+f911cc718d66e4fe8a865226088939c9eb1b7825 vp90-2-03-size-208x226.webm
+a6d583a57876e7b7ec48625b2b2cdbcf70cab837 vp90-2-03-size-208x226.webm.md5
+5bbb0f36da9a4683cf04e724124d8696332911bf vp90-2-03-size-210x196.webm
+a3580fc7816d7fbcfb54fdba501cabbd06ba2f1d vp90-2-03-size-210x196.webm.md5
+8db64d6f9ce36dd382013b42ae4e292deba697bc vp90-2-03-size-210x198.webm
+eda20f8268c7f4147bead4059e9c4897e09140a9 vp90-2-03-size-210x198.webm.md5
+ce391505eeaf1d12406563101cd6b2dbbbb44bfc vp90-2-03-size-210x200.webm
+79d73b7f623082d2a00aa33e95c79d11c7d9c3a8 vp90-2-03-size-210x200.webm.md5
+852db6fdc206e72391fc69b807f1954934679949 vp90-2-03-size-210x202.webm
+f69414c5677ed2f2b8b37ae76429e509a92276a5 vp90-2-03-size-210x202.webm.md5
+c424cc3edd2308da7d33f27acb36b54db5bf2595 vp90-2-03-size-210x208.webm
+27b18562faa1b3184256f4eae8114b539b3e9d3e vp90-2-03-size-210x208.webm.md5
+dd029eba719d50a2851592fa8b9b2efe88904930 vp90-2-03-size-210x210.webm
+c853a1670465eaa04ca31b3511995f1b6ed4f58f vp90-2-03-size-210x210.webm.md5
+d962e8ae676c54d0c3ea04ec7c04b37ae6a786e3 vp90-2-03-size-210x224.webm
+93b793e79d987065b39ad8e2e71244368435fc25 vp90-2-03-size-210x224.webm.md5
+3d0825fe83bcc125be1f78145ff43ca6d7588784 vp90-2-03-size-210x226.webm
+5230f31a57ca3b5311698a12035d2644533b3ec4 vp90-2-03-size-210x226.webm.md5
+6622f8bd9279e1ce45509a58a31a990052d45e14 vp90-2-03-size-224x196.webm
+65411da07f60113f2be05c807879072b161d561e vp90-2-03-size-224x196.webm.md5
+6744ff2ee2c41eb08c62ff30880833b6d77b585b vp90-2-03-size-224x198.webm
+46ea3641d41acd4bff347b224646c060d5620385 vp90-2-03-size-224x198.webm.md5
+8eb91f3416a1404705f370caecd74b2b458351b1 vp90-2-03-size-224x200.webm
+196aefb854c8b95b9330263d6690b7ee15693ecf vp90-2-03-size-224x200.webm.md5
+256a5a23ef4e6d5ef2871af5afb8cd13d28cec00 vp90-2-03-size-224x202.webm
+840ad8455dcf2be378c14b007e66fa642fc8196d vp90-2-03-size-224x202.webm.md5
+db4606480ab48b96c9a6ff5e639f1f1aea2a12e4 vp90-2-03-size-224x208.webm
+40b9801d5620467499ac70fa6b7c40aaa5e1c331 vp90-2-03-size-224x208.webm.md5
+e37159e687fe1cb24cffddfae059301adbaf4212 vp90-2-03-size-224x210.webm
+1e4acd4b6334ae260c3eed08652d0ba8122073f2 vp90-2-03-size-224x210.webm.md5
+0de1eb4bb6285ae621e4f2b613d2aa4a8c95a130 vp90-2-03-size-224x224.webm
+37db449ad86fb286c2c02d94aa8fe0379c05044a vp90-2-03-size-224x224.webm.md5
+32ebbf903a7d7881bcfe59639f1d472371f3bf27 vp90-2-03-size-224x226.webm
+5cc3ac5dc9f6912491aa2ddac863f8187f34c569 vp90-2-03-size-224x226.webm.md5
+9480ff5c2c32b1870ac760c87514912616e6cf01 vp90-2-03-size-226x196.webm
+fe83655c0f1888f0af7b047785f01ba7ca9f1324 vp90-2-03-size-226x196.webm.md5
+09cad4221996315cdddad4e502dbfabf53ca1d6a vp90-2-03-size-226x198.webm
+e3ddfdc650acb95adb45abd9b634e1f09ea8ac96 vp90-2-03-size-226x198.webm.md5
+c34f49d55fe39e3f0b607e3cc95e30244225cecb vp90-2-03-size-226x200.webm
+abb83edc868a3523ccd4e5523fac2efbe7c3df1f vp90-2-03-size-226x200.webm.md5
+d17bc08eedfc60c4c23d576a6c964a21bf854d1f vp90-2-03-size-226x202.webm
+1d22d2d0f375251c2d5a1acb4714bc35d963865b vp90-2-03-size-226x202.webm.md5
+9bd537c4f92a25596ccd29fedfe181feac948b92 vp90-2-03-size-226x208.webm
+6feb0e7325386275719f3511ada9e248a2ae7df4 vp90-2-03-size-226x208.webm.md5
+4487067f6cedd495b93696b44b37fe0a3e7eda14 vp90-2-03-size-226x210.webm
+49a8fa87945f47208168d541c068e78d878075d5 vp90-2-03-size-226x210.webm.md5
+559fea2f8da42b33c1aa1dbc34d1d6781009847a vp90-2-03-size-226x224.webm
+83c6d8f2969b759e10e5c6542baca1265c874c29 vp90-2-03-size-226x224.webm.md5
+fe0af2ee47b1e5f6a66db369e2d7e9d870b38dce vp90-2-03-size-226x226.webm
+94ad19b8b699cea105e2ff18f0df2afd7242bcf7 vp90-2-03-size-226x226.webm.md5
+495256cfd123fe777b2c0406862ed8468a1f4677 vp91-2-04-yv444.webm
+65e3a7ffef61ab340d9140f335ecc49125970c2c vp91-2-04-yv444.webm.md5
+b6524e4084d15b5d0caaa3d3d1368db30cbee69c vp90-2-03-deltaq.webm
+65f45ec9a55537aac76104818278e0978f94a678 vp90-2-03-deltaq.webm.md5
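
The additions above follow the sha1sum manifest convention used for the libvpx test-data list: one 40-character SHA-1 digest, whitespace, then the test vector filename. A minimal sketch of parsing such a manifest follows; the Entry struct and the "test-data.sha1" path are illustrative assumptions, not part of this change:

    // Minimal sketch: parse a "sha1 filename" manifest as shown above.
    // Struct name and manifest path are illustrative, not from libvpx.
    #include <fstream>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    struct Entry {
      std::string sha1;  // 40 hex characters
      std::string name;  // test vector filename, e.g. vp90-2-03-deltaq.webm
    };

    int main() {
      std::ifstream manifest("test-data.sha1");  // assumed path
      std::vector<Entry> entries;
      std::string line;
      while (std::getline(manifest, line)) {
        std::istringstream fields(line);
        Entry e;
        // Keep only well-formed rows: a 40-char digest plus a filename.
        if (fields >> e.sha1 >> e.name && e.sha1.size() == 40)
          entries.push_back(e);
      }
      std::cout << entries.size() << " test vectors listed\n";
      return 0;
    }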
diff --git a/libvpx/test/test.mk b/libvpx/test/test.mk
index 7a11a27..a64c0b8 100644
--- a/libvpx/test/test.mk
+++ b/libvpx/test/test.mk
@@ -1,5 +1,9 @@
-LIBVPX_TEST_SRCS-yes += acm_random.h
+LIBVPX_TEST_SRCS-yes += clear_system_state.h
+LIBVPX_TEST_SRCS-yes += register_state_check.h
LIBVPX_TEST_SRCS-yes += test.mk
+LIBVPX_TEST_SRCS-yes += acm_random.h
+LIBVPX_TEST_SRCS-yes += md5_helper.h
+LIBVPX_TEST_SRCS-yes += codec_factory.h
LIBVPX_TEST_SRCS-yes += test_libvpx.cc
LIBVPX_TEST_SRCS-yes += util.h
LIBVPX_TEST_SRCS-yes += video_source.h
@@ -13,18 +17,34 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += altref_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += config_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += cq_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += datarate_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += encode_test_driver.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += encode_test_driver.h
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += error_resilience_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += i420_video_source.h
+
+LIBVPX_TEST_SRCS-yes += encode_test_driver.cc
+LIBVPX_TEST_SRCS-yes += encode_test_driver.h
+LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += error_resilience_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += i420_video_source.h
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += keyframe_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += resize_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += borders_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += resize_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += cpu_speed_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_lossless_test.cc
+
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += ../md5_utils.h ../md5_utils.c
+LIBVPX_TEST_SRCS-yes += decode_test_driver.cc
+LIBVPX_TEST_SRCS-yes += decode_test_driver.h
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += ivf_video_source.h
+
+## WebM Parsing
+NESTEGG_SRCS += ../nestegg/halloc/halloc.h
+NESTEGG_SRCS += ../nestegg/halloc/src/align.h
+NESTEGG_SRCS += ../nestegg/halloc/src/halloc.c
+NESTEGG_SRCS += ../nestegg/halloc/src/hlist.h
+NESTEGG_SRCS += ../nestegg/include/nestegg/nestegg.h
+NESTEGG_SRCS += ../nestegg/src/nestegg.c
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += $(NESTEGG_SRCS)
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += webm_video_source.h
+
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += test_vector_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += ../md5_utils.h ../md5_utils.c
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += decode_test_driver.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += decode_test_driver.h
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += ivf_video_source.h
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += test_vector_test.cc
##
## WHITE BOX TESTS
##
@@ -33,19 +53,52 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += test_vector_test.cc
##
ifeq ($(CONFIG_SHARED),)
+## VP8
+ifneq ($(CONFIG_VP8_ENCODER)$(CONFIG_VP8_DECODER),)
+
# These tests require both the encoder and decoder to be built.
ifeq ($(CONFIG_VP8_ENCODER)$(CONFIG_VP8_DECODER),yesyes)
-LIBVPX_TEST_SRCS-yes += boolcoder_test.cc
+LIBVPX_TEST_SRCS-yes += vp8_boolcoder_test.cc
endif
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += fdct4x4_test.cc
-LIBVPX_TEST_SRCS-yes += idctllm_test.cc
+LIBVPX_TEST_SRCS-yes += idct_test.cc
LIBVPX_TEST_SRCS-yes += intrapred_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_POSTPROC) += pp_filter_test.cc
-LIBVPX_TEST_SRCS-yes += sad_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += sad_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += set_roi.cc
LIBVPX_TEST_SRCS-yes += sixtap_predict_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += subtract_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_subtract_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += variance_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += vp8_decrypt_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += vp8_fdct4x4_test.cc
+
+endif # VP8
+
+## VP9
+ifneq ($(CONFIG_VP9_ENCODER)$(CONFIG_VP9_DECODER),)
+
+# These tests require both the encoder and decoder to be built.
+ifeq ($(CONFIG_VP9_ENCODER)$(CONFIG_VP9_DECODER),yesyes)
+LIBVPX_TEST_SRCS-yes += vp9_boolcoder_test.cc
+
+# IDCT test currently depends on FDCT function
+LIBVPX_TEST_SRCS-yes += idct8x8_test.cc
+LIBVPX_TEST_SRCS-yes += superframe_test.cc
+LIBVPX_TEST_SRCS-yes += tile_independence_test.cc
+endif
+
+LIBVPX_TEST_SRCS-$(CONFIG_VP9) += convolve_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += vp9_thread_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct4x4_test.cc
+
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct8x8_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct16x16_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += variance_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct32x32_test.cc
+
+endif # VP9
+
endif
@@ -53,7 +106,9 @@ endif
##
## TEST DATA
##
-LIBVPX_TEST_DATA-$(CONFIG_VP8_ENCODER) += hantro_collage_w352h288.yuv
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += hantro_collage_w352h288.yuv
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += hantro_odd.yuv
+
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-001.ivf
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-002.ivf
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-003.ivf
@@ -176,3 +231,405 @@ LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1438.ivf.md5
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1439.ivf.md5
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1440.ivf.md5
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1443.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-00.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-00.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-01.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-01.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-02.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-02.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-03.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-03.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-04.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-04.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-05.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-05.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-06.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-06.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-07.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-07.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-09.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-09.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-11.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-11.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-12.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-12.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-13.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-13.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-14.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-14.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-15.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-15.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-17.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-17.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-19.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-19.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-20.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-20.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-21.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-21.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-22.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-22.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-23.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-23.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-24.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-24.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-25.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-25.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-26.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-26.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-27.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-27.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-28.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-28.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-29.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-29.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-30.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-30.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-31.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-31.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-33.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-33.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-35.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-35.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-36.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-36.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-37.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-37.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-38.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-38.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-39.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-39.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-40.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-40.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-41.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-41.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-42.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-42.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-43.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-43.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-44.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-44.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-45.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-45.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-46.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-46.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-47.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-47.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-48.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-48.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-49.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-49.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-50.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-50.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-51.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-51.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-52.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-52.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-53.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-53.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-54.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-54.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-55.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-55.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-56.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-56.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-57.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-57.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-58.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-58.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-59.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-59.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-60.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-60.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-61.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-61.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-62.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-62.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-63.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-63.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-1.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-2.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-3.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-3.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-4.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-4.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-5.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-5.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-6.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-6.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-7.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-7.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-deltaq.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-deltaq.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yv444.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yv444.webm.md5
diff --git a/libvpx/test/test_libvpx.cc b/libvpx/test/test_libvpx.cc
index cfd5d28..a4dbca4 100644
--- a/libvpx/test/test_libvpx.cc
+++ b/libvpx/test/test_libvpx.cc
@@ -8,12 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string>
-#include "vpx_config.h"
-#if ARCH_X86 || ARCH_X86_64
+#include "./vpx_config.h"
extern "C" {
+#if ARCH_X86 || ARCH_X86_64
#include "vpx_ports/x86.h"
-}
#endif
+#if CONFIG_VP8
+extern void vp8_rtcd();
+#endif
+#if CONFIG_VP9
+extern void vp9_rtcd();
+#endif
+}
#include "third_party/googletest/src/include/gtest/gtest.h"
static void append_gtest_filter(const char *str) {
@@ -27,19 +33,31 @@ int main(int argc, char **argv) {
#if ARCH_X86 || ARCH_X86_64
const int simd_caps = x86_simd_caps();
- if(!(simd_caps & HAS_MMX))
+ if (!(simd_caps & HAS_MMX))
append_gtest_filter(":-MMX/*");
- if(!(simd_caps & HAS_SSE))
+ if (!(simd_caps & HAS_SSE))
append_gtest_filter(":-SSE/*");
- if(!(simd_caps & HAS_SSE2))
+ if (!(simd_caps & HAS_SSE2))
append_gtest_filter(":-SSE2/*");
- if(!(simd_caps & HAS_SSE3))
+ if (!(simd_caps & HAS_SSE3))
append_gtest_filter(":-SSE3/*");
- if(!(simd_caps & HAS_SSSE3))
+ if (!(simd_caps & HAS_SSSE3))
append_gtest_filter(":-SSSE3/*");
- if(!(simd_caps & HAS_SSE4_1))
+ if (!(simd_caps & HAS_SSE4_1))
append_gtest_filter(":-SSE4_1/*");
#endif
+#if !CONFIG_SHARED
+// Shared library builds don't support whitebox tests
+// that exercise internal symbols.
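+// Calling the RTCD (run-time CPU detection) initializers up front fills in
+// each codec's dispatch tables before any test calls codec internals directly.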
+
+#if CONFIG_VP8
+ vp8_rtcd();
+#endif
+#if CONFIG_VP9
+ vp9_rtcd();
+#endif
+#endif
+
return RUN_ALL_TESTS();
}
diff --git a/libvpx/test/test_vector_test.cc b/libvpx/test/test_vector_test.cc
index 938457b..9bd03b9 100644
--- a/libvpx/test/test_vector_test.cc
+++ b/libvpx/test/test_vector_test.cc
@@ -12,20 +12,19 @@
#include <cstdlib>
#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/ivf_video_source.h"
+#include "test/webm_video_source.h"
+#include "test/util.h"
+#include "test/md5_helper.h"
extern "C" {
-#include "./md5_utils.h"
#include "vpx_mem/vpx_mem.h"
}
-#if defined(_MSC_VER)
-#define snprintf sprintf_s
-#endif
-
namespace {
-// There are 61 test vectors in total.
-const char *kTestVectors[] = {
+#if CONFIG_VP8_DECODER
+const char *kVP8TestVectors[] = {
"vp80-00-comprehensive-001.ivf",
"vp80-00-comprehensive-002.ivf", "vp80-00-comprehensive-003.ivf",
"vp80-00-comprehensive-004.ivf", "vp80-00-comprehensive-005.ivf",
@@ -58,11 +57,119 @@ const char *kTestVectors[] = {
"vp80-05-sharpness-1438.ivf", "vp80-05-sharpness-1439.ivf",
"vp80-05-sharpness-1440.ivf", "vp80-05-sharpness-1443.ivf"
};
+#endif
+#if CONFIG_VP9_DECODER
+const char *kVP9TestVectors[] = {
+ "vp90-2-00-quantizer-00.webm", "vp90-2-00-quantizer-01.webm",
+ "vp90-2-00-quantizer-02.webm", "vp90-2-00-quantizer-03.webm",
+ "vp90-2-00-quantizer-04.webm", "vp90-2-00-quantizer-05.webm",
+ "vp90-2-00-quantizer-06.webm", "vp90-2-00-quantizer-07.webm",
+ "vp90-2-00-quantizer-08.webm", "vp90-2-00-quantizer-09.webm",
+ "vp90-2-00-quantizer-10.webm", "vp90-2-00-quantizer-11.webm",
+ "vp90-2-00-quantizer-12.webm", "vp90-2-00-quantizer-13.webm",
+ "vp90-2-00-quantizer-14.webm", "vp90-2-00-quantizer-15.webm",
+ "vp90-2-00-quantizer-16.webm", "vp90-2-00-quantizer-17.webm",
+ "vp90-2-00-quantizer-18.webm", "vp90-2-00-quantizer-19.webm",
+ "vp90-2-00-quantizer-20.webm", "vp90-2-00-quantizer-21.webm",
+ "vp90-2-00-quantizer-22.webm", "vp90-2-00-quantizer-23.webm",
+ "vp90-2-00-quantizer-24.webm", "vp90-2-00-quantizer-25.webm",
+ "vp90-2-00-quantizer-26.webm", "vp90-2-00-quantizer-27.webm",
+ "vp90-2-00-quantizer-28.webm", "vp90-2-00-quantizer-29.webm",
+ "vp90-2-00-quantizer-30.webm", "vp90-2-00-quantizer-31.webm",
+ "vp90-2-00-quantizer-32.webm", "vp90-2-00-quantizer-33.webm",
+ "vp90-2-00-quantizer-34.webm", "vp90-2-00-quantizer-35.webm",
+ "vp90-2-00-quantizer-36.webm", "vp90-2-00-quantizer-37.webm",
+ "vp90-2-00-quantizer-38.webm", "vp90-2-00-quantizer-39.webm",
+ "vp90-2-00-quantizer-40.webm", "vp90-2-00-quantizer-41.webm",
+ "vp90-2-00-quantizer-42.webm", "vp90-2-00-quantizer-43.webm",
+ "vp90-2-00-quantizer-44.webm", "vp90-2-00-quantizer-45.webm",
+ "vp90-2-00-quantizer-46.webm", "vp90-2-00-quantizer-47.webm",
+ "vp90-2-00-quantizer-48.webm", "vp90-2-00-quantizer-49.webm",
+ "vp90-2-00-quantizer-50.webm", "vp90-2-00-quantizer-51.webm",
+ "vp90-2-00-quantizer-52.webm", "vp90-2-00-quantizer-53.webm",
+ "vp90-2-00-quantizer-54.webm", "vp90-2-00-quantizer-55.webm",
+ "vp90-2-00-quantizer-56.webm", "vp90-2-00-quantizer-57.webm",
+ "vp90-2-00-quantizer-58.webm", "vp90-2-00-quantizer-59.webm",
+ "vp90-2-00-quantizer-60.webm", "vp90-2-00-quantizer-61.webm",
+ "vp90-2-00-quantizer-62.webm", "vp90-2-00-quantizer-63.webm",
+ "vp90-2-01-sharpness-1.webm", "vp90-2-01-sharpness-2.webm",
+ "vp90-2-01-sharpness-3.webm", "vp90-2-01-sharpness-4.webm",
+ "vp90-2-01-sharpness-5.webm", "vp90-2-01-sharpness-6.webm",
+ "vp90-2-01-sharpness-7.webm", "vp90-2-02-size-08x08.webm",
+ "vp90-2-02-size-08x10.webm", "vp90-2-02-size-08x16.webm",
+ "vp90-2-02-size-08x18.webm", "vp90-2-02-size-08x32.webm",
+ "vp90-2-02-size-08x34.webm", "vp90-2-02-size-08x64.webm",
+ "vp90-2-02-size-08x66.webm", "vp90-2-02-size-10x08.webm",
+ "vp90-2-02-size-10x10.webm", "vp90-2-02-size-10x16.webm",
+ "vp90-2-02-size-10x18.webm", "vp90-2-02-size-10x32.webm",
+ "vp90-2-02-size-10x34.webm", "vp90-2-02-size-10x64.webm",
+ "vp90-2-02-size-10x66.webm", "vp90-2-02-size-16x08.webm",
+ "vp90-2-02-size-16x10.webm", "vp90-2-02-size-16x16.webm",
+ "vp90-2-02-size-16x18.webm", "vp90-2-02-size-16x32.webm",
+ "vp90-2-02-size-16x34.webm", "vp90-2-02-size-16x64.webm",
+ "vp90-2-02-size-16x66.webm", "vp90-2-02-size-18x08.webm",
+ "vp90-2-02-size-18x10.webm", "vp90-2-02-size-18x16.webm",
+ "vp90-2-02-size-18x18.webm", "vp90-2-02-size-18x32.webm",
+ "vp90-2-02-size-18x34.webm", "vp90-2-02-size-18x64.webm",
+ "vp90-2-02-size-18x66.webm", "vp90-2-02-size-32x08.webm",
+ "vp90-2-02-size-32x10.webm", "vp90-2-02-size-32x16.webm",
+ "vp90-2-02-size-32x18.webm", "vp90-2-02-size-32x32.webm",
+ "vp90-2-02-size-32x34.webm", "vp90-2-02-size-32x64.webm",
+ "vp90-2-02-size-32x66.webm", "vp90-2-02-size-34x08.webm",
+ "vp90-2-02-size-34x10.webm", "vp90-2-02-size-34x16.webm",
+ "vp90-2-02-size-34x18.webm", "vp90-2-02-size-34x32.webm",
+ "vp90-2-02-size-34x34.webm", "vp90-2-02-size-34x64.webm",
+ "vp90-2-02-size-34x66.webm", "vp90-2-02-size-64x08.webm",
+ "vp90-2-02-size-64x10.webm", "vp90-2-02-size-64x16.webm",
+ "vp90-2-02-size-64x18.webm", "vp90-2-02-size-64x32.webm",
+ "vp90-2-02-size-64x34.webm", "vp90-2-02-size-64x64.webm",
+ "vp90-2-02-size-64x66.webm", "vp90-2-02-size-66x08.webm",
+ "vp90-2-02-size-66x10.webm", "vp90-2-02-size-66x16.webm",
+ "vp90-2-02-size-66x18.webm", "vp90-2-02-size-66x32.webm",
+ "vp90-2-02-size-66x34.webm", "vp90-2-02-size-66x64.webm",
+ "vp90-2-02-size-66x66.webm", "vp90-2-03-size-196x196.webm",
+ "vp90-2-03-size-196x198.webm", "vp90-2-03-size-196x200.webm",
+ "vp90-2-03-size-196x202.webm", "vp90-2-03-size-196x208.webm",
+ "vp90-2-03-size-196x210.webm", "vp90-2-03-size-196x224.webm",
+ "vp90-2-03-size-196x226.webm", "vp90-2-03-size-198x196.webm",
+ "vp90-2-03-size-198x198.webm", "vp90-2-03-size-198x200.webm",
+ "vp90-2-03-size-198x202.webm", "vp90-2-03-size-198x208.webm",
+ "vp90-2-03-size-198x210.webm", "vp90-2-03-size-198x224.webm",
+ "vp90-2-03-size-198x226.webm", "vp90-2-03-size-200x196.webm",
+ "vp90-2-03-size-200x198.webm", "vp90-2-03-size-200x200.webm",
+ "vp90-2-03-size-200x202.webm", "vp90-2-03-size-200x208.webm",
+ "vp90-2-03-size-200x210.webm", "vp90-2-03-size-200x224.webm",
+ "vp90-2-03-size-200x226.webm", "vp90-2-03-size-202x196.webm",
+ "vp90-2-03-size-202x198.webm", "vp90-2-03-size-202x200.webm",
+ "vp90-2-03-size-202x202.webm", "vp90-2-03-size-202x208.webm",
+ "vp90-2-03-size-202x210.webm", "vp90-2-03-size-202x224.webm",
+ "vp90-2-03-size-202x226.webm", "vp90-2-03-size-208x196.webm",
+ "vp90-2-03-size-208x198.webm", "vp90-2-03-size-208x200.webm",
+ "vp90-2-03-size-208x202.webm", "vp90-2-03-size-208x208.webm",
+ "vp90-2-03-size-208x210.webm", "vp90-2-03-size-208x224.webm",
+ "vp90-2-03-size-208x226.webm", "vp90-2-03-size-210x196.webm",
+ "vp90-2-03-size-210x198.webm", "vp90-2-03-size-210x200.webm",
+ "vp90-2-03-size-210x202.webm", "vp90-2-03-size-210x208.webm",
+ "vp90-2-03-size-210x210.webm", "vp90-2-03-size-210x224.webm",
+ "vp90-2-03-size-210x226.webm", "vp90-2-03-size-224x196.webm",
+ "vp90-2-03-size-224x198.webm", "vp90-2-03-size-224x200.webm",
+ "vp90-2-03-size-224x202.webm", "vp90-2-03-size-224x208.webm",
+ "vp90-2-03-size-224x210.webm", "vp90-2-03-size-224x224.webm",
+ "vp90-2-03-size-224x226.webm", "vp90-2-03-size-226x196.webm",
+ "vp90-2-03-size-226x198.webm", "vp90-2-03-size-226x200.webm",
+ "vp90-2-03-size-226x202.webm", "vp90-2-03-size-226x208.webm",
+ "vp90-2-03-size-226x210.webm", "vp90-2-03-size-226x224.webm",
+ "vp90-2-03-size-226x226.webm", "vp90-2-03-deltaq.webm",
+#if CONFIG_NON420
+ "vp91-2-04-yv444.webm"
+#endif
+};
+#endif
-class TestVectorTest : public libvpx_test::DecoderTest,
- public ::testing::TestWithParam<const char*> {
+class TestVectorTest : public ::libvpx_test::DecoderTest,
+ public ::libvpx_test::CodecTestWithParam<const char*> {
protected:
- TestVectorTest() : md5_file_(NULL) {}
+ TestVectorTest() : DecoderTest(GET_PARAM(0)), md5_file_(NULL) {}
virtual ~TestVectorTest() {
if (md5_file_)
@@ -77,6 +184,7 @@ class TestVectorTest : public libvpx_test::DecoderTest,
virtual void DecompressedFrameHook(const vpx_image_t& img,
const unsigned int frame_number) {
+ ASSERT_TRUE(md5_file_ != NULL);
char expected_md5[33];
char junk[128];
@@ -85,30 +193,9 @@ class TestVectorTest : public libvpx_test::DecoderTest,
ASSERT_NE(res, EOF) << "Read md5 data failed";
expected_md5[32] = '\0';
- MD5Context md5;
- MD5Init(&md5);
-
- // Compute and update md5 for each raw in decompressed data.
- for (int plane = 0; plane < 3; ++plane) {
- uint8_t *buf = img.planes[plane];
-
- for (unsigned int y = 0; y < (plane ? (img.d_h + 1) >> 1 : img.d_h);
- ++y) {
- MD5Update(&md5, buf, (plane ? (img.d_w + 1) >> 1 : img.d_w));
- buf += img.stride[plane];
- }
- }
-
- uint8_t md5_sum[16];
- MD5Final(md5_sum, &md5);
-
- char actual_md5[33];
- // Convert to get the actual md5.
- for (int i = 0; i < 16; i++) {
- snprintf(&actual_md5[i * 2], sizeof(actual_md5) - i * 2, "%02x",
- md5_sum[i]);
- }
- actual_md5[32] = '\0';
+ ::libvpx_test::MD5 md5_res;
+ md5_res.Add(&img);
+ const char *actual_md5 = md5_res.Get();
// Check md5 match.
ASSERT_STREQ(expected_md5, actual_md5)
@@ -124,21 +211,29 @@ class TestVectorTest : public libvpx_test::DecoderTest,
// checksums match the correct md5 data, then the test is passed. Otherwise,
// the test failed.
TEST_P(TestVectorTest, MD5Match) {
- const std::string filename = GetParam();
- // Open compressed video file.
- libvpx_test::IVFVideoSource video(filename);
+ const std::string filename = GET_PARAM(1);
+ libvpx_test::CompressedVideoSource *video = NULL;
- video.Init();
+ // Open compressed video file.
+ if (filename.substr(filename.length() - 3, 3) == "ivf") {
+ video = new libvpx_test::IVFVideoSource(filename);
+ } else if (filename.substr(filename.length() - 4, 4) == "webm") {
+ video = new libvpx_test::WebMVideoSource(filename);
+  }
+  ASSERT_TRUE(video != NULL) << "Unknown input file extension: " << filename;
+  video->Init();
// Construct md5 file name.
const std::string md5_filename = filename + ".md5";
OpenMD5File(md5_filename);
// Decode frame, and check the md5 matching.
- ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_NO_FATAL_FAILURE(RunLoop(video));
+ delete video;
}
-INSTANTIATE_TEST_CASE_P(TestVectorSequence, TestVectorTest,
- ::testing::ValuesIn(kTestVectors));
+VP8_INSTANTIATE_TEST_CASE(TestVectorTest,
+ ::testing::ValuesIn(kVP8TestVectors));
+VP9_INSTANTIATE_TEST_CASE(TestVectorTest,
+ ::testing::ValuesIn(kVP9TestVectors));
} // namespace
diff --git a/libvpx/test/tile_independence_test.cc b/libvpx/test/tile_independence_test.cc
new file mode 100644
index 0000000..403dbb6
--- /dev/null
+++ b/libvpx/test/tile_independence_test.cc
@@ -0,0 +1,109 @@
+/*
+ Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+
+ Use of this source code is governed by a BSD-style license
+ that can be found in the LICENSE file in the root of the source
+ tree. An additional intellectual property rights grant can be found
+ in the file PATENTS. All contributing project authors may
+ be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
+#include "test/util.h"
+#include "test/md5_helper.h"
+extern "C" {
+#include "vpx_mem/vpx_mem.h"
+}
+
+namespace {
+class TileIndependenceTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<int> {
+ protected:
+ TileIndependenceTest()
+ : EncoderTest(GET_PARAM(0)),
+ md5_fw_order_(),
+ md5_inv_order_(),
+ n_tiles_(GET_PARAM(1)) {
+ init_flags_ = VPX_CODEC_USE_PSNR;
+ vpx_codec_dec_cfg_t cfg;
+ cfg.w = 704;
+ cfg.h = 144;
+ cfg.threads = 1;
+ fw_dec_ = codec_->CreateDecoder(cfg, 0);
+ inv_dec_ = codec_->CreateDecoder(cfg, 0);
+ inv_dec_->Control(VP9_INVERT_TILE_DECODE_ORDER, 1);
+ }
+
+ virtual ~TileIndependenceTest() {
+ delete fw_dec_;
+ delete inv_dec_;
+ }
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(libvpx_test::kTwoPassGood);
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (video->frame() == 1) {
+ encoder->Control(VP9E_SET_TILE_COLUMNS, n_tiles_);
+ }
+ }
+
+ void UpdateMD5(::libvpx_test::Decoder *dec, const vpx_codec_cx_pkt_t *pkt,
+ ::libvpx_test::MD5 *md5) {
+ const vpx_codec_err_t res = dec->DecodeFrame(
+ reinterpret_cast<uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz);
+ if (res != VPX_CODEC_OK) {
+ abort_ = true;
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ }
+ const vpx_image_t *img = dec->GetDxData().Next();
+ md5->Add(img);
+ }
+
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ UpdateMD5(fw_dec_, pkt, &md5_fw_order_);
+ UpdateMD5(inv_dec_, pkt, &md5_inv_order_);
+ }
+
+ ::libvpx_test::MD5 md5_fw_order_, md5_inv_order_;
+ ::libvpx_test::Decoder *fw_dec_, *inv_dec_;
+
+ private:
+ int n_tiles_;
+};
+
+// Run an encode with 1 or 2 tile columns (the test parameter is the log2
+// tile-column count) and do the decode both in normal and inverted tile
+// ordering. Ensure that the MD5 of the output in both cases is identical.
+// If so, tiles are considered independent and the test passes.
+TEST_P(TileIndependenceTest, MD5Match) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
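+  // One tick is 33333333 / 1000000000 s, i.e. a 30 fps timebase.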
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 500;
+ cfg_.g_lag_in_frames = 25;
+ cfg_.rc_end_usage = VPX_VBR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 704, 144,
+ timebase.den, timebase.num, 0, 30);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+
+ const char *md5_fw_str = md5_fw_order_.Get();
+ const char *md5_inv_str = md5_inv_order_.Get();
+
+  // We could use ASSERT_EQ(0, memcmp(.., .., 16)) here, but ASSERT_STREQ
+  // gives nicer output if it fails. Even then the output is of limited use,
+  // since it is really just an MD5...
+ ASSERT_STREQ(md5_fw_str, md5_inv_str);
+}
+
+VP9_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1));
+
+} // namespace
diff --git a/libvpx/test/util.h b/libvpx/test/util.h
index 06a70cc..4d7f3d4 100644
--- a/libvpx/test/util.h
+++ b/libvpx/test/util.h
@@ -11,8 +11,38 @@
#ifndef TEST_UTIL_H_
#define TEST_UTIL_H_
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_image.h"
+
// Macros
#define PARAMS(...) ::testing::TestWithParam< std::tr1::tuple< __VA_ARGS__ > >
#define GET_PARAM(k) std::tr1::get< k >(GetParam())
+static double compute_psnr(const vpx_image_t *img1,
+ const vpx_image_t *img2) {
+ assert((img1->fmt == img2->fmt) &&
+ (img1->d_w == img2->d_w) &&
+ (img1->d_h == img2->d_h));
+
+ const unsigned int width_y = img1->d_w;
+ const unsigned int height_y = img1->d_h;
+ unsigned int i, j;
+
+ int64_t sqrerr = 0;
+ for (i = 0; i < height_y; ++i)
+ for (j = 0; j < width_y; ++j) {
+ int64_t d = img1->planes[VPX_PLANE_Y][i * img1->stride[VPX_PLANE_Y] + j] -
+ img2->planes[VPX_PLANE_Y][i * img2->stride[VPX_PLANE_Y] + j];
+ sqrerr += d * d;
+ }
+ double mse = static_cast<double>(sqrerr) / (width_y * height_y);
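+  // PSNR = 10 * log10(MAX^2 / MSE) with MAX = 255. Identical planes give
+  // MSE = 0, so the result is capped at 100 dB.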
+ double psnr = 100.0;
+ if (mse > 0.0) {
+ psnr = 10 * log10(255.0 * 255.0 / mse);
+ }
+ return psnr;
+}
+
#endif // TEST_UTIL_H_
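compute_psnr() above compares only the Y planes. A minimal usage sketch,
assuming img_src and img_dec are two previously filled vpx_image_t frames of
matching format and size (both names are hypothetical, not part of this patch):

    // Hypothetical frames; mismatched format or size trips the assert above.
    const double y_psnr = compute_psnr(&img_src, &img_dec);
    EXPECT_GE(y_psnr, 30.0);  // compute_psnr() returns 100.0 for identical Y planes.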
diff --git a/libvpx/test/variance_test.cc b/libvpx/test/variance_test.cc
new file mode 100644
index 0000000..ca53ffb
--- /dev/null
+++ b/libvpx/test/variance_test.cc
@@ -0,0 +1,694 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <stdlib.h>
+#include <new>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+
+#include "vpx/vpx_integer.h"
+#include "./vpx_config.h"
+extern "C" {
+#include "vpx_mem/vpx_mem.h"
+#if CONFIG_VP8_ENCODER
+# include "vp8/common/variance.h"
+# include "./vp8_rtcd.h"
+#endif
+#if CONFIG_VP9_ENCODER
+# include "vp9/encoder/vp9_variance.h"
+# include "./vp9_rtcd.h"
+#endif
+}
+#include "test/acm_random.h"
+
+namespace {
+
+using ::std::tr1::get;
+using ::std::tr1::make_tuple;
+using ::std::tr1::tuple;
+using libvpx_test::ACMRandom;
+
+static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, unsigned int *sse_ptr) {
+ int se = 0;
+ unsigned int sse = 0;
+ const int w = 1 << l2w, h = 1 << l2h;
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ int diff = ref[w * y + x] - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+ }
+ }
+ *sse_ptr = sse;
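+  // Variance = SSE - SE^2 / N with N = 2^(l2w + l2h) pixels; the shift
+  // implements the division by N.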
+ return sse - (((int64_t) se * se) >> (l2w + l2h));
+}
+
+static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
+ int l2w, int l2h, int xoff, int yoff,
+ unsigned int *sse_ptr) {
+ int se = 0;
+ unsigned int sse = 0;
+ const int w = 1 << l2w, h = 1 << l2h;
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // bilinear interpolation at a 16th pel step
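+      // (ref is laid out with a stride of w + 1, so the x + 1 / y + 1 taps
+      // below stay within the buffer)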
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
+ int diff = r - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+ }
+ }
+ *sse_ptr = sse;
+ return sse - (((int64_t) se * se) >> (l2w + l2h));
+}
+
+static unsigned int subpel_avg_variance_ref(const uint8_t *ref,
+ const uint8_t *src,
+ const uint8_t *second_pred,
+ int l2w, int l2h,
+ int xoff, int yoff,
+ unsigned int *sse_ptr) {
+ int se = 0;
+ unsigned int sse = 0;
+ const int w = 1 << l2w, h = 1 << l2h;
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++) {
+ // bilinear interpolation at a 16th pel step
+ const int a1 = ref[(w + 1) * (y + 0) + x + 0];
+ const int a2 = ref[(w + 1) * (y + 0) + x + 1];
+ const int b1 = ref[(w + 1) * (y + 1) + x + 0];
+ const int b2 = ref[(w + 1) * (y + 1) + x + 1];
+ const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
+ const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
+ const int r = a + (((b - a) * yoff + 8) >> 4);
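+      // Average the interpolated prediction with second_pred, with rounding.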
+ int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
+ se += diff;
+ sse += diff * diff;
+ }
+ }
+ *sse_ptr = sse;
+ return sse - (((int64_t) se * se) >> (l2w + l2h));
+}
+
+template<typename VarianceFunctionType>
+class VarianceTest
+ : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ variance_ = get<2>(params);
+
+    rnd.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ src_ = new uint8_t[block_size_];
+ ref_ = new uint8_t[block_size_];
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ delete[] src_;
+ delete[] ref_;
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ZeroTest();
+ void RefTest();
+ void OneQuarterTest();
+
+ ACMRandom rnd;
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ VarianceFunctionType variance_;
+};
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::ZeroTest() {
+ for (int i = 0; i <= 255; ++i) {
+ memset(src_, i, block_size_);
+ for (int j = 0; j <= 255; ++j) {
+ memset(ref_, j, block_size_);
+ unsigned int sse;
+ unsigned int var;
+ REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
+      EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
+ }
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::RefTest() {
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
+ const unsigned int var2 = variance_ref(src_, ref_, log2width_,
+ log2height_, &sse2);
+ EXPECT_EQ(sse1, sse2);
+ EXPECT_EQ(var1, var2);
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
+ memset(src_, 255, block_size_);
+ const int half = block_size_ / 2;
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+ unsigned int sse;
+ unsigned int var;
+ REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
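+  // Half the diffs are 0 and half are 255: sse = N * 255^2 / 2 and
+  // se = N * 255 / 2, so var = sse - se^2 / N = N * 255^2 / 4.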
+ const unsigned int expected = block_size_ * 255 * 255 / 4;
+ EXPECT_EQ(expected, var);
+}
+
+template<typename SubpelVarianceFunctionType>
+class SubpelVarianceTest
+ : public ::testing::TestWithParam<tuple<int, int,
+ SubpelVarianceFunctionType> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, SubpelVarianceFunctionType>& params =
+ this->GetParam();
+ log2width_ = get<0>(params);
+ width_ = 1 << log2width_;
+ log2height_ = get<1>(params);
+ height_ = 1 << log2height_;
+ subpel_variance_ = get<2>(params);
+
+    rnd.Reset(ACMRandom::DeterministicSeed());
+ block_size_ = width_ * height_;
+ src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
+ ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(sec_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ vpx_free(src_);
+ delete[] ref_;
+ vpx_free(sec_);
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void RefTest();
+
+ ACMRandom rnd;
+ uint8_t *src_;
+ uint8_t *ref_;
+ uint8_t *sec_;
+ int width_, log2width_;
+ int height_, log2height_;
+ int block_size_;
+ SubpelVarianceFunctionType subpel_variance_;
+};
+
+template<typename SubpelVarianceFunctionType>
+void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
+ for (int x = 0; x < 16; ++x) {
+ for (int y = 0; y < 16; ++y) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1));
+ const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
+ log2height_, x, y, &sse2);
+ EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
+ EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
+ }
+ }
+}
+
+template<>
+void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
+ for (int x = 0; x < 16; ++x) {
+ for (int y = 0; y < 16; ++y) {
+ for (int j = 0; j < block_size_; j++) {
+ src_[j] = rnd.Rand8();
+ sec_[j] = rnd.Rand8();
+ }
+ for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
+ ref_[j] = rnd.Rand8();
+ }
+ unsigned int sse1, sse2;
+ unsigned int var1;
+ REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
+ src_, width_, &sse1, sec_));
+ const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
+ log2width_, log2height_,
+ x, y, &sse2);
+ EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
+ EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// VP8 test cases.
+
+namespace vp8 {
+
+#if CONFIG_VP8_ENCODER
+typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+
+TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
+TEST_P(VP8VarianceTest, Ref) { RefTest(); }
+TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+
+const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
+const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
+const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
+const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
+const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VP8VarianceTest,
+ ::testing::Values(make_tuple(2, 2, variance4x4_c),
+ make_tuple(3, 3, variance8x8_c),
+ make_tuple(3, 4, variance8x16_c),
+ make_tuple(4, 3, variance16x8_c),
+ make_tuple(4, 4, variance16x16_c)));
+
+#if HAVE_MMX
+const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
+const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
+const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
+const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
+const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VP8VarianceTest,
+ ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
+ make_tuple(3, 3, variance8x8_mmx),
+ make_tuple(3, 4, variance8x16_mmx),
+ make_tuple(4, 3, variance16x8_mmx),
+ make_tuple(4, 4, variance16x16_mmx)));
+#endif
+
+#if HAVE_SSE2
+const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
+const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
+const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
+const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
+const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VP8VarianceTest,
+ ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
+ make_tuple(3, 3, variance8x8_wmt),
+ make_tuple(3, 4, variance8x16_wmt),
+ make_tuple(4, 3, variance16x8_wmt),
+ make_tuple(4, 4, variance16x16_wmt)));
+#endif
+#endif // CONFIG_VP8_ENCODER
+
+} // namespace vp8
+
+// -----------------------------------------------------------------------------
+// VP9 test cases.
+
+namespace vp9 {
+
+#if CONFIG_VP9_ENCODER
+typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
+typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
+typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
+
+TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
+TEST_P(VP9VarianceTest, Ref) { RefTest(); }
+TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
+TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
+TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
+
+const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
+const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
+const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
+const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
+const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
+const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
+const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
+const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
+const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
+const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
+const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
+const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
+const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VP9VarianceTest,
+ ::testing::Values(make_tuple(2, 2, variance4x4_c),
+ make_tuple(2, 3, variance4x8_c),
+ make_tuple(3, 2, variance8x4_c),
+ make_tuple(3, 3, variance8x8_c),
+ make_tuple(3, 4, variance8x16_c),
+ make_tuple(4, 3, variance16x8_c),
+ make_tuple(4, 4, variance16x16_c),
+ make_tuple(4, 5, variance16x32_c),
+ make_tuple(5, 4, variance32x16_c),
+ make_tuple(5, 5, variance32x32_c),
+ make_tuple(5, 6, variance32x64_c),
+ make_tuple(6, 5, variance64x32_c),
+ make_tuple(6, 6, variance64x64_c)));
+
+const vp9_subpixvariance_fn_t subpel_variance4x4_c =
+ vp9_sub_pixel_variance4x4_c;
+const vp9_subpixvariance_fn_t subpel_variance4x8_c =
+ vp9_sub_pixel_variance4x8_c;
+const vp9_subpixvariance_fn_t subpel_variance8x4_c =
+ vp9_sub_pixel_variance8x4_c;
+const vp9_subpixvariance_fn_t subpel_variance8x8_c =
+ vp9_sub_pixel_variance8x8_c;
+const vp9_subpixvariance_fn_t subpel_variance8x16_c =
+ vp9_sub_pixel_variance8x16_c;
+const vp9_subpixvariance_fn_t subpel_variance16x8_c =
+ vp9_sub_pixel_variance16x8_c;
+const vp9_subpixvariance_fn_t subpel_variance16x16_c =
+ vp9_sub_pixel_variance16x16_c;
+const vp9_subpixvariance_fn_t subpel_variance16x32_c =
+ vp9_sub_pixel_variance16x32_c;
+const vp9_subpixvariance_fn_t subpel_variance32x16_c =
+ vp9_sub_pixel_variance32x16_c;
+const vp9_subpixvariance_fn_t subpel_variance32x32_c =
+ vp9_sub_pixel_variance32x32_c;
+const vp9_subpixvariance_fn_t subpel_variance32x64_c =
+ vp9_sub_pixel_variance32x64_c;
+const vp9_subpixvariance_fn_t subpel_variance64x32_c =
+ vp9_sub_pixel_variance64x32_c;
+const vp9_subpixvariance_fn_t subpel_variance64x64_c =
+ vp9_sub_pixel_variance64x64_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VP9SubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
+ make_tuple(2, 3, subpel_variance4x8_c),
+ make_tuple(3, 2, subpel_variance8x4_c),
+ make_tuple(3, 3, subpel_variance8x8_c),
+ make_tuple(3, 4, subpel_variance8x16_c),
+ make_tuple(4, 3, subpel_variance16x8_c),
+ make_tuple(4, 4, subpel_variance16x16_c),
+ make_tuple(4, 5, subpel_variance16x32_c),
+ make_tuple(5, 4, subpel_variance32x16_c),
+ make_tuple(5, 5, subpel_variance32x32_c),
+ make_tuple(5, 6, subpel_variance32x64_c),
+ make_tuple(6, 5, subpel_variance64x32_c),
+ make_tuple(6, 6, subpel_variance64x64_c)));
+
+const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
+ vp9_sub_pixel_avg_variance4x4_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
+ vp9_sub_pixel_avg_variance4x8_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
+ vp9_sub_pixel_avg_variance8x4_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
+ vp9_sub_pixel_avg_variance8x8_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
+ vp9_sub_pixel_avg_variance8x16_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
+ vp9_sub_pixel_avg_variance16x8_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
+ vp9_sub_pixel_avg_variance16x16_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
+ vp9_sub_pixel_avg_variance16x32_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
+ vp9_sub_pixel_avg_variance32x16_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
+ vp9_sub_pixel_avg_variance32x32_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
+ vp9_sub_pixel_avg_variance32x64_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
+ vp9_sub_pixel_avg_variance64x32_c;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
+ vp9_sub_pixel_avg_variance64x64_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VP9SubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
+ make_tuple(2, 3, subpel_avg_variance4x8_c),
+ make_tuple(3, 2, subpel_avg_variance8x4_c),
+ make_tuple(3, 3, subpel_avg_variance8x8_c),
+ make_tuple(3, 4, subpel_avg_variance8x16_c),
+ make_tuple(4, 3, subpel_avg_variance16x8_c),
+ make_tuple(4, 4, subpel_avg_variance16x16_c),
+ make_tuple(4, 5, subpel_avg_variance16x32_c),
+ make_tuple(5, 4, subpel_avg_variance32x16_c),
+ make_tuple(5, 5, subpel_avg_variance32x32_c),
+ make_tuple(5, 6, subpel_avg_variance32x64_c),
+ make_tuple(6, 5, subpel_avg_variance64x32_c),
+ make_tuple(6, 6, subpel_avg_variance64x64_c)));
+
+#if HAVE_MMX
+const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
+const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
+const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
+const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
+const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VP9VarianceTest,
+ ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
+ make_tuple(3, 3, variance8x8_mmx),
+ make_tuple(3, 4, variance8x16_mmx),
+ make_tuple(4, 3, variance16x8_mmx),
+ make_tuple(4, 4, variance16x16_mmx)));
+#endif
+
+#if HAVE_SSE2
+#if CONFIG_USE_X86INC
+const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
+const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
+const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
+const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
+const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
+const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
+const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
+const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
+const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
+const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
+const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
+const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
+const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VP9VarianceTest,
+ ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
+ make_tuple(2, 3, variance4x8_sse2),
+ make_tuple(3, 2, variance8x4_sse2),
+ make_tuple(3, 3, variance8x8_sse2),
+ make_tuple(3, 4, variance8x16_sse2),
+ make_tuple(4, 3, variance16x8_sse2),
+ make_tuple(4, 4, variance16x16_sse2),
+ make_tuple(4, 5, variance16x32_sse2),
+ make_tuple(5, 4, variance32x16_sse2),
+ make_tuple(5, 5, variance32x32_sse2),
+ make_tuple(5, 6, variance32x64_sse2),
+ make_tuple(6, 5, variance64x32_sse2),
+ make_tuple(6, 6, variance64x64_sse2)));
+
+const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
+ vp9_sub_pixel_variance4x4_sse;
+const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
+ vp9_sub_pixel_variance4x8_sse;
+const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
+ vp9_sub_pixel_variance8x4_sse2;
+const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
+ vp9_sub_pixel_variance8x8_sse2;
+const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
+ vp9_sub_pixel_variance8x16_sse2;
+const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
+ vp9_sub_pixel_variance16x8_sse2;
+const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
+ vp9_sub_pixel_variance16x16_sse2;
+const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
+ vp9_sub_pixel_variance16x32_sse2;
+const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
+ vp9_sub_pixel_variance32x16_sse2;
+const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
+ vp9_sub_pixel_variance32x32_sse2;
+const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
+ vp9_sub_pixel_variance32x64_sse2;
+const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
+ vp9_sub_pixel_variance64x32_sse2;
+const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
+ vp9_sub_pixel_variance64x64_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VP9SubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
+ make_tuple(2, 3, subpel_variance4x8_sse),
+ make_tuple(3, 2, subpel_variance8x4_sse2),
+ make_tuple(3, 3, subpel_variance8x8_sse2),
+ make_tuple(3, 4, subpel_variance8x16_sse2),
+ make_tuple(4, 3, subpel_variance16x8_sse2),
+ make_tuple(4, 4, subpel_variance16x16_sse2),
+ make_tuple(4, 5, subpel_variance16x32_sse2),
+ make_tuple(5, 4, subpel_variance32x16_sse2),
+ make_tuple(5, 5, subpel_variance32x32_sse2),
+ make_tuple(5, 6, subpel_variance32x64_sse2),
+ make_tuple(6, 5, subpel_variance64x32_sse2),
+ make_tuple(6, 6, subpel_variance64x64_sse2)));
+
+const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
+ vp9_sub_pixel_avg_variance4x4_sse;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
+ vp9_sub_pixel_avg_variance4x8_sse;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
+ vp9_sub_pixel_avg_variance8x4_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
+ vp9_sub_pixel_avg_variance8x8_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
+ vp9_sub_pixel_avg_variance8x16_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
+ vp9_sub_pixel_avg_variance16x8_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
+ vp9_sub_pixel_avg_variance16x16_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
+ vp9_sub_pixel_avg_variance16x32_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
+ vp9_sub_pixel_avg_variance32x16_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
+ vp9_sub_pixel_avg_variance32x32_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
+ vp9_sub_pixel_avg_variance32x64_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
+ vp9_sub_pixel_avg_variance64x32_sse2;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
+ vp9_sub_pixel_avg_variance64x64_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VP9SubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
+ make_tuple(2, 3, subpel_avg_variance4x8_sse),
+ make_tuple(3, 2, subpel_avg_variance8x4_sse2),
+ make_tuple(3, 3, subpel_avg_variance8x8_sse2),
+ make_tuple(3, 4, subpel_avg_variance8x16_sse2),
+ make_tuple(4, 3, subpel_avg_variance16x8_sse2),
+ make_tuple(4, 4, subpel_avg_variance16x16_sse2),
+ make_tuple(4, 5, subpel_avg_variance16x32_sse2),
+ make_tuple(5, 4, subpel_avg_variance32x16_sse2),
+ make_tuple(5, 5, subpel_avg_variance32x32_sse2),
+ make_tuple(5, 6, subpel_avg_variance32x64_sse2),
+ make_tuple(6, 5, subpel_avg_variance64x32_sse2),
+ make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
+#endif
+#endif
+
+#if HAVE_SSSE3
+#if CONFIG_USE_X86INC
+
+const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
+ vp9_sub_pixel_variance4x4_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
+ vp9_sub_pixel_variance4x8_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
+ vp9_sub_pixel_variance8x4_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
+ vp9_sub_pixel_variance8x8_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
+ vp9_sub_pixel_variance8x16_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
+ vp9_sub_pixel_variance16x8_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
+ vp9_sub_pixel_variance16x16_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
+ vp9_sub_pixel_variance16x32_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
+ vp9_sub_pixel_variance32x16_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
+ vp9_sub_pixel_variance32x32_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
+ vp9_sub_pixel_variance32x64_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
+ vp9_sub_pixel_variance64x32_ssse3;
+const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
+ vp9_sub_pixel_variance64x64_ssse3;
+INSTANTIATE_TEST_CASE_P(
+ SSSE3, VP9SubpelVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
+ make_tuple(2, 3, subpel_variance4x8_ssse3),
+ make_tuple(3, 2, subpel_variance8x4_ssse3),
+ make_tuple(3, 3, subpel_variance8x8_ssse3),
+ make_tuple(3, 4, subpel_variance8x16_ssse3),
+ make_tuple(4, 3, subpel_variance16x8_ssse3),
+ make_tuple(4, 4, subpel_variance16x16_ssse3),
+ make_tuple(4, 5, subpel_variance16x32_ssse3),
+ make_tuple(5, 4, subpel_variance32x16_ssse3),
+ make_tuple(5, 5, subpel_variance32x32_ssse3),
+ make_tuple(5, 6, subpel_variance32x64_ssse3),
+ make_tuple(6, 5, subpel_variance64x32_ssse3),
+ make_tuple(6, 6, subpel_variance64x64_ssse3)));
+
+const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
+ vp9_sub_pixel_avg_variance4x4_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
+ vp9_sub_pixel_avg_variance4x8_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
+ vp9_sub_pixel_avg_variance8x4_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
+ vp9_sub_pixel_avg_variance8x8_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
+ vp9_sub_pixel_avg_variance8x16_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
+ vp9_sub_pixel_avg_variance16x8_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
+ vp9_sub_pixel_avg_variance16x16_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
+ vp9_sub_pixel_avg_variance16x32_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
+ vp9_sub_pixel_avg_variance32x16_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
+ vp9_sub_pixel_avg_variance32x32_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
+ vp9_sub_pixel_avg_variance32x64_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
+ vp9_sub_pixel_avg_variance64x32_ssse3;
+const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
+ vp9_sub_pixel_avg_variance64x64_ssse3;
+INSTANTIATE_TEST_CASE_P(
+ SSSE3, VP9SubpelAvgVarianceTest,
+ ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
+ make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
+ make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
+ make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
+ make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
+ make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
+ make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
+ make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
+ make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
+ make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
+ make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
+ make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
+ make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
+#endif
+#endif
+#endif // CONFIG_VP9_ENCODER
+
+} // namespace vp9
+
+} // namespace
diff --git a/libvpx/test/video_source.h b/libvpx/test/video_source.h
index 9772657..26d5328 100644
--- a/libvpx/test/video_source.h
+++ b/libvpx/test/video_source.h
@@ -103,7 +103,7 @@ class DummyVideoSource : public VideoSource {
if (width != width_ || height != height_) {
vpx_img_free(img_);
raw_sz_ = ((width + 31)&~31) * height * 3 / 2;
- img_ = vpx_img_alloc(NULL, VPX_IMG_FMT_VPXI420, width, height, 32);
+ img_ = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, width, height, 32);
width_ = width;
height_ = height;
}
diff --git a/libvpx/test/boolcoder_test.cc b/libvpx/test/vp8_boolcoder_test.cc
index 4e21be8..0383af2 100644
--- a/libvpx/test/boolcoder_test.cc
+++ b/libvpx/test/vp8_boolcoder_test.cc
@@ -8,10 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-extern "C" {
-#include "vp8/encoder/boolhuff.h"
-#include "vp8/decoder/dboolhuff.h"
-}
#include <math.h>
#include <stddef.h>
@@ -24,8 +20,37 @@ extern "C" {
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "vpx/vpx_integer.h"
+extern "C" {
+#include "vp8/encoder/boolhuff.h"
+#include "vp8/decoder/dboolhuff.h"
+}
+
namespace {
const int num_tests = 10;
+
+// In real use, the 'decrypt_state' parameter will be a pointer to a struct
+// holding whatever internal state the decryptor uses. For testing we just
+// xor with a constant key, and decrypt_state points to the start of the
+// original buffer.
+const uint8_t secret_key[16] = {
+ 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0x78,
+ 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xf0
+};
+
+void encrypt_buffer(uint8_t *buffer, int size) {
+ for (int i = 0; i < size; ++i) {
+ buffer[i] ^= secret_key[i & 15];
+ }
+}
+
+void test_decrypt_cb(void *decrypt_state, const uint8_t *input,
+ uint8_t *output, int count) {
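+  // The xor keystream is position-dependent, so first recover the absolute
+  // offset of 'input' within the original buffer.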
+ int offset = input - reinterpret_cast<uint8_t *>(decrypt_state);
+ for (int i = 0; i < count; i++) {
+ output[i] = input[i] ^ secret_key[(offset + i) & 15];
+ }
+}
+
} // namespace
using libvpx_test::ACMRandom;
@@ -34,10 +59,10 @@ TEST(VP8, TestBitIO) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
for (int n = 0; n < num_tests; ++n) {
for (int method = 0; method <= 7; ++method) { // we generate various proba
- const int bits_to_test = 1000;
- uint8_t probas[bits_to_test];
+ const int kBitsToTest = 1000;
+ uint8_t probas[kBitsToTest];
- for (int i = 0; i < bits_to_test; ++i) {
+ for (int i = 0; i < kBitsToTest; ++i) {
const int parity = i & 1;
probas[i] =
(method == 0) ? 0 : (method == 1) ? 255 :
@@ -52,14 +77,14 @@ TEST(VP8, TestBitIO) {
}
for (int bit_method = 0; bit_method <= 3; ++bit_method) {
const int random_seed = 6432;
- const int buffer_size = 10000;
+ const int kBufferSize = 10000;
ACMRandom bit_rnd(random_seed);
BOOL_CODER bw;
- uint8_t bw_buffer[buffer_size];
- vp8_start_encode(&bw, bw_buffer, bw_buffer + buffer_size);
+ uint8_t bw_buffer[kBufferSize];
+ vp8_start_encode(&bw, bw_buffer, bw_buffer + kBufferSize);
int bit = (bit_method == 0) ? 0 : (bit_method == 1) ? 1 : 0;
- for (int i = 0; i < bits_to_test; ++i) {
+ for (int i = 0; i < kBitsToTest; ++i) {
if (bit_method == 2) {
bit = (i & 1);
} else if (bit_method == 3) {
@@ -71,16 +96,23 @@ TEST(VP8, TestBitIO) {
vp8_stop_encode(&bw);
BOOL_DECODER br;
- vp8dx_start_decode(&br, bw_buffer, buffer_size);
+#if CONFIG_DECRYPT
+      encrypt_buffer(bw_buffer, kBufferSize);
+      vp8dx_start_decode(&br, bw_buffer, kBufferSize,
+ test_decrypt_cb,
+ reinterpret_cast<void *>(bw_buffer));
+#else
+ vp8dx_start_decode(&br, bw_buffer, kBufferSize, NULL, NULL);
+#endif
bit_rnd.Reset(random_seed);
- for (int i = 0; i < bits_to_test; ++i) {
+ for (int i = 0; i < kBitsToTest; ++i) {
if (bit_method == 2) {
bit = (i & 1);
} else if (bit_method == 3) {
bit = bit_rnd(2);
}
GTEST_ASSERT_EQ(vp8dx_decode_bool(&br, probas[i]), bit)
- << "pos: "<< i << " / " << bits_to_test
+ << "pos: "<< i << " / " << kBitsToTest
<< " bit_method: " << bit_method
<< " method: " << method;
}
diff --git a/libvpx/test/vp8_decrypt_test.cc b/libvpx/test/vp8_decrypt_test.cc
new file mode 100644
index 0000000..b092509
--- /dev/null
+++ b/libvpx/test/vp8_decrypt_test.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/ivf_video_source.h"
+
+namespace {
+// In real use, the 'decrypt_state' parameter will be a pointer to a struct
+// holding whatever internal state the decryptor uses. For testing we just
+// xor with a constant key, and decrypt_state points to the start of the
+// original buffer.
+const uint8_t test_key[16] = {
+ 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0x78,
+ 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xf0
+};
+
+void encrypt_buffer(const uint8_t *src, uint8_t *dst,
+ int size, int offset = 0) {
+ for (int i = 0; i < size; ++i) {
+ dst[i] = src[i] ^ test_key[(offset + i) & 15];
+ }
+}
+
+void test_decrypt_cb(void *decrypt_state, const uint8_t *input,
+ uint8_t *output, int count) {
+ encrypt_buffer(input, output, count,
+ input - reinterpret_cast<uint8_t *>(decrypt_state));
+}
+
+} // namespace
+
+namespace libvpx_test {
+
+TEST(TestDecrypt, DecryptWorks) {
+ libvpx_test::IVFVideoSource video("vp80-00-comprehensive-001.ivf");
+ video.Init();
+
+ vpx_codec_dec_cfg_t dec_cfg = {0};
+ VP8Decoder decoder(dec_cfg, 0);
+
+ video.Begin();
+
+ // no decryption
+ vpx_codec_err_t res = decoder.DecodeFrame(video.cxdata(), video.frame_size());
+ ASSERT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+
+ // decrypt frame
+ video.Next();
+
+#if CONFIG_DECRYPT
+ std::vector<uint8_t> encrypted(video.frame_size());
+ encrypt_buffer(video.cxdata(), &encrypted[0], video.frame_size());
+ vp8_decrypt_init di = { test_decrypt_cb, &encrypted[0] };
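+  // Hand the callback and its opaque state to the decoder so the encrypted
+  // buffer is decrypted transparently as it is parsed.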
+ decoder.Control(VP8D_SET_DECRYPTOR, &di);
+#endif // CONFIG_DECRYPT
+
+ res = decoder.DecodeFrame(video.cxdata(), video.frame_size());
+ ASSERT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+}
+
+} // namespace libvpx_test
diff --git a/libvpx/test/vp8_fdct4x4_test.cc b/libvpx/test/vp8_fdct4x4_test.cc
new file mode 100644
index 0000000..c823436
--- /dev/null
+++ b/libvpx/test/vp8_fdct4x4_test.cc
@@ -0,0 +1,169 @@
+/*
+* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+
+#include <math.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+
+extern "C" {
+#include "./vp8_rtcd.h"
+}
+
+#include "test/acm_random.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_integer.h"
+
+
+namespace {
+
+const int cospi8sqrt2minus1 = 20091;
+const int sinpi8sqrt2 = 35468;
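+// Q16 fixed-point constants from the VP8 inverse DCT:
+// 20091 ~ (sqrt(2) * cos(pi / 8) - 1) * 2^16 and
+// 35468 ~ sqrt(2) * sin(pi / 8) * 2^16.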
+
+void reference_idct4x4(const int16_t *input, int16_t *output) {
+ const int16_t *ip = input;
+ int16_t *op = output;
+
+ for (int i = 0; i < 4; ++i) {
+ const int a1 = ip[0] + ip[8];
+ const int b1 = ip[0] - ip[8];
+ const int temp1 = (ip[4] * sinpi8sqrt2) >> 16;
+ const int temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
+ const int c1 = temp1 - temp2;
+ const int temp3 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
+ const int temp4 = (ip[12] * sinpi8sqrt2) >> 16;
+ const int d1 = temp3 + temp4;
+ op[0] = a1 + d1;
+ op[12] = a1 - d1;
+ op[4] = b1 + c1;
+ op[8] = b1 - c1;
+ ++ip;
+ ++op;
+ }
+ ip = output;
+ op = output;
+ for (int i = 0; i < 4; ++i) {
+ const int a1 = ip[0] + ip[2];
+ const int b1 = ip[0] - ip[2];
+ const int temp1 = (ip[1] * sinpi8sqrt2) >> 16;
+ const int temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
+ const int c1 = temp1 - temp2;
+ const int temp3 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
+ const int temp4 = (ip[3] * sinpi8sqrt2) >> 16;
+ const int d1 = temp3 + temp4;
+ op[0] = (a1 + d1 + 4) >> 3;
+ op[3] = (a1 - d1 + 4) >> 3;
+ op[1] = (b1 + c1 + 4) >> 3;
+ op[2] = (b1 - c1 + 4) >> 3;
+ ip += 4;
+ op += 4;
+ }
+}
+
+using libvpx_test::ACMRandom;
+
+TEST(Vp8FdctTest, SignBiasCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int16_t test_input_block[16];
+ int16_t test_output_block[16];
+ const int pitch = 8;
+ int count_sign_block[16][2];
+ const int count_test_block = 1000000;
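+  // With 1,000,000 random blocks, a gap of 10,000 between the negative and
+  // positive counts of a coefficient corresponds to a 1% sign bias (and
+  // 100,000 to 10% for the narrower input range tested below).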
+
+ memset(count_sign_block, 0, sizeof(count_sign_block));
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 16; ++j)
+ test_input_block[j] = rnd.Rand8() - rnd.Rand8();
+
+ vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch);
+
+ for (int j = 0; j < 16; ++j) {
+ if (test_output_block[j] < 0)
+ ++count_sign_block[j][0];
+ else if (test_output_block[j] > 0)
+ ++count_sign_block[j][1];
+ }
+ }
+
+ bool bias_acceptable = true;
+ for (int j = 0; j < 16; ++j)
+ bias_acceptable = bias_acceptable &&
+ (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 10000);
+
+  EXPECT_TRUE(bias_acceptable)
+      << "Error: 4x4 FDCT has a sign bias > 1% for input range [-255, 255]";
+
+ memset(count_sign_block, 0, sizeof(count_sign_block));
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-15, 15].
+ for (int j = 0; j < 16; ++j)
+ test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
+
+ vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch);
+
+ for (int j = 0; j < 16; ++j) {
+ if (test_output_block[j] < 0)
+ ++count_sign_block[j][0];
+ else if (test_output_block[j] > 0)
+ ++count_sign_block[j][1];
+ }
+ }
+
+ bias_acceptable = true;
+ for (int j = 0; j < 16; ++j)
+ bias_acceptable = bias_acceptable &&
+ (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 100000);
+
+  EXPECT_TRUE(bias_acceptable)
+      << "Error: 4x4 FDCT has a sign bias > 10% for input range [-15, 15]";
+}
+
+TEST(Vp8FdctTest, RoundTripErrorCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int max_error = 0;
+ double total_error = 0;
+ const int count_test_block = 1000000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t test_input_block[16];
+ int16_t test_temp_block[16];
+ int16_t test_output_block[16];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 16; ++j)
+ test_input_block[j] = rnd.Rand8() - rnd.Rand8();
+
+ const int pitch = 8;
+ vp8_short_fdct4x4_c(test_input_block, test_temp_block, pitch);
+ reference_idct4x4(test_temp_block, test_output_block);
+
+ for (int j = 0; j < 16; ++j) {
+ const int diff = test_input_block[j] - test_output_block[j];
+ const int error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+ }
+
+  EXPECT_GE(1, max_error)
+ << "Error: FDCT/IDCT has an individual roundtrip error > 1";
+
+ EXPECT_GE(count_test_block, total_error)
+ << "Error: FDCT/IDCT has average roundtrip error > 1 per block";
+}
+
+} // namespace
diff --git a/libvpx/test/vp9_boolcoder_test.cc b/libvpx/test/vp9_boolcoder_test.cc
new file mode 100644
index 0000000..5edde90
--- /dev/null
+++ b/libvpx/test/vp9_boolcoder_test.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+extern "C" {
+#include "vp9/encoder/vp9_boolhuff.h"
+#include "vp9/decoder/vp9_dboolhuff.h"
+}
+
+#include "test/acm_random.h"
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+const int num_tests = 10;
+} // namespace
+
+TEST(VP9, TestBitIO) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ for (int n = 0; n < num_tests; ++n) {
+    for (int method = 0; method <= 7; ++method) {  // generate various probabilities
+ const int kBitsToTest = 1000;
+ uint8_t probas[kBitsToTest];
+
+ for (int i = 0; i < kBitsToTest; ++i) {
+ const int parity = i & 1;
+ probas[i] =
+ (method == 0) ? 0 : (method == 1) ? 255 :
+ (method == 2) ? 128 :
+ (method == 3) ? rnd.Rand8() :
+ (method == 4) ? (parity ? 0 : 255) :
+ // alternate between low and high proba:
+ (method == 5) ? (parity ? rnd(128) : 255 - rnd(128)) :
+ (method == 6) ?
+ (parity ? rnd(64) : 255 - rnd(64)) :
+ (parity ? rnd(32) : 255 - rnd(32));
+ }
+ for (int bit_method = 0; bit_method <= 3; ++bit_method) {
+ const int random_seed = 6432;
+ const int kBufferSize = 10000;
+ ACMRandom bit_rnd(random_seed);
+ vp9_writer bw;
+ uint8_t bw_buffer[kBufferSize];
+ vp9_start_encode(&bw, bw_buffer);
+
+ int bit = (bit_method == 0) ? 0 : (bit_method == 1) ? 1 : 0;
+ for (int i = 0; i < kBitsToTest; ++i) {
+ if (bit_method == 2) {
+ bit = (i & 1);
+ } else if (bit_method == 3) {
+ bit = bit_rnd(2);
+ }
+ vp9_write(&bw, bit, static_cast<int>(probas[i]));
+ }
+
+ vp9_stop_encode(&bw);
+
+ // First bit should be zero
+ GTEST_ASSERT_EQ(bw_buffer[0] & 0x80, 0);
+
+ vp9_reader br;
+ vp9_reader_init(&br, bw_buffer, kBufferSize);
+ bit_rnd.Reset(random_seed);
+ for (int i = 0; i < kBitsToTest; ++i) {
+ if (bit_method == 2) {
+ bit = (i & 1);
+ } else if (bit_method == 3) {
+ bit = bit_rnd(2);
+ }
+ GTEST_ASSERT_EQ(vp9_read(&br, probas[i]), bit)
+ << "pos: " << i << " / " << kBitsToTest
+ << " bit_method: " << bit_method
+ << " method: " << method;
+ }
+ }
+ }
+ }
+}
diff --git a/libvpx/test/vp9_lossless_test.cc b/libvpx/test/vp9_lossless_test.cc
new file mode 100644
index 0000000..441cc44
--- /dev/null
+++ b/libvpx/test/vp9_lossless_test.cc
@@ -0,0 +1,75 @@
+/*
+ Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+
+ Use of this source code is governed by a BSD-style license
+ that can be found in the LICENSE file in the root of the source
+ tree. An additional intellectual property rights grant can be found
+ in the file PATENTS. All contributing project authors may
+ be found in the AUTHORS file in the root of the source tree.
+*/
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
+#include "test/util.h"
+
+namespace {
+
+const int kMaxPsnr = 100;
+
+class LossLessTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ protected:
+ LossLessTest() : EncoderTest(GET_PARAM(0)),
+ psnr_(kMaxPsnr),
+ nframes_(0),
+ encoding_mode_(GET_PARAM(1)) {
+ }
+
+ virtual ~LossLessTest() {}
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(encoding_mode_);
+ }
+
+ virtual void BeginPassHook(unsigned int /*pass*/) {
+ psnr_ = kMaxPsnr; // GetMinPsnr() tracks a minimum, so start at the ceiling
+ nframes_ = 0;
+ }
+
+ virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (pkt->data.psnr.psnr[0] < psnr_)
+ psnr_ = pkt->data.psnr.psnr[0];
+ }
+
+ double GetMinPsnr() const {
+ return psnr_;
+ }
+
+ private:
+ double psnr_;
+ unsigned int nframes_;
+ libvpx_test::TestMode encoding_mode_;
+};
+
+TEST_P(LossLessTest, TestLossLessEncoding) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 2000;
+ cfg_.g_lag_in_frames = 25;
+ cfg_.rc_min_quantizer = 0;
+ cfg_.rc_max_quantizer = 0;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ // intentionally use dimensions that differ from the file's 352x288 for better test coverage
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 356, 284,
+ timebase.den, timebase.num, 0, 30);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ const double psnr_lossless = GetMinPsnr();
+ EXPECT_GE(psnr_lossless, kMaxPsnr);
+}
+VP9_INSTANTIATE_TEST_CASE(LossLessTest, ALL_TEST_MODES);
+} // namespace
diff --git a/libvpx/test/vp9_subtract_test.cc b/libvpx/test/vp9_subtract_test.cc
new file mode 100644
index 0000000..332a839
--- /dev/null
+++ b/libvpx/test/vp9_subtract_test.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+extern "C" {
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vpx_mem/vpx_mem.h"
+}
+
+typedef void (*subtract_fn_t)(int rows, int cols,
+ int16_t *diff_ptr, ptrdiff_t diff_stride,
+ const uint8_t *src_ptr, ptrdiff_t src_stride,
+ const uint8_t *pred_ptr, ptrdiff_t pred_stride);
+
+namespace vp9 {
+
+class VP9SubtractBlockTest : public ::testing::TestWithParam<subtract_fn_t> {
+ public:
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+};
+
+using libvpx_test::ACMRandom;
+
+TEST_P(VP9SubtractBlockTest, SimpleSubtract) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+
+ // FIXME(rbultje): split this into its own file
+ for (BLOCK_SIZE bsize = BLOCK_4X4; bsize < BLOCK_SIZES;
+ bsize = static_cast<BLOCK_SIZE>(static_cast<int>(bsize) + 1)) {
+ const int block_width = 4 << b_width_log2(bsize);
+ const int block_height = 4 << b_height_log2(bsize);
+ int16_t *diff = reinterpret_cast<int16_t *>(
+ vpx_memalign(16, sizeof(*diff) * block_width * block_height * 2));
+ uint8_t *pred = reinterpret_cast<uint8_t *>(
+ vpx_memalign(16, block_width * block_height * 2));
+ uint8_t *src = reinterpret_cast<uint8_t *>(
+ vpx_memalign(16, block_width * block_height * 2));
+
+ for (int n = 0; n < 100; n++) {
+ for (int r = 0; r < block_height; ++r) {
+ for (int c = 0; c < block_width * 2; ++c) {
+ src[r * block_width * 2 + c] = rnd.Rand8();
+ pred[r * block_width * 2 + c] = rnd.Rand8();
+ }
+ }
+
+ GetParam()(block_height, block_width, diff, block_width,
+ src, block_width, pred, block_width);
+
+ for (int r = 0; r < block_height; ++r) {
+ for (int c = 0; c < block_width; ++c) {
+ EXPECT_EQ(diff[r * block_width + c],
+ (src[r * block_width + c] -
+ pred[r * block_width + c])) << "r = " << r
+ << ", c = " << c
+ << ", bs = " << bsize;
+ }
+ }
+
+ GetParam()(block_height, block_width, diff, block_width * 2,
+ src, block_width * 2, pred, block_width * 2);
+
+ for (int r = 0; r < block_height; ++r) {
+ for (int c = 0; c < block_width; ++c) {
+ EXPECT_EQ(diff[r * block_width * 2 + c],
+ (src[r * block_width * 2 + c] -
+ pred[r * block_width * 2 + c])) << "r = " << r
+ << ", c = " << c
+ << ", bs = " << bsize;
+ }
+ }
+ }
+ vpx_free(diff);
+ vpx_free(pred);
+ vpx_free(src);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(C, VP9SubtractBlockTest,
+ ::testing::Values(vp9_subtract_block_c));
+
+#if HAVE_SSE2 && CONFIG_USE_X86INC
+INSTANTIATE_TEST_CASE_P(SSE2, VP9SubtractBlockTest,
+ ::testing::Values(vp9_subtract_block_sse2));
+#endif
+} // namespace vp9
diff --git a/libvpx/test/vp9_thread_test.cc b/libvpx/test/vp9_thread_test.cc
new file mode 100644
index 0000000..41d22dd
--- /dev/null
+++ b/libvpx/test/vp9_thread_test.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/decoder/vp9_thread.h"
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/decode_test_driver.h"
+#include "test/md5_helper.h"
+#include "test/webm_video_source.h"
+
+namespace {
+
+class VP9WorkerThreadTest : public ::testing::Test {
+ protected:
+ virtual ~VP9WorkerThreadTest() {}
+ virtual void SetUp() {
+ vp9_worker_init(&worker_);
+ }
+
+ virtual void TearDown() {
+ vp9_worker_end(&worker_);
+ }
+
+ VP9Worker worker_;
+};
+
+int ThreadHook(void* data, void* return_value) {
+ int* const hook_data = reinterpret_cast<int*>(data);
+ *hook_data = 5;
+ return *reinterpret_cast<int*>(return_value);
+}
+
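+// The tests below exercise the worker lifecycle as used here:
+// vp9_worker_init(), then per run vp9_worker_reset(), assign hook/data1/data2,
+// vp9_worker_launch(), vp9_worker_sync() (which reflects the hook's status),
+// and finally vp9_worker_end() in TearDown().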
+TEST_F(VP9WorkerThreadTest, HookSuccess) {
+ EXPECT_TRUE(vp9_worker_sync(&worker_)); // should be a no-op.
+
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_TRUE(vp9_worker_reset(&worker_));
+
+ int hook_data = 0;
+ int return_value = 1; // return successfully from the hook
+ worker_.hook = ThreadHook;
+ worker_.data1 = &hook_data;
+ worker_.data2 = &return_value;
+
+ vp9_worker_launch(&worker_);
+ EXPECT_TRUE(vp9_worker_sync(&worker_));
+ EXPECT_FALSE(worker_.had_error);
+ EXPECT_EQ(5, hook_data);
+
+ EXPECT_TRUE(vp9_worker_sync(&worker_)); // should be a no-op.
+ }
+}
+
+TEST_F(VP9WorkerThreadTest, HookFailure) {
+ EXPECT_TRUE(vp9_worker_reset(&worker_));
+
+ int hook_data = 0;
+ int return_value = 0; // return failure from the hook
+ worker_.hook = ThreadHook;
+ worker_.data1 = &hook_data;
+ worker_.data2 = &return_value;
+
+ vp9_worker_launch(&worker_);
+ EXPECT_FALSE(vp9_worker_sync(&worker_));
+ EXPECT_TRUE(worker_.had_error);
+
+ // Ensure _reset() clears the error and _launch() can be called again.
+ return_value = 1;
+ EXPECT_TRUE(vp9_worker_reset(&worker_));
+ EXPECT_FALSE(worker_.had_error);
+ vp9_worker_launch(&worker_);
+ EXPECT_TRUE(vp9_worker_sync(&worker_));
+ EXPECT_FALSE(worker_.had_error);
+}
+
+TEST(VP9DecodeMTTest, MTDecode) {
+ libvpx_test::WebMVideoSource video("vp90-2-03-size-226x226.webm");
+ video.Init();
+
+ vpx_codec_dec_cfg_t cfg = {0};
+ cfg.threads = 2;
+ libvpx_test::VP9Decoder decoder(cfg, 0);
+
+ libvpx_test::MD5 md5;
+ for (video.Begin(); video.cxdata(); video.Next()) {
+ const vpx_codec_err_t res =
+ decoder.DecodeFrame(video.cxdata(), video.frame_size());
+ ASSERT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+
+ libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
+ const vpx_image_t *img = NULL;
+
+ // Get decompressed data
+ while ((img = dec_iter.Next())) {
+ md5.Add(img);
+ }
+ }
+ EXPECT_STREQ("b35a1b707b28e82be025d960aba039bc", md5.Get());
+}
+
+} // namespace
diff --git a/libvpx/test/webm_video_source.h b/libvpx/test/webm_video_source.h
new file mode 100644
index 0000000..9fc8545
--- /dev/null
+++ b/libvpx/test/webm_video_source.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef TEST_WEBM_VIDEO_SOURCE_H_
+#define TEST_WEBM_VIDEO_SOURCE_H_
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <new>
+#include <string>
+#include "nestegg/include/nestegg/nestegg.h"
+#include "test/video_source.h"
+
+namespace libvpx_test {
+
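+// stdio-backed I/O adaptors for nestegg. Per the nestegg_io contract, the
+// read callback returns 1 on success, 0 at end-of-stream and -1 on error.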
+static int
+nestegg_read_cb(void *buffer, size_t length, void *userdata) {
+ FILE *f = reinterpret_cast<FILE *>(userdata);
+
+ if (fread(buffer, 1, length, f) < length) {
+ if (ferror(f))
+ return -1;
+ if (feof(f))
+ return 0;
+ }
+ return 1;
+}
+
+
+static int
+nestegg_seek_cb(int64_t offset, int whence, void *userdata) {
+ FILE *f = reinterpret_cast<FILE *>(userdata);
+ switch (whence) {
+ case NESTEGG_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ case NESTEGG_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case NESTEGG_SEEK_END:
+ whence = SEEK_END;
+ break;
+ }
+ return fseek(f, (long)offset, whence) ? -1 : 0;
+}
+
+
+static int64_t
+nestegg_tell_cb(void *userdata) {
+ FILE *f = reinterpret_cast<FILE *>(userdata);
+ return ftell(f);
+}
+
+
+static void
+nestegg_log_cb(nestegg *context, unsigned int severity, char const *format,
+ ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(stderr, format, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+}
+
+// This class extends VideoSource to allow parsing of WebM files,
+// so that we can do actual file decodes.
+class WebMVideoSource : public CompressedVideoSource {
+ public:
+ explicit WebMVideoSource(const std::string &file_name)
+ : file_name_(file_name),
+ input_file_(NULL),
+ nestegg_ctx_(NULL),
+ pkt_(NULL),
+ video_track_(0),
+ chunk_(0),
+ chunks_(0),
+ buf_(NULL),
+ buf_sz_(0),
+ frame_(0),
+ end_of_file_(false) {
+ }
+
+ virtual ~WebMVideoSource() {
+ if (input_file_)
+ fclose(input_file_);
+ if (nestegg_ctx_)
+ nestegg_destroy(nestegg_ctx_);
+ }
+
+ virtual void Init() {
+ }
+
+ virtual void Begin() {
+ input_file_ = OpenTestDataFile(file_name_);
+ ASSERT_TRUE(input_file_ != NULL) << "Input file open failed. Filename: "
+ << file_name_;
+
+ nestegg_io io = {nestegg_read_cb, nestegg_seek_cb, nestegg_tell_cb,
+ input_file_};
+ ASSERT_FALSE(nestegg_init(&nestegg_ctx_, io, NULL))
+ << "nestegg_init failed";
+
+ unsigned int n;
+ ASSERT_FALSE(nestegg_track_count(nestegg_ctx_, &n))
+ << "failed to get track count";
+
+ for (unsigned int i = 0; i < n; i++) {
+ int track_type = nestegg_track_type(nestegg_ctx_, i);
+ ASSERT_GE(track_type, 0) << "failed to get track type";
+
+ if (track_type == NESTEGG_TRACK_VIDEO) {
+ video_track_ = i;
+ break;
+ }
+ }
+
+ FillFrame();
+ }
+
+ virtual void Next() {
+ ++frame_;
+ FillFrame();
+ }
+
+ void FillFrame() {
+ ASSERT_TRUE(input_file_ != NULL);
+ if (chunk_ >= chunks_) {
+ unsigned int track;
+
+ do {
+ /* End of this packet, get another. */
+ if (pkt_)
+ nestegg_free_packet(pkt_);
+
+ int again = nestegg_read_packet(nestegg_ctx_, &pkt_);
+ ASSERT_GE(again, 0) << "nestegg_read_packet failed";
+ if (!again) {
+ end_of_file_ = true;
+ return;
+ }
+
+ ASSERT_FALSE(nestegg_packet_track(pkt_, &track))
+ << "nestegg_packet_track failed";
+ } while (track != video_track_);
+
+ ASSERT_FALSE(nestegg_packet_count(pkt_, &chunks_))
+ << "nestegg_packet_count failed";
+ chunk_ = 0;
+ }
+
+ ASSERT_FALSE(nestegg_packet_data(pkt_, chunk_, &buf_, &buf_sz_))
+ << "nestegg_packet_data failed";
+ chunk_++;
+ }
+
+ virtual const uint8_t *cxdata() const {
+ return end_of_file_ ? NULL : buf_;
+ }
+ virtual unsigned int frame_size() const { return buf_sz_; }
+ virtual unsigned int frame_number() const { return frame_; }
+
+ protected:
+ std::string file_name_;
+ FILE *input_file_;
+ nestegg *nestegg_ctx_;
+ nestegg_packet *pkt_;
+ unsigned int video_track_;
+ unsigned int chunk_;
+ unsigned int chunks_;
+ uint8_t *buf_;
+ size_t buf_sz_;
+ unsigned int frame_;
+ bool end_of_file_;
+};
+
+} // namespace libvpx_test
+
+#endif // TEST_WEBM_VIDEO_SOURCE_H_
diff --git a/libvpx/third_party/libyuv/source/scale.c b/libvpx/third_party/libyuv/source/scale.c
index c142a17..3c30b55 100644
--- a/libvpx/third_party/libyuv/source/scale.c
+++ b/libvpx/third_party/libyuv/source/scale.c
@@ -632,7 +632,7 @@ TALIGN16(const uint16, scaleab2[8]) =
{ 65536 / 3, 65536 / 3, 65536 / 2, 65536 / 3, 65536 / 3, 65536 / 2, 0, 0 };
#endif
-#if defined(_M_IX86) && !defined(YUV_DISABLE_ASM)
+#if defined(_M_IX86) && !defined(YUV_DISABLE_ASM) && defined(_MSC_VER)
#define HAS_SCALEROWDOWN2_SSE2
// Reads 32 pixels, throws half away and writes 16 pixels.
@@ -1370,12 +1370,12 @@ static void ScaleFilterRows_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
mov edx, [esp + 8 + 12] // src_stride
mov ecx, [esp + 8 + 16] // dst_width
mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
+ shr eax, 1
cmp eax, 0
je xloop1
- cmp eax, 128
+ cmp eax, 64
je xloop2
- shr eax, 1
mov ah,al
neg al
add al, 128
@@ -2132,11 +2132,11 @@ void ScaleFilterRows_SSSE3(uint8* dst_ptr,
"mov 0x14(%esp),%edx \n"
"mov 0x18(%esp),%ecx \n"
"mov 0x1c(%esp),%eax \n"
+ "shr %eax \n"
"cmp $0x0,%eax \n"
"je 2f \n"
- "cmp $0x80,%eax \n"
+ "cmp $0x40,%eax \n"
"je 3f \n"
- "shr %eax \n"
"mov %al,%ah \n"
"neg %al \n"
"add $0x80,%al \n"
@@ -2662,6 +2662,7 @@ static void ScaleFilterRows_SSE2(uint8* dst_ptr,
static void ScaleFilterRows_SSSE3(uint8* dst_ptr,
const uint8* src_ptr, int src_stride,
int dst_width, int source_y_fraction) {
+ source_y_fraction >>= 1;
if (source_y_fraction == 0) {
asm volatile (
"1:"
@@ -2680,7 +2681,7 @@ static void ScaleFilterRows_SSSE3(uint8* dst_ptr,
: "memory", "cc", "rax"
);
return;
- } else if (source_y_fraction == 128) {
+ } else if (source_y_fraction == 64) {
asm volatile (
"1:"
"movdqa (%1),%%xmm0 \n"
@@ -2703,7 +2704,6 @@ static void ScaleFilterRows_SSSE3(uint8* dst_ptr,
} else {
asm volatile (
"mov %3,%%eax \n"
- "shr %%eax \n"
"mov %%al,%%ah \n"
"neg %%al \n"
"add $0x80,%%al \n"
diff --git a/libvpx/third_party/x86inc/LICENSE b/libvpx/third_party/x86inc/LICENSE
new file mode 100644
index 0000000..7d07645
--- /dev/null
+++ b/libvpx/third_party/x86inc/LICENSE
@@ -0,0 +1,18 @@
+Copyright (C) 2005-2012 x264 project
+
+Authors: Loren Merritt <lorenm@u.washington.edu>
+ Anton Mitrofanov <BugMaster@narod.ru>
+ Jason Garrett-Glaser <darkshikari@gmail.com>
+ Henrik Gramner <hengar-6@student.ltu.se>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/libvpx/third_party/x86inc/README.webm b/libvpx/third_party/x86inc/README.webm
new file mode 100644
index 0000000..02cd9ab
--- /dev/null
+++ b/libvpx/third_party/x86inc/README.webm
@@ -0,0 +1,11 @@
+URL: http://git.videolan.org/?p=x264.git
+Version: 999b753ff0f4dc872077f4fa90d465e948cbe656
+License: ISC
+License File: LICENSE
+
+Description:
+x264/libav's framework for x86 assembly. Contains a variety of macros and
+defines that make it easy to write assembly that works across platforms.
+
+Local Modifications:
+Some modifications to allow PIC to work with x86inc.
diff --git a/libvpx/third_party/x86inc/x86inc.asm b/libvpx/third_party/x86inc/x86inc.asm
new file mode 100644
index 0000000..a66a96b
--- /dev/null
+++ b/libvpx/third_party/x86inc/x86inc.asm
@@ -0,0 +1,1125 @@
+;*****************************************************************************
+;* x86inc.asm: x264asm abstraction layer
+;*****************************************************************************
+;* Copyright (C) 2005-2012 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;* Anton Mitrofanov <BugMaster@narod.ru>
+;* Jason Garrett-Glaser <darkshikari@gmail.com>
+;* Henrik Gramner <hengar-6@student.ltu.se>
+;*
+;* Permission to use, copy, modify, and/or distribute this software for any
+;* purpose with or without fee is hereby granted, provided that the above
+;* copyright notice and this permission notice appear in all copies.
+;*
+;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+;*****************************************************************************
+
+; This is a header file for the x264ASM assembly language, which uses
+; NASM/YASM syntax combined with a large number of macros to provide easy
+; abstraction between different calling conventions (x86_32, win64, linux64).
+; It also has various other useful features to simplify writing the kind of
+; DSP functions that are most often used in x264.
+
+; Unlike the rest of x264, this file is available under an ISC license, as it
+; has significant usefulness outside of x264 and we want it to be available
+; to the largest audience possible. Of course, if you modify it for your own
+; purposes to add a new feature, we strongly encourage contributing a patch
+; as this feature might be useful for others as well. Send patches or ideas
+; to x264-devel@videolan.org .
+
+%include "vpx_config.asm"
+
+%define program_name vp9
+
+
+%define UNIX64 0
+%define WIN64 0
+%if ARCH_X86_64
+ %ifidn __OUTPUT_FORMAT__,win32
+ %define WIN64 1
+ %elifidn __OUTPUT_FORMAT__,win64
+ %define WIN64 1
+ %elifidn __OUTPUT_FORMAT__,x64
+ %define WIN64 1
+ %else
+ %define UNIX64 1
+ %endif
+%endif
+
+%ifidn __OUTPUT_FORMAT__,elf32
+ %define mangle(x) x
+%elifidn __OUTPUT_FORMAT__,elf64
+ %define mangle(x) x
+%elifidn __OUTPUT_FORMAT__,elf
+ %define mangle(x) x
+%elifidn __OUTPUT_FORMAT__,x64
+ %define mangle(x) x
+%elifidn __OUTPUT_FORMAT__,win64
+ %define mangle(x) x
+%else
+ %define mangle(x) _ %+ x
+%endif
+
+; FIXME: All of the 64-bit asm functions that take a stride as an argument
+; via register assume that the high dword of that register is filled with 0.
+; This is true in practice (since we never do any 64-bit arithmetic on strides,
+; and x264's strides are all positive), but is not guaranteed by the ABI.
+
+; Name of the .rodata section.
+; Kludge: Something on OS X fails to align .rodata even given an align attribute,
+; so use a different read-only section.
+%macro SECTION_RODATA 0-1 16
+ %ifidn __OUTPUT_FORMAT__,macho64
+ SECTION .text align=%1
+ %elifidn __OUTPUT_FORMAT__,macho
+ SECTION .text align=%1
+ fakegot:
+ %elifidn __OUTPUT_FORMAT__,aout
+ section .text
+ %else
+ SECTION .rodata align=%1
+ %endif
+%endmacro
+
+; aout does not support align=
+%macro SECTION_TEXT 0-1 16
+ %ifidn __OUTPUT_FORMAT__,aout
+ SECTION .text
+ %else
+ SECTION .text align=%1
+ %endif
+%endmacro
+
+%if WIN64
+ %define PIC
+%elifidn __OUTPUT_FORMAT__,macho64
+ %define PIC
+%elif ARCH_X86_64 == 0
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+ %undef PIC
+%elif CONFIG_PIC
+ %define PIC
+%endif
+%ifdef PIC
+ default rel
+%endif
+
+; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
+%ifndef __NASM_VER__
+CPU amdnop
+%else
+%use smartalign
+ALIGNMODE k7
+%endif
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,0, dst, src, tmp
+; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need a more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE, and returns.
+
+; REP_RET:
+; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
+; which are slow when a normal ret follows a branch.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
+
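+; Illustrative only (not from the original x264 header): a minimal leaf
+; function written with these macros might look like
+;   INIT_XMM sse2
+;   cglobal copy16, 2,2,1, dst, src
+;       mova  m0, [srcq]
+;       mova  [dstq], m0
+;       RET
+; where dstq/srcq are the q-suffixed names DEFINE_ARGS creates for args 0 and 1.
+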
+%macro DECLARE_REG 5-6
+ %define r%1q %2
+ %define r%1d %3
+ %define r%1w %4
+ %define r%1b %5
+ %if %0 == 5
+ %define r%1m %3
+ %define r%1mp %2
+ %elif ARCH_X86_64 ; memory
+ %define r%1m [rsp + stack_offset + %6]
+ %define r%1mp qword r %+ %1m
+ %else
+ %define r%1m [esp + stack_offset + %6]
+ %define r%1mp dword r %+ %1m
+ %endif
+ %define r%1 %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 2
+ %define r%1q r%1
+ %define e%1q r%1
+ %define r%1d e%1
+ %define e%1d e%1
+ %define r%1w %1
+ %define e%1w %1
+ %define r%1b %2
+ %define e%1b %2
+%if ARCH_X86_64 == 0
+ %define r%1 e%1
+%endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al
+DECLARE_REG_SIZE bx, bl
+DECLARE_REG_SIZE cx, cl
+DECLARE_REG_SIZE dx, dl
+DECLARE_REG_SIZE si, sil
+DECLARE_REG_SIZE di, dil
+DECLARE_REG_SIZE bp, bpl
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+ %assign %%i 0
+ %rep %0
+ CAT_XDEFINE t, %%i, r%1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+ %rep %0
+ %define t%1q t%1 %+ q
+ %define t%1d t%1 %+ d
+ %define t%1w t%1 %+ w
+ %define t%1b t%1 %+ b
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
+
+%if ARCH_X86_64
+ %define gprsize 8
+%else
+ %define gprsize 4
+%endif
+
+%macro PUSH 1
+ push %1
+ %assign stack_offset stack_offset+gprsize
+%endmacro
+
+%macro POP 1
+ pop %1
+ %assign stack_offset stack_offset-gprsize
+%endmacro
+
+%macro PUSH_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ PUSH r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro POP_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ pop r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro LOAD_IF_USED 1-*
+ %rep %0
+ %if %1 < num_args
+ mov r%1, r %+ %1 %+ mp
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro SUB 2
+ sub %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset+(%2)
+ %endif
+%endmacro
+
+%macro ADD 2
+ add %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset-(%2)
+ %endif
+%endmacro
+
+%macro movifnidn 2
+ %ifnidn %1, %2
+ mov %1, %2
+ %endif
+%endmacro
+
+%macro movsxdifnidn 2
+ %ifnidn %1, %2
+ movsxd %1, %2
+ %endif
+%endmacro
+
+%macro ASSERT 1
+ %if (%1) == 0
+ %error assert failed
+ %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+ %ifdef n_arg_names
+ %assign %%i 0
+ %rep n_arg_names
+ CAT_UNDEF arg_name %+ %%i, q
+ CAT_UNDEF arg_name %+ %%i, d
+ CAT_UNDEF arg_name %+ %%i, w
+ CAT_UNDEF arg_name %+ %%i, b
+ CAT_UNDEF arg_name %+ %%i, m
+ CAT_UNDEF arg_name %+ %%i, mp
+ CAT_UNDEF arg_name, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+
+ %xdefine %%stack_offset stack_offset
+ %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
+ %assign %%i 0
+ %rep %0
+ %xdefine %1q r %+ %%i %+ q
+ %xdefine %1d r %+ %%i %+ d
+ %xdefine %1w r %+ %%i %+ w
+ %xdefine %1b r %+ %%i %+ b
+ %xdefine %1m r %+ %%i %+ m
+ %xdefine %1mp r %+ %%i %+ mp
+ CAT_XDEFINE arg_name, %%i, %1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+ %xdefine stack_offset %%stack_offset
+ %assign n_arg_names %0
+%endmacro
+
+%if WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0, rcx, ecx, cx, cl
+DECLARE_REG 1, rdx, edx, dx, dl
+DECLARE_REG 2, R8, R8D, R8W, R8B
+DECLARE_REG 3, R9, R9D, R9W, R9B
+DECLARE_REG 4, R10, R10D, R10W, R10B, 40
+DECLARE_REG 5, R11, R11D, R11W, R11B, 48
+DECLARE_REG 6, rax, eax, ax, al, 56
+DECLARE_REG 7, rdi, edi, di, dil, 64
+DECLARE_REG 8, rsi, esi, si, sil, 72
+DECLARE_REG 9, rbx, ebx, bx, bl, 80
+DECLARE_REG 10, rbp, ebp, bp, bpl, 88
+DECLARE_REG 11, R12, R12D, R12W, R12B, 96
+DECLARE_REG 12, R13, R13D, R13W, R13B, 104
+DECLARE_REG 13, R14, R14D, R14W, R14B, 112
+DECLARE_REG 14, R15, R15D, R15W, R15B, 120
+
+%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
+ %if mmsize == 8
+ %assign xmm_regs_used 0
+ %else
+ WIN64_SPILL_XMM %3
+ %endif
+ LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ DEFINE_ARGS %4
+%endmacro
+
+%macro WIN64_SPILL_XMM 1
+ %assign xmm_regs_used %1
+ ASSERT xmm_regs_used <= 16
+ %if xmm_regs_used > 6
+ SUB rsp, (xmm_regs_used-6)*16+16
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
+ %endrep
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM_INTERNAL 1
+ %if xmm_regs_used > 6
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
+ %endrep
+ add %1, (xmm_regs_used-6)*16+16
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM 1
+ WIN64_RESTORE_XMM_INTERNAL %1
+ %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16
+ %assign xmm_regs_used 0
+%endmacro
+
+%macro RET 0
+ WIN64_RESTORE_XMM_INTERNAL rsp
+ POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 7 || xmm_regs_used > 6
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%elif ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0, rdi, edi, di, dil
+DECLARE_REG 1, rsi, esi, si, sil
+DECLARE_REG 2, rdx, edx, dx, dl
+DECLARE_REG 3, rcx, ecx, cx, cl
+DECLARE_REG 4, R8, R8D, R8W, R8B
+DECLARE_REG 5, R9, R9D, R9W, R9B
+DECLARE_REG 6, rax, eax, ax, al, 8
+DECLARE_REG 7, R10, R10D, R10W, R10B, 16
+DECLARE_REG 8, R11, R11D, R11W, R11B, 24
+DECLARE_REG 9, rbx, ebx, bx, bl, 32
+DECLARE_REG 10, rbp, ebp, bp, bpl, 40
+DECLARE_REG 11, R12, R12D, R12W, R12B, 48
+DECLARE_REG 12, R13, R13D, R13W, R13B, 56
+DECLARE_REG 13, R14, R14D, R14W, R14B, 64
+DECLARE_REG 14, R15, R15D, R15W, R15B, 72
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 9, 10, 11, 12, 13, 14
+ LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ POP_IF_USED 14, 13, 12, 11, 10, 9
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 9
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, eax, ax, al, 4
+DECLARE_REG 1, ecx, ecx, cx, cl, 8
+DECLARE_REG 2, edx, edx, dx, dl, 12
+DECLARE_REG 3, ebx, ebx, bx, bl, 16
+DECLARE_REG 4, esi, esi, si, null, 20
+DECLARE_REG 5, edi, edi, di, null, 24
+DECLARE_REG 6, ebp, ebp, bp, null, 28
+%define rsp esp
+
+%macro DECLARE_ARG 1-*
+ %rep %0
+ %define r%1m [esp + stack_offset + 4*%1 + 4]
+ %define r%1mp dword r%1m
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ %if regs_used > 7
+ %assign regs_used 7
+ %endif
+ ASSERT regs_used >= num_args
+ PUSH_IF_USED 3, 4, 5, 6
+ LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ POP_IF_USED 6, 5, 4, 3
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 3
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%endif ;======================================================================
+
+%if WIN64 == 0
+%macro WIN64_SPILL_XMM 1
+%endmacro
+%macro WIN64_RESTORE_XMM 1
+%endmacro
+%endif
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Begin a function.
+; Applies any symbol mangling needed for C linkage, and sets up a define such that
+; subsequent uses of the function name automatically refer to the mangled version.
+; Appends cpuflags to the function name if cpuflags has been specified.
+%macro cglobal 1-2+ ; name, [PROLOGUE args]
+%if %0 == 1
+ cglobal_internal %1 %+ SUFFIX
+%else
+ cglobal_internal %1 %+ SUFFIX, %2
+%endif
+%endmacro
+%macro cglobal_internal 1-2+
+ %ifndef cglobaled_%1
+ %xdefine %1 mangle(program_name %+ _ %+ %1)
+ %xdefine %1.skip_prologue %1 %+ .skip_prologue
+ CAT_XDEFINE cglobaled_, %1, 1
+ %endif
+ %xdefine current_function %1
+ %ifidn __OUTPUT_FORMAT__,elf
+ global %1:function hidden
+ %elifidn __OUTPUT_FORMAT__,elf32
+ global %1:function hidden
+ %elifidn __OUTPUT_FORMAT__,elf64
+ global %1:function hidden
+ %else
+ global %1
+ %endif
+ align function_align
+ %1:
+ RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
+ %assign stack_offset 0
+ %if %0 > 1
+ PROLOGUE %2
+ %endif
+%endmacro
+
+%macro cextern 1
+ %xdefine %1 mangle(program_name %+ _ %+ %1)
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+; like cextern, but without the prefix
+%macro cextern_naked 1
+ %xdefine %1 mangle(%1)
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+%macro const 2+
+ %xdefine %1 mangle(program_name %+ _ %+ %1)
+ global %1
+ %1: %2
+%endmacro
+
+; This is needed for ELF; otherwise the GNU linker assumes the stack is
+; executable by default.
+%ifidn __OUTPUT_FORMAT__,elf
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%elifidn __OUTPUT_FORMAT__,elf32
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%elifidn __OUTPUT_FORMAT__,elf64
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
+; cpuflags
+
+%assign cpuflags_mmx (1<<0)
+%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
+%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
+%assign cpuflags_3dnow2 (1<<3) | cpuflags_3dnow
+%assign cpuflags_sse (1<<4) | cpuflags_mmx2
+%assign cpuflags_sse2 (1<<5) | cpuflags_sse
+%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
+%assign cpuflags_sse3 (1<<7) | cpuflags_sse2
+%assign cpuflags_ssse3 (1<<8) | cpuflags_sse3
+%assign cpuflags_sse4 (1<<9) | cpuflags_ssse3
+%assign cpuflags_sse42 (1<<10)| cpuflags_sse4
+%assign cpuflags_avx (1<<11)| cpuflags_sse42
+%assign cpuflags_xop (1<<12)| cpuflags_avx
+%assign cpuflags_fma4 (1<<13)| cpuflags_avx
+
+%assign cpuflags_cache32 (1<<16)
+%assign cpuflags_cache64 (1<<17)
+%assign cpuflags_slowctz (1<<18)
+%assign cpuflags_lzcnt (1<<19)
+%assign cpuflags_misalign (1<<20)
+%assign cpuflags_aligned (1<<21) ; not a cpu feature, but a function variant
+%assign cpuflags_atom (1<<22)
+
+%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
+%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
+
+; Takes up to 2 cpuflags from the above list.
+; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
+; You shouldn't need to invoke this macro directly; it's a subroutine for INIT_MMX &co.
+%macro INIT_CPUFLAGS 0-2
+ %if %0 >= 1
+ %xdefine cpuname %1
+ %assign cpuflags cpuflags_%1
+ %if %0 >= 2
+ %xdefine cpuname %1_%2
+ %assign cpuflags cpuflags | cpuflags_%2
+ %endif
+ %xdefine SUFFIX _ %+ cpuname
+ %if cpuflag(avx)
+ %assign avx_enabled 1
+ %endif
+ %if mmsize == 16 && notcpuflag(sse2)
+ %define mova movaps
+ %define movu movups
+ %define movnta movntps
+ %endif
+ %if cpuflag(aligned)
+ %define movu mova
+ %elifidn %1, sse3
+ %define movu lddqu
+ %endif
+ %else
+ %xdefine SUFFIX
+ %undef cpuname
+ %undef cpuflags
+ %endif
+%endmacro
+
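+; For example, "INIT_XMM ssse3" sets mmsize to 16, makes cpuflag(ssse3) true
+; and defines SUFFIX as _ssse3, so a following "cglobal foo" becomes
+; vp9_foo_ssse3 after program_name mangling.
+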
+; merge mmx and sse*
+
+%macro CAT_XDEFINE 3
+ %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+ %undef %1%2
+%endmacro
+
+%macro INIT_MMX 0-1+
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_MMX %1
+ %define mmsize 8
+ %define num_mmregs 8
+ %define mova movq
+ %define movu movq
+ %define movh movd
+ %define movnta movntq
+ %assign %%i 0
+ %rep 8
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %rep 8
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nmm, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+%macro INIT_XMM 0-1+
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_XMM %1
+ %define mmsize 16
+ %define num_mmregs 8
+ %if ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova movdqa
+ %define movu movdqu
+ %define movh movq
+ %define movnta movntdq
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, xmm %+ %%i
+ CAT_XDEFINE nxmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+; FIXME: INIT_AVX can be replaced by INIT_XMM avx
+%macro INIT_AVX 0
+ INIT_XMM
+ %assign avx_enabled 1
+ %define PALIGNR PALIGNR_SSSE3
+ %define RESET_MM_PERMUTATION INIT_AVX
+%endmacro
+
+%macro INIT_YMM 0-1+
+ %assign avx_enabled 1
+ %define RESET_MM_PERMUTATION INIT_YMM %1
+ %define mmsize 32
+ %define num_mmregs 8
+ %if ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova vmovaps
+ %define movu vmovups
+ %undef movh
+ %define movnta vmovntps
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, ymm %+ %%i
+ CAT_XDEFINE nymm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+INIT_XMM
+
+; I often want to use macros that permute their arguments. e.g. there's no
+; efficient way to implement butterfly or transpose or dct without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
+
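+; A short sketch: after "SWAP 0, 1", every later reference to m0 names the
+; register that was m1 (and vice versa), so
+;   mova  m0, [r0]
+;   SWAP  0, 1
+;   mova  [r1], m1  ; stores the value loaded above, at zero runtime cost
+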
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+%rep %0/2
+ %xdefine tmp%2 m%2
+ %xdefine ntmp%2 nm%2
+ %rotate 2
+%endrep
+%rep %0/2
+ %xdefine m%1 tmp%2
+ %xdefine nm%1 ntmp%2
+ %undef tmp%2
+ %undef ntmp%2
+ %rotate 2
+%endrep
+%endmacro
+
+%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
+%rep %0-1
+%ifdef m%1
+ %xdefine tmp m%1
+ %xdefine m%1 m%2
+ %xdefine m%2 tmp
+ CAT_XDEFINE n, m%1, %1
+ CAT_XDEFINE n, m%2, %2
+%else
+ ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
+ ; Be careful using this mode in nested macros though, as in some cases there may be
+ ; other copies of m# that have already been dereferenced and don't get updated correctly.
+ %xdefine %%n1 n %+ %1
+ %xdefine %%n2 n %+ %2
+ %xdefine tmp m %+ %%n1
+ CAT_XDEFINE m, %%n1, m %+ %%n2
+ CAT_XDEFINE m, %%n2, tmp
+ CAT_XDEFINE n, m %+ %%n1, %%n1
+ CAT_XDEFINE n, m %+ %%n2, %%n2
+%endif
+ %undef tmp
+ %rotate 1
+%endrep
+%endmacro
+
+; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
+; calls to that function will automatically load the permutation, so values can
+; be returned in mmregs.
+%macro SAVE_MM_PERMUTATION 0-1
+ %if %0
+ %xdefine %%f %1_m
+ %else
+ %xdefine %%f current_function %+ _m
+ %endif
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE %%f, %%i, m %+ %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1 ; name to load from
+ %ifdef %1_m0
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, %1_m %+ %%i
+ CAT_XDEFINE n, m %+ %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+%endmacro
+
+; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
+%macro call 1
+ call_internal %1, %1 %+ SUFFIX
+%endmacro
+%macro call_internal 2
+ %xdefine %%i %1
+ %ifndef cglobaled_%1
+ %ifdef cglobaled_%2
+ %xdefine %%i %2
+ %endif
+ %endif
+ call %%i
+ LOAD_MM_PERMUTATION %%i
+%endmacro
+
+; Substitutions that reduce instruction size but are functionally equivalent
+%macro add 2
+ %ifnum %2
+ %if %2==128
+ sub %1, -128
+ %else
+ add %1, %2
+ %endif
+ %else
+ add %1, %2
+ %endif
+%endmacro
+
+%macro sub 2
+ %ifnum %2
+ %if %2==128
+ add %1, -128
+ %else
+ sub %1, %2
+ %endif
+ %else
+ sub %1, %2
+ %endif
+%endmacro
+
+;=============================================================================
+; AVX abstraction layer
+;=============================================================================
+
+%assign i 0
+%rep 16
+ %if i < 8
+ CAT_XDEFINE sizeofmm, i, 8
+ %endif
+ CAT_XDEFINE sizeofxmm, i, 16
+ CAT_XDEFINE sizeofymm, i, 32
+%assign i i+1
+%endrep
+%undef i
+
+;%1 == instruction
+;%2 == 1 if float, 0 if int
+;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
+;%4 == number of operands given
+;%5+: operands
+%macro RUN_AVX_INSTR 6-7+
+ %ifid %5
+ %define %%size sizeof%5
+ %else
+ %define %%size mmsize
+ %endif
+ %if %%size==32
+ %if %0 >= 7
+ v%1 %5, %6, %7
+ %else
+ v%1 %5, %6
+ %endif
+ %else
+ %if %%size==8
+ %define %%regmov movq
+ %elif %2
+ %define %%regmov movaps
+ %else
+ %define %%regmov movdqa
+ %endif
+
+ %if %4>=3+%3
+ %ifnidn %5, %6
+ %if avx_enabled && sizeof%5==16
+ v%1 %5, %6, %7
+ %else
+ %%regmov %5, %6
+ %1 %5, %7
+ %endif
+ %else
+ %1 %5, %7
+ %endif
+ %elif %3
+ %1 %5, %6, %7
+ %else
+ %1 %5, %6
+ %endif
+ %endif
+%endmacro
+
+; 3arg AVX ops with a memory arg can only have it in src2,
+; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
+; So, if the op is symmetric and the wrong one is memory, swap them.
+%macro RUN_AVX_INSTR1 8
+ %assign %%swap 0
+ %if avx_enabled
+ %ifnid %6
+ %assign %%swap 1
+ %endif
+ %elifnidn %5, %6
+ %ifnid %7
+ %assign %%swap 1
+ %endif
+ %endif
+ %if %%swap && %3 == 0 && %8 == 1
+ RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
+ %else
+ RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
+ %endif
+%endmacro
+
+;%1 == instruction
+;%2 == 1 if float, 0 if int
+;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 3-operand (xmm, xmm, xmm)
+;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
+%macro AVX_INSTR 4
+ %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
+ %ifidn %3, fnord
+ RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
+ %elifidn %4, fnord
+ RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
+ %elifidn %5, fnord
+ RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
+ %else
+ RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
+ %endif
+ %endmacro
+%endmacro
+
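+; For instance, given "AVX_INSTR addps, 1, 0, 1" below, writing
+; "addps m0, m1, m2" emits vaddps on AVX targets and falls back to
+; "movaps m0, m1" plus "addps m0, m2" elsewhere.
+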
+AVX_INSTR addpd, 1, 0, 1
+AVX_INSTR addps, 1, 0, 1
+AVX_INSTR addsd, 1, 0, 1
+AVX_INSTR addss, 1, 0, 1
+AVX_INSTR addsubpd, 1, 0, 0
+AVX_INSTR addsubps, 1, 0, 0
+AVX_INSTR andpd, 1, 0, 1
+AVX_INSTR andps, 1, 0, 1
+AVX_INSTR andnpd, 1, 0, 0
+AVX_INSTR andnps, 1, 0, 0
+AVX_INSTR blendpd, 1, 0, 0
+AVX_INSTR blendps, 1, 0, 0
+AVX_INSTR blendvpd, 1, 0, 0
+AVX_INSTR blendvps, 1, 0, 0
+AVX_INSTR cmppd, 1, 0, 0
+AVX_INSTR cmpps, 1, 0, 0
+AVX_INSTR cmpsd, 1, 0, 0
+AVX_INSTR cmpss, 1, 0, 0
+AVX_INSTR cvtdq2ps, 1, 0, 0
+AVX_INSTR cvtps2dq, 1, 0, 0
+AVX_INSTR divpd, 1, 0, 0
+AVX_INSTR divps, 1, 0, 0
+AVX_INSTR divsd, 1, 0, 0
+AVX_INSTR divss, 1, 0, 0
+AVX_INSTR dppd, 1, 1, 0
+AVX_INSTR dpps, 1, 1, 0
+AVX_INSTR haddpd, 1, 0, 0
+AVX_INSTR haddps, 1, 0, 0
+AVX_INSTR hsubpd, 1, 0, 0
+AVX_INSTR hsubps, 1, 0, 0
+AVX_INSTR maxpd, 1, 0, 1
+AVX_INSTR maxps, 1, 0, 1
+AVX_INSTR maxsd, 1, 0, 1
+AVX_INSTR maxss, 1, 0, 1
+AVX_INSTR minpd, 1, 0, 1
+AVX_INSTR minps, 1, 0, 1
+AVX_INSTR minsd, 1, 0, 1
+AVX_INSTR minss, 1, 0, 1
+AVX_INSTR movhlps, 1, 0, 0
+AVX_INSTR movlhps, 1, 0, 0
+AVX_INSTR movsd, 1, 0, 0
+AVX_INSTR movss, 1, 0, 0
+AVX_INSTR mpsadbw, 0, 1, 0
+AVX_INSTR mulpd, 1, 0, 1
+AVX_INSTR mulps, 1, 0, 1
+AVX_INSTR mulsd, 1, 0, 1
+AVX_INSTR mulss, 1, 0, 1
+AVX_INSTR orpd, 1, 0, 1
+AVX_INSTR orps, 1, 0, 1
+AVX_INSTR packsswb, 0, 0, 0
+AVX_INSTR packssdw, 0, 0, 0
+AVX_INSTR packuswb, 0, 0, 0
+AVX_INSTR packusdw, 0, 0, 0
+AVX_INSTR paddb, 0, 0, 1
+AVX_INSTR paddw, 0, 0, 1
+AVX_INSTR paddd, 0, 0, 1
+AVX_INSTR paddq, 0, 0, 1
+AVX_INSTR paddsb, 0, 0, 1
+AVX_INSTR paddsw, 0, 0, 1
+AVX_INSTR paddusb, 0, 0, 1
+AVX_INSTR paddusw, 0, 0, 1
+AVX_INSTR palignr, 0, 1, 0
+AVX_INSTR pand, 0, 0, 1
+AVX_INSTR pandn, 0, 0, 0
+AVX_INSTR pavgb, 0, 0, 1
+AVX_INSTR pavgw, 0, 0, 1
+AVX_INSTR pblendvb, 0, 0, 0
+AVX_INSTR pblendw, 0, 1, 0
+AVX_INSTR pcmpestri, 0, 0, 0
+AVX_INSTR pcmpestrm, 0, 0, 0
+AVX_INSTR pcmpistri, 0, 0, 0
+AVX_INSTR pcmpistrm, 0, 0, 0
+AVX_INSTR pcmpeqb, 0, 0, 1
+AVX_INSTR pcmpeqw, 0, 0, 1
+AVX_INSTR pcmpeqd, 0, 0, 1
+AVX_INSTR pcmpeqq, 0, 0, 1
+AVX_INSTR pcmpgtb, 0, 0, 0
+AVX_INSTR pcmpgtw, 0, 0, 0
+AVX_INSTR pcmpgtd, 0, 0, 0
+AVX_INSTR pcmpgtq, 0, 0, 0
+AVX_INSTR phaddw, 0, 0, 0
+AVX_INSTR phaddd, 0, 0, 0
+AVX_INSTR phaddsw, 0, 0, 0
+AVX_INSTR phsubw, 0, 0, 0
+AVX_INSTR phsubd, 0, 0, 0
+AVX_INSTR phsubsw, 0, 0, 0
+AVX_INSTR pmaddwd, 0, 0, 1
+AVX_INSTR pmaddubsw, 0, 0, 0
+AVX_INSTR pmaxsb, 0, 0, 1
+AVX_INSTR pmaxsw, 0, 0, 1
+AVX_INSTR pmaxsd, 0, 0, 1
+AVX_INSTR pmaxub, 0, 0, 1
+AVX_INSTR pmaxuw, 0, 0, 1
+AVX_INSTR pmaxud, 0, 0, 1
+AVX_INSTR pminsb, 0, 0, 1
+AVX_INSTR pminsw, 0, 0, 1
+AVX_INSTR pminsd, 0, 0, 1
+AVX_INSTR pminub, 0, 0, 1
+AVX_INSTR pminuw, 0, 0, 1
+AVX_INSTR pminud, 0, 0, 1
+AVX_INSTR pmulhuw, 0, 0, 1
+AVX_INSTR pmulhrsw, 0, 0, 1
+AVX_INSTR pmulhw, 0, 0, 1
+AVX_INSTR pmullw, 0, 0, 1
+AVX_INSTR pmulld, 0, 0, 1
+AVX_INSTR pmuludq, 0, 0, 1
+AVX_INSTR pmuldq, 0, 0, 1
+AVX_INSTR por, 0, 0, 1
+AVX_INSTR psadbw, 0, 0, 1
+AVX_INSTR pshufb, 0, 0, 0
+AVX_INSTR psignb, 0, 0, 0
+AVX_INSTR psignw, 0, 0, 0
+AVX_INSTR psignd, 0, 0, 0
+AVX_INSTR psllw, 0, 0, 0
+AVX_INSTR pslld, 0, 0, 0
+AVX_INSTR psllq, 0, 0, 0
+AVX_INSTR pslldq, 0, 0, 0
+AVX_INSTR psraw, 0, 0, 0
+AVX_INSTR psrad, 0, 0, 0
+AVX_INSTR psrlw, 0, 0, 0
+AVX_INSTR psrld, 0, 0, 0
+AVX_INSTR psrlq, 0, 0, 0
+AVX_INSTR psrldq, 0, 0, 0
+AVX_INSTR psubb, 0, 0, 0
+AVX_INSTR psubw, 0, 0, 0
+AVX_INSTR psubd, 0, 0, 0
+AVX_INSTR psubq, 0, 0, 0
+AVX_INSTR psubsb, 0, 0, 0
+AVX_INSTR psubsw, 0, 0, 0
+AVX_INSTR psubusb, 0, 0, 0
+AVX_INSTR psubusw, 0, 0, 0
+AVX_INSTR punpckhbw, 0, 0, 0
+AVX_INSTR punpckhwd, 0, 0, 0
+AVX_INSTR punpckhdq, 0, 0, 0
+AVX_INSTR punpckhqdq, 0, 0, 0
+AVX_INSTR punpcklbw, 0, 0, 0
+AVX_INSTR punpcklwd, 0, 0, 0
+AVX_INSTR punpckldq, 0, 0, 0
+AVX_INSTR punpcklqdq, 0, 0, 0
+AVX_INSTR pxor, 0, 0, 1
+AVX_INSTR shufps, 1, 1, 0
+AVX_INSTR subpd, 1, 0, 0
+AVX_INSTR subps, 1, 0, 0
+AVX_INSTR subsd, 1, 0, 0
+AVX_INSTR subss, 1, 0, 0
+AVX_INSTR unpckhpd, 1, 0, 0
+AVX_INSTR unpckhps, 1, 0, 0
+AVX_INSTR unpcklpd, 1, 0, 0
+AVX_INSTR unpcklps, 1, 0, 0
+AVX_INSTR xorpd, 1, 0, 1
+AVX_INSTR xorps, 1, 0, 1
+
+; 3DNow instructions, for sharing code between AVX, SSE and 3DN
+AVX_INSTR pfadd, 1, 0, 1
+AVX_INSTR pfsub, 1, 0, 0
+AVX_INSTR pfmul, 1, 0, 1
+
+; base-4 constants for shuffles
+%assign i 0
+%rep 256
+ %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
+ %if j < 10
+ CAT_XDEFINE q000, j, i
+ %elif j < 100
+ CAT_XDEFINE q00, j, i
+ %elif j < 1000
+ CAT_XDEFINE q0, j, i
+ %else
+ CAT_XDEFINE q, j, i
+ %endif
+%assign i i+1
+%endrep
+%undef i
+%undef j
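+; e.g. q0123 expands to 0x1b, so "pshufd m0, m0, q0123" reverses the four
+; dwords, while q3210 (0xe4) leaves them in place.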
+
+%macro FMA_INSTR 3
+ %macro %1 4-7 %1, %2, %3
+ %if cpuflag(xop)
+ v%5 %1, %2, %3, %4
+ %else
+ %6 %1, %2, %3
+ %7 %1, %4
+ %endif
+ %endmacro
+%endmacro
+
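+; For example, "pmacsdd m0, m1, m2, m3" computes m0 = m1 * m2 + m3: a single
+; vpmacsdd on XOP-capable CPUs, or a pmulld/paddd pair elsewhere.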
+FMA_INSTR pmacsdd, pmulld, paddd
+FMA_INSTR pmacsww, pmullw, paddw
+FMA_INSTR pmadcswd, pmaddwd, paddd
diff --git a/libvpx/tools/all_builds.py b/libvpx/tools/all_builds.py
new file mode 100755
index 0000000..d1f0c80
--- /dev/null
+++ b/libvpx/tools/all_builds.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+
+import getopt
+import subprocess
+import sys
+
+LONG_OPTIONS = ["shard=", "shards="]
+BASE_COMMAND = "./configure --enable-internal-stats --enable-experimental"
+
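+# Example invocation (illustrative; flags after "--" are passed to configure):
+#   ./tools/all_builds.py --shard=0 --shards=2 -- --disable-vp8
+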
+def RunCommand(command):
+ run = subprocess.Popen(command, shell=True)
+ output = run.communicate()
+ if run.returncode:
+ print "Non-zero return code: " + str(run.returncode) + " => exiting!"
+ sys.exit(1)
+
+def list_of_experiments():
+ experiments = []
+ configure_file = open("configure")
+ list_start = False
+ for line in configure_file.read().split("\n"):
+ if line == 'EXPERIMENT_LIST="':
+ list_start = True
+ elif line == '"':
+ list_start = False
+ elif list_start:
+ currently_broken = ["csm"]
+ experiment = line[4:]
+ if experiment not in currently_broken:
+ experiments.append(experiment)
+ return experiments
+
+def main(argv):
+ # Parse arguments
+ options = {"--shard": 0, "--shards": 1}
+ if "--" in argv:
+ opt_end_index = argv.index("--")
+ else:
+ opt_end_index = len(argv)
+ try:
+ o, _ = getopt.getopt(argv[1:opt_end_index], None, LONG_OPTIONS)
+ except getopt.GetoptError, err:
+ print str(err)
+ print "Usage: %s [--shard=<n> --shards=<n>] -- [configure flag ...]"%argv[0]
+ sys.exit(2)
+
+ options.update(o)
+ extra_args = argv[opt_end_index + 1:]
+
+ # Shard experiment list
+ shard = int(options["--shard"])
+ shards = int(options["--shards"])
+ experiments = list_of_experiments()
+ base_command = " ".join([BASE_COMMAND] + extra_args)
+ configs = [base_command]
+ configs += ["%s --enable-%s" % (base_command, e) for e in experiments]
+ my_configs = zip(configs, range(len(configs)))
+ my_configs = filter(lambda x: x[1] % shards == shard, my_configs)
+ my_configs = [e[0] for e in my_configs]
+
+ # Run configs for this shard
+ for config in my_configs:
+ test_build(config)
+
+def test_build(configure_command):
+ print "\033[34m\033[47mTesting %s\033[0m" % (configure_command)
+ RunCommand(configure_command)
+ RunCommand("make clean")
+ RunCommand("make")
+
+if __name__ == "__main__":
+ main(sys.argv)
diff --git a/libvpx/tools/cpplint.py b/libvpx/tools/cpplint.py
new file mode 100755
index 0000000..159dbbb
--- /dev/null
+++ b/libvpx/tools/cpplint.py
@@ -0,0 +1,4020 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Here are some issues that I've had people identify in my code during reviews,
+# that I think are possible to flag automatically in a lint tool. If these were
+# caught by lint, it would save time both for myself and for my reviewers.
+# Most likely, some of these are beyond the scope of the current lint framework,
+# but I think it is valuable to retain these wish-list items even if they cannot
+# be immediately implemented.
+#
+# Suggestions
+# -----------
+# - Check for no 'explicit' for multi-arg ctor
+# - Check for boolean assign RHS in parens
+# - Check for ctor initializer-list colon position and spacing
+# - Check that if there's a ctor, there should be a dtor
+# - Check accessors that return non-pointer member variables are
+# declared const
+# - Check accessors that return non-const pointer member vars are
+# *not* declared const
+# - Check for using public includes for testing
+# - Check for spaces between brackets in one-line inline method
+# - Check for no assert()
+# - Check for spaces surrounding operators
+# - Check for 0 in pointer context (should be NULL)
+# - Check for 0 in char context (should be '\0')
+# - Check for camel-case method name conventions for methods
+# that are not simple inline getters and setters
+# - Do not indent namespace contents
+# - Avoid inlining non-trivial constructors in header files
+# - Check for old-school (void) cast for call-sites of functions
+# ignored return value
+# - Check gUnit usage of anonymous namespace
+# - Check for class declaration order (typedefs, consts, enums,
+# ctor(s?), dtor, friend declarations, methods, member vars)
+#
+
+"""Does google-lint on c++ files.
+
+The goal of this script is to identify places in the code that *may*
+be in non-compliance with google style. It does not attempt to fix
+up these problems -- the point is to educate. It also does not
+attempt to find all problems, or to ensure that everything it does
+find is legitimately a problem.
+
+In particular, we can get very confused by /* and // inside strings!
+We do a small hack, which is to ignore //'s with "'s after them on the
+same line, but it is far from perfect (in either direction).
+"""
+
+import codecs
+import copy
+import getopt
+import math # for log
+import os
+import re
+import sre_compile
+import string
+import sys
+import unicodedata
+
+
+_USAGE = """
+Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
+ [--counting=total|toplevel|detailed]
+ <file> [file] ...
+
+ The style guidelines this tries to follow are those in
+ http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
+
+ Every problem is given a confidence score from 1-5, with 5 meaning we are
+ certain of the problem, and 1 meaning it could be a legitimate construct.
+ This will miss some errors, and is not a substitute for a code review.
+
+ To suppress false-positive errors of a certain category, add a
+ 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
+ suppresses errors of all categories on that line.
+
+ The files passed in will be linted; at least one file must be provided.
+ Linted extensions are .cc, .cpp, and .h. Other file types will be ignored.
+
+ Flags:
+
+ output=vs7
+ By default, the output is formatted to ease emacs parsing. Visual Studio
+ compatible output (vs7) may also be used. Other formats are unsupported.
+
+ verbose=#
+ Specify a number 0-5 to restrict errors to certain verbosity levels.
+
+ filter=-x,+y,...
+ Specify a comma-separated list of category-filters to apply: only
+ error messages whose category names pass the filters will be printed.
+ (Category names are printed with the message and look like
+ "[whitespace/indent]".) Filters are evaluated left to right.
+ "-FOO" and "FOO" means "do not print categories that start with FOO".
+ "+FOO" means "do print categories that start with FOO".
+
+ Examples: --filter=-whitespace,+whitespace/braces
+ --filter=whitespace,runtime/printf,+runtime/printf_format
+ --filter=-,+build/include_what_you_use
+
+ To see a list of all the categories used in cpplint, pass no arg:
+ --filter=
+
+ counting=total|toplevel|detailed
+ The total number of errors found is always printed. If
+ 'toplevel' is provided, then the count of errors in each of
+ the top-level categories like 'build' and 'whitespace' will
+ also be printed. If 'detailed' is provided, then a count
+ is provided for each category like 'build/class'.
+
+ root=subdir
+ The root directory used for deriving header guard CPP variable.
+ By default, the header guard CPP variable is calculated as the relative
+ path to the directory that contains .git, .hg, or .svn. When this flag
+ is specified, the relative path is calculated from the specified
+ directory. If the specified directory does not exist, this flag is
+ ignored.
+
+ Examples:
+ Assuming that src/.git exists, the header guard CPP variables for
+ src/chrome/browser/ui/browser.h are:
+
+ No flag => CHROME_BROWSER_UI_BROWSER_H_
+ --root=chrome => BROWSER_UI_BROWSER_H_
+ --root=chrome/browser => UI_BROWSER_H_
+"""
+
+# We categorize each error message we print. Here are the categories.
+# We want an explicit list so we can list them all in cpplint --filter=.
+# If you add a new error message with a new category, add it to the list
+# here! cpplint_unittest.py should tell you if you forget to do this.
+# \ used for clearer layout -- pylint: disable-msg=C6013
+_ERROR_CATEGORIES = [
+ 'build/class',
+ 'build/deprecated',
+ 'build/endif_comment',
+ 'build/explicit_make_pair',
+ 'build/forward_decl',
+ 'build/header_guard',
+ 'build/include',
+ 'build/include_alpha',
+ 'build/include_order',
+ 'build/include_what_you_use',
+ 'build/namespaces',
+ 'build/printf_format',
+ 'build/storage_class',
+ 'legal/copyright',
+ 'readability/alt_tokens',
+ 'readability/braces',
+ 'readability/casting',
+ 'readability/check',
+ 'readability/constructors',
+ 'readability/fn_size',
+ 'readability/function',
+ 'readability/multiline_comment',
+ 'readability/multiline_string',
+ 'readability/namespace',
+ 'readability/nolint',
+ 'readability/streams',
+ 'readability/todo',
+ 'readability/utf8',
+ 'runtime/arrays',
+ 'runtime/casting',
+ 'runtime/explicit',
+ 'runtime/int',
+ 'runtime/init',
+ 'runtime/invalid_increment',
+ 'runtime/member_string_references',
+ 'runtime/memset',
+ 'runtime/operator',
+ 'runtime/printf',
+ 'runtime/printf_format',
+ 'runtime/references',
+ 'runtime/rtti',
+ 'runtime/sizeof',
+ 'runtime/string',
+ 'runtime/threadsafe_fn',
+ 'whitespace/blank_line',
+ 'whitespace/braces',
+ 'whitespace/comma',
+ 'whitespace/comments',
+ 'whitespace/empty_loop_body',
+ 'whitespace/end_of_line',
+ 'whitespace/ending_newline',
+ 'whitespace/forcolon',
+ 'whitespace/indent',
+ 'whitespace/labels',
+ 'whitespace/line_length',
+ 'whitespace/newline',
+ 'whitespace/operators',
+ 'whitespace/parens',
+ 'whitespace/semicolon',
+ 'whitespace/tab',
+ 'whitespace/todo'
+ ]
+
+# The default state of the category filter. This is overridden by the --filter=
+# flag. By default all errors are on, so only add here categories that should be
+# off by default (i.e., categories that must be enabled by the --filter= flags).
+# All entries here should start with a '-' or '+', as in the --filter= flag.
+_DEFAULT_FILTERS = ['-build/include_alpha']
+
+# We used to check for high-bit characters, but after much discussion we
+# decided those were OK, as long as they were in UTF-8 and didn't represent
+# hard-coded international strings, which belong in a separate i18n file.
+
+# Headers that we consider STL headers.
+_STL_HEADERS = frozenset([
+ 'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
+ 'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
+ 'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'new',
+ 'pair.h', 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
+ 'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
+ 'utility', 'vector', 'vector.h',
+ ])
+
+
+# Non-STL C++ system headers.
+_CPP_HEADERS = frozenset([
+ 'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
+ 'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
+ 'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
+ 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
+ 'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
+ 'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
+ 'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream',
+ 'istream.h', 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
+ 'numeric', 'ostream', 'ostream.h', 'parsestream.h', 'pfstream.h',
+ 'PlotFile.h', 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h',
+ 'ropeimpl.h', 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
+ 'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
+ 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
+ ])
+
+
+# Assertion macros. These are defined in base/logging.h and
+# testing/base/gunit.h. Note that the _M versions need to come first
+# for substring matching to work.
+_CHECK_MACROS = [
+ 'DCHECK', 'CHECK',
+ 'EXPECT_TRUE_M', 'EXPECT_TRUE',
+ 'ASSERT_TRUE_M', 'ASSERT_TRUE',
+ 'EXPECT_FALSE_M', 'EXPECT_FALSE',
+ 'ASSERT_FALSE_M', 'ASSERT_FALSE',
+ ]
+
+# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
+_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+
+for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
+ ('>=', 'GE'), ('>', 'GT'),
+ ('<=', 'LE'), ('<', 'LT')]:
+ _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
+ _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
+ _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
+ _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
+ _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
+ _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
+
+for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
+ ('>=', 'LT'), ('>', 'LE'),
+ ('<=', 'GT'), ('<', 'GE')]:
+ _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
+ _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
+
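+# For example (illustrative), the tables built above map operator to macro:
+#   _CHECK_REPLACEMENT['CHECK']['==']        # -> 'CHECK_EQ'
+#   _CHECK_REPLACEMENT['EXPECT_FALSE']['<']  # -> 'EXPECT_GE'  (inverted sense)
+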
+# Alternative tokens and their replacements. For full list, see section 2.5
+# Alternative tokens [lex.digraph] in the C++ standard.
+#
+# Digraphs (such as '%:') are not included here since it's a mess to
+# match those on a word boundary.
+_ALT_TOKEN_REPLACEMENT = {
+ 'and': '&&',
+ 'bitor': '|',
+ 'or': '||',
+ 'xor': '^',
+ 'compl': '~',
+ 'bitand': '&',
+ 'and_eq': '&=',
+ 'or_eq': '|=',
+ 'xor_eq': '^=',
+ 'not': '!',
+ 'not_eq': '!='
+ }
+
+# Compile regular expression that matches all the above keywords. The "[ =()]"
+# bit is meant to avoid matching these keywords outside of boolean expressions.
+#
+# False positives include C-style multi-line comments (http://go/nsiut )
+# and multi-line strings (http://go/beujw ), but those have always been
+# troublesome for cpplint.
+_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
+ r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
+
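+# For example (illustrative):
+#   _ALT_TOKEN_REPLACEMENT_PATTERN.search('if (a and b)').group(1)  # -> 'and'
+# The leading [ =()] guard keeps identifiers such as 'operand' from matching.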
+
+# These constants define types of headers for use with
+# _IncludeState.CheckNextIncludeOrder().
+_C_SYS_HEADER = 1
+_CPP_SYS_HEADER = 2
+_LIKELY_MY_HEADER = 3
+_POSSIBLE_MY_HEADER = 4
+_OTHER_HEADER = 5
+
+# These constants define the current inline assembly state
+_NO_ASM = 0 # Outside of inline assembly block
+_INSIDE_ASM = 1 # Inside inline assembly block
+_END_ASM = 2 # Last line of inline assembly block
+_BLOCK_ASM = 3 # The whole block is an inline assembly block
+
+# Match start of assembly blocks
+_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
+ r'(?:\s+(volatile|__volatile__))?'
+ r'\s*[{(]')
+
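+# For example (illustrative):
+#   _MATCH_ASM.match('__asm__ __volatile__ (')  # -> a match (assembly block)
+#   _MATCH_ASM.match('int asm_count = 0;')      # -> None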
+
+_regexp_compile_cache = {}
+
+# Finds occurrences of NOLINT or NOLINT(...).
+_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
+
+# {str, set(int)}: a map from error categories to sets of linenumbers
+# on which those errors are expected and should be suppressed.
+_error_suppressions = {}
+
+# The root directory used for deriving header guard CPP variable.
+# This is set by --root flag.
+_root = None
+
+def ParseNolintSuppressions(filename, raw_line, linenum, error):
+ """Updates the global list of error-suppressions.
+
+ Parses any NOLINT comments on the current line, updating the global
+ error_suppressions store. Reports an error if the NOLINT comment
+ was malformed.
+
+ Args:
+ filename: str, the name of the input file.
+ raw_line: str, the line of input text, with comments.
+ linenum: int, the number of the current line.
+ error: function, an error handler.
+ """
+ # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
+ matched = _RE_SUPPRESSION.search(raw_line)
+ if matched:
+ category = matched.group(1)
+ if category in (None, '(*)'): # => "suppress all"
+ _error_suppressions.setdefault(None, set()).add(linenum)
+ else:
+ if category.startswith('(') and category.endswith(')'):
+ category = category[1:-1]
+ if category in _ERROR_CATEGORIES:
+ _error_suppressions.setdefault(category, set()).add(linenum)
+ else:
+ error(filename, linenum, 'readability/nolint', 5,
+ 'Unknown NOLINT error category: %s' % category)
+
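+# For example (illustrative):
+#   _RE_SUPPRESSION.search('i;  // NOLINT(runtime/int)').group(1)
+#       # -> '(runtime/int)'
+# A bare '// NOLINT' yields a group(1) of None, which suppresses all
+# categories on that line.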
+
+def ResetNolintSuppressions():
+ """Resets the set of NOLINT suppressions to empty."""
+ _error_suppressions.clear()
+
+
+def IsErrorSuppressedByNolint(category, linenum):
+ """Returns true if the specified error category is suppressed on this line.
+
+ Consults the global error_suppressions map populated by
+ ParseNolintSuppressions/ResetNolintSuppressions.
+
+ Args:
+ category: str, the category of the error.
+ linenum: int, the current line number.
+ Returns:
+ bool, True iff the error should be suppressed due to a NOLINT comment.
+ """
+ return (linenum in _error_suppressions.get(category, set()) or
+ linenum in _error_suppressions.get(None, set()))
+
+def Match(pattern, s):
+ """Matches the string with the pattern, caching the compiled regexp."""
+ # The regexp compilation caching is inlined in both Match and Search for
+ # performance reasons; factoring it out into a separate function turns out
+ # to be noticeably expensive.
+ if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].match(s)
+
+
+def Search(pattern, s):
+ """Searches the string for the pattern, caching the compiled regexp."""
+ if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].search(s)
+
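+# For example (illustrative), the observable difference is anchoring:
+#   Match(r'#\s*endif', '  #endif')   # -> None (anchored at the start)
+#   Search(r'#\s*endif', '  #endif')  # -> a match object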
+
+class _IncludeState(dict):
+ """Tracks line numbers for includes, and the order in which includes appear.
+
+ As a dict, an _IncludeState object serves as a mapping between include
+ filename and line number on which that file was included.
+
+ Call CheckNextIncludeOrder() once for each header in the file, passing
+ in the type constants defined above. Calls in an illegal order will
+ raise an _IncludeError with an appropriate error message.
+
+ """
+ # self._section will move monotonically through this set. If it ever
+ # needs to move backwards, CheckNextIncludeOrder will raise an error.
+ _INITIAL_SECTION = 0
+ _MY_H_SECTION = 1
+ _C_SECTION = 2
+ _CPP_SECTION = 3
+ _OTHER_H_SECTION = 4
+
+ _TYPE_NAMES = {
+ _C_SYS_HEADER: 'C system header',
+ _CPP_SYS_HEADER: 'C++ system header',
+ _LIKELY_MY_HEADER: 'header this file implements',
+ _POSSIBLE_MY_HEADER: 'header this file may implement',
+ _OTHER_HEADER: 'other header',
+ }
+ _SECTION_NAMES = {
+ _INITIAL_SECTION: "... nothing. (This can't be an error.)",
+ _MY_H_SECTION: 'a header this file implements',
+ _C_SECTION: 'C system header',
+ _CPP_SECTION: 'C++ system header',
+ _OTHER_H_SECTION: 'other header',
+ }
+
+ def __init__(self):
+ dict.__init__(self)
+ # The name of the current section.
+ self._section = self._INITIAL_SECTION
+ # The path of last found header.
+ self._last_header = ''
+
+ def CanonicalizeAlphabeticalOrder(self, header_path):
+ """Returns a path canonicalized for alphabetical comparison.
+
+ - replaces "-" with "_" so they both compare the same.
+ - removes '-inl' since we don't require them to be after the main header.
+ - lowercase everything, just in case.
+
+ Args:
+ header_path: Path to be canonicalized.
+
+ Returns:
+ Canonicalized path.
+ """
+ return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
+
+ def IsInAlphabeticalOrder(self, header_path):
+ """Check if a header is in alphabetical order with the previous header.
+
+ Args:
+ header_path: Header to be checked.
+
+ Returns:
+ Returns true if the header is in alphabetical order.
+ """
+ canonical_header = self.CanonicalizeAlphabeticalOrder(header_path)
+ if self._last_header > canonical_header:
+ return False
+ self._last_header = canonical_header
+ return True
+
+ def CheckNextIncludeOrder(self, header_type):
+ """Returns a non-empty error message if the next header is out of order.
+
+ This function also updates the internal state to be ready to check
+ the next include.
+
+ Args:
+ header_type: One of the _XXX_HEADER constants defined above.
+
+ Returns:
+ The empty string if the header is in the right order, or an
+ error message describing what's wrong.
+
+ """
+ error_message = ('Found %s after %s' %
+ (self._TYPE_NAMES[header_type],
+ self._SECTION_NAMES[self._section]))
+
+ last_section = self._section
+
+ if header_type == _C_SYS_HEADER:
+ if self._section <= self._C_SECTION:
+ self._section = self._C_SECTION
+ else:
+ self._last_header = ''
+ return error_message
+ elif header_type == _CPP_SYS_HEADER:
+ if self._section <= self._CPP_SECTION:
+ self._section = self._CPP_SECTION
+ else:
+ self._last_header = ''
+ return error_message
+ elif header_type == _LIKELY_MY_HEADER:
+ if self._section <= self._MY_H_SECTION:
+ self._section = self._MY_H_SECTION
+ else:
+ self._section = self._OTHER_H_SECTION
+ elif header_type == _POSSIBLE_MY_HEADER:
+ if self._section <= self._MY_H_SECTION:
+ self._section = self._MY_H_SECTION
+ else:
+ # This will always be the fallback because we're not sure
+ # enough that the header is associated with this file.
+ self._section = self._OTHER_H_SECTION
+ else:
+ assert header_type == _OTHER_HEADER
+ self._section = self._OTHER_H_SECTION
+
+ if last_section != self._section:
+ self._last_header = ''
+
+ return ''
+
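+# For example (illustrative), the section state only moves forward:
+#   state = _IncludeState()
+#   state.CheckNextIncludeOrder(_C_SYS_HEADER)    # -> ''
+#   state.CheckNextIncludeOrder(_CPP_SYS_HEADER)  # -> ''
+#   state.CheckNextIncludeOrder(_C_SYS_HEADER)
+#       # -> 'Found C system header after C++ system header'
+#   state.CanonicalizeAlphabeticalOrder('Foo-inl.h')  # -> 'foo.h'
+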
+
+class _CppLintState(object):
+ """Maintains module-wide state."""
+
+ def __init__(self):
+ self.verbose_level = 1 # global setting.
+ self.error_count = 0 # global count of reported errors
+ # filters to apply when emitting error messages
+ self.filters = _DEFAULT_FILTERS[:]
+ self.counting = 'total' # In what way are we counting errors?
+ self.errors_by_category = {} # string to int dict storing error counts
+
+ # output format:
+ # "emacs" - format that emacs can parse (default)
+ # "vs7" - format that Microsoft Visual Studio 7 can parse
+ self.output_format = 'emacs'
+
+ def SetOutputFormat(self, output_format):
+ """Sets the output format for errors."""
+ self.output_format = output_format
+
+ def SetVerboseLevel(self, level):
+ """Sets the module's verbosity, and returns the previous setting."""
+ last_verbose_level = self.verbose_level
+ self.verbose_level = level
+ return last_verbose_level
+
+ def SetCountingStyle(self, counting_style):
+ """Sets the module's counting options."""
+ self.counting = counting_style
+
+ def SetFilters(self, filters):
+ """Sets the error-message filters.
+
+ These filters are applied when deciding whether to emit a given
+ error message.
+
+ Args:
+ filters: A string of comma-separated filters (eg "+whitespace/indent").
+ Each filter should start with + or -; else we die.
+
+ Raises:
+ ValueError: The comma-separated filters did not all start with '+' or '-'.
+ E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
+ """
+ # Default filters always have lower priority than the flag ones.
+ self.filters = _DEFAULT_FILTERS[:]
+ for filt in filters.split(','):
+ clean_filt = filt.strip()
+ if clean_filt:
+ self.filters.append(clean_filt)
+ for filt in self.filters:
+ if not (filt.startswith('+') or filt.startswith('-')):
+ raise ValueError('Every filter in --filters must start with + or -'
+ ' (%s does not)' % filt)
+
+ def ResetErrorCounts(self):
+ """Sets the module's error statistic back to zero."""
+ self.error_count = 0
+ self.errors_by_category = {}
+
+ def IncrementErrorCount(self, category):
+ """Bumps the module's error statistic."""
+ self.error_count += 1
+ if self.counting in ('toplevel', 'detailed'):
+ if self.counting != 'detailed':
+ category = category.split('/')[0]
+ if category not in self.errors_by_category:
+ self.errors_by_category[category] = 0
+ self.errors_by_category[category] += 1
+
+ def PrintErrorCounts(self):
+ """Print a summary of errors by category, and the total."""
+ for category, count in self.errors_by_category.iteritems():
+ sys.stderr.write('Category \'%s\' errors found: %d\n' %
+ (category, count))
+ sys.stderr.write('Total errors found: %d\n' % self.error_count)
+
+_cpplint_state = _CppLintState()
+
+
+def _OutputFormat():
+ """Gets the module's output format."""
+ return _cpplint_state.output_format
+
+
+def _SetOutputFormat(output_format):
+ """Sets the module's output format."""
+ _cpplint_state.SetOutputFormat(output_format)
+
+
+def _VerboseLevel():
+ """Returns the module's verbosity setting."""
+ return _cpplint_state.verbose_level
+
+
+def _SetVerboseLevel(level):
+ """Sets the module's verbosity, and returns the previous setting."""
+ return _cpplint_state.SetVerboseLevel(level)
+
+
+def _SetCountingStyle(level):
+ """Sets the module's counting options."""
+ _cpplint_state.SetCountingStyle(level)
+
+
+def _Filters():
+ """Returns the module's list of output filters, as a list."""
+ return _cpplint_state.filters
+
+
+def _SetFilters(filters):
+ """Sets the module's error-message filters.
+
+ These filters are applied when deciding whether to emit a given
+ error message.
+
+ Args:
+ filters: A string of comma-separated filters (eg "whitespace/indent").
+ Each filter should start with + or -; else we die.
+ """
+ _cpplint_state.SetFilters(filters)
+
+
+class _FunctionState(object):
+ """Tracks current function name and the number of lines in its body."""
+
+ _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
+ _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
+
+ def __init__(self):
+ self.in_a_function = False
+ self.lines_in_function = 0
+ self.current_function = ''
+
+ def Begin(self, function_name):
+ """Start analyzing function body.
+
+ Args:
+ function_name: The name of the function being tracked.
+ """
+ self.in_a_function = True
+ self.lines_in_function = 0
+ self.current_function = function_name
+
+ def Count(self):
+ """Count line in current function body."""
+ if self.in_a_function:
+ self.lines_in_function += 1
+
+ def Check(self, error, filename, linenum):
+ """Report if too many lines in function body.
+
+ Args:
+ error: The function to call with any errors found.
+ filename: The name of the current file.
+ linenum: The number of the line to check.
+ """
+ if Match(r'T(EST|est)', self.current_function):
+ base_trigger = self._TEST_TRIGGER
+ else:
+ base_trigger = self._NORMAL_TRIGGER
+ trigger = base_trigger * 2**_VerboseLevel()
+
+ if self.lines_in_function > trigger:
+ error_level = int(math.log(self.lines_in_function / base_trigger, 2))
+ # e.g. with base_trigger 250: 300 => 0, 500 => 1, 1000 => 2, ..., capped at 5.
+ if error_level > 5:
+ error_level = 5
+ error(filename, linenum, 'readability/fn_size', error_level,
+ 'Small and focused functions are preferred:'
+ ' %s has %d non-comment lines'
+ ' (error triggered by exceeding %d lines).' % (
+ self.current_function, self.lines_in_function, trigger))
+
+ def End(self):
+ """Stop analyzing function body."""
+ self.in_a_function = False
+
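+# For example (illustrative, Python 2 integer division assumed): at --v=0 a
+# non-test function may have up to 250 non-comment lines; a 600-line body
+# gives error_level = int(math.log(600 / 250, 2)) = 1.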
+
+class _IncludeError(Exception):
+ """Indicates a problem with the include order in a file."""
+ pass
+
+
+class FileInfo:
+ """Provides utility functions for filenames.
+
+ FileInfo provides easy access to the components of a file's path
+ relative to the project root.
+ """
+
+ def __init__(self, filename):
+ self._filename = filename
+
+ def FullName(self):
+ """Make Windows paths like Unix."""
+ return os.path.abspath(self._filename).replace('\\', '/')
+
+ def RepositoryName(self):
+ """FullName after removing the local path to the repository.
+
+ If we have a real absolute path name here we can try to do something smart:
+ detecting the root of the checkout and truncating /path/to/checkout from
+ the name so that we get header guards that don't include things like
+ "C:\Documents and Settings\..." or "/home/username/..." in them and thus
+ people on different computers who have checked the source out to different
+ locations won't see bogus errors.
+ """
+ fullname = self.FullName()
+
+ if os.path.exists(fullname):
+ project_dir = os.path.dirname(fullname)
+
+ if os.path.exists(os.path.join(project_dir, ".svn")):
+ # If there's a .svn file in the current directory, we recursively look
+ # up the directory tree for the top of the SVN checkout
+ root_dir = project_dir
+ one_up_dir = os.path.dirname(root_dir)
+ while os.path.exists(os.path.join(one_up_dir, ".svn")):
+ root_dir = os.path.dirname(root_dir)
+ one_up_dir = os.path.dirname(one_up_dir)
+
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
+
+ # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
+ # searching up from the current path.
+ root_dir = os.path.dirname(fullname)
+ while (root_dir != os.path.dirname(root_dir) and
+ not os.path.exists(os.path.join(root_dir, ".git")) and
+ not os.path.exists(os.path.join(root_dir, ".hg")) and
+ not os.path.exists(os.path.join(root_dir, ".svn"))):
+ root_dir = os.path.dirname(root_dir)
+
+ if (os.path.exists(os.path.join(root_dir, ".git")) or
+ os.path.exists(os.path.join(root_dir, ".hg")) or
+ os.path.exists(os.path.join(root_dir, ".svn"))):
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
+
+ # Don't know what to do; header guard warnings may be wrong...
+ return fullname
+
+ def Split(self):
+ """Splits the file into the directory, basename, and extension.
+
+ For 'chrome/browser/browser.cc', Split() would
+ return ('chrome/browser', 'browser', '.cc')
+
+ Returns:
+ A tuple of (directory, basename, extension).
+ """
+
+ googlename = self.RepositoryName()
+ project, rest = os.path.split(googlename)
+ return (project,) + os.path.splitext(rest)
+
+ def BaseName(self):
+ """File base name - text after the final slash, before the final period."""
+ return self.Split()[1]
+
+ def Extension(self):
+ """File extension - text following the final period."""
+ return self.Split()[2]
+
+ def NoExtension(self):
+ """File path with the extension stripped."""
+ return '/'.join(self.Split()[0:2])
+
+ def IsSource(self):
+ """File has a source file extension."""
+ return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
+
+
+def _ShouldPrintError(category, confidence, linenum):
+ """If confidence >= verbose, category passes filter and is not suppressed."""
+
+ # There are three ways we might decide not to print an error message:
+ # a "NOLINT(category)" comment appears in the source,
+ # the verbosity level isn't high enough, or the filters filter it out.
+ if IsErrorSuppressedByNolint(category, linenum):
+ return False
+ if confidence < _cpplint_state.verbose_level:
+ return False
+
+ is_filtered = False
+ for one_filter in _Filters():
+ if one_filter.startswith('-'):
+ if category.startswith(one_filter[1:]):
+ is_filtered = True
+ elif one_filter.startswith('+'):
+ if category.startswith(one_filter[1:]):
+ is_filtered = False
+ else:
+ assert False # should have been checked for in SetFilters.
+ if is_filtered:
+ return False
+
+ return True
+
+
+def Error(filename, linenum, category, confidence, message):
+ """Logs the fact we've found a lint error.
+
+ We log where the error was found, and also our confidence in the error,
+ that is, how certain we are this is a legitimate style regression, and
+ not a misidentification or a use that's sometimes justified.
+
+ False positives can be suppressed by the use of
+ "cpplint(category)" comments on the offending line. These are
+ parsed into _error_suppressions.
+
+ Args:
+ filename: The name of the file containing the error.
+ linenum: The number of the line containing the error.
+ category: A string used to describe the "category" this bug
+ falls under: "whitespace", say, or "runtime". Categories
+ may have a hierarchy separated by slashes: "whitespace/indent".
+ confidence: A number from 1-5 representing a confidence score for
+ the error, with 5 meaning that we are certain of the problem,
+ and 1 meaning that it could be a legitimate construct.
+ message: The error message.
+ """
+ if _ShouldPrintError(category, confidence, linenum):
+ _cpplint_state.IncrementErrorCount(category)
+ if _cpplint_state.output_format == 'vs7':
+ sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence))
+ else:
+ sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence))
+
+
+# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
+_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
+ r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
+# Matches strings. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
+# Matches characters. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
+# Matches multi-line C++ comments.
+# This RE is a little bit more complicated than one might expect, because we
+# have to take care of how spaces are removed so we can handle comments
+# inside statements better.
+# The current rule is: we only clear spaces from both sides when we're at the
+# end of the line. Otherwise, we try to remove spaces from the right side;
+# if that doesn't work, we try the left side, but only if there's a non-word
+# character on the right.
+_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
+ r"""(\s*/\*.*\*/\s*$|
+ /\*.*\*/\s+|
+ \s+/\*.*\*/(?=\W)|
+ /\*.*\*/)""", re.VERBOSE)
+
+
+def IsCppString(line):
+ """Does the line terminate such that the next symbol is in a string constant?
+
+ This function does not consider single-line nor multi-line comments.
+
+ Args:
+ line: a partial line of code, starting from character 0.
+
+ Returns:
+ True, if next character appended to 'line' is inside a
+ string constant.
+ """
+
+ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
+ return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
+
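+# For example (illustrative):
+#   IsCppString('printf("hello')    # -> True  (unterminated string)
+#   IsCppString('printf("hello")')  # -> False (quotes are balanced)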
+
+def FindNextMultiLineCommentStart(lines, lineix):
+ """Find the beginning marker for a multiline comment."""
+ while lineix < len(lines):
+ if lines[lineix].strip().startswith('/*'):
+ # Only return this marker if the comment goes beyond this line
+ if lines[lineix].strip().find('*/', 2) < 0:
+ return lineix
+ lineix += 1
+ return len(lines)
+
+
+def FindNextMultiLineCommentEnd(lines, lineix):
+ """We are inside a comment, find the end marker."""
+ while lineix < len(lines):
+ if lines[lineix].strip().endswith('*/'):
+ return lineix
+ lineix += 1
+ return len(lines)
+
+
+def RemoveMultiLineCommentsFromRange(lines, begin, end):
+ """Clears a range of lines for multi-line comments."""
+ # Having // dummy comments makes the lines non-empty, so we will not get
+ # unnecessary blank line warnings later in the code.
+ for i in range(begin, end):
+ lines[i] = '// dummy'
+
+
+def RemoveMultiLineComments(filename, lines, error):
+ """Removes multiline (c-style) comments from lines."""
+ lineix = 0
+ while lineix < len(lines):
+ lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
+ if lineix_begin >= len(lines):
+ return
+ lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
+ if lineix_end >= len(lines):
+ error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
+ 'Could not find end of multi-line comment')
+ return
+ RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
+ lineix = lineix_end + 1
+
+
+def CleanseComments(line):
+ """Removes //-comments and single-line C-style /* */ comments.
+
+ Args:
+ line: A line of C++ source.
+
+ Returns:
+ The line with single-line comments removed.
+ """
+ commentpos = line.find('//')
+ if commentpos != -1 and not IsCppString(line[:commentpos]):
+ line = line[:commentpos].rstrip()
+ # get rid of /* ... */
+ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
+
+
+class CleansedLines(object):
+ """Holds 3 copies of all lines with different preprocessing applied to them.
+
+ 1) elided member contains lines without strings and comments,
+ 2) lines member contains lines without comments, and
+ 3) raw_lines member contains all the lines without processing.
+ All three members are lists of the same length.
+ """
+
+ def __init__(self, lines):
+ self.elided = []
+ self.lines = []
+ self.raw_lines = lines
+ self.num_lines = len(lines)
+ for linenum in range(len(lines)):
+ self.lines.append(CleanseComments(lines[linenum]))
+ elided = self._CollapseStrings(lines[linenum])
+ self.elided.append(CleanseComments(elided))
+
+ def NumLines(self):
+ """Returns the number of lines represented."""
+ return self.num_lines
+
+ @staticmethod
+ def _CollapseStrings(elided):
+ """Collapses strings and chars on a line to simple "" or '' blocks.
+
+ We nix strings first so we're not fooled by text like '"http://"'
+
+ Args:
+ elided: The line being processed.
+
+ Returns:
+ The line with collapsed strings.
+ """
+ if not _RE_PATTERN_INCLUDE.match(elided):
+ # Remove escaped characters first to make quote/single quote collapsing
+ # basic. Things that look like escaped characters shouldn't occur
+ # outside of strings and chars.
+ elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
+ return elided
+
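+# For example (illustrative), collapsing keeps line structure while hiding
+# string contents from later checks:
+#   CleansedLines._CollapseStrings('printf("%d", a)')  # -> 'printf("", a)'
+#   CleansedLines._CollapseStrings("ch = 'a';")        # -> "ch = '';"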
+
+def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
+ """Find the position just after the matching endchar.
+
+ Args:
+ line: a CleansedLines line.
+ startpos: start searching at this position.
+ depth: nesting level at startpos.
+ startchar: expression opening character.
+ endchar: expression closing character.
+
+ Returns:
+ Index just after endchar.
+ """
+ for i in xrange(startpos, len(line)):
+ if line[i] == startchar:
+ depth += 1
+ elif line[i] == endchar:
+ depth -= 1
+ if depth == 0:
+ return i + 1
+ return -1
+
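+# For example (illustrative), starting at the '(' in position 3:
+#   FindEndOfExpressionInLine('foo(a, (b))', 3, 0, '(', ')')  # -> 11
+# i.e. the index just past the matching close parenthesis.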
+
+def CloseExpression(clean_lines, linenum, pos):
+ """If input points to ( or { or [, finds the position that closes it.
+
+ If lines[linenum][pos] points to a '(' or '{' or '[', finds the
+ linenum/pos that correspond to the closing of the expression.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ pos: A position on the line.
+
+ Returns:
+ A tuple (line, linenum, pos) pointer *past* the closing brace, or
+ (line, len(lines), -1) if we never find a close. Note we ignore
+ strings and comments when matching; and the line we return is the
+ 'cleansed' line at linenum.
+ """
+
+ line = clean_lines.elided[linenum]
+ startchar = line[pos]
+ if startchar not in '({[':
+ return (line, clean_lines.NumLines(), -1)
+ if startchar == '(': endchar = ')'
+ if startchar == '[': endchar = ']'
+ if startchar == '{': endchar = '}'
+
+ # Check first line
+ end_pos = FindEndOfExpressionInLine(line, pos, 0, startchar, endchar)
+ if end_pos > -1:
+ return (line, linenum, end_pos)
+ tail = line[pos:]
+ num_open = tail.count(startchar) - tail.count(endchar)
+ while linenum < clean_lines.NumLines() - 1:
+ linenum += 1
+ line = clean_lines.elided[linenum]
+ delta = line.count(startchar) - line.count(endchar)
+ if num_open + delta <= 0:
+ return (line, linenum,
+ FindEndOfExpressionInLine(line, 0, num_open, startchar, endchar))
+ num_open += delta
+
+ # Did not find endchar before end of file, give up
+ return (line, clean_lines.NumLines(), -1)
+
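+# For example (illustrative): with the elided line 'foo(bar(1), 2)' and
+# pos 3 (the first '('), CloseExpression returns (line, linenum, 14), the
+# position just past the matching ')'.
+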
+def CheckForCopyright(filename, lines, error):
+ """Logs an error if no Copyright message appears at the top of the file."""
+
+ # We'll say it should occur by line 10. Don't forget there's a
+ # dummy line at the front.
+ for line in xrange(1, min(len(lines), 11)):
+ if re.search(r'Copyright', lines[line], re.I): break
+ else: # means no copyright line was found
+ error(filename, 0, 'legal/copyright', 5,
+ 'No copyright message found. '
+ 'You should have a line: "Copyright [year] <Copyright Owner>"')
+
+
+def GetHeaderGuardCPPVariable(filename):
+ """Returns the CPP variable that should be used as a header guard.
+
+ Args:
+ filename: The name of a C++ header file.
+
+ Returns:
+ The CPP variable that should be used as a header guard in the
+ named file.
+
+ """
+
+ # Restore the original filename in case cpplint is invoked from Emacs's
+ # flymake.
+ filename = re.sub(r'_flymake\.h$', '.h', filename)
+ filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
+
+ fileinfo = FileInfo(filename)
+ file_path_from_root = fileinfo.RepositoryName()
+ if _root:
+ file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
+ return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
+
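+# For example (illustrative, assuming the checkout root is detected as in
+# RepositoryName above):
+#   GetHeaderGuardCPPVariable('chrome/browser/ui/browser.h')
+#       # -> 'CHROME_BROWSER_UI_BROWSER_H_'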
+
+def CheckForHeaderGuard(filename, lines, error):
+ """Checks that the file contains a header guard.
+
+ Logs an error if no #ifndef header guard is present. For other
+ headers, checks that the full pathname is used.
+
+ Args:
+ filename: The name of the C++ header file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ cppvar = GetHeaderGuardCPPVariable(filename)
+
+ ifndef = None
+ ifndef_linenum = 0
+ define = None
+ endif = None
+ endif_linenum = 0
+ for linenum, line in enumerate(lines):
+ linesplit = line.split()
+ if len(linesplit) >= 2:
+ # find the first occurrence of #ifndef and #define, save arg
+ if not ifndef and linesplit[0] == '#ifndef':
+ # set ifndef to the header guard presented on the #ifndef line.
+ ifndef = linesplit[1]
+ ifndef_linenum = linenum
+ if not define and linesplit[0] == '#define':
+ define = linesplit[1]
+ # find the last occurrence of #endif, save entire line
+ if line.startswith('#endif'):
+ endif = line
+ endif_linenum = linenum
+
+ if not ifndef:
+ error(filename, 0, 'build/header_guard', 5,
+ 'No #ifndef header guard found, suggested CPP variable is: %s' %
+ cppvar)
+ return
+
+ if not define:
+ error(filename, 0, 'build/header_guard', 5,
+ 'No #define header guard found, suggested CPP variable is: %s' %
+ cppvar)
+ return
+
+ # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
+ # for backward compatibility.
+ if ifndef != cppvar:
+ error_level = 0
+ if ifndef != cppvar + '_':
+ error_level = 5
+
+ ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
+ error)
+ error(filename, ifndef_linenum, 'build/header_guard', error_level,
+ '#ifndef header guard has wrong style, please use: %s' % cppvar)
+
+ if define != ifndef:
+ error(filename, 0, 'build/header_guard', 5,
+ '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
+ cppvar)
+ return
+
+ if endif != ('#endif // %s' % cppvar):
+ error_level = 0
+ if endif != ('#endif // %s' % (cppvar + '_')):
+ error_level = 5
+
+ ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
+ error)
+ error(filename, endif_linenum, 'build/header_guard', error_level,
+ '#endif line should be "#endif // %s"' % cppvar)
+
+
+def CheckForUnicodeReplacementCharacters(filename, lines, error):
+ """Logs an error for each line containing Unicode replacement characters.
+
+ These indicate that either the file contained invalid UTF-8 (likely)
+ or Unicode replacement characters (which it shouldn't). Note that
+ it's possible for this to throw off line numbering if the invalid
+ UTF-8 occurred adjacent to a newline.
+
+ Args:
+ filename: The name of the current file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+ for linenum, line in enumerate(lines):
+ if u'\ufffd' in line:
+ error(filename, linenum, 'readability/utf8', 5,
+ 'Line contains invalid UTF-8 (or Unicode replacement character).')
+
+
+def CheckForNewlineAtEOF(filename, lines, error):
+ """Logs an error if there is no newline char at the end of the file.
+
+ Args:
+ filename: The name of the current file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ # The array lines() was created by adding two newlines to the
+ # original file (go figure), then splitting on \n.
+ # To verify that the file ends in \n, we just have to make sure the
+ # last-but-two element of lines() exists and is empty.
+ if len(lines) < 3 or lines[-2]:
+ error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
+ 'Could not find a newline character at the end of the file.')
+
+
+def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
+ """Logs an error if we see /* ... */ or "..." that extend past one line.
+
+ /* ... */ comments are legit inside macros, for one line.
+ Otherwise, we prefer // comments, so it's ok to warn about the
+ other. Likewise, it's ok for strings to extend across multiple
+ lines, as long as a line continuation character (backslash)
+ terminates each line. Although not currently prohibited by the C++
+ style guide, it's ugly and unnecessary. We don't do well with either
+ in this lint program, so we warn about both.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Remove all \\ (escaped backslashes) from the line. They are OK, and the
+ # second (escaped) slash may trigger later \" detection erroneously.
+ line = line.replace('\\\\', '')
+
+ if line.count('/*') > line.count('*/'):
+ error(filename, linenum, 'readability/multiline_comment', 5,
+ 'Complex multi-line /*...*/-style comment found. '
+ 'Lint may give bogus warnings. '
+ 'Consider replacing these with //-style comments, '
+ 'with #if 0...#endif, '
+ 'or with more clearly structured multi-line comments.')
+
+ if (line.count('"') - line.count('\\"')) % 2:
+ error(filename, linenum, 'readability/multiline_string', 5,
+ 'Multi-line string ("...") found. This lint script doesn\'t '
+ 'do well with such strings, and may give bogus warnings. They\'re '
+ 'ugly and unnecessary, and you should use concatenation instead.')
+
+
+threading_list = (
+ ('asctime(', 'asctime_r('),
+ ('ctime(', 'ctime_r('),
+ ('getgrgid(', 'getgrgid_r('),
+ ('getgrnam(', 'getgrnam_r('),
+ ('getlogin(', 'getlogin_r('),
+ ('getpwnam(', 'getpwnam_r('),
+ ('getpwuid(', 'getpwuid_r('),
+ ('gmtime(', 'gmtime_r('),
+ ('localtime(', 'localtime_r('),
+ ('rand(', 'rand_r('),
+ ('readdir(', 'readdir_r('),
+ ('strtok(', 'strtok_r('),
+ ('ttyname(', 'ttyname_r('),
+ )
+
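+# For example (illustrative): CheckPosixThreading below flags a call such
+# as 'strtok(line, " ")' and suggests the reentrant 'strtok_r(...)'.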
+
+def CheckPosixThreading(filename, clean_lines, linenum, error):
+ """Checks for calls to thread-unsafe functions.
+
+ Much code was originally written without considering multi-threading.
+ Engineers also rely on their old experience; many learned POSIX before
+ its threading extensions were added. These checks guide engineers
+ toward thread-safe functions (when using POSIX directly).
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+ for single_thread_function, multithread_safe_function in threading_list:
+ ix = line.find(single_thread_function)
+ # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
+ if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
+ line[ix - 1] not in ('_', '.', '>'))):
+ error(filename, linenum, 'runtime/threadsafe_fn', 2,
+ 'Consider using ' + multithread_safe_function +
+ '...) instead of ' + single_thread_function +
+ '...) for improved thread safety.')
+
+
+# Matches invalid increment: *count++, which moves pointer instead of
+# incrementing a value.
+_RE_PATTERN_INVALID_INCREMENT = re.compile(
+ r'^\s*\*\w+(\+\+|--);')
+
+
+def CheckInvalidIncrement(filename, clean_lines, linenum, error):
+ """Checks for invalid increment *count++.
+
+ For example, the following function:
+ void increment_counter(int* count) {
+ *count++;
+ }
+ is invalid, because it effectively does count++, moving the pointer; it
+ should be replaced with ++*count, (*count)++ or *count += 1.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+ if _RE_PATTERN_INVALID_INCREMENT.match(line):
+ error(filename, linenum, 'runtime/invalid_increment', 5,
+ 'Changing pointer instead of value (or unused value of operator*).')
+
+
+class _BlockInfo(object):
+ """Stores information about a generic block of code."""
+
+ def __init__(self, seen_open_brace):
+ self.seen_open_brace = seen_open_brace
+ self.open_parentheses = 0
+ self.inline_asm = _NO_ASM
+
+ def CheckBegin(self, filename, clean_lines, linenum, error):
+ """Run checks that apply to text up to the opening brace.
+
+ This is mostly for checking the text after the class identifier
+ and the "{", usually where the base class is specified. For other
+ blocks, there isn't much to check, so we always pass.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ pass
+
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ """Run checks that apply to text after the closing brace.
+
+ This is mostly used for checking end of namespace comments.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ pass
+
+
+class _ClassInfo(_BlockInfo):
+ """Stores information about a class."""
+
+ def __init__(self, name, class_or_struct, clean_lines, linenum):
+ _BlockInfo.__init__(self, False)
+ self.name = name
+ self.starting_linenum = linenum
+ self.is_derived = False
+ if class_or_struct == 'struct':
+ self.access = 'public'
+ else:
+ self.access = 'private'
+
+ # Try to find the end of the class. This will be confused by things like:
+ # class A {
+ # } *x = { ...
+ #
+ # But it's still good enough for CheckSectionSpacing.
+ self.last_line = 0
+ depth = 0
+ for i in range(linenum, clean_lines.NumLines()):
+ line = clean_lines.elided[i]
+ depth += line.count('{') - line.count('}')
+ if not depth:
+ self.last_line = i
+ break
+
+ def CheckBegin(self, filename, clean_lines, linenum, error):
+ # Look for a bare ':'
+ if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
+ self.is_derived = True
+
+
+class _NamespaceInfo(_BlockInfo):
+ """Stores information about a namespace."""
+
+ def __init__(self, name, linenum):
+ _BlockInfo.__init__(self, False)
+ self.name = name or ''
+ self.starting_linenum = linenum
+
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ """Check end of namespace comments."""
+ line = clean_lines.raw_lines[linenum]
+
+ # Check how many lines are enclosed in this namespace. Don't issue a
+ # warning for missing namespace comments if there aren't enough
+ # lines. However, do apply checks if there is already an end of
+ # namespace comment and it's incorrect.
+ #
+ # TODO(unknown): We always want to check end of namespace comments
+ # if a namespace is large, but sometimes we also want to apply the
+ # check if a short namespace contained nontrivial things (something
+ # other than forward declarations). There is currently no logic on
+ # deciding what these nontrivial things are, so this check is
+ # triggered by namespace size only, which works most of the time.
+ if (linenum - self.starting_linenum < 10
+ and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
+ return
+
+ # Look for matching comment at end of namespace.
+ #
+ # Note that we accept C style "/* */" comments for terminating
+ # namespaces, so that code that terminates namespaces inside
+ # preprocessor macros can be cpplint clean. Example: http://go/nxpiz
+ #
+ # We also accept stuff like "// end of namespace <name>." with the
+ # period at the end.
+ #
+ # Besides these, we don't accept anything else, otherwise we might
+ # get false negatives when an existing comment is a substring of the
+ # expected namespace comment. Example: http://go/ldkdc, http://cl/23548205
+ if self.name:
+ # Named namespace
+ if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
+ r'[\*/\.\\\s]*$'),
+ line):
+ error(filename, linenum, 'readability/namespace', 5,
+ 'Namespace should be terminated with "// namespace %s"' %
+ self.name)
+ else:
+ # Anonymous namespace
+ if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
+ error(filename, linenum, 'readability/namespace', 5,
+ 'Namespace should be terminated with "// namespace"')
+
+
+class _PreprocessorInfo(object):
+ """Stores checkpoints of nesting stacks when #if/#else is seen."""
+
+ def __init__(self, stack_before_if):
+ # The entire nesting stack before #if
+ self.stack_before_if = stack_before_if
+
+ # The entire nesting stack up to #else
+ self.stack_before_else = []
+
+ # Whether we have already seen #else or #elif
+ self.seen_else = False
+
+
+class _NestingState(object):
+ """Holds states related to parsing braces."""
+
+ def __init__(self):
+ # Stack for tracking all braces. An object is pushed whenever we
+ # see a "{", and popped when we see a "}". Only 3 types of
+ # objects are possible:
+ # - _ClassInfo: a class or struct.
+ # - _NamespaceInfo: a namespace.
+ # - _BlockInfo: some other type of block.
+ self.stack = []
+
+ # Stack of _PreprocessorInfo objects.
+ self.pp_stack = []
+
+ def SeenOpenBrace(self):
+ """Check if we have seen the opening brace for the innermost block.
+
+ Returns:
+ True if we have seen the opening brace, False if the innermost
+ block is still expecting an opening brace.
+ """
+ return (not self.stack) or self.stack[-1].seen_open_brace
+
+ def InNamespaceBody(self):
+ """Check if we are currently one level inside a namespace body.
+
+ Returns:
+ True if top of the stack is a namespace block, False otherwise.
+ """
+ return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
+
+ def UpdatePreprocessor(self, line):
+ """Update preprocessor stack.
+
+ We need to handle preprocessors due to classes like this:
+ #ifdef SWIG
+ struct ResultDetailsPageElementExtensionPoint {
+ #else
+ struct ResultDetailsPageElementExtensionPoint : public Extension {
+ #endif
+ (see http://go/qwddn for original example)
+
+ We make the following assumptions (good enough for most files):
+ - Preprocessor condition evaluates to true from #if up to first
+ #else/#elif/#endif.
+
+ - Preprocessor condition evaluates to false from #else/#elif up
+ to #endif. We still perform lint checks on these lines, but
+ they do not affect the nesting stack.
+
+ Args:
+ line: current line to check.
+ """
+ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
+ # Beginning of #if block, save the nesting stack here. The saved
+ # stack will allow us to restore the parsing state in the #else case.
+ self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
+ elif Match(r'^\s*#\s*(else|elif)\b', line):
+ # Beginning of #else block
+ if self.pp_stack:
+ if not self.pp_stack[-1].seen_else:
+ # This is the first #else or #elif block. Remember the
+ # whole nesting stack up to this point. This is what we
+ # keep after the #endif.
+ self.pp_stack[-1].seen_else = True
+ self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
+
+ # Restore the stack to how it was before the #if
+ self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
+ else:
+ # TODO(unknown): unexpected #else, issue warning?
+ pass
+ elif Match(r'^\s*#\s*endif\b', line):
+ # End of #if or #else blocks.
+ if self.pp_stack:
+ # If we saw an #else, we will need to restore the nesting
+ # stack to its former state before the #else, otherwise we
+ # will just continue from where we left off.
+ if self.pp_stack[-1].seen_else:
+ # Here we can just use a shallow copy since we are the last
+ # reference to it.
+ self.stack = self.pp_stack[-1].stack_before_else
+ # Drop the corresponding #if
+ self.pp_stack.pop()
+ else:
+ # TODO(unknown): unexpected #endif, issue warning?
+ pass
+
+ def Update(self, filename, clean_lines, linenum, error):
+ """Update nesting state with current line.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Update pp_stack first
+ self.UpdatePreprocessor(line)
+
+ # Count parentheses. This is to avoid adding struct arguments to
+ # the nesting stack.
+ if self.stack:
+ inner_block = self.stack[-1]
+ depth_change = line.count('(') - line.count(')')
+ inner_block.open_parentheses += depth_change
+
+ # Also check if we are starting or ending an inline assembly block.
+ if inner_block.inline_asm in (_NO_ASM, _END_ASM):
+ if (depth_change != 0 and
+ inner_block.open_parentheses == 1 and
+ _MATCH_ASM.match(line)):
+ # Enter assembly block
+ inner_block.inline_asm = _INSIDE_ASM
+ else:
+ # Not entering assembly block. If previous line was _END_ASM,
+ # we will now shift to _NO_ASM state.
+ inner_block.inline_asm = _NO_ASM
+ elif (inner_block.inline_asm == _INSIDE_ASM and
+ inner_block.open_parentheses == 0):
+ # Exit assembly block
+ inner_block.inline_asm = _END_ASM
+
+ # Consume namespace declaration at the beginning of the line. Do
+ # this in a loop so that we catch same line declarations like this:
+ # namespace proto2 { namespace bridge { class MessageSet; } }
+ while True:
+ # Match start of namespace. The "\b\s*" below catches namespace
+ # declarations even if they aren't followed by whitespace; this
+ # is so that we don't confuse our namespace checker. The
+ # missing spaces will be flagged by CheckSpacing.
+ namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
+ if not namespace_decl_match:
+ break
+
+ new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
+ self.stack.append(new_namespace)
+
+ line = namespace_decl_match.group(2)
+ if line.find('{') != -1:
+ new_namespace.seen_open_brace = True
+ line = line[line.find('{') + 1:]
+
+ # Look for a class declaration in whatever is left of the line
+ # after parsing namespaces. The regexp accounts for decorated classes
+ # such as in:
+ # class LOCKABLE API Object {
+ # };
+ #
+ # Templates with class arguments may confuse the parser, for example:
+ # template <class T
+ # class Comparator = less<T>,
+ # class Vector = vector<T> >
+ # class HeapQueue {
+ #
+ # Because this parser has no nesting state about templates, by the
+ # time it saw "class Comparator", it may think that it's a new class.
+ # Nested templates have a similar problem:
+ # template <
+ # typename ExportedType,
+ # typename TupleType,
+ # template <typename, typename> class ImplTemplate>
+ #
+ # To avoid these cases, we ignore classes that are followed by '=' or '>'
+ class_decl_match = Match(
+ r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
+ r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
+ r'(([^=>]|<[^<>]*>)*)$', line)
+ if (class_decl_match and
+ (not self.stack or self.stack[-1].open_parentheses == 0)):
+ self.stack.append(_ClassInfo(
+ class_decl_match.group(4), class_decl_match.group(2),
+ clean_lines, linenum))
+ line = class_decl_match.group(5)
+
+ # If we have not yet seen the opening brace for the innermost block,
+ # run checks here.
+ if not self.SeenOpenBrace():
+ self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
+
+ # Update access control if we are inside a class/struct
+ if self.stack and isinstance(self.stack[-1], _ClassInfo):
+ access_match = Match(r'\s*(public|private|protected)\s*:', line)
+ if access_match:
+ self.stack[-1].access = access_match.group(1)
+
+ # Consume braces or semicolons from what's left of the line
+ while True:
+ # Match first brace, semicolon, or closed parenthesis.
+ matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
+ if not matched:
+ break
+
+ token = matched.group(1)
+ if token == '{':
+ # If the namespace or class hasn't seen an opening brace yet, mark
+ # the namespace/class head as complete. Otherwise push a new block
+ # onto the stack.
+ if not self.SeenOpenBrace():
+ self.stack[-1].seen_open_brace = True
+ else:
+ self.stack.append(_BlockInfo(True))
+ if _MATCH_ASM.match(line):
+ self.stack[-1].inline_asm = _BLOCK_ASM
+ elif token == ';' or token == ')':
+ # If we haven't seen an opening brace yet, but we already saw
+ # a semicolon, this is probably a forward declaration. Pop
+ # the stack for these.
+ #
+ # Similarly, if we haven't seen an opening brace yet, but we
+ # already saw a closing parenthesis, then these are probably
+ # function arguments with extra "class" or "struct" keywords.
+ # Also pop the stack for these.
+ if not self.SeenOpenBrace():
+ self.stack.pop()
+ else: # token == '}'
+ # Perform end of block checks and pop the stack.
+ if self.stack:
+ self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
+ self.stack.pop()
+ line = matched.group(2)
+
+ def InnermostClass(self):
+ """Get class info on the top of the stack.
+
+ Returns:
+ A _ClassInfo object if we are inside a class, or None otherwise.
+ """
+ for i in range(len(self.stack), 0, -1):
+ classinfo = self.stack[i - 1]
+ if isinstance(classinfo, _ClassInfo):
+ return classinfo
+ return None
+
+ def CheckClassFinished(self, filename, error):
+ """Checks that all classes have been completely parsed.
+
+ Call this when all lines in a file have been processed.
+ Args:
+ filename: The name of the current file.
+ error: The function to call with any errors found.
+ """
+ # Note: This test can result in false positives if #ifdef constructs
+ # get in the way of brace matching. See the testBuildClass test in
+ # cpplint_unittest.py for an example of this.
+ for obj in self.stack:
+ if isinstance(obj, _ClassInfo):
+ error(filename, obj.starting_linenum, 'build/class', 5,
+ 'Failed to find complete declaration of class %s' %
+ obj.name)
+
+
+def CheckForNonStandardConstructs(filename, clean_lines, linenum,
+ nesting_state, error):
+ """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+
+ Complain about several constructs which gcc-2 accepts, but which are
+ not standard C++. Warning about these in lint is one way to ease the
+ transition to new compilers.
+ - put storage class first (e.g. "static const" instead of "const static").
+ - "%lld" instead of "%qd" in printf-type functions.
+ - "%1$d" is non-standard in printf-type functions.
+ - "\%" is an undefined character escape sequence.
+ - text after #endif is not allowed.
+ - invalid inner-style forward declaration.
+ - >? and <? operators, and their >?= and <?= cousins.
+
+ Additionally, check for constructor/destructor style violations and reference
+ members, as it is very convenient to do so while checking for
+ gcc-2 compliance.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ filename, line number, error level, and message
+ """
+
+ # Remove comments from the line, but leave in strings for now.
+ line = clean_lines.lines[linenum]
+
+ if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+ error(filename, linenum, 'runtime/printf_format', 3,
+ '%q in format strings is deprecated. Use %ll instead.')
+
+ if Search(r'printf\s*\(.*".*%\d+\$', line):
+ error(filename, linenum, 'runtime/printf_format', 2,
+ '%N$ formats are unconventional. Try rewriting to avoid them.')
+
+ # Remove escaped backslashes before looking for undefined escapes.
+ line = line.replace('\\\\', '')
+
+ if Search(r'("|\').*\\(%|\[|\(|{)', line):
+ error(filename, linenum, 'build/printf_format', 3,
+ '%, [, (, and { are undefined character escapes. Unescape them.')
+
+ # For the rest, work with both comments and strings removed.
+ line = clean_lines.elided[linenum]
+
+ if Search(r'\b(const|volatile|void|char|short|int|long'
+ r'|float|double|signed|unsigned'
+ r'|schar|u?int8|u?int16|u?int32|u?int64)'
+ r'\s+(register|static|extern|typedef)\b',
+ line):
+ error(filename, linenum, 'build/storage_class', 5,
+ 'Storage class (static, extern, typedef, etc) should be first.')
+
+ if Match(r'\s*#\s*endif\s*[^/\s]+', line):
+ error(filename, linenum, 'build/endif_comment', 5,
+ 'Uncommented text after #endif is non-standard. Use a comment.')
+
+ if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+ error(filename, linenum, 'build/forward_decl', 5,
+ 'Inner-style forward declarations are invalid. Remove this line.')
+
+ if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
+ line):
+ error(filename, linenum, 'build/deprecated', 3,
+ '>? and <? (max and min) operators are non-standard and deprecated.')
+
+ if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
+ # TODO(unknown): Could it be expanded safely to arbitrary references,
+ # without triggering too many false positives? The first
+ # attempt triggered 5 warnings for mostly benign code in the regtest, hence
+ # the restriction.
+ # Here's the original regexp, for the reference:
+ # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
+ # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
+ error(filename, linenum, 'runtime/member_string_references', 2,
+ 'const string& members are dangerous. It is much better to use '
+ 'alternatives, such as pointers or simple constants.')
+
+ # Everything else in this function operates on class declarations.
+ # Return early if the top of the nesting stack is not a class, or if
+ # the class head is not completed yet.
+ classinfo = nesting_state.InnermostClass()
+ if not classinfo or not classinfo.seen_open_brace:
+ return
+
+ # The class may have been declared with namespace or classname qualifiers.
+ # The constructor and destructor will not have those qualifiers.
+ base_classname = classinfo.name.split('::')[-1]
+
+ # Look for single-argument constructors that aren't marked explicit.
+ # Technically a valid construct, but against style.
+ args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
+ % re.escape(base_classname),
+ line)
+ if (args and
+ args.group(1) != 'void' and
+ not Match(r'(const\s+)?%s\s*(?:<\w+>\s*)?&' % re.escape(base_classname),
+ args.group(1).strip())):
+ error(filename, linenum, 'runtime/explicit', 5,
+ 'Single-argument constructors should be marked explicit.')
+
+
+def CheckSpacingForFunctionCall(filename, line, linenum, error):
+ """Checks for the correctness of various spacing around function calls.
+
+ Args:
+ filename: The name of the current file.
+ line: The text of the line to check.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Since function calls often occur inside if/for/while/switch
+ # expressions - which have their own, more liberal conventions - we
+ # first see if we should be looking inside such an expression for a
+ # function call, to which we can apply more strict standards.
+ fncall = line # if there's no control flow construct, look at whole line
+ for pattern in (r'\bif\s*\((.*)\)\s*{',
+ r'\bfor\s*\((.*)\)\s*{',
+ r'\bwhile\s*\((.*)\)\s*[{;]',
+ r'\bswitch\s*\((.*)\)\s*{'):
+ match = Search(pattern, line)
+ if match:
+ fncall = match.group(1) # look inside the parens for function calls
+ break
+
+ # Except in if/for/while/switch, there should never be space
+ # immediately inside parens (eg "f( 3, 4 )"). We make an exception
+ # for nested parens ( (a+b) + c ). Likewise, there should never be
+ # a space before a ( when it's a function argument. I assume it's a
+ # function argument when the char before the whitespace is legal in
+ # a function name (alnum + _) and we're not starting a macro. Also ignore
+ # pointers and references to arrays and functions because they're too tricky:
+ # we use a very simple way to recognize these:
+ # " (something)(maybe-something)" or
+ # " (something)(maybe-something," or
+ # " (something)[something]"
+ # Note that we assume the contents of [] to be short enough that
+ # they'll never need to wrap.
+ if ( # Ignore control structures.
+ not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
+ # Ignore pointers/references to functions.
+ not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
+ # Ignore pointers/references to arrays.
+ not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
+ if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space after ( in function call')
+ elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space after (')
+ if (Search(r'\w\s+\(', fncall) and
+ not Search(r'#\s*define|typedef', fncall) and
+ not Search(r'\w\s+\((\w+::)?\*\w+\)\(', fncall)):
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space before ( in function call')
+ # If the ) is followed only by a newline or a { + newline, assume it's
+ # part of a control statement (if/while/etc), and don't complain
+ if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
+ # If the closing parenthesis is preceded by only whitespaces,
+ # try to give a more descriptive error message.
+ if Search(r'^\s+\)', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Closing ) should be moved to the previous line')
+ else:
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space before )')
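+ # Hedged examples of inputs these checks are meant to catch:
+ # f( 3, 4 ); // 'Extra space after ( in function call' (and before ')')
+ # f (3, 4); // 'Extra space before ( in function call'
+ # f(3, 4 ); // 'Extra space before )'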
+
+
+def IsBlankLine(line):
+ """Returns true if the given line is blank.
+
+ We consider a line to be blank if it is empty or consists only of
+ whitespace.
+
+ Args:
+ line: A single line of text (a string).
+
+ Returns:
+ True, if the given line is blank.
+ """
+ return not line or line.isspace()
+
+
+def CheckForFunctionLengths(filename, clean_lines, linenum,
+ function_state, error):
+ """Reports for long function bodies.
+
+ For an overview of why this is done, see:
+ http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
+
+ Uses a simplistic algorithm assuming other style guidelines
+ (especially spacing) are followed.
+ Only checks unindented functions, so class members are unchecked.
+ Trivial bodies are unchecked, so constructors with huge initializer lists
+ may be missed.
+ Blank/comment lines are not counted so as to avoid encouraging the removal
+ of vertical space and comments just to get through a lint check.
+ NOLINT *on the last line of a function* disables this check.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ lines = clean_lines.lines
+ line = lines[linenum]
+ raw = clean_lines.raw_lines
+ raw_line = raw[linenum]
+ joined_line = ''
+
+ starting_func = False
+ regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
+ match_result = Match(regexp, line)
+ if match_result:
+ # If the name is all caps and underscores, figure it's a macro and
+ # ignore it, unless it's TEST or TEST_F.
+ function_name = match_result.group(1).split()[-1]
+ if function_name == 'TEST' or function_name == 'TEST_F' or (
+ not Match(r'[A-Z_]+$', function_name)):
+ starting_func = True
+
+ if starting_func:
+ body_found = False
+ for start_linenum in xrange(linenum, clean_lines.NumLines()):
+ start_line = lines[start_linenum]
+ joined_line += ' ' + start_line.lstrip()
+ if Search(r'(;|})', start_line): # Declarations and trivial functions
+ body_found = True
+ break # ... ignore
+ elif Search(r'{', start_line):
+ body_found = True
+ function = Search(r'((\w|:)*)\(', line).group(1)
+ if Match(r'TEST', function): # Handle TEST... macros
+ parameter_regexp = Search(r'(\(.*\))', joined_line)
+ if parameter_regexp: # Ignore bad syntax
+ function += parameter_regexp.group(1)
+ else:
+ function += '()'
+ function_state.Begin(function)
+ break
+ if not body_found:
+ # No body for the function (or evidence of a non-function) was found.
+ error(filename, linenum, 'readability/fn_size', 5,
+ 'Lint failed to find start of function body.')
+ elif Match(r'^\}\s*$', line): # function end
+ function_state.Check(error, filename, linenum)
+ function_state.End()
+ elif not Match(r'^\s*$', line):
+ function_state.Count() # Count non-blank/non-comment lines.
+
+
+_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
+
+
+def CheckComment(comment, filename, linenum, error):
+ """Checks for common mistakes in TODO comments.
+
+ Args:
+ comment: The text of the comment from the line in question.
+ filename: The name of the current file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ match = _RE_PATTERN_TODO.match(comment)
+ if match:
+ # One space is correct; the zero-space case is handled elsewhere.
+ leading_whitespace = match.group(1)
+ if len(leading_whitespace) > 1:
+ error(filename, linenum, 'whitespace/todo', 2,
+ 'Too many spaces before TODO')
+
+ username = match.group(2)
+ if not username:
+ error(filename, linenum, 'readability/todo', 2,
+ 'Missing username in TODO; it should look like '
+ '"// TODO(my_username): Stuff."')
+
+ middle_whitespace = match.group(3)
+ # Comparisons made explicit for correctness -- pylint: disable-msg=C6403
+ if middle_whitespace != ' ' and middle_whitespace != '':
+ error(filename, linenum, 'whitespace/todo', 2,
+ 'TODO(my_username) should be followed by a space')
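+ # Illustrative comments and the expected verdicts, assuming the
+ # _RE_PATTERN_TODO regexp above:
+ # //   TODO(user): fix -> 'Too many spaces before TODO'
+ # // TODO: fix -> 'Missing username in TODO'
+ # // TODO(user):fix -> missing space after TODO(...)
+ # // TODO(user): fix -> no warning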
+
+
+def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
+ """Checks for improper use of DISALLOW* macros.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
+ r'DISALLOW_EVIL_CONSTRUCTORS|'
+ r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
+ if not matched:
+ return
+ if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
+ if nesting_state.stack[-1].access != 'private':
+ error(filename, linenum, 'readability/constructors', 3,
+ '%s must be in the private: section' % matched.group(1))
+
+ else:
+ # Found DISALLOW* macro outside a class declaration, or perhaps it
+ # was used inside a function when it should have been part of the
+ # class declaration. We could issue a warning here, but it
+ # probably resulted in a compiler error already.
+ pass
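+ # For illustration (hypothetical class), this placement would be flagged:
+ # class Foo {
+ # public:
+ # DISALLOW_COPY_AND_ASSIGN(Foo); // must be in the private: section
+ # };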
+
+
+def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
+ """Find the corresponding > to close a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_suffix: Remainder of the current line after the initial <.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_suffix
+ nesting_stack = ['<']
+ while True:
+ # Find the next operator that can tell us whether < is used as an
+ # opening bracket or as a less-than operator. We only want to
+ # warn on the latter case.
+ #
+ # We could also check all other operators and terminate the search
+ # early, e.g. if we got something like this "a<b+c", the "<" is
+ # most likely a less-than operator, but then we will get false
+ # positives for default arguments (e.g. http://go/prccd) and
+ # other template expressions (e.g. http://go/oxcjq).
+ match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(1)
+ line = match.group(2)
+
+ if nesting_stack[-1] == '<':
+ # Expecting closing angle bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator == '>':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma after a bracket, this is most likely a template
+ # argument. We have not seen a closing angle bracket yet, but
+ # it's probably a few lines later if we look for it, so just
+ # return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting closing parenthesis or closing bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator in (')', ']'):
+ # We don't bother checking for matching () or []. If we got
+ # something like (] or [), it would have been a syntax error.
+ nesting_stack.pop()
+
+ else:
+ # Scan the next line
+ linenum += 1
+ if linenum >= len(clean_lines.elided):
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all remaining lines and still no matching angle bracket.
+ # Most likely the input was incomplete, otherwise we should have
+ # seen a semicolon and returned early.
+ return True
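+ # Hedged examples of how this resolves, given the text after an initial '<':
+ # "set<int> foo;" -> the matching '>' is found, returns True
+ # "if (a < b)" -> hits ')' while expecting '>', returns False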
+
+
+def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
+ """Find the corresponding < that started a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_prefix: Part of the current line before the initial >.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_prefix
+ nesting_stack = ['>']
+ while True:
+ # Find the previous operator
+ match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(2)
+ line = match.group(1)
+
+ if nesting_stack[-1] == '>':
+ # Expecting opening angle bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator == '<':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma before a bracket, this is most likely a
+ # template argument. The opening angle bracket is probably
+ # there if we look for it, so just return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting opening parenthesis or opening bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator in ('(', '['):
+ nesting_stack.pop()
+
+ else:
+ # Scan the previous line
+ linenum -= 1
+ if linenum < 0:
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all earlier lines and still no matching angle bracket.
+ return False
+
+
+def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
+ """Checks for the correctness of various spacing issues in the code.
+
+ Things we check for: spaces around operators, spaces after
+ if/for/while/switch, no spaces around parens in function calls, two
+ spaces between code and comment, don't start a block with a blank
+ line, don't end a function with a blank line, don't add a blank line
+ after public/protected/private, don't have too many blank lines in a row.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+
+ raw = clean_lines.raw_lines
+ line = raw[linenum]
+
+ # Before nixing comments, check if the line is blank for no good
+ # reason. This includes the first line after a block is opened, and
+ # blank lines at the end of a function (i.e., right before a line like '}').
+ #
+ # Skip all the blank line checks if we are immediately inside a
+ # namespace body. In other words, don't issue blank line warnings
+ # for this block:
+ # namespace {
+ #
+ # }
+ #
+ # A warning about missing end of namespace comments will be issued instead.
+ if IsBlankLine(line) and not nesting_state.InNamespaceBody():
+ elided = clean_lines.elided
+ prev_line = elided[linenum - 1]
+ prevbrace = prev_line.rfind('{')
+ # TODO(unknown): Don't complain if line before blank line, and line after,
+ # both start with alnums and are indented the same amount.
+ # This ignores whitespace at the start of a namespace block
+ # because those are not usually indented.
+ if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
+ # OK, we have a blank line at the start of a code block. Before we
+ # complain, we check if it is an exception to the rule: The previous
+ # non-empty line has the parameters of a function header that are indented
+ # 4 spaces (because they did not fit in an 80 column line when placed on
+ # the same line as the function name). We also check for the case where
+ # the previous line is indented 6 spaces, which may happen when the
+ # initializers of a constructor do not fit into an 80 column line.
+ exception = False
+ if Match(r' {6}\w', prev_line): # Initializer list?
+ # We are looking for the opening column of initializer list, which
+ # should be indented 4 spaces to cause 6 space indentation afterwards.
+ search_position = linenum-2
+ while (search_position >= 0
+ and Match(r' {6}\w', elided[search_position])):
+ search_position -= 1
+ exception = (search_position >= 0
+ and elided[search_position][:5] == ' :')
+ else:
+ # Search for the function arguments or an initializer list. We use a
+ # simple heuristic here: if the line is indented 4 spaces and we have a
+ # closing paren, without the opening paren, followed by an opening brace
+ # or colon (for initializer lists), we assume that it is the last line of
+ # a function header. If we have a colon indented 4 spaces, it is an
+ # initializer list.
+ exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+ prev_line)
+ or Match(r' {4}:', prev_line))
+
+ if not exception:
+ error(filename, linenum, 'whitespace/blank_line', 2,
+ 'Blank line at the start of a code block. Is this needed?')
+ # Ignore blank lines at the end of a block in a long if-else
+ # chain, like this:
+ # if (condition1) {
+ # // Something followed by a blank line
+ #
+ # } else if (condition2) {
+ # // Something else
+ # }
+ if linenum + 1 < clean_lines.NumLines():
+ next_line = raw[linenum + 1]
+ if (next_line
+ and Match(r'\s*}', next_line)
+ and next_line.find('} else ') == -1):
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ 'Blank line at the end of a code block. Is this needed?')
+
+ matched = Match(r'\s*(public|protected|private):', prev_line)
+ if matched:
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ 'Do not leave a blank line after "%s:"' % matched.group(1))
+
+ # Next, we complain if there's a comment too near the text
+ commentpos = line.find('//')
+ if commentpos != -1:
+ # Check if the // may be in quotes. If so, ignore it
+ # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
+ if (line.count('"', 0, commentpos) -
+ line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
+ # Allow one space for new scopes, two spaces otherwise:
+ if (not Match(r'^\s*{ //', line) and
+ ((commentpos >= 1 and
+ line[commentpos-1] not in string.whitespace) or
+ (commentpos >= 2 and
+ line[commentpos-2] not in string.whitespace))):
+ error(filename, linenum, 'whitespace/comments', 2,
+ 'At least two spaces is best between code and comments')
+ # There should always be a space between the // and the comment
+ commentend = commentpos + 2
+ if commentend < len(line) and line[commentend] != ' ':
+ # but some lines are exceptions -- e.g. if they're big
+ # comment delimiters like:
+ # //----------------------------------------------------------
+ # or are an empty C++ style Doxygen comment, like:
+ # ///
+ # or they begin with multiple slashes followed by a space:
+ # //////// Header comment
+ match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
+ Search(r'^/$', line[commentend:]) or
+ Search(r'^/+ ', line[commentend:]))
+ if not match:
+ error(filename, linenum, 'whitespace/comments', 4,
+ 'Should have a space between // and comment')
+ CheckComment(line[commentpos:], filename, linenum, error)
+
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ # Don't try to do spacing checks for operator methods
+ line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
+
+ # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
+ # Otherwise not. Note we only check for non-spaces on *both* sides;
+ # sometimes people put non-spaces on one side when aligning ='s among
+ # many lines (not that this is behavior that I approve of...)
+ if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Missing spaces around =')
+
+ # It's ok not to have spaces around binary operators like + - * /, but if
+ # there's too little whitespace, we get concerned. It's hard to tell,
+ # though, so we punt on this one for now. TODO.
+
+ # You should always have whitespace around binary operators.
+ #
+ # Check <= and >= first to avoid false positives with < and >, then
+ # check non-include lines for spacing around < and >.
+ match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around %s' % match.group(1))
+ # We allow no-spaces around << when used like this: 10<<20, but
+ # not otherwise (particularly, not when used as streams)
+ match = Search(r'(\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
+ if match and not (match.group(1).isdigit() and match.group(2).isdigit()):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <<')
+ elif not Match(r'#.*include', line):
+ # Avoid false positives on ->
+ reduced_line = line.replace('->', '')
+
+ # Look for < that is not surrounded by spaces. This is only
+ # triggered if both sides are missing spaces, even though
+ # technically we should flag if at least one side is missing a
+ # space. This is done to avoid some false positives with shifts.
+ match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
+ if (match and
+ not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <')
+
+ # Look for > that is not surrounded by spaces. Similar to the
+ # above, we only trigger if both sides are missing spaces to avoid
+ # false positives with shifts.
+ match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
+ if (match and
+ not FindPreviousMatchingAngleBracket(clean_lines, linenum,
+ match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >')
+
+ # We allow no-spaces around >> for almost anything. This is because
+ # C++11 allows ">>" to close nested templates, which accounts for
+ # most cases when ">>" is not followed by a space.
+ #
+ # We still warn on ">>" followed by alpha character, because that is
+ # likely due to ">>" being used for right shifts, e.g.:
+ # value >> alpha
+ #
+ # When ">>" is used to close templates, the alphanumeric letter that
+ # follows would be part of an identifier, and there should still be
+ # a space separating the template type and the identifier.
+ # type<type<type>> alpha
+ match = Search(r'>>[a-zA-Z_]', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >>')
+
+ # There shouldn't be space around unary operators
+ match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Extra space for operator %s' % match.group(1))
+
+ # A pet peeve of mine: no spaces after an if, while, switch, or for
+ match = Search(r' (if\(|for\(|while\(|switch\()', line)
+ if match:
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Missing space before ( in %s' % match.group(1))
+
+ # For if/for/while/switch, the left and right parens should be
+ # consistent about how many spaces are inside the parens, and
+ # there should either be zero or one spaces inside the parens.
+ # We don't want: "if ( foo)" or "if ( foo )".
+ # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
+ match = Search(r'\b(if|for|while|switch)\s*'
+ r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
+ line)
+ if match:
+ if len(match.group(2)) != len(match.group(4)):
+ if not (match.group(3) == ';' and
+ len(match.group(2)) == 1 + len(match.group(4)) or
+ not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Mismatching spaces inside () in %s' % match.group(1))
+ if len(match.group(2)) not in (0, 1):
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Should have zero or one spaces inside ( and ) in %s' %
+ match.group(1))
+
+ # You should always have a space after a comma (either as fn arg or operator)
+ if Search(r',[^\s]', line):
+ error(filename, linenum, 'whitespace/comma', 3,
+ 'Missing space after ,')
+
+ # You should always have a space after a semicolon
+ # except for few corner cases
+ # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
+ # space after ;
+ if Search(r';[^\s};\\)/]', line):
+ error(filename, linenum, 'whitespace/semicolon', 3,
+ 'Missing space after ;')
+
+ # Next we will look for issues with function calls.
+ CheckSpacingForFunctionCall(filename, line, linenum, error)
+
+ # Except after an opening paren, or after another opening brace (in case of
+ # an initializer list, for instance), you should have spaces before your
+ # braces. And since you should never have braces at the beginning of a line,
+ # this is an easy test.
+ if Search(r'[^ ({]{', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before {')
+
+ # Make sure '} else {' has spaces.
+ if Search(r'}else', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before else')
+
+ # You shouldn't have spaces before your brackets, except maybe after
+ # 'delete []' or 'new char * []'.
+ if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Extra space before [')
+
+ # You shouldn't have a space before a semicolon at the end of the line.
+ # There's a special case for "for" since the style guide allows space before
+ # the semicolon there.
+ if Search(r':\s*;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Semicolon defining empty statement. Use {} instead.')
+ elif Search(r'^\s*;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Line contains only semicolon. If this should be an empty statement, '
+ 'use {} instead.')
+ elif (Search(r'\s+;\s*$', line) and
+ not Search(r'\bfor\b', line)):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Extra space before last semicolon. If this should be an empty '
+ 'statement, use {} instead.')
+
+ # In range-based for, we want spaces before and after the colon, but
+ # not around "::" tokens that might appear.
+ if (Search(r'for *\(.*[^:]:[^: ]', line) or
+ Search(r'for *\(.*[^: ]:[^:]', line)):
+ error(filename, linenum, 'whitespace/forcolon', 2,
+ 'Missing space around colon in range-based for loop')
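+ # Illustrative range-based for loops, assuming the patterns above:
+ # for (auto x: v) {} // flagged: missing space after the colon
+ # for (auto x : v) {} // no warning
+ # for (Foo::Bar b : c) {} // no warning; "::" tokens are tolerated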
+
+
+def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
+ """Checks for additional blank line issues related to sections.
+
+ Currently the only thing checked here is blank line before protected/private.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ class_info: A _ClassInfo objects.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ # Skip checks if the class is small, where small means 25 lines or less.
+ # 25 lines seems like a good cutoff since that's the usual height of
+ # terminals, and any class that can't fit in one screen can't really
+ # be considered "small".
+ #
+ # Also skip checks if we are on the first line. This accounts for
+ # classes that look like
+ # class Foo { public: ... };
+ #
+ # If we didn't find the end of the class, last_line would be zero,
+ # and the check will be skipped by the first condition.
+ if (class_info.last_line - class_info.starting_linenum <= 24 or
+ linenum <= class_info.starting_linenum):
+ return
+
+ matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
+ if matched:
+ # Issue warning if the line before public/protected/private was
+ # not a blank line, but don't do this if the previous line contains
+ # "class" or "struct". This can happen two ways:
+ # - We are at the beginning of the class.
+ # - We are forward-declaring an inner class that is semantically
+ # private, but needed to be public for implementation reasons.
+ # Also ignores cases where the previous line ends with a backslash as can be
+ # common when defining classes in C macros.
+ prev_line = clean_lines.lines[linenum - 1]
+ if (not IsBlankLine(prev_line) and
+ not Search(r'\b(class|struct)\b', prev_line) and
+ not Search(r'\\$', prev_line)):
+ # Try a bit harder to find the beginning of the class. This is to
+ # account for multi-line base-specifier lists, e.g.:
+ # class Derived
+ # : public Base {
+ end_class_head = class_info.starting_linenum
+ for i in range(class_info.starting_linenum, linenum):
+ if Search(r'\{\s*$', clean_lines.lines[i]):
+ end_class_head = i
+ break
+ if end_class_head < linenum - 1:
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ '"%s:" should be preceded by a blank line' % matched.group(1))
+
+
+def GetPreviousNonBlankLine(clean_lines, linenum):
+ """Return the most recent non-blank line and its line number.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file contents.
+ linenum: The number of the line to check.
+
+ Returns:
+ A tuple with two elements. The first element is the contents of the last
+ non-blank line before the current line, or the empty string if this is the
+ first non-blank line. The second is the line number of that line, or -1
+ if this is the first non-blank line.
+ """
+
+ prevlinenum = linenum - 1
+ while prevlinenum >= 0:
+ prevline = clean_lines.elided[prevlinenum]
+ if not IsBlankLine(prevline): # if not a blank line...
+ return (prevline, prevlinenum)
+ prevlinenum -= 1
+ return ('', -1)
+
+
+def CheckBraces(filename, clean_lines, linenum, error):
+ """Looks for misplaced braces (e.g. at the end of line).
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ if Match(r'\s*{\s*$', line):
+ # We allow an open brace to start a line in the case where someone
+ # is using braces in a block to explicitly create a new scope,
+ # which is commonly used to control the lifetime of
+ # stack-allocated variables. We don't detect this perfectly: we
+ # just don't complain if the last non-whitespace character on the
+ # previous non-blank line is ';', ':', '{', or '}', or if the previous
+ # line starts a preprocessor block.
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if (not Search(r'[;:}{]\s*$', prevline) and
+ not Match(r'\s*#', prevline)):
+ error(filename, linenum, 'whitespace/braces', 4,
+ '{ should almost always be at the end of the previous line')
+
+ # An else clause should be on the same line as the preceding closing brace.
+ if Match(r'\s*else\b', line):
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if Match(r'\s*}\s*$', prevline):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'An else should appear on the same line as the preceding }')
+
+ # If braces come on one side of an else, they should be on both.
+ # However, we have to worry about "else if" that spans multiple lines!
+ if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
+ if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
+ # find the ( after the if
+ pos = line.find('else if')
+ pos = line.find('(', pos)
+ if pos > 0:
+ (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
+ if endline[endpos:].find('{') == -1: # must be brace after if
+ error(filename, linenum, 'readability/braces', 5,
+ 'If an else has a brace on one side, it should have it on both')
+ else: # common case: else not followed by a multi-line if
+ error(filename, linenum, 'readability/braces', 5,
+ 'If an else has a brace on one side, it should have it on both')
+
+ # Likewise, the body of an else should never be on the same line as the else
+ if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'Else clause should never be on same line as else (use 2 lines)')
+
+ # In the same way, a do/while should never be on one line
+ if Match(r'\s*do [^\s{]', line):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'do/while clauses should not be on a single line')
+
+ # Braces shouldn't be followed by a ; unless they're defining a struct
+ # or initializing an array.
+ # We can't tell in general, but we can for some common cases.
+ prevlinenum = linenum
+ while True:
+ (prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum)
+ if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'):
+ line = prevline + line
+ else:
+ break
+ if (Search(r'{.*}\s*;', line) and
+ line.count('{') == line.count('}') and
+ not Search(r'struct|class|enum|\s*=\s*{', line)):
+ error(filename, linenum, 'readability/braces', 4,
+ "You don't need a ; after a }")
+
+
+def CheckEmptyLoopBody(filename, clean_lines, linenum, error):
+ """Loop for empty loop body with only a single semicolon.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Search for loop keywords at the beginning of the line. Because only
+ # whitespaces are allowed before the keywords, this will also ignore most
+ # do-while-loops, since those lines should start with a closing brace.
+ line = clean_lines.elided[linenum]
+ if Match(r'\s*(for|while)\s*\(', line):
+ # Find the end of the conditional expression
+ (end_line, end_linenum, end_pos) = CloseExpression(
+ clean_lines, linenum, line.find('('))
+
+ # Output warning if what follows the condition expression is a semicolon.
+ # No warning for all other cases, including whitespace or newline, since we
+ # have a separate check for semicolons preceded by whitespace.
+ if end_pos >= 0 and Match(r';', end_line[end_pos:]):
+ error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
+ 'Empty loop bodies should use {} or continue')
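+ # e.g. (illustrative): "while (Advance());" and "for (i = 0; i < n; i++);"
+ # should be flagged here, while "while (Advance()) {}" should not.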
+
+
+def ReplaceableCheck(operator, macro, line):
+ """Determine whether a basic CHECK can be replaced with a more specific one.
+
+ For example, suggest using CHECK_EQ instead of CHECK(a == b) and
+ similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
+
+ Args:
+ operator: The C++ operator used in the CHECK.
+ macro: The CHECK or EXPECT macro being called.
+ line: The current source line.
+
+ Returns:
+ True if the CHECK can be replaced with a more specific one.
+ """
+
+ # This matches decimal and hex integers, strings, and chars (in that order).
+ match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
+
+ # Expression to match two sides of the operator with something that
+ # looks like a literal, since CHECK(x == iterator) won't compile.
+ # This means we can't catch all the cases where a more specific
+ # CHECK is possible, but it's less annoying than dealing with
+ # extraneous warnings.
+ match_this = (r'\s*' + macro + r'\((\s*' +
+ match_constant + r'\s*' + operator + r'[^<>].*|'
+ r'.*[^<>]' + operator + r'\s*' + match_constant +
+ r'\s*\))')
+
+ # Don't complain about CHECK(x == NULL) or similar because
+ # CHECK_EQ(x, NULL) won't compile (requires a cast).
+ # Also, don't complain about more complex boolean expressions
+ # involving && or || such as CHECK(a == b || c == d).
+ return Match(match_this, line) and not Search(r'NULL|&&|\|\|', line)
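+ # Hedged examples, assuming macro='CHECK' and operator='==':
+ # CHECK(x == 42) -> True (42 looks like a literal, suggest CHECK_EQ)
+ # CHECK(a == b) -> False (no literal; CHECK_EQ(a, b) may not compile)
+ # CHECK(p == NULL) -> False (CHECK_EQ(p, NULL) would require a cast)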
+
+
+def CheckCheck(filename, clean_lines, linenum, error):
+ """Checks the use of CHECK and EXPECT macros.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Decide the set of replacement macros that should be suggested
+ raw_lines = clean_lines.raw_lines
+ current_macro = ''
+ for macro in _CHECK_MACROS:
+ if raw_lines[linenum].find(macro) >= 0:
+ current_macro = macro
+ break
+ if not current_macro:
+ # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
+ return
+
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
+ for operator in ['==', '!=', '>=', '>', '<=', '<']:
+ if ReplaceableCheck(operator, current_macro, line):
+ error(filename, linenum, 'readability/check', 2,
+ 'Consider using %s instead of %s(a %s b)' % (
+ _CHECK_REPLACEMENT[current_macro][operator],
+ current_macro, operator))
+ break
+
+
+def CheckAltTokens(filename, clean_lines, linenum, error):
+ """Check alternative keywords being used in boolean expressions.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Avoid preprocessor lines
+ if Match(r'^\s*#', line):
+ return
+
+ # Last ditch effort to avoid multi-line comments. This will not help
+ # if the comment started before the current line or ended after the
+ # current line, but it catches most of the false positives. At least,
+ # it provides a way to work around this warning for people who use
+ # multi-line comments in preprocessor macros.
+ #
+ # TODO(unknown): remove this once cpplint has better support for
+ # multi-line comments.
+ if line.find('/*') >= 0 or line.find('*/') >= 0:
+ return
+
+ for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
+ error(filename, linenum, 'readability/alt_tokens', 2,
+ 'Use operator %s instead of %s' % (
+ _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
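+ # For illustration, a line like "if (a and b)" should be flagged here with
+ # a suggestion to use && instead, assuming 'and' is one of the alternative
+ # tokens in _ALT_TOKEN_REPLACEMENT.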
+
+
+def GetLineWidth(line):
+ """Determines the width of the line in column positions.
+
+ Args:
+ line: A string, which may be a Unicode string.
+
+ Returns:
+ The width of the line in column positions, accounting for Unicode
+ combining characters and wide characters.
+ """
+ if isinstance(line, unicode):
+ width = 0
+ for uc in unicodedata.normalize('NFC', line):
+ if unicodedata.east_asian_width(uc) in ('W', 'F'):
+ width += 2
+ elif not unicodedata.combining(uc):
+ width += 1
+ return width
+ else:
+ return len(line)
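+ # A hedged example: GetLineWidth(u'\u4e2d') (a single wide CJK character)
+ # should return 2, while GetLineWidth('abc') returns 3.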
+
+
+def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
+ error):
+ """Checks rules from the 'C++ style rules' section of cppguide.html.
+
+ Most of these rules are hard to test (naming, comment style), but we
+ do what we can. In particular we check for 2-space indents, line lengths,
+ tab usage, spaces inside code, etc.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+
+ raw_lines = clean_lines.raw_lines
+ line = raw_lines[linenum]
+
+ if line.find('\t') != -1:
+ error(filename, linenum, 'whitespace/tab', 1,
+ 'Tab found; better to use spaces')
+
+ # One or three blank spaces at the beginning of the line is weird; it's
+ # hard to reconcile that with 2-space indents.
+ # NOTE: here are the conditions rob pike used for his tests. Mine aren't
+ # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
+ # if(RLENGTH > 20) complain = 0;
+ # if(match($0, " +(error|private|public|protected):")) complain = 0;
+ # if(match(prev, "&& *$")) complain = 0;
+ # if(match(prev, "\\|\\| *$")) complain = 0;
+ # if(match(prev, "[\",=><] *$")) complain = 0;
+ # if(match($0, " <<")) complain = 0;
+ # if(match(prev, " +for \\(")) complain = 0;
+ # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
+ initial_spaces = 0
+ cleansed_line = clean_lines.elided[linenum]
+ while initial_spaces < len(line) and line[initial_spaces] == ' ':
+ initial_spaces += 1
+ if line and line[-1].isspace():
+ error(filename, linenum, 'whitespace/end_of_line', 4,
+ 'Line ends in whitespace. Consider deleting these extra spaces.')
+ # There are certain situations in which we allow one space, notably for labels
+ elif ((initial_spaces == 1 or initial_spaces == 3) and
+ not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
+ error(filename, linenum, 'whitespace/indent', 3,
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 2-space indent?')
+ # Labels should always be indented at least one space.
+ elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$',
+ line):
+ error(filename, linenum, 'whitespace/labels', 4,
+ 'Labels should always be indented at least one space. '
+ 'If this is a member-initializer list in a constructor or '
+ 'the base class list in a class definition, the colon should '
+ 'be on the following line.')
+
+ # Check if the line is a header guard.
+ is_header_guard = False
+ if file_extension == 'h':
+ cppvar = GetHeaderGuardCPPVariable(filename)
+ if (line.startswith('#ifndef %s' % cppvar) or
+ line.startswith('#define %s' % cppvar) or
+ line.startswith('#endif // %s' % cppvar)):
+ is_header_guard = True
+ # #include lines and header guards can be long, since there's no clean way to
+ # split them.
+ #
+ # URLs can be long too. It's possible to split these, but it makes them
+ # harder to cut&paste.
+ #
+ # The "$Id:...$" comment may also get very long without it being the
+ # developer's fault.
+ if (not line.startswith('#include') and not is_header_guard and
+ not Match(r'^\s*//.*http(s?)://\S*$', line) and
+ not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
+ line_width = GetLineWidth(line)
+ if line_width > 100:
+ error(filename, linenum, 'whitespace/line_length', 4,
+ 'Lines should very rarely be longer than 100 characters')
+ elif line_width > 80:
+ error(filename, linenum, 'whitespace/line_length', 2,
+ 'Lines should be <= 80 characters long')
+
+ if (cleansed_line.count(';') > 1 and
+ # for loops are allowed two ;'s (and may run over two lines).
+ cleansed_line.find('for') == -1 and
+ (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
+ GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
+ # It's ok to have many commands in a switch case that fits in 1 line
+ not ((cleansed_line.find('case ') != -1 or
+ cleansed_line.find('default:') != -1) and
+ cleansed_line.find('break;') != -1)):
+ error(filename, linenum, 'whitespace/newline', 0,
+ 'More than one command on the same line')
+
+ # Some more style checks
+ CheckBraces(filename, clean_lines, linenum, error)
+ CheckEmptyLoopBody(filename, clean_lines, linenum, error)
+ CheckAccess(filename, clean_lines, linenum, nesting_state, error)
+ CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
+ CheckCheck(filename, clean_lines, linenum, error)
+ CheckAltTokens(filename, clean_lines, linenum, error)
+ classinfo = nesting_state.InnermostClass()
+ if classinfo:
+ CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
+
+
+_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
+_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
+# Matches the first component of a filename delimited by -s and _s. That is:
+# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
+_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
+
+
+def _DropCommonSuffixes(filename):
+ """Drops common suffixes like _test.cc or -inl.h from filename.
+
+ For example:
+ >>> _DropCommonSuffixes('foo/foo-inl.h')
+ 'foo/foo'
+ >>> _DropCommonSuffixes('foo/bar/foo.cc')
+ 'foo/bar/foo'
+ >>> _DropCommonSuffixes('foo/foo_internal.h')
+ 'foo/foo'
+ >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
+ 'foo/foo_unusualinternal'
+
+ Args:
+ filename: The input filename.
+
+ Returns:
+ The filename with the common suffix removed.
+ """
+ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
+ 'inl.h', 'impl.h', 'internal.h'):
+ if (filename.endswith(suffix) and len(filename) > len(suffix) and
+ filename[-len(suffix) - 1] in ('-', '_')):
+ return filename[:-len(suffix) - 1]
+ return os.path.splitext(filename)[0]
+
+
+def _IsTestFilename(filename):
+ """Determines if the given filename has a suffix that identifies it as a test.
+
+ Args:
+ filename: The input filename.
+
+ Returns:
+ True if 'filename' looks like a test, False otherwise.
+ """
+ return (filename.endswith('_test.cc') or
+ filename.endswith('_unittest.cc') or
+ filename.endswith('_regtest.cc'))
+
+
+def _ClassifyInclude(fileinfo, include, is_system):
+ """Figures out what kind of header 'include' is.
+
+ Args:
+ fileinfo: The current file cpplint is running over. A FileInfo instance.
+ include: The path to a #included file.
+ is_system: True if the #include used <> rather than "".
+
+ Returns:
+ One of the _XXX_HEADER constants.
+
+ For example:
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
+ _C_SYS_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
+ _CPP_SYS_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
+ _LIKELY_MY_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
+ ... 'bar/foo_other_ext.h', False)
+ _POSSIBLE_MY_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
+ _OTHER_HEADER
+ """
+ # This is a list of all standard C++ header files, except
+ # those already checked for above.
+ is_stl_h = include in _STL_HEADERS
+ is_cpp_h = is_stl_h or include in _CPP_HEADERS
+
+ if is_system:
+ if is_cpp_h:
+ return _CPP_SYS_HEADER
+ else:
+ return _C_SYS_HEADER
+
+ # If the target file and the include we're checking share a
+ # basename when we drop common extensions, and the include
+ # lives in the same directory, then it's likely to be owned by the target file.
+ target_dir, target_base = (
+ os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
+ include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
+ if target_base == include_base and (
+ include_dir == target_dir or
+ include_dir == os.path.normpath(target_dir + '/../public')):
+ return _LIKELY_MY_HEADER
+
+ # If the target and include share some initial basename
+ # component, it's possible the target is implementing the
+ # include, so it's allowed to be first, but we'll never
+ # complain if it's not there.
+ target_first_component = _RE_FIRST_COMPONENT.match(target_base)
+ include_first_component = _RE_FIRST_COMPONENT.match(include_base)
+ if (target_first_component and include_first_component and
+ target_first_component.group(0) ==
+ include_first_component.group(0)):
+ return _POSSIBLE_MY_HEADER
+
+ return _OTHER_HEADER
+
+
+def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
+ """Check rules that are applicable to #include lines.
+
+ Strings on #include lines are NOT removed from elided line, to make
+ certain tasks easier. However, to prevent false positives, checks
+ applicable to #include lines in CheckLanguage must be put here.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ error: The function to call with any errors found.
+ """
+ fileinfo = FileInfo(filename)
+
+ line = clean_lines.lines[linenum]
+
+ # "include" should use the new style "foo/bar.h" instead of just "bar.h"
+ if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
+ error(filename, linenum, 'build/include', 4,
+ 'Include the directory when naming .h files')
+
+ # We shouldn't include a file more than once. Actually, there are a
+ # handful of instances where doing so is okay, but in general it's not.
+ match = _RE_PATTERN_INCLUDE.search(line)
+ if match:
+ include = match.group(2)
+ is_system = (match.group(1) == '<')
+ if include in include_state:
+ error(filename, linenum, 'build/include', 4,
+ '"%s" already included at %s:%s' %
+ (include, filename, include_state[include]))
+ else:
+ include_state[include] = linenum
+
+ # We want to ensure that headers appear in the right order:
+ # 1) for foo.cc, foo.h (preferred location)
+ # 2) c system files
+ # 3) cpp system files
+ # 4) for foo.cc, foo.h (deprecated location)
+ # 5) other google headers
+ #
+ # We classify each include statement as one of those 5 types
+ # using a number of techniques. The include_state object keeps
+ # track of the highest type seen, and complains if we see a
+ # lower type after that.
+ error_message = include_state.CheckNextIncludeOrder(
+ _ClassifyInclude(fileinfo, include, is_system))
+ if error_message:
+ error(filename, linenum, 'build/include_order', 4,
+ '%s. Should be: %s.h, c system, c++ system, other.' %
+ (error_message, fileinfo.BaseName()))
+ if not include_state.IsInAlphabeticalOrder(include):
+ error(filename, linenum, 'build/include_alpha', 4,
+ 'Include "%s" not in alphabetical order' % include)
+
+ # Look for any of the stream classes that are part of standard C++.
+ match = _RE_PATTERN_INCLUDE.match(line)
+ if match:
+ include = match.group(2)
+ if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
+ # Many unit tests use cout, so we exempt them.
+ if not _IsTestFilename(filename):
+ error(filename, linenum, 'readability/streams', 3,
+ 'Streams are highly discouraged.')
+
+
+def _GetTextInside(text, start_pattern):
+ """Retrieves all the text between matching open and close parentheses.
+
+ Given a string of lines and a regular expression string, retrieve all the text
+ following the expression and between opening punctuation symbols like
+ (, [, or {, and the matching close-punctuation symbol. This handles properly
+ nested occurrences of the punctuation, so for text like
+ printf(a(), b(c()));
+ a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
+ start_pattern must match a string ending with an opening punctuation symbol.
+
+ Args:
+ text: The text to extract from. Its comments and strings must be elided.
+ It may be a single line or span multiple lines.
+ start_pattern: The regexp string indicating where to start extracting
+ the text.
+ Returns:
+ The extracted text.
+ None if either the opening string or ending punctuation could not be found.
+ """
+ # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
+ # rewritten to use _GetTextInside (and currently use inferior regexp matching).
+
+ # Map each opening punctuation symbol to its matching close-punctuation.
+ matching_punctuation = {'(': ')', '{': '}', '[': ']'}
+ closing_punctuation = set(matching_punctuation.itervalues())
+
+ # Find the position to start extracting text.
+ match = re.search(start_pattern, text, re.M)
+ if not match: # start_pattern not found in text.
+ return None
+ start_position = match.end(0)
+
+ assert start_position > 0, (
+ 'start_pattern must end with an opening punctuation.')
+ assert text[start_position - 1] in matching_punctuation, (
+ 'start_pattern must end with an opening punctuation.')
+ # Stack of closing punctuations we expect to have in text after position.
+ punctuation_stack = [matching_punctuation[text[start_position - 1]]]
+ position = start_position
+ while punctuation_stack and position < len(text):
+ if text[position] == punctuation_stack[-1]:
+ punctuation_stack.pop()
+ elif text[position] in closing_punctuation:
+ # A closing punctuation without matching opening punctuations.
+ return None
+ elif text[position] in matching_punctuation:
+ punctuation_stack.append(matching_punctuation[text[position]])
+ position += 1
+ if punctuation_stack:
+ # Opening punctuations left without matching close-punctuations.
+ return None
+ # All punctuation matched.
+ return text[start_position:position - 1]
+
+
+def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
+ error):
+ """Checks rules from the 'C++ language rules' section of cppguide.html.
+
+ Some of these rules are hard to test (function overloading, using
+ uint32 inappropriately), but we do the best we can.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ error: The function to call with any errors found.
+ """
+ # If the line is empty or consists entirely of a comment, no need to
+ # check it.
+ line = clean_lines.elided[linenum]
+ if not line:
+ return
+
+ match = _RE_PATTERN_INCLUDE.search(line)
+ if match:
+ CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
+ return
+
+ # Create an extended_line, which is the concatenation of the current and
+ # next lines, for more effective checking of code that may span more than one
+ # line.
+ if linenum + 1 < clean_lines.NumLines():
+ extended_line = line + clean_lines.elided[linenum + 1]
+ else:
+ extended_line = line
+
+ # Make Windows paths like Unix.
+ fullname = os.path.abspath(filename).replace('\\', '/')
+
+ # TODO(unknown): figure out if they're using default arguments in fn proto.
+
+ # Check for non-const references in functions. This is tricky because &
+ # is also used to take the address of something. We allow <> for templates,
+ # (ignoring whatever is between the angle brackets) and : for classes.
+ # These are complicated re's. They try to capture the following:
+ # paren (for fn-prototype start), typename, &, varname. For the const
+ # version, we accept const either before or after the typename.
+ # Don't check the implementation on the same line.
+ fnline = line.split('{', 1)[0]
+ if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) >
+ len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
+ r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) +
+ len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+',
+ fnline))):
+
+ # We allow non-const references in a few standard places, like functions
+ # called "swap()" or iostream operators like "<<" or ">>". We also filter
+ # out for loops, which lint otherwise mistakenly thinks are functions.
+ if not Search(
+ r'(for|swap|Swap|operator[<>][<>])\s*\(\s*'
+ r'(?:(?:typename\s*)?[\w:]|<.*>)+\s*&',
+ fnline):
+ error(filename, linenum, 'runtime/references', 2,
+ 'Is this a non-const reference? '
+ 'If so, make const or use a pointer.')
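+ # Illustrative prototypes (hypothetical names) for the check above:
+ # void Encode(string &out); // flagged: non-const reference
+ # void Encode(const string &in); // no warning
+ # void swap(Foo &a, Foo &b); // no warning; swap is allowed above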
+
+ # Check to see if they're using a conversion function cast.
+ # I just try to capture the most common basic types, though there are more.
+ # Parameterless conversion functions, such as bool(), are allowed as they are
+ # probably a member operator declaration or default constructor.
+ match = Search(
+ r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
+ r'(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
+ if match:
+ # gMock methods are defined using some variant of MOCK_METHODx(name, type)
+ # where type may be float(), int(string), etc. Without context they are
+ # virtually indistinguishable from int(x) casts. Likewise, gMock's
+ # MockCallback takes a template parameter of the form return_type(arg_type),
+ # which looks much like the cast we're trying to detect.
+ if (match.group(1) is None and # If new operator, then this isn't a cast
+ not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
+ Match(r'^\s*MockCallback<.*>', line))):
+ # Try a bit harder to catch gmock lines: the only place where
+ # something looks like an old-style cast is where we declare the
+ # return type of the mocked method, and the only time when we
+ # are missing context is if MOCK_METHOD was split across
+ # multiple lines (for example http://go/hrfhr ), so we only need
+ # to check the previous line for MOCK_METHOD.
+ if (linenum == 0 or
+ not Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(\S+,\s*$',
+ clean_lines.elided[linenum - 1])):
+ error(filename, linenum, 'readability/casting', 4,
+ 'Using deprecated casting style. '
+ 'Use static_cast<%s>(...) instead' %
+ match.group(2))
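+ # e.g. (illustrative): "int x = int(3.5);" should be flagged with a
+ # suggestion to use static_cast<int>(3.5), while "new int(3)" is skipped
+ # because of the new operator.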
+
+ CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
+ 'static_cast',
+ r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
+
+ # This doesn't catch all cases. Consider (const char * const)"hello".
+ #
+ # (char *) "foo" should always be a const_cast (reinterpret_cast won't
+ # compile).
+ if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
+ 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
+ pass
+ else:
+ # Check pointer casts for other than string constants
+ CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
+ 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
+
+ # In addition, we look for people taking the address of a cast. This
+ # is dangerous -- casts can assign to temporaries, so the pointer doesn't
+ # point where you think.
+ if Search(
+ r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
+ error(filename, linenum, 'runtime/casting', 4,
+ ('Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'))
+
+ # Check for people declaring static/global STL strings at the top level.
+ # This is dangerous because the C++ language does not guarantee that
+ # globals with constructors are initialized before the first access.
+ match = Match(
+ r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
+ line)
+ # Make sure it's not a function.
+ # Function template specialization looks like: "string foo<Type>(...".
+ # Class template definitions look like: "string Foo<Type>::Method(...".
+ if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
+ match.group(3)):
+ error(filename, linenum, 'runtime/string', 4,
+ 'For a static/global string constant, use a C style string instead: '
+ '"%schar %s[]".' %
+ (match.group(1), match.group(2)))
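+ # For illustration (hypothetical name), a declaration like
+ # static const string kPath = "/tmp";
+ # should be flagged, suggesting a C style string such as
+ # 'static const char kPath[]'.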
+
+ # Check that we're not using RTTI outside of testing code.
+ if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename):
+ error(filename, linenum, 'runtime/rtti', 5,
+ 'Do not use dynamic_cast<>. If you need to cast within a class '
+ "hierarchy, use static_cast<> to upcast. Google doesn't support "
+ 'RTTI.')
+
+ if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
+ error(filename, linenum, 'runtime/init', 4,
+ 'You seem to be initializing a member variable with itself.')
+
+ if file_extension == 'h':
+ # TODO(unknown): check that 1-arg constructors are explicit.
+ # How to tell it's a constructor?
+ # (handled in CheckForNonStandardConstructs for now)
+ # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
+ # (level 1 error)
+ pass
+
+ # Check if people are using the verboten C basic types. The only exception
+ # we regularly allow is "unsigned short port" for port.
+ if Search(r'\bshort port\b', line):
+ if not Search(r'\bunsigned short port\b', line):
+ error(filename, linenum, 'runtime/int', 4,
+ 'Use "unsigned short" for ports, not "short"')
+ else:
+ match = Search(r'\b(short|long(?! +double)|long long)\b', line)
+ if match:
+ error(filename, linenum, 'runtime/int', 4,
+ 'Use int16/int64/etc, rather than the C type %s' % match.group(1))
+
+ # When snprintf is used, the second argument shouldn't be a literal.
+ match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
+ if match and match.group(2) != '0':
+ # If 2nd arg is zero, snprintf is used to calculate size.
+ error(filename, linenum, 'runtime/printf', 3,
+ 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
+ 'to snprintf.' % (match.group(1), match.group(2)))
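+ # e.g. (illustrative): snprintf(buf, 10, "%d", x) should be flagged,
+ # suggesting sizeof(buf) as the second argument, while snprintf(NULL, 0,
+ # ...) is exempt because it is used to compute a size.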
+
+ # Check if some verboten C functions are being used.
+ if Search(r'\bsprintf\b', line):
+ error(filename, linenum, 'runtime/printf', 5,
+ 'Never use sprintf. Use snprintf instead.')
+ match = Search(r'\b(strcpy|strcat)\b', line)
+ if match:
+ error(filename, linenum, 'runtime/printf', 4,
+ 'Almost always, snprintf is better than %s' % match.group(1))
+
+ if Search(r'\bsscanf\b', line):
+ error(filename, linenum, 'runtime/printf', 1,
+ 'sscanf can be ok, but is slow and can overflow buffers.')
+
+ # Check if some verboten operator overloading is going on
+ # TODO(unknown): catch out-of-line unary operator&:
+ # class X {};
+ # int operator&(const X& x) { return 42; } // unary operator&
+ # The trick is it's hard to tell apart from binary operator&:
+ # class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
+ if Search(r'\boperator\s*&\s*\(\s*\)', line):
+ error(filename, linenum, 'runtime/operator', 4,
+ 'Unary operator& is dangerous. Do not use it.')
+
+ # Check for suspicious usage of "if" like
+ # } if (a == b) {
+ if Search(r'\}\s*if\s*\(', line):
+ error(filename, linenum, 'readability/braces', 4,
+ 'Did you mean "else if"? If not, start a new line for "if".')
+
+ # Check for potential format string bugs like printf(foo).
+ # We constrain the pattern not to pick things like DocidForPrintf(foo).
+ # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
+ # TODO(sugawarayu): Catch the following case. Need to change the calling
+  # convention of the whole function to process multiple lines to handle it.
+ # printf(
+ # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
+ printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
+ if printf_args:
+ match = Match(r'([\w.\->()]+)$', printf_args)
+ if match and match.group(1) != '__VA_ARGS__':
+ function_name = re.search(r'\b((?:string)?printf)\s*\(',
+ line, re.I).group(1)
+ error(filename, linenum, 'runtime/printf', 4,
+ 'Potential format string bug. Do %s("%%s", %s) instead.'
+ % (function_name, match.group(1)))
+
+ # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
+ match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
+  if match and not Match(r"^(''|-?[0-9]+|0x[0-9A-Fa-f]+)$", match.group(2)):
+ error(filename, linenum, 'runtime/memset', 4,
+ 'Did you mean "memset(%s, 0, %s)"?'
+ % (match.group(1), match.group(2)))
+
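+  # Hedged example (added comment): the check only fires when the length
+  # argument is a literal 0, e.g.
+  #   memset(buf, sizeof(buf), 0);  /* writes zero bytes: args transposed */
+  # and it stays quiet for memset(buf, -1, 0), where the numeric fill value
+  # suggests the zero length was intentional.
+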
+ if Search(r'\busing namespace\b', line):
+ error(filename, linenum, 'build/namespaces', 5,
+ 'Do not use namespace using-directives. '
+ 'Use using-declarations instead.')
+
+ # Detect variable-length arrays.
+ match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
+ if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
+ match.group(3).find(']') == -1):
+ # Split the size using space and arithmetic operators as delimiters.
+ # If any of the resulting tokens are not compile time constants then
+ # report the error.
+    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
+ is_const = True
+ skip_next = False
+ for tok in tokens:
+ if skip_next:
+ skip_next = False
+ continue
+
+ if Search(r'sizeof\(.+\)', tok): continue
+ if Search(r'arraysize\(\w+\)', tok): continue
+
+ tok = tok.lstrip('(')
+ tok = tok.rstrip(')')
+ if not tok: continue
+ if Match(r'\d+', tok): continue
+ if Match(r'0[xX][0-9a-fA-F]+', tok): continue
+ if Match(r'k[A-Z0-9]\w*', tok): continue
+ if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
+ if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
+ # A catch all for tricky sizeof cases, including 'sizeof expression',
+ # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
+ # requires skipping the next token because we split on ' ' and '*'.
+ if tok.startswith('sizeof'):
+ skip_next = True
+ continue
+ is_const = False
+ break
+ if not is_const:
+ error(filename, linenum, 'runtime/arrays', 1,
+ 'Do not use variable-length arrays. Use an appropriately named '
+ "('k' followed by CamelCase) compile-time constant for the size.")
+
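+  # Worked example (added comment): for "int buf[2 * kMaxSize];" the size
+  # expression splits into '2' and 'kMaxSize' (empty tokens are skipped);
+  # both look compile-time constant, so no error. For "int buf[count];"
+  # the token 'count' matches none of the patterns, is_const goes False,
+  # and runtime/arrays fires.
+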
+ # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
+ # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
+ # in the class declaration.
+ match = Match(
+ (r'\s*'
+ r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
+ r'\(.*\);$'),
+ line)
+ if match and linenum + 1 < clean_lines.NumLines():
+ next_line = clean_lines.elided[linenum + 1]
+ # We allow some, but not all, declarations of variables to be present
+ # in the statement that defines the class. The [\w\*,\s]* fragment of
+ # the regular expression below allows users to declare instances of
+ # the class or pointers to instances, but not less common types such
+ # as function pointers or arrays. It's a tradeoff between allowing
+ # reasonable code and avoiding trying to parse more C++ using regexps.
+ if not Search(r'^\s*}[\w\*,\s]*;', next_line):
+ error(filename, linenum, 'readability/constructors', 3,
+ match.group(1) + ' should be the last thing in the class')
+
+ # Check for use of unnamed namespaces in header files. Registration
+ # macros are typically OK, so we allow use of "namespace {" on lines
+ # that end with backslashes.
+ if (file_extension == 'h'
+ and Search(r'\bnamespace\s*{', line)
+ and line[-1] != '\\'):
+ error(filename, linenum, 'build/namespaces', 4,
+ 'Do not use unnamed namespaces in header files. See '
+ 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+ ' for more information.')
+
+
+def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
+ error):
+ """Checks for a C-style cast by looking for the pattern.
+
+ This also handles sizeof(type) warnings, due to similarity of content.
+
+ Args:
+ filename: The name of the current file.
+ linenum: The number of the line to check.
+ line: The line of code to check.
+ raw_line: The raw line of code to check, with comments.
+ cast_type: The string for the C++ cast to recommend. This is either
+ reinterpret_cast, static_cast, or const_cast, depending.
+ pattern: The regular expression used to find C-style casts.
+ error: The function to call with any errors found.
+
+ Returns:
+ True if an error was emitted.
+ False otherwise.
+ """
+ match = Search(pattern, line)
+ if not match:
+ return False
+
+ # e.g., sizeof(int)
+ sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
+ if sizeof_match:
+ error(filename, linenum, 'runtime/sizeof', 1,
+ 'Using sizeof(type). Use sizeof(varname) instead if possible')
+ return True
+
+ # operator++(int) and operator--(int)
+ if (line[0:match.start(1) - 1].endswith(' operator++') or
+ line[0:match.start(1) - 1].endswith(' operator--')):
+ return False
+
+ remainder = line[match.end(0):]
+
+ # The close paren is for function pointers as arguments to a function.
+ # eg, void foo(void (*bar)(int));
+ # The semicolon check is a more basic function check; also possibly a
+ # function pointer typedef.
+ # eg, void foo(int); or void foo(int) const;
+ # The equals check is for function pointer assignment.
+ # eg, void *(*foo)(int) = ...
+ # The > is for MockCallback<...> ...
+ #
+ # Right now, this will only catch cases where there's a single argument, and
+ # it's unnamed. It should probably be expanded to check for multiple
+ # arguments with some unnamed.
+ function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)|>))', remainder)
+ if function_match:
+ if (not function_match.group(3) or
+ function_match.group(3) == ';' or
+ ('MockCallback<' not in raw_line and
+ '/*' not in raw_line)):
+ error(filename, linenum, 'readability/function', 3,
+ 'All parameters should be named in a function')
+ return True
+
+ # At this point, all that should be left is actual casts.
+ error(filename, linenum, 'readability/casting', 4,
+ 'Using C-style cast. Use %s<%s>(...) instead' %
+ (cast_type, match.group(1)))
+
+ return True
+
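+# Usage sketch (illustrative, echoing the call site earlier in this file):
+# the return value means "this pattern was handled here", so a caller can
+# try the most specific pattern first and fall through otherwise:
+#   if not CheckCStyleCast(filename, linenum, line, raw_line,
+#                          'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
+#     CheckCStyleCast(filename, linenum, line, raw_line,
+#                     'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)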
+
+_HEADERS_CONTAINING_TEMPLATES = (
+ ('<deque>', ('deque',)),
+ ('<functional>', ('unary_function', 'binary_function',
+ 'plus', 'minus', 'multiplies', 'divides', 'modulus',
+ 'negate',
+ 'equal_to', 'not_equal_to', 'greater', 'less',
+ 'greater_equal', 'less_equal',
+ 'logical_and', 'logical_or', 'logical_not',
+ 'unary_negate', 'not1', 'binary_negate', 'not2',
+ 'bind1st', 'bind2nd',
+ 'pointer_to_unary_function',
+ 'pointer_to_binary_function',
+ 'ptr_fun',
+ 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
+ 'mem_fun_ref_t',
+ 'const_mem_fun_t', 'const_mem_fun1_t',
+ 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
+ 'mem_fun_ref',
+ )),
+ ('<limits>', ('numeric_limits',)),
+ ('<list>', ('list',)),
+ ('<map>', ('map', 'multimap',)),
+ ('<memory>', ('allocator',)),
+ ('<queue>', ('queue', 'priority_queue',)),
+ ('<set>', ('set', 'multiset',)),
+ ('<stack>', ('stack',)),
+ ('<string>', ('char_traits', 'basic_string',)),
+ ('<utility>', ('pair',)),
+ ('<vector>', ('vector',)),
+
+ # gcc extensions.
+ # Note: std::hash is their hash, ::hash is our hash
+ ('<hash_map>', ('hash_map', 'hash_multimap',)),
+ ('<hash_set>', ('hash_set', 'hash_multiset',)),
+ ('<slist>', ('slist',)),
+ )
+
+_RE_PATTERN_STRING = re.compile(r'\bstring\b')
+
+_re_pattern_algorithm_header = []
+for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
+ 'transform'):
+ # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
+ # type::max().
+ _re_pattern_algorithm_header.append(
+ (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
+ _template,
+ '<algorithm>'))
+
+_re_pattern_templates = []
+for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
+ for _template in _templates:
+ _re_pattern_templates.append(
+ (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
+ _template + '<>',
+ _header))
+
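+# Illustrative note (added comment): for the <vector> entry the loop above
+# compiles a pattern equivalent to r'(\<|\b)vector\s*\<', so a line with
+# 'std::vector<int> v;' records ('vector<>', '<vector>') as a required
+# include, while an identifier like 'my_vector(x)' does not match.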
+
+def FilesBelongToSameModule(filename_cc, filename_h):
+ """Check if these two filenames belong to the same module.
+
+  The concept of a 'module' here is as follows:
+ foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
+ same 'module' if they are in the same directory.
+ some/path/public/xyzzy and some/path/internal/xyzzy are also considered
+ to belong to the same module here.
+
+ If the filename_cc contains a longer path than the filename_h, for example,
+ '/absolute/path/to/base/sysinfo.cc', and this file would include
+ 'base/sysinfo.h', this function also produces the prefix needed to open the
+ header. This is used by the caller of this function to more robustly open the
+ header file. We don't have access to the real include paths in this context,
+ so we need this guesswork here.
+
+ Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
+ according to this implementation. Because of this, this function gives
+ some false positives. This should be sufficiently rare in practice.
+
+ Args:
+ filename_cc: is the path for the .cc file
+    filename_h: is the path for the header file
+
+ Returns:
+ Tuple with a bool and a string:
+ bool: True if filename_cc and filename_h belong to the same module.
+ string: the additional prefix needed to open the header file.
+ """
+
+ if not filename_cc.endswith('.cc'):
+ return (False, '')
+ filename_cc = filename_cc[:-len('.cc')]
+ if filename_cc.endswith('_unittest'):
+ filename_cc = filename_cc[:-len('_unittest')]
+ elif filename_cc.endswith('_test'):
+ filename_cc = filename_cc[:-len('_test')]
+ filename_cc = filename_cc.replace('/public/', '/')
+ filename_cc = filename_cc.replace('/internal/', '/')
+
+ if not filename_h.endswith('.h'):
+ return (False, '')
+ filename_h = filename_h[:-len('.h')]
+ if filename_h.endswith('-inl'):
+ filename_h = filename_h[:-len('-inl')]
+ filename_h = filename_h.replace('/public/', '/')
+ filename_h = filename_h.replace('/internal/', '/')
+
+ files_belong_to_same_module = filename_cc.endswith(filename_h)
+ common_path = ''
+ if files_belong_to_same_module:
+ common_path = filename_cc[:-len(filename_h)]
+ return files_belong_to_same_module, common_path
+
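+# Usage sketch (illustrative): given the rules above,
+#   FilesBelongToSameModule('a/path/base/sysinfo_test.cc', 'base/sysinfo.h')
+# returns (True, 'a/path/'), the prefix the caller prepends to reopen the
+# header, while unrelated names return (False, '').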
+
+def UpdateIncludeState(filename, include_state, io=codecs):
+ """Fill up the include_state with new includes found from the file.
+
+ Args:
+ filename: the name of the header to read.
+ include_state: an _IncludeState instance in which the headers are inserted.
+ io: The io factory to use to read the file. Provided for testability.
+
+ Returns:
+    True if a header was successfully added. False otherwise.
+ """
+ headerfile = None
+ try:
+ headerfile = io.open(filename, 'r', 'utf8', 'replace')
+ except IOError:
+ return False
+ linenum = 0
+ for line in headerfile:
+ linenum += 1
+ clean_line = CleanseComments(line)
+ match = _RE_PATTERN_INCLUDE.search(clean_line)
+ if match:
+ include = match.group(2)
+ # The value formatting is cute, but not really used right now.
+ # What matters here is that the key is in include_state.
+ include_state.setdefault(include, '%s:%d' % (filename, linenum))
+ return True
+
+
+def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
+ io=codecs):
+  """Reports missing STL includes.
+
+ This function will output warnings to make sure you are including the headers
+ necessary for the stl containers and functions that you use. We only give one
+ reason to include a header. For example, if you use both equal_to<> and
+  less<> in a .h file, only the one appearing later in the file will be
+  reported as a reason to include <functional>.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ include_state: An _IncludeState instance.
+ error: The function to call with any errors found.
+ io: The IO factory to use to read the header file. Provided for unittest
+ injection.
+ """
+  required = {}  # A map of header name to line number and the template entity.
+ # Example of required: { '<functional>': (1219, 'less<>') }
+
+ for linenum in xrange(clean_lines.NumLines()):
+ line = clean_lines.elided[linenum]
+ if not line or line[0] == '#':
+ continue
+
+ # String is special -- it is a non-templatized type in STL.
+ matched = _RE_PATTERN_STRING.search(line)
+ if matched:
+ # Don't warn about strings in non-STL namespaces:
+ # (We check only the first match per line; good enough.)
+ prefix = line[:matched.start()]
+ if prefix.endswith('std::') or not prefix.endswith('::'):
+ required['<string>'] = (linenum, 'string')
+
+ for pattern, template, header in _re_pattern_algorithm_header:
+ if pattern.search(line):
+ required[header] = (linenum, template)
+
+    # The following check is just a speed-up; no semantics are changed.
+    if '<' not in line:  # Reduces CPU time by skipping lines without '<'.
+ continue
+
+ for pattern, template, header in _re_pattern_templates:
+ if pattern.search(line):
+ required[header] = (linenum, template)
+
+ # The policy is that if you #include something in foo.h you don't need to
+ # include it again in foo.cc. Here, we will look at possible includes.
+ # Let's copy the include_state so it is only messed up within this function.
+ include_state = include_state.copy()
+
+  # Did we find the header for this file (if any) and successfully load it?
+ header_found = False
+
+ # Use the absolute path so that matching works properly.
+ abs_filename = FileInfo(filename).FullName()
+
+ # For Emacs's flymake.
+ # If cpplint is invoked from Emacs's flymake, a temporary file is generated
+ # by flymake and that file name might end with '_flymake.cc'. In that case,
+ # restore original file name here so that the corresponding header file can be
+ # found.
+ # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
+ # instead of 'foo_flymake.h'
+ abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
+
+ # include_state is modified during iteration, so we iterate over a copy of
+ # the keys.
+ header_keys = include_state.keys()
+ for header in header_keys:
+ (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
+ fullpath = common_path + header
+ if same_module and UpdateIncludeState(fullpath, include_state, io):
+ header_found = True
+
+ # If we can't find the header file for a .cc, assume it's because we don't
+ # know where to look. In that case we'll give up as we're not sure they
+ # didn't include it in the .h file.
+ # TODO(unknown): Do a better job of finding .h files so we are confident that
+ # not having the .h file means there isn't one.
+ if filename.endswith('.cc') and not header_found:
+ return
+
+ # All the lines have been processed, report the errors found.
+ for required_header_unstripped in required:
+ template = required[required_header_unstripped][1]
+ if required_header_unstripped.strip('<>"') not in include_state:
+ error(filename, required[required_header_unstripped][0],
+ 'build/include_what_you_use', 4,
+ 'Add #include ' + required_header_unstripped + ' for ' + template)
+
+
+_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
+
+
+def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
+ """Check that make_pair's template arguments are deduced.
+
+ G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
+ specified explicitly, and such use isn't intended in any case.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ raw = clean_lines.raw_lines
+ line = raw[linenum]
+ match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
+ if match:
+ error(filename, linenum, 'build/explicit_make_pair',
+ 4, # 4 = high confidence
+ 'For C++11-compatibility, omit template arguments from make_pair'
+ ' OR use pair directly OR if appropriate, construct a pair directly')
+
+
+def ProcessLine(filename, file_extension, clean_lines, line,
+ include_state, function_state, nesting_state, error,
+ extra_check_functions=[]):
+ """Processes a single line in the file.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The extension (dot not included) of the file.
+ clean_lines: An array of strings, each representing a line of the file,
+ with comments stripped.
+ line: Number of line being processed.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ function_state: A _FunctionState instance which counts function lines, etc.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ filename, line number, error level, and message
+ extra_check_functions: An array of additional check functions that will be
+ run on each source line. Each function takes 4
+ arguments: filename, clean_lines, line, error
+ """
+ raw_lines = clean_lines.raw_lines
+ ParseNolintSuppressions(filename, raw_lines[line], line, error)
+ nesting_state.Update(filename, clean_lines, line, error)
+ if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
+ return
+ CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
+ CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
+ CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
+ CheckLanguage(filename, clean_lines, line, file_extension, include_state,
+ error)
+ CheckForNonStandardConstructs(filename, clean_lines, line,
+ nesting_state, error)
+ CheckPosixThreading(filename, clean_lines, line, error)
+ CheckInvalidIncrement(filename, clean_lines, line, error)
+ CheckMakePairUsesDeduction(filename, clean_lines, line, error)
+ for check_fn in extra_check_functions:
+ check_fn(filename, clean_lines, line, error)
+
+def ProcessFileData(filename, file_extension, lines, error,
+ extra_check_functions=[]):
+ """Performs lint checks and reports any errors to the given error function.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The extension (dot not included) of the file.
+ lines: An array of strings, each representing a line of the file, with the
+ last element being empty if the file is terminated with a newline.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ filename, line number, error level, and message
+ extra_check_functions: An array of additional check functions that will be
+ run on each source line. Each function takes 4
+ arguments: filename, clean_lines, line, error
+ """
+ lines = (['// marker so line numbers and indices both start at 1'] + lines +
+ ['// marker so line numbers end in a known way'])
+
+ include_state = _IncludeState()
+ function_state = _FunctionState()
+ nesting_state = _NestingState()
+
+ ResetNolintSuppressions()
+
+ CheckForCopyright(filename, lines, error)
+
+ if file_extension == 'h':
+ CheckForHeaderGuard(filename, lines, error)
+
+ RemoveMultiLineComments(filename, lines, error)
+ clean_lines = CleansedLines(lines)
+ for line in xrange(clean_lines.NumLines()):
+ ProcessLine(filename, file_extension, clean_lines, line,
+ include_state, function_state, nesting_state, error,
+ extra_check_functions)
+ nesting_state.CheckClassFinished(filename, error)
+
+ CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
+
+ # We check here rather than inside ProcessLine so that we see raw
+ # lines rather than "cleaned" lines.
+ CheckForUnicodeReplacementCharacters(filename, lines, error)
+
+ CheckForNewlineAtEOF(filename, lines, error)
+
+def ProcessFile(filename, vlevel, extra_check_functions=[]):
+ """Does google-lint on a single file.
+
+ Args:
+ filename: The name of the file to parse.
+
+ vlevel: The level of errors to report. Every error of confidence
+ >= verbose_level will be reported. 0 is a good default.
+
+ extra_check_functions: An array of additional check functions that will be
+ run on each source line. Each function takes 4
+ arguments: filename, clean_lines, line, error
+ """
+
+ _SetVerboseLevel(vlevel)
+
+ try:
+ # Support the UNIX convention of using "-" for stdin. Note that
+ # we are not opening the file with universal newline support
+ # (which codecs doesn't support anyway), so the resulting lines do
+ # contain trailing '\r' characters if we are reading a file that
+ # has CRLF endings.
+ # If after the split a trailing '\r' is present, it is removed
+ # below. If it is not expected to be present (i.e. os.linesep !=
+ # '\r\n' as in Windows), a warning is issued below if this file
+ # is processed.
+
+ if filename == '-':
+ lines = codecs.StreamReaderWriter(sys.stdin,
+ codecs.getreader('utf8'),
+ codecs.getwriter('utf8'),
+ 'replace').read().split('\n')
+ else:
+ lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
+
+ carriage_return_found = False
+ # Remove trailing '\r'.
+ for linenum in range(len(lines)):
+ if lines[linenum].endswith('\r'):
+ lines[linenum] = lines[linenum].rstrip('\r')
+ carriage_return_found = True
+
+ except IOError:
+ sys.stderr.write(
+ "Skipping input '%s': Can't open for reading\n" % filename)
+ return
+
+ # Note, if no dot is found, this will give the entire filename as the ext.
+ file_extension = filename[filename.rfind('.') + 1:]
+
+ # When reading from stdin, the extension is unknown, so no cpplint tests
+ # should rely on the extension.
+ if (filename != '-' and file_extension != 'cc' and file_extension != 'h'
+ and file_extension != 'cpp'):
+ sys.stderr.write('Ignoring %s; not a .cc or .h file\n' % filename)
+ else:
+ ProcessFileData(filename, file_extension, lines, Error,
+ extra_check_functions)
+ if carriage_return_found and os.linesep != '\r\n':
+ # Use 0 for linenum since outputting only one error for potentially
+ # several lines.
+ Error(filename, 0, 'whitespace/newline', 1,
+          'One or more unexpected \\r (^M) found; '
+ 'better to use only a \\n')
+
+ sys.stderr.write('Done processing %s\n' % filename)
+
+
+def PrintUsage(message):
+ """Prints a brief usage string and exits, optionally with an error message.
+
+ Args:
+ message: The optional error message.
+ """
+ sys.stderr.write(_USAGE)
+ if message:
+ sys.exit('\nFATAL ERROR: ' + message)
+ else:
+ sys.exit(1)
+
+
+def PrintCategories():
+ """Prints a list of all the error-categories used by error messages.
+
+ These are the categories used to filter messages via --filter.
+ """
+ sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
+ sys.exit(0)
+
+
+def ParseArguments(args):
+ """Parses the command line arguments.
+
+ This may set the output format and verbosity level as side-effects.
+
+ Args:
+    args: The command line arguments.
+
+ Returns:
+ The list of filenames to lint.
+ """
+ try:
+ (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
+ 'counting=',
+ 'filter=',
+ 'root='])
+ except getopt.GetoptError:
+ PrintUsage('Invalid arguments.')
+
+ verbosity = _VerboseLevel()
+ output_format = _OutputFormat()
+ filters = ''
+ counting_style = ''
+
+ for (opt, val) in opts:
+ if opt == '--help':
+ PrintUsage(None)
+ elif opt == '--output':
+      if val not in ('emacs', 'vs7'):
+ PrintUsage('The only allowed output formats are emacs and vs7.')
+ output_format = val
+ elif opt == '--verbose':
+ verbosity = int(val)
+ elif opt == '--filter':
+ filters = val
+ if not filters:
+ PrintCategories()
+ elif opt == '--counting':
+ if val not in ('total', 'toplevel', 'detailed'):
+ PrintUsage('Valid counting options are total, toplevel, and detailed')
+ counting_style = val
+ elif opt == '--root':
+ global _root
+ _root = val
+
+ if not filenames:
+ PrintUsage('No files were specified.')
+
+ _SetOutputFormat(output_format)
+ _SetVerboseLevel(verbosity)
+ _SetFilters(filters)
+ _SetCountingStyle(counting_style)
+
+ return filenames
+
+
+def main():
+ filenames = ParseArguments(sys.argv[1:])
+
+ # Change stderr to write with replacement characters so we don't die
+ # if we try to print something containing non-ASCII characters.
+ sys.stderr = codecs.StreamReaderWriter(sys.stderr,
+ codecs.getreader('utf8'),
+ codecs.getwriter('utf8'),
+ 'replace')
+
+ _cpplint_state.ResetErrorCounts()
+ for filename in filenames:
+ ProcessFile(filename, _cpplint_state.verbose_level)
+ _cpplint_state.PrintErrorCounts()
+
+ sys.exit(_cpplint_state.error_count > 0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/libvpx/tools/diff.py b/libvpx/tools/diff.py
new file mode 100644
index 0000000..a42a4dc
--- /dev/null
+++ b/libvpx/tools/diff.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+## Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+"""Classes for representing diff pieces."""
+
+__author__ = "jkoleszar@google.com"
+
+import re
+
+
+class DiffLines(object):
+ """A container for one half of a diff."""
+
+ def __init__(self, filename, offset, length):
+ self.filename = filename
+ self.offset = offset
+ self.length = length
+ self.lines = []
+ self.delta_line_nums = []
+
+ def Append(self, line):
+ l = len(self.lines)
+ if line[0] != " ":
+ self.delta_line_nums.append(self.offset + l)
+ self.lines.append(line[1:])
+ assert l+1 <= self.length
+
+ def Complete(self):
+ return len(self.lines) == self.length
+
+ def __contains__(self, item):
+ return item >= self.offset and item <= self.offset + self.length - 1
+
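+# Illustrative note (added comment, not in the original file):
+#   half = DiffLines('b/foo.c', offset=10, length=3)
+# covers original lines 10..12, so (11 in half) is True; Append() records
+# the line numbers of non-context ('+' or '-') lines in delta_line_nums.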
+
+class DiffHunk(object):
+ """A container for one diff hunk, consisting of two DiffLines."""
+
+ def __init__(self, header, file_a, file_b, start_a, len_a, start_b, len_b):
+ self.header = header
+ self.left = DiffLines(file_a, start_a, len_a)
+ self.right = DiffLines(file_b, start_b, len_b)
+ self.lines = []
+
+ def Append(self, line):
+ """Adds a line to the DiffHunk and its DiffLines children."""
+ if line[0] == "-":
+ self.left.Append(line)
+ elif line[0] == "+":
+ self.right.Append(line)
+ elif line[0] == " ":
+ self.left.Append(line)
+ self.right.Append(line)
+ else:
+ assert False, ("Unrecognized character at start of diff line "
+ "%r" % line[0])
+ self.lines.append(line)
+
+ def Complete(self):
+ return self.left.Complete() and self.right.Complete()
+
+ def __repr__(self):
+ return "DiffHunk(%s, %s, len %d)" % (
+ self.left.filename, self.right.filename,
+ max(self.left.length, self.right.length))
+
+
+def ParseDiffHunks(stream):
+ """Walk a file-like object, yielding DiffHunks as they're parsed."""
+
+ file_regex = re.compile(r"(\+\+\+|---) (\S+)")
+ range_regex = re.compile(r"@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?")
+ hunk = None
+ while True:
+ line = stream.readline()
+ if not line:
+ break
+
+ if hunk is None:
+ # Parse file names
+ diff_file = file_regex.match(line)
+ if diff_file:
+ if line.startswith("---"):
+ a_line = line
+ a = diff_file.group(2)
+ continue
+ if line.startswith("+++"):
+ b_line = line
+ b = diff_file.group(2)
+ continue
+
+ # Parse offset/lengths
+ diffrange = range_regex.match(line)
+ if diffrange:
+ if diffrange.group(2):
+ start_a = int(diffrange.group(1))
+ len_a = int(diffrange.group(3))
+ else:
+ start_a = 1
+ len_a = int(diffrange.group(1))
+
+ if diffrange.group(5):
+ start_b = int(diffrange.group(4))
+ len_b = int(diffrange.group(6))
+ else:
+ start_b = 1
+ len_b = int(diffrange.group(4))
+
+ header = [a_line, b_line, line]
+ hunk = DiffHunk(header, a, b, start_a, len_a, start_b, len_b)
+ else:
+ # Add the current line to the hunk
+ hunk.Append(line)
+
+ # See if the whole hunk has been parsed. If so, yield it and prepare
+ # for the next hunk.
+ if hunk.Complete():
+ yield hunk
+ hunk = None
+
+ # Partial hunks are a parse error
+ assert hunk is None
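+
+# Usage sketch (illustrative, not part of the original file; patch_text is
+# a hypothetical string holding a unified diff):
+#   import StringIO
+#   import diff
+#   for hunk in diff.ParseDiffHunks(StringIO.StringIO(patch_text)):
+#       print hunk.right.filename, sorted(hunk.right.delta_line_nums)
+# Each "@@ -a,b +c,d @@" header yields one DiffHunk whose left half covers
+# lines a..a+b-1 and whose right half covers lines c..c+d-1.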
diff --git a/libvpx/tools/ftfy.sh b/libvpx/tools/ftfy.sh
index c5cfdea..92059f5 100755
--- a/libvpx/tools/ftfy.sh
+++ b/libvpx/tools/ftfy.sh
@@ -29,12 +29,13 @@ log() {
vpx_style() {
- astyle --style=bsd --min-conditional-indent=0 --break-blocks \
- --pad-oper --pad-header --unpad-paren \
- --align-pointer=name \
- --indent-preprocessor --convert-tabs --indent-labels \
- --suffix=none --quiet "$@"
- sed -i "" 's/[[:space:]]\{1,\},/,/g' "$@"
+ for f; do
+ case "$f" in
+ *.h|*.c|*.cc)
+ "${dirname_self}"/vpx-astyle.sh "$f"
+ ;;
+ esac
+ done
}
@@ -119,8 +120,7 @@ cd "$(git rev-parse --show-toplevel)"
git show > "${ORIG_DIFF}"
# Apply the style guide on new and modified files and collect its diff
-for f in $(git diff HEAD^ --name-only -M90 --diff-filter=AM \
- | grep '\.[ch]$'); do
+for f in $(git diff HEAD^ --name-only -M90 --diff-filter=AM); do
case "$f" in
third_party/*) continue;;
nestegg/*) continue;;
diff --git a/libvpx/tools/intersect-diffs.py b/libvpx/tools/intersect-diffs.py
index be9dea5..4dbafa9 100755
--- a/libvpx/tools/intersect-diffs.py
+++ b/libvpx/tools/intersect-diffs.py
@@ -16,121 +16,9 @@ are relevant to A. The resulting file can be applied with patch(1) on top of A.
__author__ = "jkoleszar@google.com"
-import re
import sys
-
-class DiffLines(object):
- """A container for one half of a diff."""
-
- def __init__(self, filename, offset, length):
- self.filename = filename
- self.offset = offset
- self.length = length
- self.lines = []
- self.delta_line_nums = []
-
- def Append(self, line):
- l = len(self.lines)
- if line[0] != " ":
- self.delta_line_nums.append(self.offset + l)
- self.lines.append(line[1:])
- assert l+1 <= self.length
-
- def Complete(self):
- return len(self.lines) == self.length
-
- def __contains__(self, item):
- return item >= self.offset and item <= self.offset + self.length - 1
-
-
-class DiffHunk(object):
- """A container for one diff hunk, consisting of two DiffLines."""
-
- def __init__(self, header, file_a, file_b, start_a, len_a, start_b, len_b):
- self.header = header
- self.left = DiffLines(file_a, start_a, len_a)
- self.right = DiffLines(file_b, start_b, len_b)
- self.lines = []
-
- def Append(self, line):
- """Adds a line to the DiffHunk and its DiffLines children."""
- if line[0] == "-":
- self.left.Append(line)
- elif line[0] == "+":
- self.right.Append(line)
- elif line[0] == " ":
- self.left.Append(line)
- self.right.Append(line)
- else:
- assert False, ("Unrecognized character at start of diff line "
- "%r" % line[0])
- self.lines.append(line)
-
- def Complete(self):
- return self.left.Complete() and self.right.Complete()
-
- def __repr__(self):
- return "DiffHunk(%s, %s, len %d)" % (
- self.left.filename, self.right.filename,
- max(self.left.length, self.right.length))
-
-
-def ParseDiffHunks(stream):
- """Walk a file-like object, yielding DiffHunks as they're parsed."""
-
- file_regex = re.compile(r"(\+\+\+|---) (\S+)")
- range_regex = re.compile(r"@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?")
- hunk = None
- while True:
- line = stream.readline()
- if not line:
- break
-
- if hunk is None:
- # Parse file names
- diff_file = file_regex.match(line)
- if diff_file:
- if line.startswith("---"):
- a_line = line
- a = diff_file.group(2)
- continue
- if line.startswith("+++"):
- b_line = line
- b = diff_file.group(2)
- continue
-
- # Parse offset/lengths
- diffrange = range_regex.match(line)
- if diffrange:
- if diffrange.group(2):
- start_a = int(diffrange.group(1))
- len_a = int(diffrange.group(3))
- else:
- start_a = 1
- len_a = int(diffrange.group(1))
-
- if diffrange.group(5):
- start_b = int(diffrange.group(4))
- len_b = int(diffrange.group(6))
- else:
- start_b = 1
- len_b = int(diffrange.group(4))
-
- header = [a_line, b_line, line]
- hunk = DiffHunk(header, a, b, start_a, len_a, start_b, len_b)
- else:
- # Add the current line to the hunk
- hunk.Append(line)
-
- # See if the whole hunk has been parsed. If so, yield it and prepare
- # for the next hunk.
- if hunk.Complete():
- yield hunk
- hunk = None
-
- # Partial hunks are a parse error
- assert hunk is None
+import diff
def FormatDiffHunks(hunks):
@@ -162,8 +50,8 @@ def ZipHunks(rhs_hunks, lhs_hunks):
def main():
- old_hunks = [x for x in ParseDiffHunks(open(sys.argv[1], "r"))]
- new_hunks = [x for x in ParseDiffHunks(open(sys.argv[2], "r"))]
+ old_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[1], "r"))]
+ new_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[2], "r"))]
out_hunks = []
# Join the right hand side of the older diff with the left hand side of the
diff --git a/libvpx/tools/lint-hunks.py b/libvpx/tools/lint-hunks.py
new file mode 100755
index 0000000..b15a691
--- /dev/null
+++ b/libvpx/tools/lint-hunks.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+## Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+"""Performs style checking on each diff hunk."""
+import getopt
+import os
+import StringIO
+import subprocess
+import sys
+
+import diff
+
+
+SHORT_OPTIONS = "h"
+LONG_OPTIONS = ["help"]
+
+TOPLEVEL_CMD = ["git", "rev-parse", "--show-toplevel"]
+DIFF_CMD = ["git", "diff"]
+DIFF_INDEX_CMD = ["git", "diff-index", "-u", "HEAD", "--"]
+SHOW_CMD = ["git", "show"]
+CPPLINT_FILTERS = ["-readability/casting", "-runtime/int"]
+
+
+class Usage(Exception):
+ pass
+
+
+class SubprocessException(Exception):
+ def __init__(self, args):
+        msg = "Failed to execute '%s'" % (" ".join(args))
+ super(SubprocessException, self).__init__(msg)
+
+
+class Subprocess(subprocess.Popen):
+ """Adds the notion of an expected returncode to Popen."""
+
+ def __init__(self, args, expected_returncode=0, **kwargs):
+ self._args = args
+ self._expected_returncode = expected_returncode
+ super(Subprocess, self).__init__(args, **kwargs)
+
+ def communicate(self, *args, **kwargs):
+ result = super(Subprocess, self).communicate(*args, **kwargs)
+ if self._expected_returncode is not None:
+ try:
+ ok = self.returncode in self._expected_returncode
+ except TypeError:
+ ok = self.returncode == self._expected_returncode
+ if not ok:
+ raise SubprocessException(self._args)
+ return result
+
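+# Usage sketch (illustrative): cpplint exits 1 when it reports anything, so
+# the lint invocations below pass expected_returncode=(0, 1) to accept both
+# exit codes, e.g.
+#   lint = Subprocess(cpplint_cmd, expected_returncode=(0, 1),
+#                     stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+# while any other status raises SubprocessException.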
+
+def main(argv=None):
+ if argv is None:
+ argv = sys.argv
+ try:
+ try:
+ opts, args = getopt.getopt(argv[1:], SHORT_OPTIONS, LONG_OPTIONS)
+ except getopt.error, msg:
+ raise Usage(msg)
+
+ # process options
+ for o, _ in opts:
+ if o in ("-h", "--help"):
+ print __doc__
+ sys.exit(0)
+
+ if args and len(args) > 1:
+ print __doc__
+ sys.exit(0)
+
+ # Find the fully qualified path to the root of the tree
+ tl = Subprocess(TOPLEVEL_CMD, stdout=subprocess.PIPE)
+ tl = tl.communicate()[0].strip()
+
+ # See if we're working on the index or not.
+ if args:
+ diff_cmd = DIFF_CMD + [args[0] + "^!"]
+ else:
+ diff_cmd = DIFF_INDEX_CMD
+
+ # Build the command line to execute cpplint
+ cpplint_cmd = [os.path.join(tl, "tools", "cpplint.py"),
+ "--filter=" + ",".join(CPPLINT_FILTERS),
+ "-"]
+
+ # Get a list of all affected lines
+ file_affected_line_map = {}
+ p = Subprocess(diff_cmd, stdout=subprocess.PIPE)
+ stdout = p.communicate()[0]
+ for hunk in diff.ParseDiffHunks(StringIO.StringIO(stdout)):
+ filename = hunk.right.filename[2:]
+ if filename not in file_affected_line_map:
+ file_affected_line_map[filename] = set()
+ file_affected_line_map[filename].update(hunk.right.delta_line_nums)
+
+ # Run each affected file through cpplint
+ lint_failed = False
+ for filename, affected_lines in file_affected_line_map.iteritems():
+ if filename.split(".")[-1] not in ("c", "h", "cc"):
+ continue
+
+ if args:
+ # File contents come from git
+ show_cmd = SHOW_CMD + [args[0] + ":" + filename]
+ show = Subprocess(show_cmd, stdout=subprocess.PIPE)
+ lint = Subprocess(cpplint_cmd, expected_returncode=(0, 1),
+ stdin=show.stdout, stderr=subprocess.PIPE)
+ lint_out = lint.communicate()[1]
+ else:
+ # File contents come from the working tree
+ lint = Subprocess(cpplint_cmd, expected_returncode=(0, 1),
+ stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdin = open(os.path.join(tl, filename)).read()
+ lint_out = lint.communicate(stdin)[1]
+
+ for line in lint_out.split("\n"):
+ fields = line.split(":")
+ if fields[0] != "-":
+ continue
+ warning_line_num = int(fields[1])
+ if warning_line_num in affected_lines:
+ print "%s:%d:%s"%(filename, warning_line_num,
+ ":".join(fields[2:]))
+ lint_failed = True
+
+ # Set exit code if any relevant lint errors seen
+ if lint_failed:
+ return 1
+
+ except Usage, err:
+ print >>sys.stderr, err
+ print >>sys.stderr, "for help use --help"
+ return 2
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/libvpx/tools/vpx-astyle.sh b/libvpx/tools/vpx-astyle.sh
new file mode 100755
index 0000000..6340426
--- /dev/null
+++ b/libvpx/tools/vpx-astyle.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+set -e
+astyle --style=java --indent=spaces=2 --indent-switches\
+ --min-conditional-indent=0 \
+ --pad-oper --pad-header --unpad-paren \
+ --align-pointer=name \
+ --indent-preprocessor --convert-tabs --indent-labels \
+ --suffix=none --quiet --max-instatement-indent=80 "$@"
+# Disabled, too greedy?
+#sed -i 's;[[:space:]]\{1,\}\[;[;g' "$@"
+
+sed_i() {
+ # Incompatible sed parameter parsing.
+ if sed -i 2>&1 | grep -q 'requires an argument'; then
+ sed -i '' "$@"
+ else
+ sed -i "$@"
+ fi
+}
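+# Portability note (added comment): BSD sed requires an explicit backup
+# suffix after -i (here the empty string ''), while GNU sed takes -i with
+# no argument; the probe above keys off the "requires an argument" error
+# text, so e.g. `sed_i 's/foo/bar/' file.c` edits in place on either
+# flavor.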
+
+sed_i -e 's/[[:space:]]\{1,\}\([,;]\)/\1/g' \
+ -e 's/[[:space:]]\{1,\}\([+-]\{2\};\)/\1/g' \
+ -e 's/,[[:space:]]*}/}/g' \
+ -e 's;//\([^/[:space:]].*$\);// \1;g' \
+ -e 's/^\(public\|private\|protected\):$/ \1:/g' \
+ -e 's/[[:space:]]\{1,\}$//g' \
+ "$@"
diff --git a/libvpx/tools_common.c b/libvpx/tools_common.c
index 6f95028..92de794 100644
--- a/libvpx/tools_common.c
+++ b/libvpx/tools_common.c
@@ -20,11 +20,10 @@
#endif
#endif
-FILE* set_binary_mode(FILE *stream)
-{
- (void)stream;
+FILE *set_binary_mode(FILE *stream) {
+ (void)stream;
#if defined(_WIN32) || defined(__OS2__)
- _setmode(_fileno(stream), _O_BINARY);
+ _setmode(_fileno(stream), _O_BINARY);
#endif
- return stream;
+ return stream;
}
diff --git a/libvpx/tools_common.h b/libvpx/tools_common.h
index 80c9747..9e56149 100644
--- a/libvpx/tools_common.h
+++ b/libvpx/tools_common.h
@@ -11,6 +11,6 @@
#define TOOLS_COMMON_H
/* Sets a stdio stream into binary mode */
-FILE* set_binary_mode(FILE *stream);
+FILE *set_binary_mode(FILE *stream);
#endif
diff --git a/libvpx/vp8/common/alloccommon.c b/libvpx/vp8/common/alloccommon.c
index 8af9e90..54afc13 100644
--- a/libvpx/vp8/common/alloccommon.c
+++ b/libvpx/vp8/common/alloccommon.c
@@ -173,7 +173,6 @@ void vp8_create_common(VP8_COMMON *oci)
oci->use_bilinear_mc_filter = 0;
oci->full_pixel = 0;
oci->multi_token_partition = ONE_PARTITION;
- oci->clr_type = REG_YUV;
oci->clamp_type = RECON_CLAMP_REQUIRED;
/* Initialize reference frame sign bias structure to defaults */
diff --git a/libvpx/vp8/common/arm/armv6/filter_v6.asm b/libvpx/vp8/common/arm/armv6/filter_v6.asm
index 1ba91dd..eb4b75b 100644
--- a/libvpx/vp8/common/arm/armv6/filter_v6.asm
+++ b/libvpx/vp8/common/arm/armv6/filter_v6.asm
@@ -394,7 +394,7 @@
mov r4, #0x40 ; rounding factor (for smlad{x})
|height_loop_2nd_4|
- ldrd r8, [r0, #-4] ; load the data
+ ldrd r8, r9, [r0, #-4] ; load the data
orr r7, r7, r3, lsr #1 ; loop counter
|width_loop_2nd_4|
diff --git a/libvpx/vp8/common/arm/armv6/idct_blk_v6.c b/libvpx/vp8/common/arm/armv6/idct_blk_v6.c
index 6002c0f..c94f84a 100644
--- a/libvpx/vp8/common/arm/armv6/idct_blk_v6.c
+++ b/libvpx/vp8/common/arm/armv6/idct_blk_v6.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
void vp8_dequant_idct_add_y_block_v6(short *q, short *dq,
diff --git a/libvpx/vp8/common/arm/bilinearfilter_arm.c b/libvpx/vp8/common/arm/bilinearfilter_arm.c
index c63073c..799c8bd 100644
--- a/libvpx/vp8/common/arm/bilinearfilter_arm.c
+++ b/libvpx/vp8/common/arm/bilinearfilter_arm.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include <math.h>
#include "vp8/common/filter.h"
#include "bilinearfilter_arm.h"
diff --git a/libvpx/vp8/common/arm/filter_arm.c b/libvpx/vp8/common/arm/filter_arm.c
index 148951a..7fe3967 100644
--- a/libvpx/vp8/common/arm/filter_arm.c
+++ b/libvpx/vp8/common/arm/filter_arm.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include <math.h>
#include "vp8/common/filter.h"
#include "vpx_ports/mem.h"
diff --git a/libvpx/vp8/common/arm/loopfilter_arm.c b/libvpx/vp8/common/arm/loopfilter_arm.c
index b8f9bd9..3bdc967 100644
--- a/libvpx/vp8/common/arm/loopfilter_arm.c
+++ b/libvpx/vp8/common/arm/loopfilter_arm.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/loopfilter.h"
#include "vp8/common/onyxc_int.h"
diff --git a/libvpx/vp8/common/arm/neon/idct_blk_neon.c b/libvpx/vp8/common/arm/neon/idct_blk_neon.c
index ee7f223..fb327a7 100644
--- a/libvpx/vp8/common/arm/neon/idct_blk_neon.c
+++ b/libvpx/vp8/common/arm/neon/idct_blk_neon.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
/* place these declarations here because we don't want to maintain them
* outside of this scope
diff --git a/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm b/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
index e7a3ed1..9d22c52 100644
--- a/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
+++ b/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
@@ -9,9 +9,6 @@
;
-bilinear_taps_coeff
- DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
-
;-----------------
EXPORT |vp8_sub_pixel_variance16x16_neon_func|
@@ -29,6 +26,9 @@ bilinear_taps_coeff
; stack(r6) unsigned int *sse
;note: most of the code is copied from bilinear_predict16x16_neon and vp8_variance16x16_neon.
+bilinear_taps_coeff
+ DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
+
|vp8_sub_pixel_variance16x16_neon_func| PROC
push {r4-r6, lr}
diff --git a/libvpx/vp8/common/arm/reconintra_arm.c b/libvpx/vp8/common/arm/reconintra_arm.c
index 121e090..2874896 100644
--- a/libvpx/vp8/common/arm/reconintra_arm.c
+++ b/libvpx/vp8/common/arm/reconintra_arm.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"
diff --git a/libvpx/vp8/common/arm/variance_arm.c b/libvpx/vp8/common/arm/variance_arm.c
index 891d767..467a509 100644
--- a/libvpx/vp8/common/arm/variance_arm.c
+++ b/libvpx/vp8/common/arm/variance_arm.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/variance.h"
#include "vp8/common/filter.h"
diff --git a/libvpx/vp8/common/dequantize.c b/libvpx/vp8/common/dequantize.c
index 8eda486..6e2f69a 100644
--- a/libvpx/vp8/common/dequantize.c
+++ b/libvpx/vp8/common/dequantize.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"
diff --git a/libvpx/vp8/common/generic/systemdependent.c b/libvpx/vp8/common/generic/systemdependent.c
index 5a6ac7b..d84df33 100644
--- a/libvpx/vp8/common/generic/systemdependent.c
+++ b/libvpx/vp8/common/generic/systemdependent.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if ARCH_ARM
#include "vpx_ports/arm.h"
#elif ARCH_X86 || ARCH_X86_64
@@ -82,6 +82,7 @@ static int get_cpu_count()
}
#endif
+void vp8_clear_system_state_c() {};
void vp8_machine_specific_config(VP8_COMMON *ctx)
{
diff --git a/libvpx/vp8/common/idct_blk.c b/libvpx/vp8/common/idct_blk.c
index 0b058c7..8edfffb 100644
--- a/libvpx/vp8/common/idct_blk.c
+++ b/libvpx/vp8/common/idct_blk.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
void vp8_dequant_idct_add_c(short *input, short *dq,
unsigned char *dest, int stride);
diff --git a/libvpx/vp8/common/invtrans.h b/libvpx/vp8/common/invtrans.h
index d048665..9262640 100644
--- a/libvpx/vp8/common/invtrans.h
+++ b/libvpx/vp8/common/invtrans.h
@@ -13,7 +13,7 @@
#define __INC_INVTRANS_H
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "blockd.h"
#include "onyxc_int.h"
diff --git a/libvpx/vp8/common/loopfilter.c b/libvpx/vp8/common/loopfilter.c
index 41b4f12..19857a7 100644
--- a/libvpx/vp8/common/loopfilter.c
+++ b/libvpx/vp8/common/loopfilter.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "loopfilter.h"
#include "onyxc_int.h"
#include "vpx_mem/vpx_mem.h"
@@ -156,39 +156,38 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm,
continue;
}
- lvl_ref = lvl_seg;
-
/* INTRA_FRAME */
ref = INTRA_FRAME;
/* Apply delta for reference frame */
- lvl_ref += mbd->ref_lf_deltas[ref];
+ lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref];
/* Apply delta for Intra modes */
mode = 0; /* B_PRED */
/* Only the split mode BPRED has a further special case */
- lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
- lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0; /* clamp */
+ lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
+ /* clamp */
+ lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0;
lfi->lvl[seg][ref][mode] = lvl_mode;
mode = 1; /* all the rest of Intra modes */
- lvl_mode = (lvl_ref > 0) ? (lvl_ref > 63 ? 63 : lvl_ref) : 0; /* clamp */
+ /* clamp */
+ lvl_mode = (lvl_ref > 0) ? (lvl_ref > 63 ? 63 : lvl_ref) : 0;
lfi->lvl[seg][ref][mode] = lvl_mode;
/* LAST, GOLDEN, ALT */
for(ref = 1; ref < MAX_REF_FRAMES; ref++)
{
- int lvl_ref = lvl_seg;
-
/* Apply delta for reference frame */
- lvl_ref += mbd->ref_lf_deltas[ref];
+ lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref];
/* Apply delta for Inter modes */
for (mode = 1; mode < 4; mode++)
{
lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
- lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0; /* clamp */
+ /* clamp */
+ lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0;
lfi->lvl[seg][ref][mode] = lvl_mode;
}
@@ -567,46 +566,28 @@ void vp8_loop_filter_partial_frame
int mb_cols = post->y_width >> 4;
int mb_rows = post->y_height >> 4;
- int linestocopy, i;
+ int linestocopy;
loop_filter_info_n *lfi_n = &cm->lf_info;
loop_filter_info lfi;
int filter_level;
- int alt_flt_enabled = mbd->segmentation_enabled;
FRAME_TYPE frame_type = cm->frame_type;
const MODE_INFO *mode_info_context;
- int lvl_seg[MAX_MB_SEGMENTS];
+#if 0
+ if(default_filt_lvl == 0) /* no filter applied */
+ return;
+#endif
+
+ /* Initialize the loop filter for this frame. */
+ vp8_loop_filter_frame_init( cm, mbd, default_filt_lvl);
/* number of MB rows to use in partial filtering */
linestocopy = mb_rows / PARTIAL_FRAME_FRACTION;
linestocopy = linestocopy ? linestocopy << 4 : 16; /* 16 lines per MB */
- /* Note the baseline filter values for each segment */
- /* See vp8_loop_filter_frame_init. Rather than call that for each change
- * to default_filt_lvl, copy the relevant calculation here.
- */
- if (alt_flt_enabled)
- {
- for (i = 0; i < MAX_MB_SEGMENTS; i++)
- { /* Abs value */
- if (mbd->mb_segement_abs_delta == SEGMENT_ABSDATA)
- {
- lvl_seg[i] = mbd->segment_feature_data[MB_LVL_ALT_LF][i];
- }
- /* Delta Value */
- else
- {
- lvl_seg[i] = default_filt_lvl
- + mbd->segment_feature_data[MB_LVL_ALT_LF][i];
- lvl_seg[i] = (lvl_seg[i] > 0) ?
- ((lvl_seg[i] > 63) ? 63: lvl_seg[i]) : 0;
- }
- }
- }
-
/* Set up the buffer pointers; partial image starts at ~middle of frame */
y_ptr = post->y_buffer + ((post->y_height >> 5) * 16) * post->y_stride;
mode_info_context = cm->mi + (post->y_height >> 5) * (mb_cols + 1);
@@ -620,10 +601,12 @@ void vp8_loop_filter_partial_frame
mode_info_context->mbmi.mode != SPLITMV &&
mode_info_context->mbmi.mb_skip_coeff);
- if (alt_flt_enabled)
- filter_level = lvl_seg[mode_info_context->mbmi.segment_id];
- else
- filter_level = default_filt_lvl;
+ const int mode_index =
+ lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
+ const int seg = mode_info_context->mbmi.segment_id;
+ const int ref_frame = mode_info_context->mbmi.ref_frame;
+
+ filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
if (filter_level)
{
diff --git a/libvpx/vp8/common/loopfilter.h b/libvpx/vp8/common/loopfilter.h
index b3af2d6..1e47f34 100644
--- a/libvpx/vp8/common/loopfilter.h
+++ b/libvpx/vp8/common/loopfilter.h
@@ -14,7 +14,7 @@
#include "vpx_ports/mem.h"
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#define MAX_LOOP_FILTER 63
/* fraction of total macroblock rows to be used in fast filter level picking */
diff --git a/libvpx/vp8/common/loopfilter_filters.c b/libvpx/vp8/common/loopfilter_filters.c
index 8235f6e..1d51696 100644
--- a/libvpx/vp8/common/loopfilter_filters.c
+++ b/libvpx/vp8/common/loopfilter_filters.c
@@ -54,7 +54,7 @@ static void vp8_filter(signed char mask, uc hev, uc *op1,
{
signed char ps0, qs0;
signed char ps1, qs1;
- signed char vp8_filter, Filter1, Filter2;
+ signed char filter_value, Filter1, Filter2;
signed char u;
ps1 = (signed char) * op1 ^ 0x80;
@@ -63,35 +63,35 @@ static void vp8_filter(signed char mask, uc hev, uc *op1,
qs1 = (signed char) * oq1 ^ 0x80;
/* add outer taps if we have high edge variance */
- vp8_filter = vp8_signed_char_clamp(ps1 - qs1);
- vp8_filter &= hev;
+ filter_value = vp8_signed_char_clamp(ps1 - qs1);
+ filter_value &= hev;
/* inner taps */
- vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0));
- vp8_filter &= mask;
+ filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0));
+ filter_value &= mask;
/* save bottom 3 bits so that we round one side +4 and the other +3
* if it equals 4 we'll set to adjust by -1 to account for the fact
* we'd round 3 the other way
*/
- Filter1 = vp8_signed_char_clamp(vp8_filter + 4);
- Filter2 = vp8_signed_char_clamp(vp8_filter + 3);
+ Filter1 = vp8_signed_char_clamp(filter_value + 4);
+ Filter2 = vp8_signed_char_clamp(filter_value + 3);
Filter1 >>= 3;
Filter2 >>= 3;
u = vp8_signed_char_clamp(qs0 - Filter1);
*oq0 = u ^ 0x80;
u = vp8_signed_char_clamp(ps0 + Filter2);
*op0 = u ^ 0x80;
- vp8_filter = Filter1;
+ filter_value = Filter1;
/* outer tap adjustments */
- vp8_filter += 1;
- vp8_filter >>= 1;
- vp8_filter &= ~hev;
+ filter_value += 1;
+ filter_value >>= 1;
+ filter_value &= ~hev;
- u = vp8_signed_char_clamp(qs1 - vp8_filter);
+ u = vp8_signed_char_clamp(qs1 - filter_value);
*oq1 = u ^ 0x80;
- u = vp8_signed_char_clamp(ps1 + vp8_filter);
+ u = vp8_signed_char_clamp(ps1 + filter_value);
*op1 = u ^ 0x80;
}
@@ -162,7 +162,7 @@ static void vp8_mbfilter(signed char mask, uc hev,
uc *op2, uc *op1, uc *op0, uc *oq0, uc *oq1, uc *oq2)
{
signed char s, u;
- signed char vp8_filter, Filter1, Filter2;
+ signed char filter_value, Filter1, Filter2;
signed char ps2 = (signed char) * op2 ^ 0x80;
signed char ps1 = (signed char) * op1 ^ 0x80;
signed char ps0 = (signed char) * op0 ^ 0x80;
@@ -171,11 +171,11 @@ static void vp8_mbfilter(signed char mask, uc hev,
signed char qs2 = (signed char) * oq2 ^ 0x80;
/* add outer taps if we have high edge variance */
- vp8_filter = vp8_signed_char_clamp(ps1 - qs1);
- vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0));
- vp8_filter &= mask;
+ filter_value = vp8_signed_char_clamp(ps1 - qs1);
+ filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0));
+ filter_value &= mask;
- Filter2 = vp8_filter;
+ Filter2 = filter_value;
Filter2 &= hev;
/* save bottom 3 bits so that we round one side +4 and the other +3 */
@@ -188,8 +188,8 @@ static void vp8_mbfilter(signed char mask, uc hev,
/* only apply wider filter if not high edge variance */
- vp8_filter &= ~hev;
- Filter2 = vp8_filter;
+ filter_value &= ~hev;
+ Filter2 = filter_value;
/* roughly 3/7th difference across boundary */
u = vp8_signed_char_clamp((63 + Filter2 * 27) >> 7);
@@ -291,24 +291,24 @@ static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, uc q1)
static void vp8_simple_filter(signed char mask, uc *op1, uc *op0, uc *oq0, uc *oq1)
{
- signed char vp8_filter, Filter1, Filter2;
+ signed char filter_value, Filter1, Filter2;
signed char p1 = (signed char) * op1 ^ 0x80;
signed char p0 = (signed char) * op0 ^ 0x80;
signed char q0 = (signed char) * oq0 ^ 0x80;
signed char q1 = (signed char) * oq1 ^ 0x80;
signed char u;
- vp8_filter = vp8_signed_char_clamp(p1 - q1);
- vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (q0 - p0));
- vp8_filter &= mask;
+ filter_value = vp8_signed_char_clamp(p1 - q1);
+ filter_value = vp8_signed_char_clamp(filter_value + 3 * (q0 - p0));
+ filter_value &= mask;
/* save bottom 3 bits so that we round one side +4 and the other +3 */
- Filter1 = vp8_signed_char_clamp(vp8_filter + 4);
+ Filter1 = vp8_signed_char_clamp(filter_value + 4);
Filter1 >>= 3;
u = vp8_signed_char_clamp(q0 - Filter1);
*oq0 = u ^ 0x80;
- Filter2 = vp8_signed_char_clamp(vp8_filter + 3);
+ Filter2 = vp8_signed_char_clamp(filter_value + 3);
Filter2 >>= 3;
u = vp8_signed_char_clamp(p0 + Filter2);
*op0 = u ^ 0x80;
diff --git a/libvpx/vp8/common/mfqe.c b/libvpx/vp8/common/mfqe.c
index 3dff150..0693326 100644
--- a/libvpx/vp8/common/mfqe.c
+++ b/libvpx/vp8/common/mfqe.c
@@ -20,7 +20,7 @@
#include "postproc.h"
#include "variance.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_scale/yv12config.h"
#include <limits.h>
@@ -280,7 +280,7 @@ void vp8_multiframe_quality_enhance
FRAME_TYPE frame_type = cm->frame_type;
/* Point at base of Mb MODE_INFO list has motion vectors etc */
- const MODE_INFO *mode_info_context = cm->mi;
+ const MODE_INFO *mode_info_context = cm->show_frame_mi;
int mb_row;
int mb_col;
int totmap, map[4];
diff --git a/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c b/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c
index 6823325..619ee80 100644
--- a/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/mips/dspr2/filter_dspr2.c b/libvpx/vp8/common/mips/dspr2/filter_dspr2.c
index 71fdcd7..ace5d40 100644
--- a/libvpx/vp8/common/mips/dspr2/filter_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/filter_dspr2.c
@@ -10,7 +10,7 @@
#include <stdlib.h>
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_ports/mem.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c b/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
index 1e0ebd1..ab938cd 100644
--- a/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c b/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c
index 25b7936..2eff710 100644
--- a/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if HAVE_DSPR2
#define CROP_WIDTH 256
diff --git a/libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c b/libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c
index b8e5e4d..9ae6bc8 100644
--- a/libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c
@@ -10,7 +10,7 @@
#include <stdlib.h>
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/onyxc_int.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c b/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c
index a5239a3..a14b397 100644
--- a/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/onyx.h b/libvpx/vp8/common/onyx.h
index 766b4ea..30c4cbb 100644
--- a/libvpx/vp8/common/onyx.h
+++ b/libvpx/vp8/common/onyx.h
@@ -41,7 +41,8 @@ extern "C"
{
USAGE_STREAM_FROM_SERVER = 0x0,
USAGE_LOCAL_FILE_PLAYBACK = 0x1,
- USAGE_CONSTRAINED_QUALITY = 0x2
+ USAGE_CONSTRAINED_QUALITY = 0x2,
+ USAGE_CONSTANT_QUALITY = 0x3
} END_USAGE;
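USAGE_CONSTANT_QUALITY extends the encoder's end-usage enum with a fourth mode alongside the stream, local-file, and constrained-quality usages. A hedged caller-side sketch (the end_usage field on VP8_CONFIG is assumed from the rest of onyx.h and is not shown in this hunk):

    #include "vp8/common/onyx.h"

    /* hypothetical helper: request the new constant-quality mode.
     * VP8_CONFIG.end_usage is assumed to exist; it is not part of
     * this hunk. */
    static void request_constant_quality(VP8_CONFIG *oxcf)
    {
        oxcf->end_usage = USAGE_CONSTANT_QUALITY; /* 0x3, added above */
    }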
diff --git a/libvpx/vp8/common/onyxc_int.h b/libvpx/vp8/common/onyxc_int.h
index 5325bac..e9bb7af 100644
--- a/libvpx/vp8/common/onyxc_int.h
+++ b/libvpx/vp8/common/onyxc_int.h
@@ -13,7 +13,7 @@
#define __INC_VP8C_INT_H
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "loopfilter.h"
#include "entropymv.h"
@@ -72,7 +72,6 @@ typedef struct VP8Common
int horiz_scale;
int vert_scale;
- YUV_TYPE clr_type;
CLAMP_TYPE clamp_type;
YV12_BUFFER_CONFIG *frame_to_show;
@@ -115,9 +114,6 @@ typedef struct VP8Common
int uvdc_delta_q;
int uvac_delta_q;
- unsigned int frames_since_golden;
- unsigned int frames_till_alt_ref_frame;
-
/* We allocate a MODE_INFO struct for each macroblock, together with
an extra row on top and column on the left to simplify prediction. */
@@ -127,7 +123,8 @@ typedef struct VP8Common
MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
#endif
-
+ MODE_INFO *show_frame_mi; /* MODE_INFO for the last decoded frame
+ to show */
LOOPFILTERTYPE filter_type;
loop_filter_info_n lf_info;
@@ -156,7 +153,6 @@ typedef struct VP8Common
unsigned int current_video_frame;
- int near_boffset[3];
int version;
TOKEN_PARTITION multi_token_partition;
@@ -164,8 +160,10 @@ typedef struct VP8Common
#ifdef PACKET_TESTING
VP8_HEADER oh;
#endif
+#if CONFIG_POSTPROC_VISUALIZER
double bitrate;
double framerate;
+#endif
#if CONFIG_MULTITHREAD
int processor_core_count;
diff --git a/libvpx/vp8/common/onyxd.h b/libvpx/vp8/common/onyxd.h
index fd7e051..97c81c1 100644
--- a/libvpx/vp8/common/onyxd.h
+++ b/libvpx/vp8/common/onyxd.h
@@ -34,7 +34,6 @@ extern "C"
int postprocess;
int max_threads;
int error_concealment;
- int input_fragments;
} VP8D_CONFIG;
typedef enum
@@ -56,10 +55,6 @@ extern "C"
vpx_codec_err_t vp8dx_get_reference(struct VP8D_COMP* comp, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd);
vpx_codec_err_t vp8dx_set_reference(struct VP8D_COMP* comp, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd);
- struct VP8D_COMP* vp8dx_create_decompressor(VP8D_CONFIG *oxcf);
-
- void vp8dx_remove_decompressor(struct VP8D_COMP* comp);
-
#ifdef __cplusplus
}
#endif
diff --git a/libvpx/vp8/common/postproc.c b/libvpx/vp8/common/postproc.c
index 80fa530..dd998f1 100644
--- a/libvpx/vp8/common/postproc.c
+++ b/libvpx/vp8/common/postproc.c
@@ -10,11 +10,12 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
+#include "vpx_scale_rtcd.h"
#include "vpx_scale/yv12config.h"
#include "postproc.h"
#include "common.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "systemdependent.h"
#include <limits.h>
@@ -333,7 +334,7 @@ void vp8_deblock(VP8_COMMON *cm,
double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065;
int ppl = (int)(level + .5);
- const MODE_INFO *mode_info_context = cm->mi;
+ const MODE_INFO *mode_info_context = cm->show_frame_mi;
int mbr, mbc;
/* The pixel thresholds are adjusted according to whether or not the macroblock
@@ -438,29 +439,28 @@ static void fillrd(struct postproc_state *state, int q, int a)
char char_dist[300];
double sigma;
- int ai = a, qi = q, i;
+ int i;
vp8_clear_system_state();
- sigma = ai + .5 + .6 * (63 - qi) / 63.0;
+ sigma = a + .5 + .6 * (63 - q) / 63.0;
/* set up a lookup table of 256 entries that matches
* a gaussian distribution with sigma determined by q.
*/
{
- double i;
int next, j;
next = 0;
for (i = -32; i < 32; i++)
{
- int a = (int)(.5 + 256 * vp8_gaussian(sigma, 0, i));
+ const int v = (int)(.5 + 256 * vp8_gaussian(sigma, 0, i));
- if (a)
+ if (v)
{
- for (j = 0; j < a; j++)
+ for (j = 0; j < v; j++)
{
char_dist[next+j] = (char) i;
}
@@ -543,12 +543,12 @@ void vp8_plane_add_noise_c(unsigned char *Start, char *noise,
* filled with the same color block.
*/
void vp8_blend_mb_inner_c (unsigned char *y, unsigned char *u, unsigned char *v,
- int y1, int u1, int v1, int alpha, int stride)
+ int y_1, int u_1, int v_1, int alpha, int stride)
{
int i, j;
- int y1_const = y1*((1<<16)-alpha);
- int u1_const = u1*((1<<16)-alpha);
- int v1_const = v1*((1<<16)-alpha);
+ int y1_const = y_1*((1<<16)-alpha);
+ int u1_const = u_1*((1<<16)-alpha);
+ int v1_const = v_1*((1<<16)-alpha);
y += 2*stride + 2;
for (i = 0; i < 12; i++)
@@ -581,12 +581,12 @@ void vp8_blend_mb_inner_c (unsigned char *y, unsigned char *u, unsigned char *v,
* unblended to allow for other visualizations to be layered.
*/
void vp8_blend_mb_outer_c (unsigned char *y, unsigned char *u, unsigned char *v,
- int y1, int u1, int v1, int alpha, int stride)
+ int y_1, int u_1, int v_1, int alpha, int stride)
{
int i, j;
- int y1_const = y1*((1<<16)-alpha);
- int u1_const = u1*((1<<16)-alpha);
- int v1_const = v1*((1<<16)-alpha);
+ int y1_const = y_1*((1<<16)-alpha);
+ int u1_const = u_1*((1<<16)-alpha);
+ int v1_const = v_1*((1<<16)-alpha);
for (i = 0; i < 2; i++)
{
@@ -645,12 +645,12 @@ void vp8_blend_mb_outer_c (unsigned char *y, unsigned char *u, unsigned char *v,
}
void vp8_blend_b_c (unsigned char *y, unsigned char *u, unsigned char *v,
- int y1, int u1, int v1, int alpha, int stride)
+ int y_1, int u_1, int v_1, int alpha, int stride)
{
int i, j;
- int y1_const = y1*((1<<16)-alpha);
- int u1_const = u1*((1<<16)-alpha);
- int v1_const = v1*((1<<16)-alpha);
+ int y1_const = y_1*((1<<16)-alpha);
+ int u1_const = u_1*((1<<16)-alpha);
+ int v1_const = v_1*((1<<16)-alpha);
for (i = 0; i < 4; i++)
{
@@ -675,46 +675,46 @@ void vp8_blend_b_c (unsigned char *y, unsigned char *u, unsigned char *v,
}
}
-static void constrain_line (int x0, int *x1, int y0, int *y1, int width, int height)
+static void constrain_line (int x_0, int *x_1, int y_0, int *y_1, int width, int height)
{
int dx;
int dy;
- if (*x1 > width)
+ if (*x_1 > width)
{
- dx = *x1 - x0;
- dy = *y1 - y0;
+ dx = *x_1 - x_0;
+ dy = *y_1 - y_0;
- *x1 = width;
+ *x_1 = width;
if (dx)
- *y1 = ((width-x0)*dy)/dx + y0;
+ *y_1 = ((width-x_0)*dy)/dx + y_0;
}
- if (*x1 < 0)
+ if (*x_1 < 0)
{
- dx = *x1 - x0;
- dy = *y1 - y0;
+ dx = *x_1 - x_0;
+ dy = *y_1 - y_0;
- *x1 = 0;
+ *x_1 = 0;
if (dx)
- *y1 = ((0-x0)*dy)/dx + y0;
+ *y_1 = ((0-x_0)*dy)/dx + y_0;
}
- if (*y1 > height)
+ if (*y_1 > height)
{
- dx = *x1 - x0;
- dy = *y1 - y0;
+ dx = *x_1 - x_0;
+ dy = *y_1 - y_0;
- *y1 = height;
+ *y_1 = height;
if (dy)
- *x1 = ((height-y0)*dx)/dy + x0;
+ *x_1 = ((height-y_0)*dx)/dy + x_0;
}
- if (*y1 < 0)
+ if (*y_1 < 0)
{
- dx = *x1 - x0;
- dy = *y1 - y0;
+ dx = *x_1 - x_0;
+ dy = *y_1 - y_0;
- *y1 = 0;
+ *y_1 = 0;
if (dy)
- *x1 = ((0-y0)*dx)/dy + x0;
+ *x_1 = ((0-y_0)*dx)/dy + x_0;
}
}
@@ -923,7 +923,7 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t
if (flags & VP8D_DEBUG_TXT_RATE_INFO)
{
char message[512];
- sprintf(message, "Bitrate: %10.2f frame_rate: %10.2f ", oci->bitrate, oci->framerate);
+ sprintf(message, "Bitrate: %10.2f framerate: %10.2f ", oci->bitrate, oci->framerate);
vp8_blit_text(message, oci->post_proc_buffer.y_buffer, oci->post_proc_buffer.y_stride);
}
diff --git a/libvpx/vp8/common/ppc/systemdependent.c b/libvpx/vp8/common/ppc/systemdependent.c
index 87f4cac..6899c0e 100644
--- a/libvpx/vp8/common/ppc/systemdependent.c
+++ b/libvpx/vp8/common/ppc/systemdependent.c
@@ -12,13 +12,8 @@
#include "subpixel.h"
#include "loopfilter.h"
#include "recon.h"
-#include "idct.h"
#include "onyxc_int.h"
-void (*vp8_short_idct4x4)(short *input, short *output, int pitch);
-void (*vp8_short_idct4x4_1)(short *input, short *output, int pitch);
-void (*vp8_dc_only_idct)(short input_dc, short *output, int pitch);
-
extern void (*vp8_post_proc_down_and_across_mb_row)(
unsigned char *src_ptr,
unsigned char *dst_ptr,
diff --git a/libvpx/vp8/common/reconinter.c b/libvpx/vp8/common/reconinter.c
index 3da3bc7..43f84d0 100644
--- a/libvpx/vp8/common/reconinter.c
+++ b/libvpx/vp8/common/reconinter.c
@@ -11,7 +11,7 @@
#include <limits.h>
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "blockd.h"
#include "reconinter.h"
diff --git a/libvpx/vp8/common/reconintra.c b/libvpx/vp8/common/reconintra.c
index 4067a68..ec51ffe 100644
--- a/libvpx/vp8/common/reconintra.c
+++ b/libvpx/vp8/common/reconintra.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "blockd.h"
@@ -36,7 +36,6 @@ void vp8_build_intra_predictors_mby_s_c(MACROBLOCKD *x,
case DC_PRED:
{
int expected_dc;
- int i;
int shift;
int average = 0;
@@ -168,7 +167,6 @@ void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
{
int expected_udc;
int expected_vdc;
- int i;
int shift;
int Uaverage = 0;
int Vaverage = 0;
@@ -217,8 +215,6 @@ void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
break;
case V_PRED:
{
- int i;
-
for (i = 0; i < 8; i++)
{
vpx_memcpy(upred_ptr, uabove_row, 8);
@@ -231,8 +227,6 @@ void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
break;
case H_PRED:
{
- int i;
-
for (i = 0; i < 8; i++)
{
vpx_memset(upred_ptr, uleft_col[i], 8);
@@ -245,8 +239,6 @@ void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
break;
case TM_PRED:
{
- int i;
-
for (i = 0; i < 8; i++)
{
for (j = 0; j < 8; j++)
diff --git a/libvpx/vp8/common/reconintra4x4.c b/libvpx/vp8/common/reconintra4x4.c
index 7bb8d0a..3d4f2c4 100644
--- a/libvpx/vp8/common/reconintra4x4.c
+++ b/libvpx/vp8/common/reconintra4x4.c
@@ -10,17 +10,17 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "blockd.h"
void vp8_intra4x4_predict_c(unsigned char *Above,
unsigned char *yleft, int left_stride,
- B_PREDICTION_MODE b_mode,
+ int _b_mode,
unsigned char *dst, int dst_stride,
unsigned char top_left)
{
int i, r, c;
-
+ B_PREDICTION_MODE b_mode = (B_PREDICTION_MODE)_b_mode;
unsigned char Left[4];
Left[0] = yleft[0];
Left[1] = yleft[left_stride];
diff --git a/libvpx/vp8/common/rtcd.c b/libvpx/vp8/common/rtcd.c
index 01dad46..0b371b0 100644
--- a/libvpx/vp8/common/rtcd.c
+++ b/libvpx/vp8/common/rtcd.c
@@ -9,97 +9,13 @@
*/
#include "vpx_config.h"
#define RTCD_C
-#include "vpx_rtcd.h"
-
-#if CONFIG_MULTITHREAD && defined(_WIN32)
-#include <windows.h>
-#include <stdlib.h>
-static void once(void (*func)(void))
-{
- static CRITICAL_SECTION *lock;
- static LONG waiters;
- static int done;
- void *lock_ptr = &lock;
-
- /* If the initialization is complete, return early. This isn't just an
- * optimization, it prevents races on the destruction of the global
- * lock.
- */
- if(done)
- return;
-
- InterlockedIncrement(&waiters);
-
- /* Get a lock. We create one and try to make it the one-true-lock,
- * throwing it away if we lost the race.
- */
-
- {
- /* Scope to protect access to new_lock */
- CRITICAL_SECTION *new_lock = malloc(sizeof(CRITICAL_SECTION));
- InitializeCriticalSection(new_lock);
- if (InterlockedCompareExchangePointer(lock_ptr, new_lock, NULL) != NULL)
- {
- DeleteCriticalSection(new_lock);
- free(new_lock);
- }
- }
-
- /* At this point, we have a lock that can be synchronized on. We don't
- * care which thread actually performed the allocation.
- */
-
- EnterCriticalSection(lock);
-
- if (!done)
- {
- func();
- done = 1;
- }
-
- LeaveCriticalSection(lock);
-
- /* Last one out should free resources. The destructed objects are
- * protected by checking if(done) above.
- */
- if(!InterlockedDecrement(&waiters))
- {
- DeleteCriticalSection(lock);
- free(lock);
- lock = NULL;
- }
-}
-
-
-#elif CONFIG_MULTITHREAD && HAVE_PTHREAD_H
-#include <pthread.h>
-static void once(void (*func)(void))
-{
- static pthread_once_t lock = PTHREAD_ONCE_INIT;
- pthread_once(&lock, func);
-}
-
-
-#else
-/* No-op version that performs no synchronization. vpx_rtcd() is idempotent,
- * so as long as your platform provides atomic loads/stores of pointers
- * no synchronization is strictly necessary.
- */
-
-static void once(void (*func)(void))
-{
- static int done;
-
- if(!done)
- {
- func();
- done = 1;
- }
-}
-#endif
+#include "vp8_rtcd.h"
+#include "vpx_ports/vpx_once.h"
+extern void vpx_scale_rtcd(void);
-void vpx_rtcd()
+void vp8_rtcd()
{
+ vpx_scale_rtcd();
once(setup_rtcd_internal);
}
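The three hand-rolled once() variants deleted above (Win32 critical-section, pthread, and an unsynchronized fallback) are consolidated into vpx_ports/vpx_once.h, so vp8_rtcd() and vpx_scale_rtcd() share one implementation. The pthread flavor, for reference, reduces to:

    #include <pthread.h>

    /* the pthread variant now provided by vpx_ports/vpx_once.h */
    static void once(void (*func)(void))
    {
        static pthread_once_t lock = PTHREAD_ONCE_INIT;
        pthread_once(&lock, func);
    }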
diff --git a/libvpx/vp8/common/rtcd_defs.sh b/libvpx/vp8/common/rtcd_defs.sh
index 0f950f8..9ebf389 100644
--- a/libvpx/vp8/common/rtcd_defs.sh
+++ b/libvpx/vp8/common/rtcd_defs.sh
@@ -1,6 +1,8 @@
-common_forward_decls() {
+vp8_common_forward_decls() {
cat <<EOF
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -14,7 +16,14 @@ union int_mv;
struct yv12_buffer_config;
EOF
}
-forward_decls common_forward_decls
+forward_decls vp8_common_forward_decls
+
+#
+# system state
+#
+prototype void vp8_clear_system_state ""
+specialize vp8_clear_system_state mmx
+vp8_clear_system_state_mmx=vpx_reset_mmx_state
#
# Dequant
@@ -146,7 +155,7 @@ specialize vp8_build_intra_predictors_mby_s sse2 ssse3
prototype void vp8_build_intra_predictors_mbuv_s "struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride"
specialize vp8_build_intra_predictors_mbuv_s sse2 ssse3
-prototype void vp8_intra4x4_predict "unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left"
+prototype void vp8_intra4x4_predict "unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left"
specialize vp8_intra4x4_predict media
vp8_intra4x4_predict_media=vp8_intra4x4_predict_armv6
@@ -442,8 +451,9 @@ vp8_short_walsh4x4_media=vp8_short_walsh4x4_armv6
# Quantizer
#
prototype void vp8_regular_quantize_b "struct block *, struct blockd *"
-specialize vp8_regular_quantize_b sse2 sse4_1
-vp8_regular_quantize_b_sse4_1=vp8_regular_quantize_b_sse4
+specialize vp8_regular_quantize_b sse2 #sse4_1
+# TODO(johann) Update sse4 implementation and re-enable
+#vp8_regular_quantize_b_sse4_1=vp8_regular_quantize_b_sse4
prototype void vp8_fast_quantize_b "struct block *, struct blockd *"
specialize vp8_fast_quantize_b sse2 ssse3 media neon
@@ -530,39 +540,3 @@ fi
# End of encoder only functions
fi
-
-# Scaler functions
-if [ "CONFIG_SPATIAL_RESAMPLING" != "yes" ]; then
- prototype void vp8_horizontal_line_4_5_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_4_5_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_4_5_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_2_3_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_2_3_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_2_3_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_3_5_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_3_5_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_3_5_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_3_4_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_3_4_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_3_4_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_1_2_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_1_2_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_1_2_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_5_4_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_5_4_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_5_3_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_5_3_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_2_1_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_2_1_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_vertical_band_2_1_scale_i "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
-fi
-
-prototype void vp8_yv12_extend_frame_borders "struct yv12_buffer_config *ybf"
-specialize vp8_yv12_extend_frame_borders neon
-
-prototype void vp8_yv12_copy_frame "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"
-specialize vp8_yv12_copy_frame neon
-
-prototype void vp8_yv12_copy_y "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"
-specialize vp8_yv12_copy_y neon
-
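The scaler and yv12 copy/extend prototypes drop out of vp8's RTCD table because they move to the separate vpx_scale RTCD table, which vp8_rtcd() now initializes first (see rtcd.c above). The new vp8_clear_system_state entry replaces the macro removed from systemdependent.h below, and the name_variant=symbol line maps its mmx specialization directly onto the existing vpx_reset_mmx_state. Roughly the dispatch glue rtcd.sh emits for it (shape only, not the verbatim generated header):

    /* sketch of the generated dispatch for the new entry; shape only */
    void vp8_clear_system_state_c(void);
    void vpx_reset_mmx_state(void);     /* the remapped mmx variant */
    RTCD_EXTERN void (*vp8_clear_system_state)(void);

    /* setup_rtcd_internal() then does, in effect:
     *     vp8_clear_system_state = vp8_clear_system_state_c;
     *     if (flags & HAS_MMX)
     *         vp8_clear_system_state = vpx_reset_mmx_state;
     */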
diff --git a/libvpx/vp8/common/systemdependent.h b/libvpx/vp8/common/systemdependent.h
index f99c4bb..e6b0456 100644
--- a/libvpx/vp8/common/systemdependent.h
+++ b/libvpx/vp8/common/systemdependent.h
@@ -10,12 +10,6 @@
#include "vpx_config.h"
-#if ARCH_X86 || ARCH_X86_64
-void vpx_reset_mmx_state(void);
-#define vp8_clear_system_state() vpx_reset_mmx_state()
-#else
-#define vp8_clear_system_state()
-#endif
struct VP8Common;
void vp8_machine_specific_config(struct VP8Common *);
diff --git a/libvpx/vp8/common/variance_c.c b/libvpx/vp8/common/variance_c.c
index da08aff..773b655 100644
--- a/libvpx/vp8/common/variance_c.c
+++ b/libvpx/vp8/common/variance_c.c
@@ -75,7 +75,7 @@ unsigned int vp8_variance16x16_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 8));
+ return (var - (((unsigned int)avg * avg) >> 8));
}
unsigned int vp8_variance8x16_c(
@@ -91,7 +91,7 @@ unsigned int vp8_variance8x16_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
unsigned int vp8_variance16x8_c(
@@ -107,7 +107,7 @@ unsigned int vp8_variance16x8_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -124,7 +124,7 @@ unsigned int vp8_variance8x8_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 6));
+ return (var - (((unsigned int)avg * avg) >> 6));
}
unsigned int vp8_variance4x4_c(
@@ -140,7 +140,7 @@ unsigned int vp8_variance4x4_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 4));
+ return (var - (((unsigned int)avg * avg) >> 4));
}
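The re-parenthesization in these hunks is an overflow fix, not a style change. var - (sum^2 / N) is the variance identity (N = 256 for 16x16, hence the >> 8), but (unsigned int)(avg * avg) performs the multiply in signed int first, which is undefined behavior once |avg| exceeds 46340, and a 16x16 sum of differences can reach +/-65280. Casting one operand first, ((unsigned int)avg * avg), makes the multiply unsigned and well defined, and the low 32 bits are the correct square either way. A tiny self-contained illustration:

    #include <stdio.h>

    int main(void)
    {
        int avg = 65280;  /* worst-case 16x16 sum: 255 * 256 */
        /* avg * avg = 4261478400 overflows 32-bit signed int (UB);
         * converting one operand first keeps the multiply unsigned. */
        unsigned int sq = ((unsigned int)avg * avg) >> 8;
        printf("%u\n", sq);   /* 16646400 == 65280 * 65280 / 256 */
        return 0;
    }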
diff --git a/libvpx/vp8/common/x86/idct_blk_mmx.c b/libvpx/vp8/common/x86/idct_blk_mmx.c
index 4adf3f5..49b2013 100644
--- a/libvpx/vp8/common/x86/idct_blk_mmx.c
+++ b/libvpx/vp8/common/x86/idct_blk_mmx.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
extern void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q);
diff --git a/libvpx/vp8/common/x86/idct_blk_sse2.c b/libvpx/vp8/common/x86/idct_blk_sse2.c
index 056e052..ae96ec8 100644
--- a/libvpx/vp8/common/x86/idct_blk_sse2.c
+++ b/libvpx/vp8/common/x86/idct_blk_sse2.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
void vp8_idct_dequant_0_2x_sse2
(short *q, short *dq ,
diff --git a/libvpx/vp8/common/x86/iwalsh_mmx.asm b/libvpx/vp8/common/x86/iwalsh_mmx.asm
index 4aac094..158c3b7 100644
--- a/libvpx/vp8/common/x86/iwalsh_mmx.asm
+++ b/libvpx/vp8/common/x86/iwalsh_mmx.asm
@@ -24,7 +24,7 @@ sym(vp8_short_inv_walsh4x4_mmx):
movq mm0, [rdx + 0] ;ip[0]
movq mm1, [rdx + 8] ;ip[4]
- movd mm7, rax
+ movq mm7, rax
movq mm2, [rdx + 16] ;ip[8]
movq mm3, [rdx + 24] ;ip[12]
diff --git a/libvpx/vp8/common/x86/loopfilter_block_sse2.asm b/libvpx/vp8/common/x86/loopfilter_block_sse2.asm
index 1c445ef..6d5aaa1 100644
--- a/libvpx/vp8/common/x86/loopfilter_block_sse2.asm
+++ b/libvpx/vp8/common/x86/loopfilter_block_sse2.asm
@@ -136,7 +136,7 @@
global sym(vp8_loop_filter_bh_y_sse2) PRIVATE
sym(vp8_loop_filter_bh_y_sse2):
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%define src rcx ; src_ptr
%define stride rdx ; src_pixel_step
%define blimit r8
@@ -150,6 +150,7 @@ sym(vp8_loop_filter_bh_y_sse2):
push rbp
mov rbp, rsp
+ SAVE_XMM 11
push r12
push r13
mov thresh, arg(4)
@@ -255,9 +256,10 @@ LF_FILTER xmm0, xmm1, xmm3, xmm8, xmm4, xmm2
movdqa i12, xmm3
movdqa i13, xmm8
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
pop r13
pop r12
+ RESTORE_XMM
pop rbp
%endif
@@ -276,7 +278,7 @@ LF_FILTER xmm0, xmm1, xmm3, xmm8, xmm4, xmm2
global sym(vp8_loop_filter_bv_y_sse2) PRIVATE
sym(vp8_loop_filter_bv_y_sse2):
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%define src rcx ; src_ptr
%define stride rdx ; src_pixel_step
%define blimit r8
@@ -777,7 +779,7 @@ LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
; un-ALIGN_STACK
pop rsp
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
pop r13
pop r12
RESTORE_XMM
diff --git a/libvpx/vp8/common/x86/mfqe_sse2.asm b/libvpx/vp8/common/x86/mfqe_sse2.asm
index c1d2174..a8a7f56 100644
--- a/libvpx/vp8/common/x86/mfqe_sse2.asm
+++ b/libvpx/vp8/common/x86/mfqe_sse2.asm
@@ -271,7 +271,13 @@ sym(vp8_variance_and_sad_16x16_sse2):
SECTION_RODATA
align 16
t128:
+%ifndef __NASM_VER__
ddq 128
+%elif CONFIG_BIG_ENDIAN
+ dq 0, 128
+%else
+ dq 128, 0
+%endif
align 16
tMFQE: ; 1 << MFQE_PRECISION
times 8 dw 0x10
diff --git a/libvpx/vp8/common/x86/postproc_mmx.asm b/libvpx/vp8/common/x86/postproc_mmx.asm
index 966c586..5cf110b 100644
--- a/libvpx/vp8/common/x86/postproc_mmx.asm
+++ b/libvpx/vp8/common/x86/postproc_mmx.asm
@@ -61,7 +61,7 @@ sym(vp8_mbpost_proc_down_mmx):
mov rcx, 8
.init_borderd ; initialize borders
lea rdi, [rdi + rax]
- movq [rdi], xmm1
+ movq [rdi], mm1
dec rcx
jne .init_borderd
@@ -193,7 +193,6 @@ sym(vp8_mbpost_proc_down_mmx):
movq mm4, [sym(vp8_rv) + rcx*2]
%endif
paddw mm1, mm4
- ;paddw xmm1, eight8s
psraw mm1, 4
packuswb mm1, mm0
diff --git a/libvpx/vp8/common/x86/recon_sse2.asm b/libvpx/vp8/common/x86/recon_sse2.asm
index fe77450..1434bcd 100644
--- a/libvpx/vp8/common/x86/recon_sse2.asm
+++ b/libvpx/vp8/common/x86/recon_sse2.asm
@@ -890,6 +890,7 @@ sym(vp8_intra_pred_y_tm_%1):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
push rsi
push rdi
GET_GOT rbx
@@ -957,6 +958,7 @@ vp8_intra_pred_y_tm_%1_loop:
RESTORE_GOT
pop rdi
pop rsi
+ RESTORE_XMM
UNSHADOW_ARGS
pop rbp
ret
diff --git a/libvpx/vp8/common/x86/recon_wrapper_sse2.c b/libvpx/vp8/common/x86/recon_wrapper_sse2.c
index b482faa..65f4251 100644
--- a/libvpx/vp8/common/x86/recon_wrapper_sse2.c
+++ b/libvpx/vp8/common/x86/recon_wrapper_sse2.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/blockd.h"
diff --git a/libvpx/vp8/common/x86/sad_sse3.asm b/libvpx/vp8/common/x86/sad_sse3.asm
index f90a589..69c8d37 100644
--- a/libvpx/vp8/common/x86/sad_sse3.asm
+++ b/libvpx/vp8/common/x86/sad_sse3.asm
@@ -33,7 +33,7 @@
movsxd rax, dword ptr arg(1) ; src_stride
movsxd rdx, dword ptr arg(3) ; ref_stride
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
@@ -76,7 +76,7 @@
pop rsi
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
RESTORE_XMM
%endif
%endif
@@ -111,7 +111,7 @@
xchg rbx, rax
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
@@ -156,7 +156,7 @@
pop rsi
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
RESTORE_XMM
%endif
diff --git a/libvpx/vp8/common/x86/subpixel_ssse3.asm b/libvpx/vp8/common/x86/subpixel_ssse3.asm
index 13bcaf6..c06f245 100644
--- a/libvpx/vp8/common/x86/subpixel_ssse3.asm
+++ b/libvpx/vp8/common/x86/subpixel_ssse3.asm
@@ -352,6 +352,7 @@ sym(vp8_filter_block1d4_h6_ssse3):
pop rdi
pop rsi
RESTORE_GOT
+ RESTORE_XMM
UNSHADOW_ARGS
pop rbp
ret
diff --git a/libvpx/vp8/common/x86/variance_mmx.c b/libvpx/vp8/common/x86/variance_mmx.c
index 0c4dd4a..36995db 100644
--- a/libvpx/vp8/common/x86/variance_mmx.c
+++ b/libvpx/vp8/common/x86/variance_mmx.c
@@ -91,7 +91,7 @@ unsigned int vp8_variance4x4_mmx(
vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 4));
+ return (var - (((unsigned int)avg * avg) >> 4));
}
@@ -108,7 +108,7 @@ unsigned int vp8_variance8x8_mmx(
vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 6));
+ return (var - (((unsigned int)avg * avg) >> 6));
}
@@ -153,7 +153,7 @@ unsigned int vp8_variance16x16_mmx(
var = sse0 + sse1 + sse2 + sse3;
avg = sum0 + sum1 + sum2 + sum3;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 8));
+ return (var - (((unsigned int)avg * avg) >> 8));
}
unsigned int vp8_variance16x8_mmx(
@@ -172,7 +172,7 @@ unsigned int vp8_variance16x8_mmx(
var = sse0 + sse1;
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -194,7 +194,7 @@ unsigned int vp8_variance8x16_mmx(
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -219,7 +219,7 @@ unsigned int vp8_sub_pixel_variance4x4_mmx
&xsum, &xxsum
);
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 4));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 4));
}
@@ -244,7 +244,7 @@ unsigned int vp8_sub_pixel_variance8x8_mmx
&xsum, &xxsum
);
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 6));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 6));
}
unsigned int vp8_sub_pixel_variance16x16_mmx
@@ -282,7 +282,7 @@ unsigned int vp8_sub_pixel_variance16x16_mmx
xxsum0 += xxsum1;
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
@@ -335,7 +335,7 @@ unsigned int vp8_sub_pixel_variance16x8_mmx
xxsum0 += xxsum1;
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 7));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
}
unsigned int vp8_sub_pixel_variance8x16_mmx
@@ -358,7 +358,7 @@ unsigned int vp8_sub_pixel_variance8x16_mmx
&xsum, &xxsum
);
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 7));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 7));
}
diff --git a/libvpx/vp8/common/x86/variance_sse2.c b/libvpx/vp8/common/x86/variance_sse2.c
index afd6429..7fa5f53 100644
--- a/libvpx/vp8/common/x86/variance_sse2.c
+++ b/libvpx/vp8/common/x86/variance_sse2.c
@@ -148,7 +148,7 @@ unsigned int vp8_variance4x4_wmt(
vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 4));
+ return (var - (((unsigned int)avg * avg) >> 4));
}
@@ -165,7 +165,7 @@ unsigned int vp8_variance8x8_wmt
vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 6));
+ return (var - (((unsigned int)avg * avg) >> 6));
}
@@ -184,7 +184,7 @@ unsigned int vp8_variance16x16_wmt
vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
*sse = sse0;
- return (sse0 - ((unsigned int)(sum0 * sum0) >> 8));
+ return (sse0 - (((unsigned int)sum0 * sum0) >> 8));
}
unsigned int vp8_mse16x16_wmt(
const unsigned char *src_ptr,
@@ -220,7 +220,7 @@ unsigned int vp8_variance16x8_wmt
var = sse0 + sse1;
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -241,7 +241,7 @@ unsigned int vp8_variance8x16_wmt
var = sse0 + sse1;
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -265,7 +265,7 @@ unsigned int vp8_sub_pixel_variance4x4_wmt
&xsum, &xxsum
);
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 4));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 4));
}
@@ -314,7 +314,7 @@ unsigned int vp8_sub_pixel_variance8x8_wmt
}
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 6));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 6));
}
unsigned int vp8_sub_pixel_variance16x16_wmt
@@ -376,7 +376,7 @@ unsigned int vp8_sub_pixel_variance16x16_wmt
}
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
unsigned int vp8_sub_pixel_mse16x16_wmt(
@@ -447,7 +447,7 @@ unsigned int vp8_sub_pixel_variance16x8_wmt
}
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 7));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
}
unsigned int vp8_sub_pixel_variance8x16_wmt
@@ -495,7 +495,7 @@ unsigned int vp8_sub_pixel_variance8x16_wmt
}
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 7));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 7));
}
@@ -515,7 +515,7 @@ unsigned int vp8_variance_halfpixvar16x16_h_wmt(
&xsum0, &xxsum0);
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
@@ -534,7 +534,7 @@ unsigned int vp8_variance_halfpixvar16x16_v_wmt(
&xsum0, &xxsum0);
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
@@ -554,5 +554,5 @@ unsigned int vp8_variance_halfpixvar16x16_hv_wmt(
&xsum0, &xxsum0);
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
diff --git a/libvpx/vp8/common/x86/variance_ssse3.c b/libvpx/vp8/common/x86/variance_ssse3.c
index ba2055c..f90f811 100644
--- a/libvpx/vp8/common/x86/variance_ssse3.c
+++ b/libvpx/vp8/common/x86/variance_ssse3.c
@@ -113,7 +113,7 @@ unsigned int vp8_sub_pixel_variance16x16_ssse3
}
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
unsigned int vp8_sub_pixel_variance16x8_ssse3
@@ -162,5 +162,5 @@ unsigned int vp8_sub_pixel_variance16x8_ssse3
}
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 7));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
}
diff --git a/libvpx/vp8/common/x86/vp8_asm_stubs.c b/libvpx/vp8/common/x86/vp8_asm_stubs.c
index 3437a23..c0416b7 100644
--- a/libvpx/vp8/common/x86/vp8_asm_stubs.c
+++ b/libvpx/vp8/common/x86/vp8_asm_stubs.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_ports/mem.h"
#include "filter_x86.h"
diff --git a/libvpx/vp8/decoder/asm_dec_offsets.c b/libvpx/vp8/decoder/asm_dec_offsets.c
deleted file mode 100644
index 842a0d5..0000000
--- a/libvpx/vp8/decoder/asm_dec_offsets.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#include "vpx_ports/asm_offsets.h"
-#include "onyxd_int.h"
-
-BEGIN
-
-DEFINE(bool_decoder_user_buffer_end, offsetof(BOOL_DECODER, user_buffer_end));
-DEFINE(bool_decoder_user_buffer, offsetof(BOOL_DECODER, user_buffer));
-DEFINE(bool_decoder_value, offsetof(BOOL_DECODER, value));
-DEFINE(bool_decoder_count, offsetof(BOOL_DECODER, count));
-DEFINE(bool_decoder_range, offsetof(BOOL_DECODER, range));
-
-END
-
-/* add asserts for any offset that is not supported by assembly code */
-/* add asserts for any size that is not supported by assembly code */
diff --git a/libvpx/vp8/decoder/dboolhuff.c b/libvpx/vp8/decoder/dboolhuff.c
index 7e7b05a..0007d7a 100644
--- a/libvpx/vp8/decoder/dboolhuff.c
+++ b/libvpx/vp8/decoder/dboolhuff.c
@@ -10,18 +10,20 @@
#include "dboolhuff.h"
-#include "vpx_ports/mem.h"
-#include "vpx_mem/vpx_mem.h"
int vp8dx_start_decode(BOOL_DECODER *br,
const unsigned char *source,
- unsigned int source_sz)
+ unsigned int source_sz,
+ vp8_decrypt_cb *decrypt_cb,
+ void *decrypt_state)
{
br->user_buffer_end = source+source_sz;
br->user_buffer = source;
br->value = 0;
br->count = -8;
br->range = 255;
+ br->decrypt_cb = decrypt_cb;
+ br->decrypt_state = decrypt_state;
if (source_sz && !source)
return 1;
@@ -32,21 +34,42 @@ int vp8dx_start_decode(BOOL_DECODER *br,
return 0;
}
-
void vp8dx_bool_decoder_fill(BOOL_DECODER *br)
{
- const unsigned char *bufptr;
- const unsigned char *bufend;
- VP8_BD_VALUE value;
- int count;
- bufend = br->user_buffer_end;
- bufptr = br->user_buffer;
- value = br->value;
- count = br->count;
-
- VP8DX_BOOL_DECODER_FILL(count, value, bufptr, bufend);
-
- br->user_buffer = bufptr;
+ const unsigned char *bufptr = br->user_buffer;
+ VP8_BD_VALUE value = br->value;
+ int count = br->count;
+ int shift = VP8_BD_VALUE_SIZE - 8 - (count + 8);
+ size_t bytes_left = br->user_buffer_end - bufptr;
+ size_t bits_left = bytes_left * CHAR_BIT;
+ int x = (int)(shift + CHAR_BIT - bits_left);
+ int loop_end = 0;
+ unsigned char decrypted[sizeof(VP8_BD_VALUE) + 1];
+
+ if (br->decrypt_cb) {
+ size_t n = bytes_left > sizeof(decrypted) ? sizeof(decrypted) : bytes_left;
+ br->decrypt_cb(br->decrypt_state, bufptr, decrypted, (int)n);
+ bufptr = decrypted;
+ }
+
+ if(x >= 0)
+ {
+ count += VP8_LOTS_OF_BITS;
+ loop_end = x;
+ }
+
+ if (x < 0 || bits_left)
+ {
+ while(shift >= loop_end)
+ {
+ count += CHAR_BIT;
+ value |= (VP8_BD_VALUE)*bufptr << shift;
+ ++bufptr;
+ ++br->user_buffer;
+ shift -= CHAR_BIT;
+ }
+ }
+
br->value = value;
br->count = count;
}
diff --git a/libvpx/vp8/decoder/dboolhuff.h b/libvpx/vp8/decoder/dboolhuff.h
index 1a08c05..4c0ca1c 100644
--- a/libvpx/vp8/decoder/dboolhuff.h
+++ b/libvpx/vp8/decoder/dboolhuff.h
@@ -9,21 +9,30 @@
*/
-#ifndef DBOOLHUFF_H
-#define DBOOLHUFF_H
+#ifndef DBOOLHUFF_H_
+#define DBOOLHUFF_H_
+
#include <stddef.h>
#include <limits.h>
+
#include "vpx_config.h"
#include "vpx_ports/mem.h"
#include "vpx/vpx_integer.h"
typedef size_t VP8_BD_VALUE;
-# define VP8_BD_VALUE_SIZE ((int)sizeof(VP8_BD_VALUE)*CHAR_BIT)
+#define VP8_BD_VALUE_SIZE ((int)sizeof(VP8_BD_VALUE)*CHAR_BIT)
+
/*This is meant to be a large, positive constant that can still be efficiently
loaded as an immediate (on platforms like ARM, for example).
Even relatively modest values like 100 would work fine.*/
-# define VP8_LOTS_OF_BITS (0x40000000)
+#define VP8_LOTS_OF_BITS (0x40000000)
+
+/*Decrypt n bytes of data from input -> output, using the decrypt_state
+ passed in VP8D_SET_DECRYPTOR.
+*/
+typedef void (vp8_decrypt_cb)(void *decrypt_state, const unsigned char *input,
+ unsigned char *output, int count);
typedef struct
{
@@ -32,46 +41,20 @@ typedef struct
VP8_BD_VALUE value;
int count;
unsigned int range;
+ vp8_decrypt_cb *decrypt_cb;
+ void *decrypt_state;
} BOOL_DECODER;
DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
int vp8dx_start_decode(BOOL_DECODER *br,
const unsigned char *source,
- unsigned int source_sz);
+ unsigned int source_sz,
+ vp8_decrypt_cb *decrypt_cb,
+ void *decrypt_state);
void vp8dx_bool_decoder_fill(BOOL_DECODER *br);
-/*The refill loop is used in several places, so define it in a macro to make
- sure they're all consistent.
- An inline function would be cleaner, but has a significant penalty, because
- multiple BOOL_DECODER fields must be modified, and the compiler is not smart
- enough to eliminate the stores to those fields and the subsequent reloads
- from them when inlining the function.*/
-#define VP8DX_BOOL_DECODER_FILL(_count,_value,_bufptr,_bufend) \
- do \
- { \
- int shift = VP8_BD_VALUE_SIZE - 8 - ((_count) + 8); \
- int loop_end, x; \
- size_t bits_left = ((_bufend)-(_bufptr))*CHAR_BIT; \
- \
- x = (int)(shift + CHAR_BIT - bits_left); \
- loop_end = 0; \
- if(x >= 0) \
- { \
- (_count) += VP8_LOTS_OF_BITS; \
- loop_end = x; \
- if(!bits_left) break; \
- } \
- while(shift >= loop_end) \
- { \
- (_count) += CHAR_BIT; \
- (_value) |= (VP8_BD_VALUE)*(_bufptr)++ << shift; \
- shift -= CHAR_BIT; \
- } \
- } \
- while(0) \
-
static int vp8dx_decode_bool(BOOL_DECODER *br, int probability) {
unsigned int bit = 0;
@@ -151,4 +134,5 @@ static int vp8dx_bool_error(BOOL_DECODER *br)
/* No error. */
return 0;
}
-#endif
+
+#endif // DBOOLHUFF_H_
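vp8_decrypt_cb decouples the bool decoder from how the bitstream is protected: before refilling, the decoder passes the callback a source pointer and a small byte count, then reads plaintext from the output buffer (the decrypted[] staging array in dboolhuff.c above). A toy caller-side callback matching that signature (the XOR cipher and its state struct are illustrative only):

    /* toy decryptor matching the vp8_decrypt_cb typedef above; the
     * xor_state struct and one-byte key are illustrative only */
    struct xor_state { unsigned char key; };

    static void xor_decrypt(void *decrypt_state, const unsigned char *input,
                            unsigned char *output, int count)
    {
        const struct xor_state *s = (const struct xor_state *)decrypt_state;
        int i;
        for (i = 0; i < count; i++)
            output[i] = input[i] ^ s->key;
    }

    /* wired up through the widened vp8dx_start_decode():
     *     struct xor_state st = { 0x5a };
     *     vp8dx_start_decode(&br, source, source_sz, xor_decrypt, &st);
     */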
diff --git a/libvpx/vp8/decoder/decodemv.h b/libvpx/vp8/decoder/decodemv.h
index 9403424..05a33d2 100644
--- a/libvpx/vp8/decoder/decodemv.h
+++ b/libvpx/vp8/decoder/decodemv.h
@@ -8,7 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#ifndef DECODEMV_H_
+#define DECODEMV_H_
#include "onyxd_int.h"
void vp8_decode_mode_mvs(VP8D_COMP *);
+
+#endif // DECODEMV_H_
diff --git a/libvpx/vp8/decoder/decoderthreading.h b/libvpx/vp8/decoder/decoderthreading.h
index 60c39d1..bc716e4 100644
--- a/libvpx/vp8/decoder/decoderthreading.h
+++ b/libvpx/vp8/decoder/decoderthreading.h
@@ -8,19 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
-
-
-
-#ifndef _DECODER_THREADING_H
-#define _DECODER_THREADING_H
+#ifndef DECODERTHREADING_H_
+#define DECODERTHREADING_H_
#if CONFIG_MULTITHREAD
-extern void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd);
-extern void vp8_decoder_remove_threads(VP8D_COMP *pbi);
-extern void vp8_decoder_create_threads(VP8D_COMP *pbi);
-extern void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows);
-extern void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows);
+void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd);
+void vp8_decoder_remove_threads(VP8D_COMP *pbi);
+void vp8_decoder_create_threads(VP8D_COMP *pbi);
+void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows);
+void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows);
#endif
-#endif
+#endif // DECODERTHREADING_H_
diff --git a/libvpx/vp8/decoder/decodframe.c b/libvpx/vp8/decoder/decodframe.c
index a4a00f6..51eeb02 100644
--- a/libvpx/vp8/decoder/decodframe.c
+++ b/libvpx/vp8/decoder/decodframe.c
@@ -10,7 +10,8 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
+#include "./vpx_scale_rtcd.h"
#include "onyxd_int.h"
#include "vp8/common/header.h"
#include "vp8/common/reconintra4x4.h"
@@ -20,7 +21,7 @@
#include "vp8/common/alloccommon.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/setupintrarecon.h"
#include "decodemv.h"
@@ -758,11 +759,16 @@ static void decode_mb_rows(VP8D_COMP *pbi)
}
-static unsigned int read_partition_size(const unsigned char *cx_size)
+static unsigned int read_partition_size(VP8D_COMP *pbi,
+ const unsigned char *cx_size)
{
- const unsigned int size =
- cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16);
- return size;
+ unsigned char temp[3];
+ if (pbi->decrypt_cb)
+ {
+ pbi->decrypt_cb(pbi->decrypt_state, cx_size, temp, 3);
+ cx_size = temp;
+ }
+ return cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16);
}
static int read_is_valid(const unsigned char *start,
@@ -793,7 +799,7 @@ static unsigned int read_available_partition_size(
if (i < num_part - 1)
{
if (read_is_valid(partition_size_ptr, 3, first_fragment_end))
- partition_size = read_partition_size(partition_size_ptr);
+ partition_size = read_partition_size(pbi, partition_size_ptr);
else if (pbi->ec_active)
partition_size = (unsigned int)bytes_left;
else
@@ -827,8 +833,8 @@ static void setup_token_decoder(VP8D_COMP *pbi,
unsigned int partition_idx;
unsigned int fragment_idx;
unsigned int num_token_partitions;
- const unsigned char *first_fragment_end = pbi->fragments[0] +
- pbi->fragment_sizes[0];
+ const unsigned char *first_fragment_end = pbi->fragments.ptrs[0] +
+ pbi->fragments.sizes[0];
TOKEN_PARTITION multi_token_partition =
(TOKEN_PARTITION)vp8_read_literal(&pbi->mbc[8], 2);
@@ -838,10 +844,10 @@ static void setup_token_decoder(VP8D_COMP *pbi,
/* Check for partitions within the fragments and unpack the fragments
* so that each fragment pointer points to its corresponding partition. */
- for (fragment_idx = 0; fragment_idx < pbi->num_fragments; ++fragment_idx)
+ for (fragment_idx = 0; fragment_idx < pbi->fragments.count; ++fragment_idx)
{
- unsigned int fragment_size = pbi->fragment_sizes[fragment_idx];
- const unsigned char *fragment_end = pbi->fragments[fragment_idx] +
+ unsigned int fragment_size = pbi->fragments.sizes[fragment_idx];
+ const unsigned char *fragment_end = pbi->fragments.ptrs[fragment_idx] +
fragment_size;
/* Special case for handling the first partition since we have already
* read its size. */
@@ -849,16 +855,16 @@ static void setup_token_decoder(VP8D_COMP *pbi,
{
/* Size of first partition + token partition sizes element */
ptrdiff_t ext_first_part_size = token_part_sizes -
- pbi->fragments[0] + 3 * (num_token_partitions - 1);
+ pbi->fragments.ptrs[0] + 3 * (num_token_partitions - 1);
fragment_size -= (unsigned int)ext_first_part_size;
if (fragment_size > 0)
{
- pbi->fragment_sizes[0] = (unsigned int)ext_first_part_size;
+ pbi->fragments.sizes[0] = (unsigned int)ext_first_part_size;
/* The fragment contains an additional partition. Move to
* next. */
fragment_idx++;
- pbi->fragments[fragment_idx] = pbi->fragments[0] +
- pbi->fragment_sizes[0];
+ pbi->fragments.ptrs[fragment_idx] = pbi->fragments.ptrs[0] +
+ pbi->fragments.sizes[0];
}
}
/* Split the chunk into partitions read from the bitstream */
@@ -867,12 +873,12 @@ static void setup_token_decoder(VP8D_COMP *pbi,
ptrdiff_t partition_size = read_available_partition_size(
pbi,
token_part_sizes,
- pbi->fragments[fragment_idx],
+ pbi->fragments.ptrs[fragment_idx],
first_fragment_end,
fragment_end,
fragment_idx - 1,
num_token_partitions);
- pbi->fragment_sizes[fragment_idx] = (unsigned int)partition_size;
+ pbi->fragments.sizes[fragment_idx] = (unsigned int)partition_size;
fragment_size -= (unsigned int)partition_size;
assert(fragment_idx <= num_token_partitions);
if (fragment_size > 0)
@@ -880,19 +886,20 @@ static void setup_token_decoder(VP8D_COMP *pbi,
/* The fragment contains an additional partition.
* Move to next. */
fragment_idx++;
- pbi->fragments[fragment_idx] =
- pbi->fragments[fragment_idx - 1] + partition_size;
+ pbi->fragments.ptrs[fragment_idx] =
+ pbi->fragments.ptrs[fragment_idx - 1] + partition_size;
}
}
}
- pbi->num_fragments = num_token_partitions + 1;
+ pbi->fragments.count = num_token_partitions + 1;
- for (partition_idx = 1; partition_idx < pbi->num_fragments; ++partition_idx)
+ for (partition_idx = 1; partition_idx < pbi->fragments.count; ++partition_idx)
{
if (vp8dx_start_decode(bool_decoder,
- pbi->fragments[partition_idx],
- pbi->fragment_sizes[partition_idx]))
+ pbi->fragments.ptrs[partition_idx],
+ pbi->fragments.sizes[partition_idx],
+ pbi->decrypt_cb, pbi->decrypt_state))
vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d",
partition_idx);
@@ -979,11 +986,11 @@ static void init_frame(VP8D_COMP *pbi)
int vp8_decode_frame(VP8D_COMP *pbi)
{
- vp8_reader *const bc = & pbi->mbc[8];
- VP8_COMMON *const pc = & pbi->common;
- MACROBLOCKD *const xd = & pbi->mb;
- const unsigned char *data = pbi->fragments[0];
- const unsigned char *data_end = data + pbi->fragment_sizes[0];
+ vp8_reader *const bc = &pbi->mbc[8];
+ VP8_COMMON *const pc = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const unsigned char *data = pbi->fragments.ptrs[0];
+ const unsigned char *data_end = data + pbi->fragments.sizes[0];
ptrdiff_t first_partition_length_in_bytes;
int i, j, k, l;
@@ -1015,18 +1022,30 @@ int vp8_decode_frame(VP8D_COMP *pbi)
}
else
{
- pc->frame_type = (FRAME_TYPE)(data[0] & 1);
- pc->version = (data[0] >> 1) & 7;
- pc->show_frame = (data[0] >> 4) & 1;
+ unsigned char clear_buffer[10];
+ const unsigned char *clear = data;
+ if (pbi->decrypt_cb)
+ {
+ int n = data_end - data;
+ if (n > 10) n = 10;
+ pbi->decrypt_cb(pbi->decrypt_state, data, clear_buffer, n);
+ clear = clear_buffer;
+ }
+
+ pc->frame_type = (FRAME_TYPE)(clear[0] & 1);
+ pc->version = (clear[0] >> 1) & 7;
+ pc->show_frame = (clear[0] >> 4) & 1;
first_partition_length_in_bytes =
- (data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
+ (clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5;
- if (!pbi->ec_active && (data + first_partition_length_in_bytes > data_end
+ if (!pbi->ec_active &&
+ (data + first_partition_length_in_bytes > data_end
|| data + first_partition_length_in_bytes < data))
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition 0 length");
data += 3;
+ clear += 3;
vp8_setup_version(pc);
@@ -1039,7 +1058,7 @@ int vp8_decode_frame(VP8D_COMP *pbi)
*/
if (!pbi->ec_active || data + 3 < data_end)
{
- if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
+ if (clear[0] != 0x9d || clear[1] != 0x01 || clear[2] != 0x2a)
vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
}
@@ -1050,13 +1069,13 @@ int vp8_decode_frame(VP8D_COMP *pbi)
*/
if (!pbi->ec_active || data + 6 < data_end)
{
- pc->Width = (data[3] | (data[4] << 8)) & 0x3fff;
- pc->horiz_scale = data[4] >> 6;
- pc->Height = (data[5] | (data[6] << 8)) & 0x3fff;
- pc->vert_scale = data[6] >> 6;
+ pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff;
+ pc->horiz_scale = clear[4] >> 6;
+ pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff;
+ pc->vert_scale = clear[6] >> 6;
}
data += 7;
-
+ clear += 7;
}
else
{
@@ -1071,11 +1090,12 @@ int vp8_decode_frame(VP8D_COMP *pbi)
init_frame(pbi);
- if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data)))
+ if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data),
+ pbi->decrypt_cb, pbi->decrypt_state))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
if (pc->frame_type == KEY_FRAME) {
- pc->clr_type = (YUV_TYPE)vp8_read_bit(bc);
+ (void)vp8_read_bit(bc); // colorspace
pc->clamp_type = (CLAMP_TYPE)vp8_read_bit(bc);
}
@@ -1334,11 +1354,11 @@ int vp8_decode_frame(VP8D_COMP *pbi)
#if CONFIG_MULTITHREAD
if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION)
{
- unsigned int i;
+ unsigned int thread;
vp8mt_decode_mb_rows(pbi, xd);
vp8_yv12_extend_frame_borders(yv12_fb_new);
- for (i = 0; i < pbi->decoding_thread_count; ++i)
- corrupt_tokens |= pbi->mb_row_di[i].mbd.corrupted;
+ for (thread = 0; thread < pbi->decoding_thread_count; ++thread)
+ corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted;
}
else
#endif
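Header parsing now reads from a decrypted staging copy (clear) instead of data directly, but the layout is unchanged: three tag bytes pack frame_type (1 bit, 0 = key frame), version (3 bits), show_frame (1 bit), and a 19-bit first-partition size; key frames then add the 0x9d 0x01 0x2a sync code and two 14-bit dimensions with 2-bit scale fields. A standalone sketch of the tag parse (the struct is illustrative):

    /* illustrative container for the 3-byte VP8 frame tag parsed above */
    struct vp8_frame_tag {
        int key_frame;                /* (clear[0] & 1) == 0 for key frames */
        int version;                  /* 3 bits */
        int show_frame;               /* 1 bit */
        unsigned int first_part_size; /* 19 bits */
    };

    static void parse_frame_tag(const unsigned char *clear,
                                struct vp8_frame_tag *t)
    {
        t->key_frame       = (clear[0] & 1) == 0;
        t->version         = (clear[0] >> 1) & 7;
        t->show_frame      = (clear[0] >> 4) & 1;
        t->first_part_size =
            (clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5;
    }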
diff --git a/libvpx/vp8/decoder/detokenize.h b/libvpx/vp8/decoder/detokenize.h
index 8640bda..f2130b3 100644
--- a/libvpx/vp8/decoder/detokenize.h
+++ b/libvpx/vp8/decoder/detokenize.h
@@ -8,13 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
-#ifndef DETOKENIZE_H
-#define DETOKENIZE_H
+#ifndef DETOKENIZE_H_
+#define DETOKENIZE_H_
#include "onyxd_int.h"
void vp8_reset_mb_tokens_context(MACROBLOCKD *x);
int vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *);
-#endif /* DETOKENIZE_H */
+#endif // DETOKENIZE_H_
diff --git a/libvpx/vp8/decoder/ec_types.h b/libvpx/vp8/decoder/ec_types.h
index ccb5ddb..b24bfd9 100644
--- a/libvpx/vp8/decoder/ec_types.h
+++ b/libvpx/vp8/decoder/ec_types.h
@@ -14,7 +14,6 @@
#define MAX_OVERLAPS 16
-
/* The area (pixel area in Q6) the block pointed to by bmi overlaps
* another block with.
*/
@@ -48,4 +47,4 @@ typedef struct
MV_REFERENCE_FRAME ref_frame;
} EC_BLOCK;
-#endif /* VP8_DEC_EC_TYPES_H */
+#endif // VP8_DEC_EC_TYPES_H
diff --git a/libvpx/vp8/decoder/error_concealment.c b/libvpx/vp8/decoder/error_concealment.c
index 8b2e32b..0b58c98 100644
--- a/libvpx/vp8/decoder/error_concealment.c
+++ b/libvpx/vp8/decoder/error_concealment.c
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <assert.h>
+
#include "error_concealment.h"
#include "onyxd_int.h"
#include "decodemv.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/findnearmv.h"
-#include <assert.h>
-
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
diff --git a/libvpx/vp8/decoder/error_concealment.h b/libvpx/vp8/decoder/error_concealment.h
index 65ae9d9..fb96b36 100644
--- a/libvpx/vp8/decoder/error_concealment.h
+++ b/libvpx/vp8/decoder/error_concealment.h
@@ -9,8 +9,8 @@
*/
-#ifndef ERROR_CONCEALMENT_H
-#define ERROR_CONCEALMENT_H
+#ifndef ERROR_CONCEALMENT_H_
+#define ERROR_CONCEALMENT_H_
#include "onyxd_int.h"
#include "ec_types.h"
@@ -38,4 +38,4 @@ void vp8_interpolate_motion(MACROBLOCKD *mb,
*/
void vp8_conceal_corrupt_mb(MACROBLOCKD *xd);
-#endif
+#endif // ERROR_CONCEALMENT_H_
diff --git a/libvpx/vp8/decoder/onyxd_if.c b/libvpx/vp8/decoder/onyxd_if.c
index 8d6871b..2d9e343 100644
--- a/libvpx/vp8/decoder/onyxd_if.c
+++ b/libvpx/vp8/decoder/onyxd_if.c
@@ -25,7 +25,8 @@
#include <assert.h>
#include "vp8/common/quant_common.h"
-#include "vpx_scale/vpxscale.h"
+#include "./vpx_scale_rtcd.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/systemdependent.h"
#include "vpx_ports/vpx_timer.h"
#include "detokenize.h"
@@ -41,7 +42,16 @@ extern void vp8cx_init_de_quantizer(VP8D_COMP *pbi);
static int get_free_fb (VP8_COMMON *cm);
static void ref_cnt_fb (int *buf, int *idx, int new_idx);
-struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
+static void remove_decompressor(VP8D_COMP *pbi)
+{
+#if CONFIG_ERROR_CONCEALMENT
+ vp8_de_alloc_overlap_lists(pbi);
+#endif
+ vp8_remove_common(&pbi->common);
+ vpx_free(pbi);
+}
+
+static struct VP8D_COMP * create_decompressor(VP8D_CONFIG *oxcf)
{
VP8D_COMP *pbi = vpx_memalign(32, sizeof(VP8D_COMP));
@@ -53,7 +63,7 @@ struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
if (setjmp(pbi->common.error.jmp))
{
pbi->common.error.setjmp = 0;
- vp8dx_remove_decompressor(pbi);
+ remove_decompressor(pbi);
return 0;
}
@@ -64,11 +74,6 @@ struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
pbi->common.current_video_frame = 0;
pbi->ready_for_new_data = 1;
-#if CONFIG_MULTITHREAD
- pbi->max_threads = oxcf->max_threads;
- vp8_decoder_create_threads(pbi);
-#endif
-
/* vp8cx_init_de_quantizer() is first called here. Add a check in frame_init_dequantizer()
* to avoid calling vp8cx_init_de_quantizer() unnecessarily for every frame.
*/
@@ -91,9 +96,6 @@ struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
pbi->decoded_key_frame = 0;
- pbi->input_fragments = oxcf->input_fragments;
- pbi->num_fragments = 0;
-
/* Independent partitions is activated when a frame updates the
* token probability table to have equal probabilities over the
* PREV_COEF context.
@@ -105,25 +107,6 @@ struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
return pbi;
}
-
-void vp8dx_remove_decompressor(VP8D_COMP *pbi)
-{
- if (!pbi)
- return;
-
-#if CONFIG_MULTITHREAD
- if (pbi->b_multithreaded_rd)
- vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
- vp8_decoder_remove_threads(pbi);
-#endif
-#if CONFIG_ERROR_CONCEALMENT
- vp8_de_alloc_overlap_lists(pbi);
-#endif
- vp8_remove_common(&pbi->common);
- vpx_free(pbi);
-}
-
-
vpx_codec_err_t vp8dx_get_reference(VP8D_COMP *pbi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
{
VP8_COMMON *cm = &pbi->common;
@@ -281,60 +264,13 @@ static int swap_frame_buffers (VP8_COMMON *cm)
return err;
}
-int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
- const uint8_t *source,
- int64_t time_stamp)
+int check_fragments_for_errors(VP8D_COMP *pbi)
{
-#if HAVE_NEON
- int64_t dx_store_reg[8];
-#endif
- VP8_COMMON *cm = &pbi->common;
- int retcode = -1;
-
- pbi->common.error.error_code = VPX_CODEC_OK;
-
- if (pbi->num_fragments == 0)
- {
- /* New frame, reset fragment pointers and sizes */
- vpx_memset((void*)pbi->fragments, 0, sizeof(pbi->fragments));
- vpx_memset(pbi->fragment_sizes, 0, sizeof(pbi->fragment_sizes));
- }
- if (pbi->input_fragments && !(source == NULL && size == 0))
- {
- /* Store a pointer to this fragment and return. We haven't
- * received the complete frame yet, so we will wait with decoding.
- */
- assert(pbi->num_fragments < MAX_PARTITIONS);
- pbi->fragments[pbi->num_fragments] = source;
- pbi->fragment_sizes[pbi->num_fragments] = size;
- pbi->num_fragments++;
- if (pbi->num_fragments > (1 << EIGHT_PARTITION) + 1)
- {
- pbi->common.error.error_code = VPX_CODEC_UNSUP_BITSTREAM;
- pbi->common.error.setjmp = 0;
- pbi->num_fragments = 0;
- return -1;
- }
- return 0;
- }
-
- if (!pbi->input_fragments)
- {
- pbi->fragments[0] = source;
- pbi->fragment_sizes[0] = size;
- pbi->num_fragments = 1;
- }
- assert(pbi->common.multi_token_partition <= EIGHT_PARTITION);
- if (pbi->num_fragments == 0)
- {
- pbi->num_fragments = 1;
- pbi->fragments[0] = NULL;
- pbi->fragment_sizes[0] = 0;
- }
-
if (!pbi->ec_active &&
- pbi->num_fragments <= 1 && pbi->fragment_sizes[0] == 0)
+ pbi->fragments.count <= 1 && pbi->fragments.sizes[0] == 0)
{
+ VP8_COMMON *cm = &pbi->common;
+
/* If error concealment is disabled we won't signal missing frames
* to the decoder.
*/
@@ -360,12 +296,29 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
/* Signal that we have no frame to show. */
cm->show_frame = 0;
- pbi->num_fragments = 0;
-
/* Nothing more to do. */
return 0;
}
+ return 1;
+}
+
+int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
+ const uint8_t *source,
+ int64_t time_stamp)
+{
+#if HAVE_NEON
+ int64_t dx_store_reg[8];
+#endif
+ VP8_COMMON *cm = &pbi->common;
+ int retcode = -1;
+
+ pbi->common.error.error_code = VPX_CODEC_OK;
+
+ retcode = check_fragments_for_errors(pbi);
+ if(retcode <= 0)
+ return retcode;
+
#if HAVE_NEON
#if CONFIG_RUNTIME_CPU_DETECT
if (cm->cpu_caps & HAS_NEON)
@@ -418,7 +371,13 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
vp8_clear_system_state();
-#if CONFIG_ERROR_CONCEALMENT
+ if (cm->show_frame)
+ {
+ cm->current_video_frame++;
+ cm->show_frame_mi = cm->mi;
+ }
+
+ #if CONFIG_ERROR_CONCEALMENT
/* swap the mode infos to storage for future error concealment */
if (pbi->ec_enabled && pbi->common.prev_mi)
{
@@ -440,9 +399,6 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
}
#endif
- if (cm->show_frame)
- cm->current_video_frame++;
-
pbi->ready_for_new_data = 0;
pbi->last_time_stamp = time_stamp;
@@ -457,7 +413,6 @@ decode_exit:
#endif
pbi->common.error.setjmp = 0;
- pbi->num_fragments = 0;
return retcode;
}
int vp8dx_get_raw_frame(VP8D_COMP *pbi, YV12_BUFFER_CONFIG *sd, int64_t *time_stamp, int64_t *time_end_stamp, vp8_ppflags_t *flags)
@@ -475,7 +430,6 @@ int vp8dx_get_raw_frame(VP8D_COMP *pbi, YV12_BUFFER_CONFIG *sd, int64_t *time_st
*time_stamp = pbi->last_time_stamp;
*time_end_stamp = 0;
- sd->clrtype = pbi->common.clr_type;
#if CONFIG_POSTPROC
ret = vp8_post_proc_frame(&pbi->common, sd, flags);
#else
@@ -520,3 +474,54 @@ int vp8dx_references_buffer( VP8_COMMON *oci, int ref_frame )
return 0;
}
+
+int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf)
+{
+ if(!fb->use_frame_threads)
+ {
+ /* decoder instance for single thread mode */
+ fb->pbi[0] = create_decompressor(oxcf);
+ if(!fb->pbi[0])
+ return VPX_CODEC_ERROR;
+
+#if CONFIG_MULTITHREAD
+ /* enable row-based threading only when use_frame_threads
+ * is disabled */
+ fb->pbi[0]->max_threads = oxcf->max_threads;
+ vp8_decoder_create_threads(fb->pbi[0]);
+#endif
+ }
+ else
+ {
+ /* TODO : create frame threads and decoder instances for each
+ * thread here */
+ }
+
+ return VPX_CODEC_OK;
+}
+
+int vp8_remove_decoder_instances(struct frame_buffers *fb)
+{
+ if(!fb->use_frame_threads)
+ {
+ VP8D_COMP *pbi = fb->pbi[0];
+
+ if (!pbi)
+ return VPX_CODEC_ERROR;
+#if CONFIG_MULTITHREAD
+ if (pbi->b_multithreaded_rd)
+ vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
+ vp8_decoder_remove_threads(pbi);
+#endif
+
+ /* decoder instance for single thread mode */
+ remove_decompressor(pbi);
+ }
+ else
+ {
+ /* TODO : remove frame threads and decoder instances for each
+ * thread here */
+ }
+
+ return VPX_CODEC_OK;
+}
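
Note: this hunk replaces the old per-decompressor entry points with vp8_create_decoder_instances() and vp8_remove_decoder_instances(), which operate on a struct frame_buffers (defined in the onyxd_int.h hunk below) so that frame-based threading can later own several VP8D_COMP instances. A minimal lifecycle sketch, not part of the patch, for single-thread mode (use_frame_threads == 0); the include path and the elided decode loop are assumptions:

    #include <string.h>
    #include "vp8/decoder/onyxd_int.h"

    static int decode_session(VP8D_CONFIG *oxcf)
    {
        struct frame_buffers fb;

        memset(&fb, 0, sizeof(fb));
        fb.use_frame_threads = 0;   /* row-based threading only, per this patch */

        if (vp8_create_decoder_instances(&fb, oxcf) != VPX_CODEC_OK)
            return VPX_CODEC_ERROR;  /* fb.pbi[0] could not be allocated */

        /* ... feed packets via vp8dx_receive_compressed_data(fb.pbi[0], ...) ... */

        return vp8_remove_decoder_instances(&fb); /* joins threads, frees pbi[0] */
    }
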
diff --git a/libvpx/vp8/decoder/onyxd_int.h b/libvpx/vp8/decoder/onyxd_int.h
index 0063beb..54a98f7 100644
--- a/libvpx/vp8/decoder/onyxd_int.h
+++ b/libvpx/vp8/decoder/onyxd_int.h
@@ -9,8 +9,9 @@
*/
-#ifndef __INC_VP8D_INT_H
-#define __INC_VP8D_INT_H
+#ifndef ONYXD_INT_H_
+#define ONYXD_INT_H_
+
#include "vpx_config.h"
#include "vp8/common/onyxd.h"
#include "treereader.h"
@@ -33,6 +34,31 @@ typedef struct
MACROBLOCKD mbd;
} MB_ROW_DEC;
+
+typedef struct
+{
+ int enabled;
+ unsigned int count;
+ const unsigned char *ptrs[MAX_PARTITIONS];
+ unsigned int sizes[MAX_PARTITIONS];
+} FRAGMENT_DATA;
+
+#define MAX_FB_MT_DEC 32
+
+struct frame_buffers
+{
+ /*
+ * this struct will be populated with frame buffer management
+ * info in future commits. */
+
+ /* enable/disable frame-based threading */
+ int use_frame_threads;
+
+ /* decoder instances */
+ struct VP8D_COMP *pbi[MAX_FB_MT_DEC];
+
+};
+
typedef struct VP8D_COMP
{
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
@@ -46,10 +72,7 @@ typedef struct VP8D_COMP
VP8D_CONFIG oxcf;
-
- const unsigned char *fragments[MAX_PARTITIONS];
- unsigned int fragment_sizes[MAX_PARTITIONS];
- unsigned int num_fragments;
+ FRAGMENT_DATA fragments;
#if CONFIG_MULTITHREAD
/* variable for threading */
@@ -95,15 +118,19 @@ typedef struct VP8D_COMP
#endif
int ec_enabled;
int ec_active;
- int input_fragments;
int decoded_key_frame;
int independent_partitions;
int frame_corrupt_residual;
+ vp8_decrypt_cb *decrypt_cb;
+ void *decrypt_state;
} VP8D_COMP;
int vp8_decode_frame(VP8D_COMP *cpi);
+int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf);
+int vp8_remove_decoder_instances(struct frame_buffers *fb);
+
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(lval,expr) do {\
lval = (expr); \
@@ -121,4 +148,4 @@ int vp8_decode_frame(VP8D_COMP *cpi);
} while(0)
#endif
-#endif
+#endif // ONYXD_INT_H_
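
Note: the three loose fields (fragments[], fragment_sizes[], num_fragments) plus the input_fragments flag collapse into the single FRAGMENT_DATA struct above, so the accumulation logic that used to live inline in vp8dx_receive_compressed_data() can be expressed against one object. A sketch of appending one packet under the new layout; append_fragment() is a hypothetical helper, and the MAX_PARTITIONS bound is a simplification of the (1 << EIGHT_PARTITION) + 1 check removed from onyxd.c:

    static int append_fragment(FRAGMENT_DATA *f, const unsigned char *buf,
                               unsigned int size)
    {
        if (f->count >= MAX_PARTITIONS)
            return -1;                  /* the VPX_CODEC_UNSUP_BITSTREAM case */

        f->ptrs[f->count] = buf;
        f->sizes[f->count] = size;
        f->count++;
        return 0;
    }
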
diff --git a/libvpx/vp8/decoder/threading.c b/libvpx/vp8/decoder/threading.c
index 88c06be..7303189 100644
--- a/libvpx/vp8/decoder/threading.c
+++ b/libvpx/vp8/decoder/threading.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if !defined(WIN32) && CONFIG_OS_SUPPORT == 1
# include <unistd.h>
#endif
@@ -36,7 +36,7 @@
} while (0)
-extern void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd);
+void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd);
static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_DEC *mbrd, int count)
{
@@ -343,7 +343,6 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row)
for (mb_row = start_mb_row; mb_row < pc->mb_rows; mb_row += (pbi->decoding_thread_count + 1))
{
- int i;
int recon_yoffset, recon_uvoffset;
int mb_col;
int filter_level;
diff --git a/libvpx/vp8/decoder/treereader.h b/libvpx/vp8/decoder/treereader.h
index 238ff85..9393bb4 100644
--- a/libvpx/vp8/decoder/treereader.h
+++ b/libvpx/vp8/decoder/treereader.h
@@ -9,18 +9,17 @@
*/
-#ifndef tree_reader_h
-#define tree_reader_h 1
+#ifndef TREEREADER_H_
+#define TREEREADER_H_
#include "vp8/common/treecoder.h"
-
#include "dboolhuff.h"
typedef BOOL_DECODER vp8_reader;
#define vp8_read vp8dx_decode_bool
#define vp8_read_literal vp8_decode_value
-#define vp8_read_bit( R) vp8_read( R, vp8_prob_half)
+#define vp8_read_bit(R) vp8_read(R, vp8_prob_half)
/* Intent of tree data structure is to make decoding trivial. */
@@ -38,4 +37,4 @@ static int vp8_treed_read(
return -i;
}
-#endif /* tree_reader_h */
+#endif // TREEREADER_H_
diff --git a/libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm b/libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm
index a644a00..4abe818 100644
--- a/libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm
+++ b/libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm
@@ -15,7 +15,7 @@
EXPORT |vp8_encode_value|
IMPORT |vp8_validate_buffer_arm|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm
index a1cd467..90a141c 100644
--- a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm
+++ b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm
@@ -12,7 +12,7 @@
EXPORT |vp8cx_pack_tokens_armv5|
IMPORT |vp8_validate_buffer_arm|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm
index 1fa5e6c..3a8d17a 100644
--- a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm
+++ b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm
@@ -12,7 +12,7 @@
EXPORT |vp8cx_pack_mb_row_tokens_armv5|
IMPORT |vp8_validate_buffer_arm|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
index 90a98fe..e9aa495 100644
--- a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
+++ b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
@@ -12,7 +12,7 @@
EXPORT |vp8cx_pack_tokens_into_partitions_armv5|
IMPORT |vp8_validate_buffer_arm|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm b/libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm
index d61f5d9..de35a1e 100644
--- a/libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm
+++ b/libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm
@@ -11,7 +11,7 @@
EXPORT |vp8_fast_quantize_b_armv6|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm b/libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm
index f329f8f..05746cf 100644
--- a/libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm
+++ b/libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm
@@ -13,7 +13,7 @@
EXPORT |vp8_subtract_mbuv_armv6|
EXPORT |vp8_subtract_b_armv6|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/dct_arm.c b/libvpx/vp8/encoder/arm/dct_arm.c
index af0fb27..f71300d 100644
--- a/libvpx/vp8/encoder/arm/dct_arm.c
+++ b/libvpx/vp8/encoder/arm/dct_arm.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if HAVE_MEDIA
diff --git a/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm b/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm
index 1430588..9374310 100644
--- a/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm
+++ b/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm
@@ -12,7 +12,7 @@
EXPORT |vp8_fast_quantize_b_neon|
EXPORT |vp8_fast_quantize_b_pair_neon|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm b/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
index 09dd011..5ea8dd8 100644
--- a/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
+++ b/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
@@ -97,7 +97,7 @@ coeff
vmlal.s16 q11, d6, d17 ; c1*2217 + d1*5352 + 12000
vmlsl.s16 q12, d6, d16 ; d1*2217 - c1*5352 + 51000
- vmvn.s16 d4, d4
+ vmvn d4, d4
vshrn.s32 d1, q11, #16 ; op[4] = (c1*2217 + d1*5352 + 12000)>>16
vsub.s16 d1, d1, d4 ; op[4] += (d1!=0)
vshrn.s32 d3, q12, #16 ; op[12]= (d1*2217 - c1*5352 + 51000)>>16
@@ -200,7 +200,7 @@ coeff
vmlal.s16 q11, d27, d17 ; B[4] = c1*2217 + d1*5352 + 12000
vmlsl.s16 q12, d27, d16 ; B[12] = d1*2217 - c1*5352 + 51000
- vmvn.s16 q14, q14
+ vmvn q14, q14
vshrn.s32 d1, q9, #16 ; A[4] = (c1*2217 + d1*5352 + 12000)>>16
vshrn.s32 d3, q10, #16 ; A[12]= (d1*2217 - c1*5352 + 51000)>>16
diff --git a/libvpx/vp8/encoder/arm/neon/subtract_neon.asm b/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
index 91a328c..5bda786 100644
--- a/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
+++ b/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
@@ -12,7 +12,7 @@
EXPORT |vp8_subtract_mby_neon|
EXPORT |vp8_subtract_mbuv_neon|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/quantize_arm.c b/libvpx/vp8/encoder/arm/quantize_arm.c
index 8999e34..80d9ad0 100644
--- a/libvpx/vp8/encoder/arm/quantize_arm.c
+++ b/libvpx/vp8/encoder/arm/quantize_arm.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/encoder/block.h"
#include <math.h>
#include "vpx_mem/vpx_mem.h"
diff --git a/libvpx/vp8/encoder/bitstream.c b/libvpx/vp8/encoder/bitstream.c
index f84ae68..5f0c1f7 100644
--- a/libvpx/vp8/encoder/bitstream.c
+++ b/libvpx/vp8/encoder/bitstream.c
@@ -50,7 +50,7 @@ const int vp8cx_base_skip_false_prob[128] =
unsigned __int64 Sectionbits[500];
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
int intra_mode_stats[10][10][10];
static unsigned int tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
extern unsigned int active_section;
@@ -90,17 +90,17 @@ static void update_mode(
if (new_b + (n << 8) < old_b)
{
- int i = 0;
+ int j = 0;
vp8_write_bit(w, 1);
do
{
- const vp8_prob p = Pnew[i];
+ const vp8_prob p = Pnew[j];
- vp8_write_literal(w, Pcur[i] = p ? p : 1, 8);
+ vp8_write_literal(w, Pcur[j] = p ? p : 1, 8);
}
- while (++i < n);
+ while (++j < n);
}
else
vp8_write_bit(w, 0);
@@ -245,15 +245,15 @@ void vp8_pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount)
if (L)
{
- const unsigned char *pp = b->prob;
- int v = e >> 1;
- int n = L; /* number of bits in v, assumed nonzero */
- int i = 0;
+ const unsigned char *proba = b->prob;
+ const int v2 = e >> 1;
+ int n2 = L; /* number of bits in v2, assumed nonzero */
+ i = 0;
do
{
- const int bb = (v >> --n) & 1;
- split = 1 + (((range - 1) * pp[i>>1]) >> 8);
+ const int bb = (v2 >> --n2) & 1;
+ split = 1 + (((range - 1) * proba[i>>1]) >> 8);
i = b->tree[i+bb];
if (bb)
@@ -301,7 +301,7 @@ void vp8_pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount)
lowvalue <<= shift;
}
- while (n);
+ while (n2);
}
@@ -493,7 +493,7 @@ static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, const MACRO
}
void vp8_convert_rfct_to_prob(VP8_COMP *const cpi)
{
- const int *const rfct = cpi->count_mb_ref_frame_usage;
+ const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
@@ -531,7 +531,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
vp8_convert_rfct_to_prob(cpi);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 1;
#endif
@@ -580,7 +580,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 9;
#endif
@@ -593,7 +593,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
if (rf == INTRA_FRAME)
{
vp8_write(w, 0, cpi->prob_intra_coded);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 6;
#endif
write_ymode(w, mode, pc->fc.ymode_prob);
@@ -633,13 +633,13 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
vp8_mv_ref_probs(mv_ref_p, ct);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
accum_mv_refs(mode, ct);
#endif
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 3;
#endif
@@ -649,7 +649,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
{
case NEWMV:
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 5;
#endif
@@ -692,7 +692,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
if (blockmode == NEW4X4)
{
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 11;
#endif
write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
@@ -769,7 +769,7 @@ static void write_kfmodes(VP8_COMP *cpi)
const B_PREDICTION_MODE L = left_block_mode(m, i);
const int bm = m->bmi[i].as_mode;
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
++intra_mode_stats [A] [L] [bm];
#endif
@@ -980,6 +980,12 @@ void vp8_calc_ref_frame_costs(int *ref_frame_cost,
int prob_garf
)
{
+ assert(prob_intra >= 0);
+ assert(prob_intra <= 255);
+ assert(prob_last >= 0);
+ assert(prob_last <= 255);
+ assert(prob_garf >= 0);
+ assert(prob_garf <= 255);
ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(prob_intra);
ref_frame_cost[LAST_FRAME] = vp8_cost_one(prob_intra)
+ vp8_cost_zero(prob_last);
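
Note: the new asserts pin all three probabilities to the 0..255 range the cost tables index. The four costs then come from a three-level binary tree: one bit decides intra vs. inter, a second decides LAST vs. the golden/alt-ref pair, and a third splits golden from alt-ref, so each entry is the sum of the branch costs along its path. A sketch of that structure; cost0() and cost1() are toy stand-ins for vp8_cost_zero() and vp8_cost_one() (which read fixed-point bit costs from vp8_prob_cost[]), and the GOLDEN/ALTREF lines are assumed to follow the same pattern as the two shown in the hunk:

    static unsigned int cost0(int prob) { return 256 - prob; } /* toy model */
    static unsigned int cost1(int prob) { return prob; }       /* toy model */

    static void ref_costs(unsigned int c[4], int p_intra, int p_last,
                          int p_garf)
    {
        c[0] = cost0(p_intra);                                  /* INTRA  */
        c[1] = cost1(p_intra) + cost0(p_last);                  /* LAST   */
        c[2] = cost1(p_intra) + cost1(p_last) + cost0(p_garf);  /* GOLDEN */
        c[3] = cost1(p_intra) + cost1(p_last) + cost1(p_garf);  /* ALTREF */
    }
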
@@ -996,7 +1002,7 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
{
int savings = 0;
- const int *const rfct = cpi->count_mb_ref_frame_usage;
+ const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
int new_intra, new_last, new_garf, oldtotal, newtotal;
@@ -1154,7 +1160,7 @@ void vp8_update_coef_probs(VP8_COMP *cpi)
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
++ tree_update_hist [i][j][k][t] [u];
#endif
@@ -1175,7 +1181,7 @@ void vp8_update_coef_probs(VP8_COMP *cpi)
while (++t < ENTROPY_NODES);
/* Accum token counts for generation of default statistics */
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
t = 0;
do
@@ -1316,7 +1322,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
vp8_start_encode(bc, cx_data, cx_data_end);
/* signal clr type */
- vp8_write_bit(bc, pc->clr_type);
+ vp8_write_bit(bc, 0);
vp8_write_bit(bc, pc->clamp_type);
}
@@ -1521,7 +1527,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
if (pc->frame_type != KEY_FRAME)
vp8_write_bit(bc, pc->refresh_last_frame);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
if (pc->frame_type == INTER_FRAME)
active_section = 0;
@@ -1544,7 +1550,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
vp8_update_coef_probs(cpi);
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 2;
#endif
@@ -1555,7 +1561,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
{
write_kfmodes(cpi);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 8;
#endif
}
@@ -1563,7 +1569,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
{
pack_inter_mode_mvs(cpi);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 1;
#endif
}
@@ -1681,7 +1687,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
#endif
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
void print_tree_update_probs()
{
int i, j, k, l;
diff --git a/libvpx/vp8/encoder/block.h b/libvpx/vp8/encoder/block.h
index f9d63eb..cf74c7a 100644
--- a/libvpx/vp8/encoder/block.h
+++ b/libvpx/vp8/encoder/block.h
@@ -18,6 +18,9 @@
#include "vp8/common/entropy.h"
#include "vpx_ports/mem.h"
+#define MAX_MODES 20
+#define MAX_ERROR_BINS 1024
+
/* motion search site */
typedef struct
{
@@ -34,7 +37,7 @@ typedef struct block
/* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
short *quant;
short *quant_fast;
- unsigned char *quant_shift;
+ short *quant_shift;
short *zbin;
short *zrun_zbin_boost;
short *round;
@@ -134,7 +137,19 @@ typedef struct macroblock
int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
int64_t prediction_error;
int64_t intra_error;
-
+ int count_mb_ref_frame_usage[MAX_REF_FRAMES];
+
+ int rd_thresh_mult[MAX_MODES];
+ int rd_threshes[MAX_MODES];
+ unsigned int mbs_tested_so_far;
+ unsigned int mode_test_hit_counts[MAX_MODES];
+ int zbin_mode_boost_enabled;
+ int zbin_mode_boost;
+ int last_zbin_mode_boost;
+
+ int last_zbin_over_quant;
+ int zbin_over_quant;
+ int error_bins[MAX_ERROR_BINS];
void (*short_fdct4x4)(short *input, short *output, int pitch);
void (*short_fdct8x4)(short *input, short *output, int pitch);
diff --git a/libvpx/vp8/encoder/boolhuff.c b/libvpx/vp8/encoder/boolhuff.c
index 74770a2..3b0c03a 100644
--- a/libvpx/vp8/encoder/boolhuff.c
+++ b/libvpx/vp8/encoder/boolhuff.c
@@ -16,7 +16,7 @@ unsigned __int64 Sectionbits[500];
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
unsigned int active_section = 0;
#endif
diff --git a/libvpx/vp8/encoder/boolhuff.h b/libvpx/vp8/encoder/boolhuff.h
index 8309063..39ab586 100644
--- a/libvpx/vp8/encoder/boolhuff.h
+++ b/libvpx/vp8/encoder/boolhuff.h
@@ -67,7 +67,7 @@ static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability)
unsigned int lowvalue = br->lowvalue;
register unsigned int shift;
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
#if defined(SECTIONBITS_OUTPUT)
if (bit)
diff --git a/libvpx/vp8/encoder/denoising.c b/libvpx/vp8/encoder/denoising.c
index c0dd7c1..7819265 100644
--- a/libvpx/vp8/encoder/denoising.c
+++ b/libvpx/vp8/encoder/denoising.c
@@ -13,7 +13,7 @@
#include "vp8/common/reconinter.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
static const unsigned int NOISE_MOTION_THRESHOLD = 25 * 25;
/* SSE_DIFF_THRESHOLD is selected as ~95% confidence assuming
@@ -140,8 +140,7 @@ int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height)
int i;
assert(denoiser);
- /* don't need one for intra start at 1 */
- for (i = 1; i < MAX_REF_FRAMES; i++)
+ for (i = 0; i < MAX_REF_FRAMES; i++)
{
denoiser->yv12_running_avg[i].flags = 0;
@@ -175,8 +174,7 @@ void vp8_denoiser_free(VP8_DENOISER *denoiser)
int i;
assert(denoiser);
- /* we don't have one for intra ref frame */
- for (i = 1; i < MAX_REF_FRAMES ; i++)
+ for (i = 0; i < MAX_REF_FRAMES ; i++)
{
vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_running_avg[i]);
}
@@ -208,8 +206,6 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
MB_MODE_INFO saved_mbmi;
MACROBLOCKD *filter_xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &filter_xd->mode_info_context->mbmi;
- int mv_col;
- int mv_row;
int sse_diff = zero_mv_sse - best_sse;
saved_mbmi = *mbmi;
@@ -291,7 +287,7 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
{
/* Filter. */
decision = vp8_denoiser_filter(&denoiser->yv12_mc_running_avg,
- &denoiser->yv12_running_avg[LAST_FRAME],
+ &denoiser->yv12_running_avg[INTRA_FRAME],
x,
motion_magnitude2,
recon_yoffset, recon_uvoffset);
@@ -303,7 +299,7 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
*/
vp8_copy_mem16x16(
x->thismb, 16,
- denoiser->yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset,
- denoiser->yv12_running_avg[LAST_FRAME].y_stride);
+ denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset,
+ denoiser->yv12_running_avg[INTRA_FRAME].y_stride);
}
}
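
Note: the denoiser now allocates a running-average buffer for every slot in 0..MAX_REF_FRAMES-1, and the filter writes its result through index INTRA_FRAME (0) instead of LAST_FRAME. A condensed sketch of the new allocation convention, not the patch itself; alloc_one() is a hypothetical stand-in for vp8_yv12_alloc_frame_buffer():

    static int alloc_one(YV12_BUFFER_CONFIG *buf, int width, int height);

    static int alloc_all(VP8_DENOISER *denoiser, int width, int height)
    {
        int i;
        for (i = 0; i < MAX_REF_FRAMES; i++)    /* was: for (i = 1; ...) */
            if (alloc_one(&denoiser->yv12_running_avg[i], width, height))
                return 1;                        /* allocation failed */
        return 0;
    }
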
diff --git a/libvpx/vp8/encoder/encodeframe.c b/libvpx/vp8/encoder/encodeframe.c
index 8828dd9..b550f6b 100644
--- a/libvpx/vp8/encoder/encodeframe.c
+++ b/libvpx/vp8/encoder/encodeframe.c
@@ -10,6 +10,7 @@
#include "vpx_config.h"
+#include "vp8_rtcd.h"
#include "encodemb.h"
#include "encodemv.h"
#include "vp8/common/common.h"
@@ -45,7 +46,6 @@ extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
MACROBLOCK *x,
MB_ROW_COMP *mbr_ei,
- int mb_row,
int count);
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
@@ -530,7 +530,8 @@ void encode_mb_row(VP8_COMP *cpi,
* segmentation map
*/
if ((cpi->current_layer == 0) &&
- (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled))
+ (cpi->cyclic_refresh_mode_enabled &&
+ xd->segmentation_enabled))
{
cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
@@ -642,8 +643,6 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
xd->left_context = &cm->left_context;
- vp8_zero(cpi->count_mb_ref_frame_usage)
-
x->mvc = cm->fc.mvc;
vpx_memset(cm->above_context, 0,
@@ -678,6 +677,7 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
vp8_zero(x->uv_mode_count)
x->prediction_error = 0;
x->intra_error = 0;
+ vp8_zero(x->count_mb_ref_frame_usage);
}
static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread)
@@ -766,7 +766,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
vp8cx_frame_init_quantizer(cpi);
- vp8_initialize_rd_consts(cpi,
+ vp8_initialize_rd_consts(cpi, x,
vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
vp8cx_initialize_me_consts(cpi, cm->base_qindex);
@@ -805,7 +805,8 @@ void vp8_encode_frame(VP8_COMP *cpi)
{
int i;
- vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);
+ vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
+ cpi->encoding_thread_count);
for (i = 0; i < cm->mb_rows; i++)
cpi->mt_current_mb_col[i] = -1;
@@ -852,11 +853,10 @@ void vp8_encode_frame(VP8_COMP *cpi)
if (xd->segmentation_enabled)
{
- int i, j;
+ int j;
if (xd->segmentation_enabled)
{
-
for (i = 0; i < cpi->encoding_thread_count; i++)
{
for (j = 0; j < 4; j++)
@@ -868,7 +868,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
for (i = 0; i < cpi->encoding_thread_count; i++)
{
int mode_count;
- int mv_vals;
+ int c_idx;
totalrate += cpi->mb_row_ei[i].totalrate;
cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
@@ -881,18 +881,26 @@ void vp8_encode_frame(VP8_COMP *cpi)
cpi->mb.uv_mode_count[mode_count] +=
cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
- for(mv_vals = 0; mv_vals < MVvals; mv_vals++)
+ for(c_idx = 0; c_idx < MVvals; c_idx++)
{
- cpi->mb.MVcount[0][mv_vals] +=
- cpi->mb_row_ei[i].mb.MVcount[0][mv_vals];
- cpi->mb.MVcount[1][mv_vals] +=
- cpi->mb_row_ei[i].mb.MVcount[1][mv_vals];
+ cpi->mb.MVcount[0][c_idx] +=
+ cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
+ cpi->mb.MVcount[1][c_idx] +=
+ cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
}
cpi->mb.prediction_error +=
cpi->mb_row_ei[i].mb.prediction_error;
cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
+ for(c_idx = 0; c_idx < MAX_REF_FRAMES; c_idx++)
+ cpi->mb.count_mb_ref_frame_usage[c_idx] +=
+ cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
+
+ for(c_idx = 0; c_idx < MAX_ERROR_BINS; c_idx++)
+ cpi->mb.error_bins[c_idx] +=
+ cpi->mb_row_ei[i].mb.error_bins[c_idx];
+
/* add up counts for each thread */
sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
}
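
Note: with count_mb_ref_frame_usage and error_bins now living in the per-thread MACROBLOCK, the loop above must fold each row thread's copy back into cpi->mb once the frame finishes, exactly as it already did for MVcount. The same reduction, condensed into a helper for clarity; merge_thread_counts() is hypothetical (in the patch this body is inlined in vp8_encode_frame()), but the field names are the real ones:

    static void merge_thread_counts(MACROBLOCK *dst, const MACROBLOCK *src)
    {
        int k;
        for (k = 0; k < MAX_REF_FRAMES; k++)
            dst->count_mb_ref_frame_usage[k] +=
                src->count_mb_ref_frame_usage[k];
        for (k = 0; k < MAX_ERROR_BINS; k++)
            dst->error_bins[k] += src->error_bins[k];
    }
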
@@ -987,13 +995,14 @@ void vp8_encode_frame(VP8_COMP *cpi)
{
int tot_modes;
- tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
- + cpi->count_mb_ref_frame_usage[LAST_FRAME]
- + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
- + cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
+ tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME]
+ + cpi->mb.count_mb_ref_frame_usage[LAST_FRAME]
+ + cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME]
+ + cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
if (tot_modes)
- cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
+ cpi->this_frame_percent_intra =
+ cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
}
@@ -1224,17 +1233,17 @@ int vp8cx_encode_inter_macroblock
if (cpi->sf.RD)
{
- int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
+ int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
/* Are we using the fast quantizer for the mode selection? */
if(cpi->sf.use_fastquant_for_pick)
{
- cpi->mb.quantize_b = vp8_fast_quantize_b;
- cpi->mb.quantize_b_pair = vp8_fast_quantize_b_pair;
+ x->quantize_b = vp8_fast_quantize_b;
+ x->quantize_b_pair = vp8_fast_quantize_b_pair;
/* the fast quantizer does not use zbin_extra, so
* do not recalculate */
- cpi->zbin_mode_boost_enabled = 0;
+ x->zbin_mode_boost_enabled = 0;
}
vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
&distortion, &intra_error);
@@ -1242,12 +1251,12 @@ int vp8cx_encode_inter_macroblock
/* switch back to the regular quantizer for the encode */
if (cpi->sf.improved_quant)
{
- cpi->mb.quantize_b = vp8_regular_quantize_b;
- cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
+ x->quantize_b = vp8_regular_quantize_b;
+ x->quantize_b_pair = vp8_regular_quantize_b_pair;
}
/* restore cpi->zbin_mode_boost_enabled */
- cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
+ x->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
}
else
@@ -1290,25 +1299,27 @@ int vp8cx_encode_inter_macroblock
}
{
- /* Experimental code. Special case for gf and arf zeromv modes.
- * Increase zbin size to supress noise
+ /* Experimental code.
+ * Special case for gf and arf zeromv modes, for 1 temporal layer.
+ * Increase zbin size to suppress noise.
*/
- cpi->zbin_mode_boost = 0;
- if (cpi->zbin_mode_boost_enabled)
+ x->zbin_mode_boost = 0;
+ if (x->zbin_mode_boost_enabled)
{
if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
{
if (xd->mode_info_context->mbmi.mode == ZEROMV)
{
- if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
- cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME &&
+ cpi->oxcf.number_of_layers == 1)
+ x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
- cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
}
else if (xd->mode_info_context->mbmi.mode == SPLITMV)
- cpi->zbin_mode_boost = 0;
+ x->zbin_mode_boost = 0;
else
- cpi->zbin_mode_boost = MV_ZBIN_BOOST;
+ x->zbin_mode_boost = MV_ZBIN_BOOST;
}
}
@@ -1318,7 +1329,7 @@ int vp8cx_encode_inter_macroblock
vp8_update_zbin_extra(cpi, x);
}
- cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
+ x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
diff --git a/libvpx/vp8/encoder/encodeintra.c b/libvpx/vp8/encoder/encodeintra.c
index 340dd63..cfa4cb9 100644
--- a/libvpx/vp8/encoder/encodeintra.c
+++ b/libvpx/vp8/encoder/encodeintra.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "quantize.h"
#include "vp8/common/reconintra4x4.h"
#include "encodemb.h"
diff --git a/libvpx/vp8/encoder/encodemb.c b/libvpx/vp8/encoder/encodemb.c
index 7d494f2..7ed2fe1 100644
--- a/libvpx/vp8/encoder/encodemb.c
+++ b/libvpx/vp8/encoder/encodemb.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "encodemb.h"
#include "vp8/common/reconinter.h"
#include "quantize.h"
diff --git a/libvpx/vp8/encoder/encodemv.c b/libvpx/vp8/encoder/encodemv.c
index 0c43d06..2a74ff4 100644
--- a/libvpx/vp8/encoder/encodemv.c
+++ b/libvpx/vp8/encoder/encodemv.c
@@ -16,7 +16,7 @@
#include <math.h>
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
extern unsigned int active_section;
#endif
@@ -359,7 +359,7 @@ void vp8_write_mvprobs(VP8_COMP *cpi)
vp8_writer *const w = cpi->bc;
MV_CONTEXT *mvc = cpi->common.fc.mvc;
int flags[2] = {0, 0};
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 4;
#endif
write_component_probs(
@@ -374,7 +374,7 @@ void vp8_write_mvprobs(VP8_COMP *cpi)
if (flags[0] || flags[1])
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flags);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 5;
#endif
}
diff --git a/libvpx/vp8/encoder/ethreading.c b/libvpx/vp8/encoder/ethreading.c
index 39340f2..d4b17ce 100644
--- a/libvpx/vp8/encoder/ethreading.c
+++ b/libvpx/vp8/encoder/ethreading.c
@@ -214,7 +214,9 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
* vp8cx_encode_inter_macroblock()) back into the
* global segmentation map
*/
- if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
+ if ((cpi->current_layer == 0) &&
+ (cpi->cyclic_refresh_mode_enabled &&
+ xd->segmentation_enabled))
{
const MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id;
@@ -416,13 +418,23 @@ static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc)
zd->block[i].dequant = zd->dequant_uv;
zd->block[24].dequant = zd->dequant_y2;
#endif
+
+
+ vpx_memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes));
+ vpx_memcpy(z->rd_thresh_mult, x->rd_thresh_mult,
+ sizeof(x->rd_thresh_mult));
+
+ z->zbin_over_quant = x->zbin_over_quant;
+ z->zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
+ z->zbin_mode_boost = x->zbin_mode_boost;
+
+ vpx_memset(z->error_bins, 0, sizeof(z->error_bins));
}
}
void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
MACROBLOCK *x,
MB_ROW_COMP *mbr_ei,
- int mb_row,
int count
)
{
@@ -430,7 +442,6 @@ void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
int i;
- (void) mb_row;
for (i = 0; i < count; i++)
{
@@ -478,6 +489,8 @@ void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
vp8_zero(mb->MVcount);
mb->prediction_error = 0;
mb->intra_error = 0;
+ vp8_zero(mb->count_mb_ref_frame_usage);
+ mb->mbs_tested_so_far = 0;
}
}
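
Note: setup_mbby_copy() is the other half of that bookkeeping. When worker MACROBLOCKs are seeded before a frame, the RD thresholds and zbin boost state are copied so every thread searches with the same parameters, while the additive error_bins are zeroed so the end-of-frame reduction does not double-count. A condensed sketch of the asymmetry; seed_worker() is a hypothetical wrapper, with z the worker copy and x the main context as in the hunk:

    #include <string.h>

    static void seed_worker(MACROBLOCK *z, const MACROBLOCK *x)
    {
        memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes));
        memcpy(z->rd_thresh_mult, x->rd_thresh_mult,
               sizeof(x->rd_thresh_mult));
        z->zbin_over_quant         = x->zbin_over_quant;
        z->zbin_mode_boost         = x->zbin_mode_boost;
        z->zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
        memset(z->error_bins, 0, sizeof(z->error_bins)); /* restart at 0 */
    }
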
diff --git a/libvpx/vp8/encoder/firstpass.c b/libvpx/vp8/encoder/firstpass.c
index b668c8f..ded0c43 100644
--- a/libvpx/vp8/encoder/firstpass.c
+++ b/libvpx/vp8/encoder/firstpass.c
@@ -12,6 +12,7 @@
#include <limits.h>
#include <stdio.h>
+#include "./vpx_scale_rtcd.h"
#include "block.h"
#include "onyx_int.h"
#include "vp8/common/variance.h"
@@ -20,7 +21,7 @@
#include "vp8/common/systemdependent.h"
#include "mcomp.h"
#include "firstpass.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "encodemb.h"
#include "vp8/common/extend.h"
#include "vpx_mem/vpx_mem.h"
@@ -569,7 +570,7 @@ void vp8_first_pass(VP8_COMP *cpi)
/* Initialise the MV cost table to the defaults */
{
int flag[2] = {1, 1};
- vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
+ vp8_initialize_rd_consts(cpi, x, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
vpx_memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
}
@@ -857,7 +858,9 @@ skip_motion_search:
*/
if ((cm->current_video_frame > 0) &&
(cpi->twopass.this_frame_stats.pcnt_inter > 0.20) &&
- ((cpi->twopass.this_frame_stats.intra_error / cpi->twopass.this_frame_stats.coded_error) > 2.0))
+ ((cpi->twopass.this_frame_stats.intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.this_frame_stats.coded_error)) >
+ 2.0))
{
vp8_yv12_copy_frame(lst_yv12, gld_yv12);
}
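
Note: the added DOUBLE_DIVIDE_CHECK() guards the intra/coded error ratio against a zero coded_error. A sketch of the effect; the macro body shown is an assumption matching its epsilon-style use elsewhere in firstpass.c (nudge the divisor away from zero while keeping its sign):

    #define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)

    static double error_ratio(double intra_error, double coded_error)
    {
        /* finite even when coded_error == 0.0 */
        return intra_error / DOUBLE_DIVIDE_CHECK(coded_error);
    }
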
@@ -1322,7 +1325,7 @@ static int estimate_kf_group_q(VP8_COMP *cpi, double section_err, int section_ta
return Q;
}
-extern void vp8_new_frame_rate(VP8_COMP *cpi, double framerate);
+extern void vp8_new_framerate(VP8_COMP *cpi, double framerate);
void vp8_init_second_pass(VP8_COMP *cpi)
{
@@ -1346,9 +1349,9 @@ void vp8_init_second_pass(VP8_COMP *cpi)
* sum duration is not. Its calculated based on the actual durations of
* all frames from the first pass.
*/
- vp8_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats.count / cpi->twopass.total_stats.duration);
+ vp8_new_framerate(cpi, 10000000.0 * cpi->twopass.total_stats.count / cpi->twopass.total_stats.duration);
- cpi->output_frame_rate = cpi->frame_rate;
+ cpi->output_framerate = cpi->framerate;
cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats.duration * cpi->oxcf.target_bandwidth / 10000000.0) ;
cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats.duration * two_pass_min_rate / 10000000.0);
@@ -2115,23 +2118,25 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
(cpi->twopass.kf_group_error_left > 0))
{
cpi->twopass.gf_group_bits =
- (int)((double)cpi->twopass.kf_group_bits *
- (gf_group_err / (double)cpi->twopass.kf_group_error_left));
+ (int64_t)(cpi->twopass.kf_group_bits *
+ (gf_group_err / cpi->twopass.kf_group_error_left));
}
else
cpi->twopass.gf_group_bits = 0;
- cpi->twopass.gf_group_bits = (int)(
+ cpi->twopass.gf_group_bits =
(cpi->twopass.gf_group_bits < 0)
? 0
: (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits)
- ? cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits);
+ ? cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits;
/* Clip cpi->twopass.gf_group_bits based on user supplied data rate
* variability limit (cpi->oxcf.two_pass_vbrmax_section)
*/
- if (cpi->twopass.gf_group_bits > max_bits * cpi->baseline_gf_interval)
- cpi->twopass.gf_group_bits = max_bits * cpi->baseline_gf_interval;
+ if (cpi->twopass.gf_group_bits >
+ (int64_t)max_bits * cpi->baseline_gf_interval)
+ cpi->twopass.gf_group_bits =
+ (int64_t)max_bits * cpi->baseline_gf_interval;
/* Reset the file position */
reset_fpf_position(cpi, start_pos);
@@ -2393,7 +2398,7 @@ static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
target_frame_size += cpi->min_frame_bandwidth;
/* Every other frame gets a few extra bits */
- if ( (cpi->common.frames_since_golden & 0x01) &&
+ if ( (cpi->frames_since_golden & 0x01) &&
(cpi->frames_till_gf_update_due > 0) )
{
target_frame_size += cpi->twopass.alt_extra_bits;
@@ -2445,7 +2450,7 @@ void vp8_second_pass(VP8_COMP *cpi)
*/
if (cpi->oxcf.error_resilient_mode)
{
- cpi->twopass.gf_group_bits = (int)cpi->twopass.kf_group_bits;
+ cpi->twopass.gf_group_bits = cpi->twopass.kf_group_bits;
cpi->twopass.gf_group_error_left =
(int)cpi->twopass.kf_group_error_left;
cpi->baseline_gf_interval = cpi->twopass.frames_to_key;
@@ -2524,7 +2529,7 @@ void vp8_second_pass(VP8_COMP *cpi)
/* Set nominal per second bandwidth for this frame */
cpi->target_bandwidth = (int)
- (cpi->per_frame_bandwidth * cpi->output_frame_rate);
+ (cpi->per_frame_bandwidth * cpi->output_framerate);
if (cpi->target_bandwidth < 0)
cpi->target_bandwidth = 0;
@@ -3180,7 +3185,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
/* Convert to a per second bitrate */
cpi->target_bandwidth = (int)(cpi->twopass.kf_bits *
- cpi->output_frame_rate);
+ cpi->output_framerate);
}
/* Note the total error score of the kf group minus the key frame itself */
@@ -3219,7 +3224,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
cpi->common.vert_scale = NORMAL;
/* Calculate Average bits per frame. */
- av_bits_per_frame = cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->frame_rate);
+ av_bits_per_frame = cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->framerate);
/* CBR... Use the clip average as the target for deciding resample */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
@@ -3294,7 +3299,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
}
else
{
- int64_t clip_bits = (int64_t)(cpi->twopass.total_stats.count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->frame_rate));
+ int64_t clip_bits = (int64_t)(cpi->twopass.total_stats.count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->framerate));
int64_t over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level;
/* If triggered last time the threshold for triggering again is
diff --git a/libvpx/vp8/encoder/mcomp.c b/libvpx/vp8/encoder/mcomp.c
index b08c7a5..83c3989 100644
--- a/libvpx/vp8/encoder/mcomp.c
+++ b/libvpx/vp8/encoder/mcomp.c
@@ -18,7 +18,7 @@
#include <math.h>
#include "vp8/common/findnearmv.h"
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
static int mv_ref_ct [31] [4] [2];
static int mv_mode_cts [4] [2];
#endif
@@ -233,19 +233,18 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#if ARCH_X86 || ARCH_X86_64
MACROBLOCKD *xd = &x->e_mbd;
- unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
+ unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
unsigned char *y;
- int buf_r1, buf_r2, buf_c1, buf_c2;
+ int buf_r1, buf_r2, buf_c1;
/* Clamping to avoid out-of-range data access */
buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):3;
buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):3;
buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):3;
- buf_c2 = ((bestmv->as_mv.col + 3) > x->mv_col_max)?(x->mv_col_max - bestmv->as_mv.col):3;
y_stride = 32;
/* Copy to intermediate buffer before searching. */
- vfp->copymem(y0 - buf_c1 - pre_stride*buf_r1, pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
+ vfp->copymem(y_0 - buf_c1 - pre_stride*buf_r1, pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
y = xd->y_buf + y_stride*buf_r1 +buf_c1;
#else
unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
@@ -376,12 +375,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#if ARCH_X86 || ARCH_X86_64
MACROBLOCKD *xd = &x->e_mbd;
- unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
+ unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
unsigned char *y;
y_stride = 32;
/* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
- vfp->copymem(y0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
+ vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
y = xd->y_buf + y_stride + 1;
#else
unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
@@ -687,12 +686,12 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#if ARCH_X86 || ARCH_X86_64
MACROBLOCKD *xd = &x->e_mbd;
- unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
+ unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
unsigned char *y;
y_stride = 32;
/* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
- vfp->copymem(y0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
+ vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
y = xd->y_buf + y_stride + 1;
#else
unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
@@ -1913,7 +1912,7 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
void print_mode_context(void)
{
FILE *f = fopen("modecont.c", "w");
@@ -1966,8 +1965,8 @@ void print_mode_context(void)
fclose(f);
}
-/* MV ref count ENTROPY_STATS stats code */
-#ifdef ENTROPY_STATS
+/* MV ref count VP8_ENTROPY_STATS stats code */
+#ifdef VP8_ENTROPY_STATS
void init_mv_ref_counts()
{
vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
@@ -2021,6 +2020,6 @@ void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
}
}
-#endif/* END MV ref count ENTROPY_STATS stats code */
+#endif/* END MV ref count VP8_ENTROPY_STATS stats code */
#endif
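
Note: in the sub-pixel search paths above, buf_r1/buf_r2/buf_c1 measure how much context around the best integer MV can be copied into the intermediate buffer without reading past the motion bounds (at most 3 rows or columns per side); buf_c2 was computed but never read, hence its removal. One side of the clamp, sketched with worked numbers:

    /* With min_bound = -16 and best = -15, only one row above is in
     * range, so the result is 1 rather than the usual 3. */
    static int clamp_context(int best, int min_bound)
    {
        return (best - 3) < min_bound ? best - min_bound : 3;
    }
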
diff --git a/libvpx/vp8/encoder/mcomp.h b/libvpx/vp8/encoder/mcomp.h
index 890113f..e36c515 100644
--- a/libvpx/vp8/encoder/mcomp.h
+++ b/libvpx/vp8/encoder/mcomp.h
@@ -15,7 +15,7 @@
#include "block.h"
#include "vp8/common/variance.h"
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
extern void init_mv_ref_counts();
extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
#endif
diff --git a/libvpx/vp8/encoder/onyx_if.c b/libvpx/vp8/encoder/onyx_if.c
index c7d81b1..7c07975 100644
--- a/libvpx/vp8/encoder/onyx_if.c
+++ b/libvpx/vp8/encoder/onyx_if.c
@@ -10,6 +10,7 @@
#include "vpx_config.h"
+#include "./vpx_scale_rtcd.h"
#include "vp8/common/onyxc_int.h"
#include "vp8/common/blockd.h"
#include "onyx_int.h"
@@ -19,7 +20,7 @@
#include "mcomp.h"
#include "firstpass.h"
#include "psnr.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/extend.h"
#include "ratectrl.h"
#include "vp8/common/quant_common.h"
@@ -110,7 +111,7 @@ extern int skip_false_count;
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
extern int intra_mode_stats[10][10][10];
#endif
@@ -238,7 +239,7 @@ static void save_layer_context(VP8_COMP *cpi)
lc->rate_correction_factor = cpi->rate_correction_factor;
lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
- lc->zbin_over_quant = cpi->zbin_over_quant;
+ lc->zbin_over_quant = cpi->mb.zbin_over_quant;
lc->inter_frame_target = cpi->inter_frame_target;
lc->total_byte_count = cpi->total_byte_count;
lc->filter_level = cpi->common.filter_level;
@@ -246,8 +247,8 @@ static void save_layer_context(VP8_COMP *cpi)
lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
memcpy (lc->count_mb_ref_frame_usage,
- cpi->count_mb_ref_frame_usage,
- sizeof(cpi->count_mb_ref_frame_usage));
+ cpi->mb.count_mb_ref_frame_usage,
+ sizeof(cpi->mb.count_mb_ref_frame_usage));
}
static void restore_layer_context(VP8_COMP *cpi, const int layer)
@@ -276,16 +277,135 @@ static void restore_layer_context(VP8_COMP *cpi, const int layer)
cpi->rate_correction_factor = lc->rate_correction_factor;
cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
- cpi->zbin_over_quant = lc->zbin_over_quant;
+ cpi->mb.zbin_over_quant = lc->zbin_over_quant;
cpi->inter_frame_target = lc->inter_frame_target;
cpi->total_byte_count = lc->total_byte_count;
cpi->common.filter_level = lc->filter_level;
cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
- memcpy (cpi->count_mb_ref_frame_usage,
+ memcpy (cpi->mb.count_mb_ref_frame_usage,
lc->count_mb_ref_frame_usage,
- sizeof(cpi->count_mb_ref_frame_usage));
+ sizeof(cpi->mb.count_mb_ref_frame_usage));
+}
+
+static int rescale(int val, int num, int denom)
+{
+ int64_t llnum = num;
+ int64_t llden = denom;
+ int64_t llval = val;
+
+ return (int)(llval * llnum / llden);
+}
+
+static void init_temporal_layer_context(VP8_COMP *cpi,
+ VP8_CONFIG *oxcf,
+ const int layer,
+ double prev_layer_framerate)
+{
+ LAYER_CONTEXT *lc = &cpi->layer_context[layer];
+
+ lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
+ lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
+
+ lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
+ lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
+ lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
+
+ lc->starting_buffer_level =
+ rescale((int)(oxcf->starting_buffer_level),
+ lc->target_bandwidth, 1000);
+
+ if (oxcf->optimal_buffer_level == 0)
+ lc->optimal_buffer_level = lc->target_bandwidth / 8;
+ else
+ lc->optimal_buffer_level =
+ rescale((int)(oxcf->optimal_buffer_level),
+ lc->target_bandwidth, 1000);
+
+ if (oxcf->maximum_buffer_size == 0)
+ lc->maximum_buffer_size = lc->target_bandwidth / 8;
+ else
+ lc->maximum_buffer_size =
+ rescale((int)(oxcf->maximum_buffer_size),
+ lc->target_bandwidth, 1000);
+
+ /* Work out the average size of a frame within this layer */
+ if (layer > 0)
+ lc->avg_frame_size_for_layer =
+ (int)((cpi->oxcf.target_bitrate[layer] -
+ cpi->oxcf.target_bitrate[layer-1]) * 1000 /
+ (lc->framerate - prev_layer_framerate));
+
+ lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
+ lc->active_best_quality = cpi->oxcf.best_allowed_q;
+ lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
+
+ lc->buffer_level = lc->starting_buffer_level;
+ lc->bits_off_target = lc->starting_buffer_level;
+
+ lc->total_actual_bits = 0;
+ lc->ni_av_qi = 0;
+ lc->ni_tot_qi = 0;
+ lc->ni_frames = 0;
+ lc->rate_correction_factor = 1.0;
+ lc->key_frame_rate_correction_factor = 1.0;
+ lc->gf_rate_correction_factor = 1.0;
+ lc->inter_frame_target = 0;
+}
+
+// Upon a run-time change in temporal layers, reset the layer context parameters
+// for any "new" layers. For "existing" layers, let them inherit the parameters
+// from the previous layer state (at the same layer #). In future we may want
+// to better map the previous layer state(s) to the "new" ones.
+static void reset_temporal_layer_change(VP8_COMP *cpi,
+ VP8_CONFIG *oxcf,
+ const int prev_num_layers)
+{
+ int i;
+ double prev_layer_framerate = 0;
+ const int curr_num_layers = cpi->oxcf.number_of_layers;
+ // If the previous state was 1 layer, get current layer context from cpi.
+ // We need this to set the layer context for the new layers below.
+ if (prev_num_layers == 1)
+ {
+ cpi->current_layer = 0;
+ save_layer_context(cpi);
+ }
+ for (i = 0; i < curr_num_layers; i++)
+ {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ if (i >= prev_num_layers)
+ {
+ init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
+ }
+ // The initial buffer levels are set based on their starting levels.
+ // We could set the buffer levels based on the previous state (normalized
+ // properly by the layer bandwidths) but we would need to keep track of
+ // the previous set of layer bandwidths (i.e., target_bitrate[i])
+ // before the layer change. For now, reset to the starting levels.
+ lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
+ cpi->oxcf.target_bitrate[i];
+ lc->bits_off_target = lc->buffer_level;
+ // TODO(marpan): Should we set the rate_correction_factor and
+ // active_worst/best_quality to values derived from the previous layer
+ // state (to smooth-out quality dips/rate fluctuation at transition)?
+
+ // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
+ // is not set for 1 layer, and the restore_layer_context/save_context()
+ // are not called in the encoding loop, so we need to call it here to
+ // pass the layer context state to |cpi|.
+ if (curr_num_layers == 1)
+ {
+ lc->target_bandwidth = cpi->oxcf.target_bandwidth;
+ lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
+ lc->target_bandwidth / 1000;
+ lc->bits_off_target = lc->buffer_level;
+ restore_layer_context(cpi, 0);
+ }
+ prev_layer_framerate = cpi->output_framerate /
+ cpi->oxcf.rate_decimator[i];
+ }
}
static void setup_features(VP8_COMP *cpi)
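
Note: rescale() moves up unchanged (its old definition is deleted further down in this file) so the new init_temporal_layer_context() can reuse it. It converts a buffer level expressed in milliseconds into bits at a layer's bandwidth, widening to int64_t before the multiply so val * num cannot overflow int. A worked example:

    static int example_buffer_bits(void)
    {
        /* A 500 ms starting buffer at an 800 kbit/s layer:
         *   rescale(500, 800000, 1000) = 500 * 800000 / 1000 = 400000 bits.
         * The 64-bit intermediates matter: 30000 ms at the same rate gives
         * a 2.4e10 product, which would overflow 32-bit int arithmetic. */
        return rescale(500, 800000, 1000);   /* == 400000 */
    }
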
@@ -640,11 +760,9 @@ void vp8_set_speed_features(VP8_COMP *cpi)
for (i = 0; i < MAX_MODES; i ++)
{
cpi->mode_check_freq[i] = 0;
- cpi->mode_test_hit_counts[i] = 0;
- cpi->mode_chosen_counts[i] = 0;
}
- cpi->mbs_tested_so_far = 0;
+ cpi->mb.mbs_tested_so_far = 0;
/* best quality defaults */
sf->RD = 1;
@@ -826,7 +944,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
{
unsigned int sum = 0;
unsigned int total_mbs = cm->MBs;
- int i, thresh;
+ int thresh;
unsigned int total_skip;
int min = 2000;
@@ -838,7 +956,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
for (i = 0; i < min; i++)
{
- sum += cpi->error_bins[i];
+ sum += cpi->mb.error_bins[i];
}
total_skip = sum;
@@ -847,7 +965,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
/* i starts from 2 to make sure thresh started from 2048 */
for (; i < 1024; i++)
{
- sum += cpi->error_bins[i];
+ sum += cpi->mb.error_bins[i];
if (10 * sum >= (unsigned int)(cpi->Speed - 6)*(total_mbs - total_skip))
break;
@@ -902,7 +1020,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
if (Speed >= 15)
sf->half_pixel_search = 0;
- vpx_memset(cpi->error_bins, 0, sizeof(cpi->error_bins));
+ vpx_memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
}; /* switch */
@@ -1077,10 +1195,7 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi)
}
/* Data used for real time vc mode to see if gf needs refreshing */
- cpi->inter_zz_count = 0;
cpi->zeromv_count = 0;
- cpi->gf_bad_count = 0;
- cpi->gf_update_recommended = 0;
/* Structures used to monitor GF usage */
@@ -1167,21 +1282,21 @@ int vp8_reverse_trans(int x)
return 63;
}
-void vp8_new_frame_rate(VP8_COMP *cpi, double framerate)
+void vp8_new_framerate(VP8_COMP *cpi, double framerate)
{
if(framerate < .1)
framerate = 30;
- cpi->frame_rate = framerate;
- cpi->output_frame_rate = framerate;
+ cpi->framerate = framerate;
+ cpi->output_framerate = framerate;
cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
- cpi->output_frame_rate);
+ cpi->output_framerate);
cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
cpi->oxcf.two_pass_vbrmin_section / 100);
/* Set Maximum gf/arf interval */
- cpi->max_gf_interval = ((int)(cpi->output_frame_rate / 2.0) + 2);
+ cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
if(cpi->max_gf_interval < 12)
cpi->max_gf_interval = 12;
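
Note: only the field name changes here; the arithmetic is unchanged. For concreteness:

    /* Worked numbers for the interval above:
     *   30 fps: (int)(30 / 2.0) + 2 = 17 frames between gf/arf updates;
     *   15 fps: (int)(15 / 2.0) + 2 =  9, raised to 12 by the clamp. */
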
@@ -1204,17 +1319,6 @@ void vp8_new_frame_rate(VP8_COMP *cpi, double framerate)
}
-static int
-rescale(int val, int num, int denom)
-{
- int64_t llnum = num;
- int64_t llden = denom;
- int64_t llval = val;
-
- return (int)(llval * llnum / llden);
-}
-
-
static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
{
VP8_COMMON *cm = &cpi->common;
@@ -1233,13 +1337,13 @@ static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
* seems like a reasonable framerate, then use that as a guess, otherwise
* use 30.
*/
- cpi->frame_rate = (double)(oxcf->timebase.den) /
- (double)(oxcf->timebase.num);
+ cpi->framerate = (double)(oxcf->timebase.den) /
+ (double)(oxcf->timebase.num);
- if (cpi->frame_rate > 180)
- cpi->frame_rate = 30;
+ if (cpi->framerate > 180)
+ cpi->framerate = 30;
- cpi->ref_frame_rate = cpi->frame_rate;
+ cpi->ref_framerate = cpi->framerate;
/* change includes all joint functionality */
vp8_change_config(cpi, oxcf);
@@ -1265,63 +1369,13 @@ static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
if (cpi->oxcf.number_of_layers > 1)
{
unsigned int i;
- double prev_layer_frame_rate=0;
+ double prev_layer_framerate=0;
for (i=0; i<cpi->oxcf.number_of_layers; i++)
{
- LAYER_CONTEXT *lc = &cpi->layer_context[i];
-
- /* Layer configuration */
- lc->frame_rate =
- cpi->output_frame_rate / cpi->oxcf.rate_decimator[i];
- lc->target_bandwidth = cpi->oxcf.target_bitrate[i] * 1000;
-
- lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
- lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
- lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
-
- lc->starting_buffer_level =
- rescale((int)(oxcf->starting_buffer_level),
- lc->target_bandwidth, 1000);
-
- if (oxcf->optimal_buffer_level == 0)
- lc->optimal_buffer_level = lc->target_bandwidth / 8;
- else
- lc->optimal_buffer_level =
- rescale((int)(oxcf->optimal_buffer_level),
- lc->target_bandwidth, 1000);
-
- if (oxcf->maximum_buffer_size == 0)
- lc->maximum_buffer_size = lc->target_bandwidth / 8;
- else
- lc->maximum_buffer_size =
- rescale((int)oxcf->maximum_buffer_size,
- lc->target_bandwidth, 1000);
-
- /* Work out the average size of a frame within this layer */
- if (i > 0)
- lc->avg_frame_size_for_layer =
- (int)((cpi->oxcf.target_bitrate[i] -
- cpi->oxcf.target_bitrate[i-1]) * 1000 /
- (lc->frame_rate - prev_layer_frame_rate));
-
- lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
- lc->active_best_quality = cpi->oxcf.best_allowed_q;
- lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
-
- lc->buffer_level = lc->starting_buffer_level;
- lc->bits_off_target = lc->starting_buffer_level;
-
- lc->total_actual_bits = 0;
- lc->ni_av_qi = 0;
- lc->ni_tot_qi = 0;
- lc->ni_frames = 0;
- lc->rate_correction_factor = 1.0;
- lc->key_frame_rate_correction_factor = 1.0;
- lc->gf_rate_correction_factor = 1.0;
- lc->inter_frame_target = 0;
-
- prev_layer_frame_rate = lc->frame_rate;
+ init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
+ prev_layer_framerate = cpi->output_framerate /
+ cpi->oxcf.rate_decimator[i];
}
}
@@ -1345,14 +1399,14 @@ static void update_layer_contexts (VP8_COMP *cpi)
if (oxcf->number_of_layers > 1)
{
unsigned int i;
- double prev_layer_frame_rate=0;
+ double prev_layer_framerate=0;
for (i=0; i<oxcf->number_of_layers; i++)
{
LAYER_CONTEXT *lc = &cpi->layer_context[i];
- lc->frame_rate =
- cpi->ref_frame_rate / oxcf->rate_decimator[i];
+ lc->framerate =
+ cpi->ref_framerate / oxcf->rate_decimator[i];
lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
lc->starting_buffer_level = rescale(
@@ -1378,9 +1432,9 @@ static void update_layer_contexts (VP8_COMP *cpi)
lc->avg_frame_size_for_layer =
(int)((oxcf->target_bitrate[i] -
oxcf->target_bitrate[i-1]) * 1000 /
- (lc->frame_rate - prev_layer_frame_rate));
+ (lc->framerate - prev_layer_framerate));
- prev_layer_frame_rate = lc->frame_rate;
+ prev_layer_framerate = lc->framerate;
}
}
}
@@ -1388,7 +1442,7 @@ static void update_layer_contexts (VP8_COMP *cpi)
void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
{
VP8_COMMON *cm = &cpi->common;
- int last_w, last_h;
+ int last_w, last_h, prev_number_of_layers;
if (!cpi)
return;
@@ -1413,6 +1467,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
last_w = cpi->oxcf.Width;
last_h = cpi->oxcf.Height;
+ prev_number_of_layers = cpi->oxcf.number_of_layers;
cpi->oxcf = *oxcf;
@@ -1570,7 +1625,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->oxcf.target_bandwidth, 1000);
/* Set up frame rate and related parameters rate control values. */
- vp8_new_frame_rate(cpi, cpi->frame_rate);
+ vp8_new_framerate(cpi, cpi->framerate);
/* Set absolute upper and lower quality limits */
cpi->worst_quality = cpi->oxcf.worst_allowed_q;
@@ -1605,6 +1660,16 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
+ // Check if the number of temporal layers has changed, and if so reset the
+ // pattern counter and set/initialize the temporal layer context for the
+ // new layer configuration.
+ if (cpi->oxcf.number_of_layers != prev_number_of_layers)
+ {
+ // If the number of temporal layers has changed we must start at the
+ // base of the pattern cycle, so reset temporal_pattern_counter.
+ cpi->temporal_pattern_counter = 0;
+ reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
+ }
cm->Width = cpi->oxcf.Width;
cm->Height = cpi->oxcf.Height;
@@ -1742,6 +1807,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
cpi->common.current_video_frame = 0;
+ cpi->temporal_pattern_counter = 0;
cpi->kf_overspend_bits = 0;
cpi->kf_bitrate_adjustment = 0;
cpi->frames_till_gf_update_due = 0;
@@ -1809,7 +1875,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
else
cpi->cyclic_refresh_map = (signed char *) NULL;
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
init_context_counters();
#endif
@@ -1879,7 +1945,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
for (i = 0; i < KEY_FRAME_CONTEXT; i++)
{
- cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
+ cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
}
#ifdef OUTPUT_YUV_SRC
@@ -1924,10 +1990,10 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
/* Set starting values of RD threshold multipliers (128 = *1) */
for (i = 0; i < MAX_MODES; i++)
{
- cpi->rd_thresh_mult[i] = 128;
+ cpi->mb.rd_thresh_mult[i] = 128;
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
init_mv_ref_counts();
#endif
@@ -2002,7 +2068,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->refining_search_sad = vp8_refining_search_sad;
/* make sure frame 1 is okay */
- cpi->error_bins[0] = cpi->common.MBs;
+ cpi->mb.error_bins[0] = cpi->common.MBs;
/* vp8cx_init_quantizer() is first called here. Add check in
* vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
@@ -2064,7 +2130,7 @@ void vp8_remove_compressor(VP8_COMP **ptr)
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
print_context_counters();
print_tree_update_probs();
print_mode_context();
@@ -2207,7 +2273,7 @@ void vp8_remove_compressor(VP8_COMP **ptr)
{
extern int count_mb_seg[4];
FILE *f = fopen("modes.stt", "a");
- double dr = (double)cpi->frame_rate * (double)bytes * (double)8 / (double)count / (double)1000 ;
+ double dr = (double)cpi->framerate * (double)bytes * (double)8 / (double)count / (double)1000 ;
fprintf(f, "intra_mode in Intra Frames:\n");
fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1], y_modes[2], y_modes[3], y_modes[4]);
fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1], uv_modes[2], uv_modes[3]);
@@ -2246,7 +2312,7 @@ void vp8_remove_compressor(VP8_COMP **ptr)
}
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
{
int i, j, k;
FILE *fmode = fopen("modecontext.c", "w");
@@ -2591,7 +2657,7 @@ static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
Scale2Ratio(cm->horiz_scale, &hr, &hs);
Scale2Ratio(cm->vert_scale, &vr, &vs);
- vp8_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
+ vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
tmp_height, hs, hr, vs, vr, 0);
vp8_yv12_extend_frame_borders(&cpi->scaled_source);
@@ -2684,12 +2750,12 @@ static void update_alt_ref_frame_stats(VP8_COMP *cpi)
cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
/* this frame refreshes means next frames don't unless specified by user */
- cpi->common.frames_since_golden = 0;
+ cpi->frames_since_golden = 0;
/* Clear the alternate reference update pending flag. */
cpi->source_alt_ref_pending = 0;
- /* Set the alternate refernce frame active flag */
+ /* Set the alternate reference frame active flag */
cpi->source_alt_ref_active = 1;
@@ -2736,7 +2802,7 @@ static void update_golden_frame_stats(VP8_COMP *cpi)
* user
*/
cm->refresh_golden_frame = 0;
- cpi->common.frames_since_golden = 0;
+ cpi->frames_since_golden = 0;
cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
@@ -2768,17 +2834,21 @@ static void update_golden_frame_stats(VP8_COMP *cpi)
if (cpi->frames_till_gf_update_due > 0)
cpi->frames_till_gf_update_due--;
- if (cpi->common.frames_till_alt_ref_frame)
- cpi->common.frames_till_alt_ref_frame --;
+ if (cpi->frames_till_alt_ref_frame)
+ cpi->frames_till_alt_ref_frame --;
- cpi->common.frames_since_golden ++;
+ cpi->frames_since_golden ++;
- if (cpi->common.frames_since_golden > 1)
+ if (cpi->frames_since_golden > 1)
{
- cpi->recent_ref_frame_usage[INTRA_FRAME] += cpi->count_mb_ref_frame_usage[INTRA_FRAME];
- cpi->recent_ref_frame_usage[LAST_FRAME] += cpi->count_mb_ref_frame_usage[LAST_FRAME];
- cpi->recent_ref_frame_usage[GOLDEN_FRAME] += cpi->count_mb_ref_frame_usage[GOLDEN_FRAME];
- cpi->recent_ref_frame_usage[ALTREF_FRAME] += cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
+ cpi->recent_ref_frame_usage[INTRA_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
+ cpi->recent_ref_frame_usage[LAST_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
+ cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
}
}
}
@@ -2790,7 +2860,7 @@ static void update_rd_ref_frame_probs(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
- const int *const rfct = cpi->count_mb_ref_frame_usage;
+ const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
@@ -2815,14 +2885,16 @@ static void update_rd_ref_frame_probs(VP8_COMP *cpi)
if (cpi->common.refresh_alt_ref_frame)
{
cpi->prob_intra_coded += 40;
+ if (cpi->prob_intra_coded > 255)
+ cpi->prob_intra_coded = 255;
cpi->prob_last_coded = 200;
cpi->prob_gf_coded = 1;
}
- else if (cpi->common.frames_since_golden == 0)
+ else if (cpi->frames_since_golden == 0)
{
cpi->prob_last_coded = 214;
}
- else if (cpi->common.frames_since_golden == 1)
+ else if (cpi->frames_since_golden == 1)
{
cpi->prob_last_coded = 192;
cpi->prob_gf_coded = 220;
@@ -3131,6 +3203,57 @@ static void update_reference_frames(VP8_COMP *cpi)
cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
#endif
}
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity)
+ {
+ /* we shouldn't have to keep multiple copies as we know in advance which
+ * buffer we should start from - for now, to get something up and running,
+ * I've chosen to copy the buffers
+ */
+ if (cm->frame_type == KEY_FRAME)
+ {
+ int i;
+ vp8_yv12_copy_frame(
+ cpi->Source,
+ &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
+
+ vp8_yv12_extend_frame_borders(
+ &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
+
+ for (i = 2; i < MAX_REF_FRAMES - 1; i++)
+ vp8_yv12_copy_frame(
+ &cpi->denoiser.yv12_running_avg[LAST_FRAME],
+ &cpi->denoiser.yv12_running_avg[i]);
+ }
+ else /* For non key frames */
+ {
+ vp8_yv12_extend_frame_borders(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
+
+ if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf)
+ {
+ vp8_yv12_copy_frame(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
+ }
+ if (cm->refresh_golden_frame || cm->copy_buffer_to_gf)
+ {
+ vp8_yv12_copy_frame(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
+ }
+ if(cm->refresh_last_frame)
+ {
+ vp8_yv12_copy_frame(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
+ }
+ }
+
+ }
+#endif
+
}
void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
@@ -3174,51 +3297,6 @@ void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
}
vp8_yv12_extend_frame_borders(cm->frame_to_show);
-#if CONFIG_TEMPORAL_DENOISING
- if (cpi->oxcf.noise_sensitivity)
- {
-
-
- /* we shouldn't have to keep multiple copies as we know in advance which
- * buffer we should start - for now to get something up and running
- * I've chosen to copy the buffers
- */
- if (cm->frame_type == KEY_FRAME)
- {
- int i;
- vp8_yv12_copy_frame(
- cpi->Source,
- &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
-
- vp8_yv12_extend_frame_borders(
- &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
-
- for (i = 2; i < MAX_REF_FRAMES - 1; i++)
- vp8_yv12_copy_frame(
- cpi->Source,
- &cpi->denoiser.yv12_running_avg[i]);
- }
- else /* For non key frames */
- {
- vp8_yv12_extend_frame_borders(
- &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
-
- if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf)
- {
- vp8_yv12_copy_frame(
- &cpi->denoiser.yv12_running_avg[LAST_FRAME],
- &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
- }
- if (cm->refresh_golden_frame || cm->copy_buffer_to_gf)
- {
- vp8_yv12_copy_frame(
- &cpi->denoiser.yv12_running_avg[LAST_FRAME],
- &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
- }
- }
-
- }
-#endif
}
@@ -3290,31 +3368,31 @@ static void encode_frame_to_data_rate
cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
/* per second target bitrate */
cpi->target_bandwidth = (int)(cpi->twopass.gf_bits *
- cpi->output_frame_rate);
+ cpi->output_framerate);
}
}
else
#endif
- cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_frame_rate);
+ cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_framerate);
/* Default turn off buffer to buffer copying */
cm->copy_buffer_to_gf = 0;
cm->copy_buffer_to_arf = 0;
/* Clear zbin over-quant value and mode boost values. */
- cpi->zbin_over_quant = 0;
- cpi->zbin_mode_boost = 0;
+ cpi->mb.zbin_over_quant = 0;
+ cpi->mb.zbin_mode_boost = 0;
/* Enable or disable mode based tweaking of the zbin
* For 2 Pass Only used where GF/ARF prediction quality
* is above a threshold
*/
- cpi->zbin_mode_boost_enabled = 1;
+ cpi->mb.zbin_mode_boost_enabled = 1;
if (cpi->pass == 2)
{
if ( cpi->gfu_boost <= 400 )
{
- cpi->zbin_mode_boost_enabled = 0;
+ cpi->mb.zbin_mode_boost_enabled = 0;
}
}
@@ -3324,7 +3402,7 @@ static void encode_frame_to_data_rate
else
cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
- /* Check to see if a key frame is signalled
+ /* Check to see if a key frame is signaled
* For two pass with auto key frame enabled cm->frame_type may already
* be set, but not for one pass.
*/
@@ -3381,7 +3459,7 @@ static void encode_frame_to_data_rate
/* Reset the RD threshold multipliers to default of * 1 (128) */
for (i = 0; i < MAX_MODES; i++)
{
- cpi->rd_thresh_mult[i] = 128;
+ cpi->mb.rd_thresh_mult[i] = 128;
}
}
@@ -3459,7 +3537,7 @@ static void encode_frame_to_data_rate
/* Note that we should not throw out a key frame (especially when
* spatial resampling is enabled).
*/
- if ((cm->frame_type == KEY_FRAME))
+ if (cm->frame_type == KEY_FRAME)
{
cpi->decimation_count = cpi->decimation_factor;
}
@@ -3477,6 +3555,8 @@ static void encode_frame_to_data_rate
cm->current_video_frame++;
cpi->frames_since_key++;
+ // We advance the temporal pattern for dropped frames.
+ cpi->temporal_pattern_counter++;
#if CONFIG_INTERNAL_STATS
cpi->count ++;
@@ -3518,6 +3598,8 @@ static void encode_frame_to_data_rate
#endif
cm->current_video_frame++;
cpi->frames_since_key++;
+ // We advance the temporal pattern for dropped frames.
+ cpi->temporal_pattern_counter++;
return;
}
@@ -4070,8 +4152,9 @@ static void encode_frame_to_data_rate
q_low = (Q < q_high) ? (Q + 1) : q_high;
/* If we are using over quant do the same for zbin_oq_low */
- if (cpi->zbin_over_quant > 0)
- zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high;
+ if (cpi->mb.zbin_over_quant > 0)
+ zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
+ (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
if (undershoot_seen)
{
@@ -4087,11 +4170,13 @@ static void encode_frame_to_data_rate
* is max)
*/
if (Q < MAXQ)
- cpi->zbin_over_quant = 0;
+ cpi->mb.zbin_over_quant = 0;
else
{
- zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high;
- cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
+ zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
+ (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
+ cpi->mb.zbin_over_quant =
+ (zbin_oq_high + zbin_oq_low) / 2;
}
}
else
@@ -4104,7 +4189,9 @@ static void encode_frame_to_data_rate
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
- while (((Q < q_low) || (cpi->zbin_over_quant < zbin_oq_low)) && (Retries < 10))
+ while (((Q < q_low) ||
+ (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
+ (Retries < 10))
{
vp8_update_rate_correction_factors(cpi, 0);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
@@ -4117,12 +4204,13 @@ static void encode_frame_to_data_rate
/* Frame is too small */
else
{
- if (cpi->zbin_over_quant == 0)
+ if (cpi->mb.zbin_over_quant == 0)
/* Lower q_high if not using over quant */
q_high = (Q > q_low) ? (Q - 1) : q_low;
else
/* else lower zbin_oq_high */
- zbin_oq_high = (cpi->zbin_over_quant > zbin_oq_low) ? (cpi->zbin_over_quant - 1) : zbin_oq_low;
+ zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low) ?
+ (cpi->mb.zbin_over_quant - 1) : zbin_oq_low;
if (overshoot_seen)
{
@@ -4138,9 +4226,10 @@ static void encode_frame_to_data_rate
* is max)
*/
if (Q < MAXQ)
- cpi->zbin_over_quant = 0;
+ cpi->mb.zbin_over_quant = 0;
else
- cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
+ cpi->mb.zbin_over_quant =
+ (zbin_oq_high + zbin_oq_low) / 2;
}
else
{
@@ -4163,7 +4252,9 @@ static void encode_frame_to_data_rate
q_low = Q;
}
- while (((Q > q_high) || (cpi->zbin_over_quant > zbin_oq_high)) && (Retries < 10))
+ while (((Q > q_high) ||
+ (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
+ (Retries < 10))
{
vp8_update_rate_correction_factors(cpi, 0);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
@@ -4181,7 +4272,9 @@ static void encode_frame_to_data_rate
Q = q_low;
/* Clamp cpi->zbin_over_quant */
- cpi->zbin_over_quant = (cpi->zbin_over_quant < zbin_oq_low) ? zbin_oq_low : (cpi->zbin_over_quant > zbin_oq_high) ? zbin_oq_high : cpi->zbin_over_quant;
+ cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low) ?
+ zbin_oq_low : (cpi->mb.zbin_over_quant > zbin_oq_high) ?
+ zbin_oq_high : cpi->mb.zbin_over_quant;
Loop = Q != last_q;
}
@@ -4263,7 +4356,6 @@ static void encode_frame_to_data_rate
/* Point to beginning of MODE_INFO arrays. */
MODE_INFO *tmp = cm->mi;
- cpi->inter_zz_count = 0;
cpi->zeromv_count = 0;
if(cm->frame_type != KEY_FRAME)
@@ -4272,8 +4364,6 @@ static void encode_frame_to_data_rate
{
for (mb_col = 0; mb_col < cm->mb_cols; mb_col ++)
{
- if(tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME)
- cpi->inter_zz_count++;
if(tmp->mbmi.mode == ZEROMV)
cpi->zeromv_count++;
tmp++;
@@ -4467,7 +4557,7 @@ static void encode_frame_to_data_rate
{
LAYER_CONTEXT *lc = &cpi->layer_context[i];
int bits_off_for_this_layer =
- (int)(lc->target_bandwidth / lc->frame_rate -
+ (int)(lc->target_bandwidth / lc->framerate -
cpi->projected_frame_size);
lc->bits_off_target += bits_off_for_this_layer;
@@ -4583,9 +4673,6 @@ static void encode_frame_to_data_rate
cm->frame_type, cm->refresh_golden_frame,
cm->refresh_alt_ref_frame);
- for (i = 0; i < MAX_MODES; i++)
- fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]);
-
fprintf(fmodes, "\n");
fclose(fmodes);
@@ -4680,6 +4767,7 @@ static void encode_frame_to_data_rate
{
cm->current_video_frame++;
cpi->frames_since_key++;
+ cpi->temporal_pattern_counter++;
}
/* reset to normal state now that we are done. */
@@ -4703,67 +4791,6 @@ static void encode_frame_to_data_rate
}
-
-
-static void check_gf_quality(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
- int gf_active_pct = (100 * cpi->gf_active_count) / (cm->mb_rows * cm->mb_cols);
- int gf_ref_usage_pct = (cpi->count_mb_ref_frame_usage[GOLDEN_FRAME] * 100) / (cm->mb_rows * cm->mb_cols);
- int last_ref_zz_useage = (cpi->inter_zz_count * 100) / (cm->mb_rows * cm->mb_cols);
-
- /* Gf refresh is not currently being signalled */
- if (cpi->gf_update_recommended == 0)
- {
- if (cpi->common.frames_since_golden > 7)
- {
- /* Low use of gf */
- if ((gf_active_pct < 10) || ((gf_active_pct + gf_ref_usage_pct) < 15))
- {
- /* ...but last frame zero zero usage is reasonbable so a
- * new gf might be appropriate
- */
- if (last_ref_zz_useage >= 25)
- {
- cpi->gf_bad_count ++;
-
- /* Check that the condition is stable */
- if (cpi->gf_bad_count >= 8)
- {
- cpi->gf_update_recommended = 1;
- cpi->gf_bad_count = 0;
- }
- }
- else
- /* Restart count as the background is not stable enough */
- cpi->gf_bad_count = 0;
- }
- else
- /* Gf useage has picked up so reset count */
- cpi->gf_bad_count = 0;
- }
- }
- /* If the signal is set but has not been read should we cancel it. */
- else if (last_ref_zz_useage < 15)
- {
- cpi->gf_update_recommended = 0;
- cpi->gf_bad_count = 0;
- }
-
-#if 0
- {
- FILE *f = fopen("gfneeded.stt", "a");
- fprintf(f, "%10d %10d %10d %10d %10ld \n",
- cm->current_video_frame,
- cpi->common.frames_since_golden,
- gf_active_pct, gf_ref_usage_pct,
- cpi->gf_update_recommended);
- fclose(f);
- }
-
-#endif
-}
-
#if !(CONFIG_REALTIME_ONLY)
static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned char * dest_end, unsigned int *frame_flags)
{
@@ -4778,7 +4805,7 @@ static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest,
{
double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
*cpi->oxcf.two_pass_vbrmin_section / 100);
- cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->frame_rate);
+ cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
}
}
#endif
@@ -4794,8 +4821,10 @@ int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags, YV12_BUFFER_C
{
#if HAVE_NEON
int64_t store_reg[8];
-#endif
+#if CONFIG_RUNTIME_CPU_DETECT
VP8_COMMON *cm = &cpi->common;
+#endif
+#endif
struct vpx_usec_timer timer;
int res = 0;
@@ -4821,7 +4850,6 @@ int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags, YV12_BUFFER_C
if(vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
frame_flags, cpi->active_map_enabled ? cpi->active_map : NULL))
res = -1;
- cm->clr_type = sd->clrtype;
vpx_usec_timer_mark(&timer);
cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
@@ -4906,7 +4934,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
cpi->frames_till_gf_update_due);
force_src_buffer = &cpi->alt_ref_buffer;
}
- cm->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
+ cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
cm->refresh_alt_ref_frame = 1;
cm->refresh_golden_frame = 0;
cm->refresh_last_frame = 0;
@@ -5011,7 +5039,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
if (this_duration)
{
if (step)
- cpi->ref_frame_rate = 10000000.0 / this_duration;
+ cpi->ref_framerate = 10000000.0 / this_duration;
else
{
double avg_duration, interval;
@@ -5025,11 +5053,11 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
if(interval > 10000000.0)
interval = 10000000;
- avg_duration = 10000000.0 / cpi->ref_frame_rate;
+ avg_duration = 10000000.0 / cpi->ref_framerate;
avg_duration *= (interval - avg_duration + this_duration);
avg_duration /= interval;
- cpi->ref_frame_rate = 10000000.0 / avg_duration;
+ cpi->ref_framerate = 10000000.0 / avg_duration;
}
if (cpi->oxcf.number_of_layers > 1)
@@ -5040,12 +5068,12 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
for (i=0; i<cpi->oxcf.number_of_layers; i++)
{
LAYER_CONTEXT *lc = &cpi->layer_context[i];
- lc->frame_rate = cpi->ref_frame_rate /
- cpi->oxcf.rate_decimator[i];
+ lc->framerate = cpi->ref_framerate /
+ cpi->oxcf.rate_decimator[i];
}
}
else
- vp8_new_frame_rate(cpi, cpi->ref_frame_rate);
+ vp8_new_framerate(cpi, cpi->ref_framerate);
}
cpi->last_time_stamp_seen = cpi->source->ts_start;
@@ -5060,15 +5088,13 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
/* Restore layer specific context & set frame rate */
layer = cpi->oxcf.layer_id[
- cm->current_video_frame % cpi->oxcf.periodicity];
+ cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
restore_layer_context (cpi, layer);
- vp8_new_frame_rate (cpi, cpi->layer_context[layer].frame_rate);
+ vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
}
if (cpi->compressor_speed == 2)
{
- if (cpi->oxcf.number_of_layers == 1)
- check_gf_quality(cpi);
vpx_usec_timer_start(&tsctimer);
vpx_usec_timer_start(&ticktimer);
}
@@ -5229,7 +5255,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
if (cm->show_frame)
{
-
+ cpi->common.show_frame_mi = cpi->common.mi;
cpi->count ++;
if (cpi->b_calculate_psnr)
@@ -5410,6 +5436,7 @@ int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp8_ppfla
#endif
#if CONFIG_POSTPROC
+ cpi->common.show_frame_mi = cpi->common.mi;
ret = vp8_post_proc_frame(&cpi->common, dest, flags);
#else
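
The onyx_if.c hunks above replace uses of cm->current_video_frame with a
dedicated temporal_pattern_counter that is advanced even for dropped frames,
so the temporal-layer pattern cannot drift when frames are skipped. A minimal
sketch of the resulting layer lookup, using only the names visible in the
diff (the surrounding encode logic is omitted):

    /* Which temporal layer does the next frame belong to?  Assumes the
     * VP8_CONFIG fields layer_id[] and periodicity used above. */
    static int current_layer(const VP8_CONFIG *oxcf,
                             unsigned int temporal_pattern_counter)
    {
        /* Dropped frames also advance the counter, so this index stays
         * in phase with the encoded pattern. */
        return oxcf->layer_id[temporal_pattern_counter % oxcf->periodicity];
    }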
diff --git a/libvpx/vp8/encoder/onyx_int.h b/libvpx/vp8/encoder/onyx_int.h
index ed9c762..3ab0fe8 100644
--- a/libvpx/vp8/encoder/onyx_int.h
+++ b/libvpx/vp8/encoder/onyx_int.h
@@ -43,7 +43,7 @@
#define AF_THRESH 25
#define AF_THRESH2 100
#define ARF_DECAY_THRESH 12
-#define MAX_MODES 20
+
#define MIN_THRESHMULT 32
#define MAX_THRESHMULT 512
@@ -232,7 +232,7 @@ enum
typedef struct
{
/* Layer configuration */
- double frame_rate;
+ double framerate;
int target_bandwidth;
/* Layer specific coding parameters */
@@ -282,17 +282,17 @@ typedef struct VP8_COMP
{
DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, unsigned char, Y1quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y1quant_shift[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y1zbin[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y1round[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y2quant[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, unsigned char, Y2quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y2quant_shift[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y2zbin[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y2round[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, UVquant[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, unsigned char, UVquant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, UVquant_shift[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, UVzbin[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, UVround[QINDEX_RANGE][16]);
@@ -320,6 +320,7 @@ typedef struct VP8_COMP
YV12_BUFFER_CONFIG scaled_source;
YV12_BUFFER_CONFIG *last_frame_unscaled_source;
+ unsigned int frames_till_alt_ref_frame;
/* frame in src_buffers has been identified to be encoded as an alt ref */
int source_alt_ref_pending;
/* an alt ref frame has been encoded and is usable */
@@ -349,13 +350,8 @@ typedef struct VP8_COMP
int ambient_err;
unsigned int mode_check_freq[MAX_MODES];
- unsigned int mode_test_hit_counts[MAX_MODES];
- unsigned int mode_chosen_counts[MAX_MODES];
- unsigned int mbs_tested_so_far;
- int rd_thresh_mult[MAX_MODES];
int rd_baseline_thresh[MAX_MODES];
- int rd_threshes[MAX_MODES];
int RDMULT;
int RDDIV ;
@@ -374,6 +370,7 @@ typedef struct VP8_COMP
double key_frame_rate_correction_factor;
double gf_rate_correction_factor;
+ unsigned int frames_since_golden;
/* Count down till next GF */
int frames_till_gf_update_due;
@@ -406,7 +403,7 @@ typedef struct VP8_COMP
/* Minimum allocation that should be used for any frame */
int min_frame_bandwidth;
int inter_frame_target;
- double output_frame_rate;
+ double output_framerate;
int64_t last_time_stamp_seen;
int64_t last_end_time_stamp_seen;
int64_t first_time_stamp_ever;
@@ -416,18 +413,12 @@ typedef struct VP8_COMP
int ni_frames;
int avg_frame_qindex;
- int zbin_over_quant;
- int zbin_mode_boost;
- int zbin_mode_boost_enabled;
- int last_zbin_over_quant;
- int last_zbin_mode_boost;
-
int64_t total_byte_count;
int buffered_mode;
- double frame_rate;
- double ref_frame_rate;
+ double framerate;
+ double ref_framerate;
int64_t buffer_level;
int64_t bits_off_target;
@@ -477,7 +468,6 @@ typedef struct VP8_COMP
int Speed;
int compressor_speed;
- int interquantizer;
int auto_gold;
int auto_adjust_gold_quantizer;
int auto_worst_q;
@@ -493,24 +483,16 @@ typedef struct VP8_COMP
int last_skip_probs_q[3];
int recent_ref_frame_usage[MAX_REF_FRAMES];
- int count_mb_ref_frame_usage[MAX_REF_FRAMES];
int this_frame_percent_intra;
int last_frame_percent_intra;
int ref_frame_flags;
SPEED_FEATURES sf;
- int error_bins[1024];
- /* Data used for real time conferencing mode to help determine if it
- * would be good to update the gf
- */
- int inter_zz_count;
/* Count ZEROMV on all reference frames. */
int zeromv_count;
int lf_zeromv_pct;
- int gf_bad_count;
- int gf_update_recommended;
unsigned char *segmentation_map;
signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
@@ -529,6 +511,10 @@ typedef struct VP8_COMP
int cyclic_refresh_q;
signed char *cyclic_refresh_map;
+ // Frame counter for the temporal pattern. Counter is reset when the temporal
+ // layers are changed dynamically (run-time change).
+ unsigned int temporal_pattern_counter;
+
#if CONFIG_MULTITHREAD
/* multithread data */
int * mt_current_mb_col;
@@ -606,7 +592,7 @@ typedef struct VP8_COMP
/* Error score of frames still to be coded in kf group */
int64_t kf_group_error_left;
/* Projected Bits available for a group including 1 GF or ARF */
- int gf_group_bits;
+ int64_t gf_group_bits;
/* Bits for the golden frame or ARF */
int gf_bits;
int alt_extra_bits;
@@ -712,11 +698,8 @@ typedef struct VP8_COMP
} rd_costs;
} VP8_COMP;
-void control_data_rate(VP8_COMP *cpi);
-
-void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char *dest_end, unsigned long *size);
-
-int rd_cost_intra_mb(MACROBLOCKD *x);
+void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
+ unsigned char *dest_end, unsigned long *size);
void vp8_tokenize_mb(VP8_COMP *, MACROBLOCK *, TOKENEXTRA **);
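
Two themes run through the onyx_int.h hunk: per-macroblock-context state
(rd_thresh_mult, rd_threshes, mode_test_hit_counts, error_bins, the zbin_*
fields) leaves VP8_COMP, presumably landing in MACROBLOCK, whose definition
is not part of this diff, so each encoding context owns its own mutable copy;
and gf_group_bits widens from int to int64_t. A rough overflow bound for the
latter, ordinary arithmetic rather than anything stated in the diff:

    #include <stdint.h>

    /* Why 32 bits is tight for a GF/ARF group's bit budget: the budget
     * itself fits, but rate-control scaling multiplies it by percent
     * boosts before dividing back down.  20 Mbit/s over an 8 s group is
     * 1.6e8 bits; a 300% boost makes the intermediate product 4.8e10,
     * well past INT32_MAX (about 2.1e9).  64-bit math is safe. */
    int64_t group_bits(int64_t bandwidth_bps, double group_seconds,
                       int boost_pct)
    {
        int64_t base = (int64_t)(bandwidth_bps * group_seconds);
        return base * boost_pct / 100;
    }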
diff --git a/libvpx/vp8/encoder/pickinter.c b/libvpx/vp8/encoder/pickinter.c
index 3f09a9f..c5279fe 100644
--- a/libvpx/vp8/encoder/pickinter.c
+++ b/libvpx/vp8/encoder/pickinter.c
@@ -389,7 +389,7 @@ static void pick_intra_mbuv_mode(MACROBLOCK *mb)
}
-static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv)
+static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv)
{
MACROBLOCKD *xd = &x->e_mbd;
/* Split MV modes currently not supported when RD is not enabled,
@@ -594,6 +594,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
unsigned int zero_mv_sse = INT_MAX, best_sse = INT_MAX;
#endif
+ int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
int_mv mvp;
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
@@ -680,7 +681,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
/* Count of the number of MBs tested so far this frame */
- cpi->mbs_tested_so_far++;
+ x->mbs_tested_so_far++;
*returnintra = INT_MAX;
x->skip = 0;
@@ -701,7 +702,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int this_rd = INT_MAX;
int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
- if (best_rd <= cpi->rd_threshes[mode_index])
+ if (best_rd <= x->rd_threshes[mode_index])
continue;
if (this_ref_frame < 0)
@@ -746,22 +747,22 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Check to see if the testing frequency for this mode is at its max
* If so then prevent it from being tested and increase the threshold
* for its testing */
- if (cpi->mode_test_hit_counts[mode_index] &&
+ if (x->mode_test_hit_counts[mode_index] &&
(cpi->mode_check_freq[mode_index] > 1))
{
- if (cpi->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
- cpi->mode_test_hit_counts[mode_index]))
+ if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
+ x->mode_test_hit_counts[mode_index]))
{
/* Increase the threshold for coding this mode to make it less
* likely to be chosen */
- cpi->rd_thresh_mult[mode_index] += 4;
+ x->rd_thresh_mult[mode_index] += 4;
- if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
- cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
- cpi->rd_threshes[mode_index] =
+ x->rd_threshes[mode_index] =
(cpi->rd_baseline_thresh[mode_index] >> 7) *
- cpi->rd_thresh_mult[mode_index];
+ x->rd_thresh_mult[mode_index];
continue;
}
}
@@ -769,7 +770,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* We have now reached the point where we are going to test the current
* mode so increment the counter for the number of times it has been
* tested */
- cpi->mode_test_hit_counts[mode_index] ++;
+ x->mode_test_hit_counts[mode_index] ++;
rate2 = 0;
distortion2 = 0;
@@ -882,7 +883,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
last frame motion info is not stored, then we can not
use improved_mv_pred. */
if (cpi->oxcf.mr_encoder_id && !parent_ref_valid)
- cpi->sf.improved_mv_pred = 0;
+ sf_improved_mv_pred = 0;
if (parent_ref_valid && parent_ref_frame)
{
@@ -899,7 +900,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}else
#endif
{
- if(cpi->sf.improved_mv_pred)
+ if(sf_improved_mv_pred)
{
if(!saddone)
{
@@ -1109,12 +1110,12 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Testing this mode gave rise to an improvement in best error
* score. Lower threshold a bit for next time
*/
- cpi->rd_thresh_mult[mode_index] =
- (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
- cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
- cpi->rd_threshes[mode_index] =
+ x->rd_thresh_mult[mode_index] =
+ (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
+ x->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
+ x->rd_threshes[mode_index] =
(cpi->rd_baseline_thresh[mode_index] >> 7) *
- cpi->rd_thresh_mult[mode_index];
+ x->rd_thresh_mult[mode_index];
}
/* If the mode did not help improve the best error case then raise the
@@ -1122,14 +1123,14 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
*/
else
{
- cpi->rd_thresh_mult[mode_index] += 4;
+ x->rd_thresh_mult[mode_index] += 4;
- if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
- cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
- cpi->rd_threshes[mode_index] =
+ x->rd_threshes[mode_index] =
(cpi->rd_baseline_thresh[mode_index] >> 7) *
- cpi->rd_thresh_mult[mode_index];
+ x->rd_thresh_mult[mode_index];
}
if (x->skip)
@@ -1139,16 +1140,16 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Reduce the activation RD thresholds for the best choice mode */
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
{
- int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 3);
+ int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3);
- cpi->rd_thresh_mult[best_mode_index] =
- (cpi->rd_thresh_mult[best_mode_index]
+ x->rd_thresh_mult[best_mode_index] =
+ (x->rd_thresh_mult[best_mode_index]
>= (MIN_THRESHMULT + best_adjustment)) ?
- cpi->rd_thresh_mult[best_mode_index] - best_adjustment :
+ x->rd_thresh_mult[best_mode_index] - best_adjustment :
MIN_THRESHMULT;
- cpi->rd_threshes[best_mode_index] =
+ x->rd_threshes[best_mode_index] =
(cpi->rd_baseline_thresh[best_mode_index] >> 7) *
- cpi->rd_thresh_mult[best_mode_index];
+ x->rd_thresh_mult[best_mode_index];
}
@@ -1160,7 +1161,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
this_rdbin = 1023;
}
- cpi->error_bins[this_rdbin] ++;
+ x->error_bins[this_rdbin] ++;
}
#if CONFIG_TEMPORAL_DENOISING
@@ -1241,7 +1242,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
!= cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame])
best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
- update_mvcount(cpi, x, &best_ref_mv);
+ update_mvcount(x, &best_ref_mv);
}
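
The pickinter.c hunks move the mode-test bookkeeping (mbs_tested_so_far,
mode_test_hit_counts, rd_thresh_mult, rd_threshes) from the encoder onto the
MACROBLOCK; the adaptation rule itself is unchanged. Condensed from the hunks
above, with the constants and the >> 7 scaling exactly as shown there:

    /* Raise the RD threshold for a mode that keeps getting tested but
     * not chosen (128 == x1; MAX_THRESHMULT bounds the multiplier). */
    static void raise_mode_threshold(const VP8_COMP *cpi, MACROBLOCK *x,
                                     int mode_index)
    {
        x->rd_thresh_mult[mode_index] += 4;
        if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
            x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
        x->rd_threshes[mode_index] =
            (cpi->rd_baseline_thresh[mode_index] >> 7) *
            x->rd_thresh_mult[mode_index];
    }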
diff --git a/libvpx/vp8/encoder/picklpf.c b/libvpx/vp8/encoder/picklpf.c
index 4121349..250d04c 100644
--- a/libvpx/vp8/encoder/picklpf.c
+++ b/libvpx/vp8/encoder/picklpf.c
@@ -9,11 +9,12 @@
*/
+#include "./vpx_scale_rtcd.h"
#include "vp8/common/onyxc_int.h"
#include "onyx_int.h"
#include "quantize.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/alloccommon.h"
#include "vp8/common/loopfilter.h"
#if ARCH_ARM
@@ -312,7 +313,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
/* Get baseline error score */
/* Copy the unfiltered / processed recon buffer to the new buffer */
- vp8_yv12_copy_y(saved_frame, cm->frame_to_show);
+ vpx_yv12_copy_y(saved_frame, cm->frame_to_show);
vp8cx_set_alt_lf_level(cpi, filt_mid);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
@@ -338,7 +339,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
if(ss_err[filt_low] == 0)
{
/* Get Low filter error score */
- vp8_yv12_copy_y(saved_frame, cm->frame_to_show);
+ vpx_yv12_copy_y(saved_frame, cm->frame_to_show);
vp8cx_set_alt_lf_level(cpi, filt_low);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
@@ -366,7 +367,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
{
if(ss_err[filt_high] == 0)
{
- vp8_yv12_copy_y(saved_frame, cm->frame_to_show);
+ vpx_yv12_copy_y(saved_frame, cm->frame_to_show);
vp8cx_set_alt_lf_level(cpi, filt_high);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
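
In picklpf.c the copies switch from vp8_yv12_copy_y to the shared vpx_scale
implementation (vpx_yv12_copy_y), matching the vpxscale.h-to-vpx_scale.h
include rename. The pattern being preserved: each candidate filter strength
is scored from the same unfiltered starting point. A sketch of one trial,
assuming the helper names shown in the hunks (error measurement elided):

    static void try_filter_level(VP8_COMP *cpi, VP8_COMMON *cm,
                                 YV12_BUFFER_CONFIG *saved_frame, int level)
    {
        /* Reset the recon buffer so every level starts identically. */
        vpx_yv12_copy_y(saved_frame, cm->frame_to_show);
        vp8cx_set_alt_lf_level(cpi, level);
        vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, level);
        /* The caller then measures error against the source frame. */
    }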
diff --git a/libvpx/vp8/encoder/psnr.c b/libvpx/vp8/encoder/psnr.c
index 5bb49ad..b3a3d95 100644
--- a/libvpx/vp8/encoder/psnr.c
+++ b/libvpx/vp8/encoder/psnr.c
@@ -13,7 +13,7 @@
#include "math.h"
#include "vp8/common/systemdependent.h" /* for vp8_clear_system_state() */
-#define MAX_PSNR 60
+#define MAX_PSNR 100
double vp8_mse2psnr(double Samples, double Peak, double Mse)
{
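
Raising MAX_PSNR from 60 to 100 matters for frames coded near-losslessly: as
MSE approaches zero the true PSNR exceeds 60 dB, and the old cap flattened
those scores. The body of vp8_mse2psnr is not part of this hunk, so the
mapping below is the usual one, sketched as an assumption:

    #include <math.h>

    #define MAX_PSNR 100

    /* Standard MSE-to-PSNR mapping with a cap for the MSE -> 0 case. */
    double mse2psnr(double samples, double peak, double mse)
    {
        double psnr = MAX_PSNR;
        if (mse > 0.0)
            psnr = 10.0 * log10(peak * peak * samples / mse);
        return psnr > MAX_PSNR ? MAX_PSNR : psnr;
    }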
diff --git a/libvpx/vp8/encoder/quantize.c b/libvpx/vp8/encoder/quantize.c
index 88fea11..fda997f 100644
--- a/libvpx/vp8/encoder/quantize.c
+++ b/libvpx/vp8/encoder/quantize.c
@@ -50,8 +50,8 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
if (x >= zbin)
{
x += round_ptr[rc];
- y = (((x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; /* quantize (x) */
+ y = ((((x * quant_ptr[rc]) >> 16) + x)
+ * quant_shift_ptr[rc]) >> 16; /* quantize (x) */
x = (y ^ sz) - sz; /* get the sign back */
qcoeff_ptr[rc] = x; /* write to destination */
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
@@ -113,7 +113,7 @@ void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
short *zbin_ptr = b->zbin;
short *round_ptr = b->round;
short *quant_ptr = b->quant;
- unsigned char *quant_shift_ptr = b->quant_shift;
+ short *quant_shift_ptr = b->quant_shift;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
@@ -138,8 +138,8 @@ void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
if (x >= zbin)
{
x += round_ptr[rc];
- y = (((x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; /* quantize (x) */
+ y = ((((x * quant_ptr[rc]) >> 16) + x)
+ * quant_shift_ptr[rc]) >> 16; /* quantize (x) */
x = (y ^ sz) - sz; /* get the sign back */
qcoeff_ptr[rc] = x; /* write to destination */
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
@@ -167,7 +167,7 @@ void vp8_strict_quantize_b_c(BLOCK *b, BLOCKD *d)
int sz;
short *coeff_ptr;
short *quant_ptr;
- unsigned char *quant_shift_ptr;
+ short *quant_shift_ptr;
short *qcoeff_ptr;
short *dqcoeff_ptr;
short *dequant_ptr;
@@ -184,21 +184,21 @@ void vp8_strict_quantize_b_c(BLOCK *b, BLOCKD *d)
for (i = 0; i < 16; i++)
{
int dq;
- int round;
+ int rounding;
/*TODO: These arrays should be stored in zig-zag order.*/
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
dq = dequant_ptr[rc];
- round = dq >> 1;
+ rounding = dq >> 1;
/* Sign of z. */
sz = -(z < 0);
x = (z + sz) ^ sz;
- x += round;
+ x += rounding;
if (x >= dq)
{
/* Quantize x. */
- y = (((x * quant_ptr[rc]) >> 16) + x) >> quant_shift_ptr[rc];
+ y = ((((x * quant_ptr[rc]) >> 16) + x) * quant_shift_ptr[rc]) >> 16;
/* Put the sign back. */
x = (y + sz) ^ sz;
/* Save the coefficient and its dequantized value. */
@@ -406,7 +406,7 @@ static const int qzbin_factors_y2[129] =
#define EXACT_QUANT
#ifdef EXACT_QUANT
static void invert_quant(int improved_quant, short *quant,
- unsigned char *shift, short d)
+ short *shift, short d)
{
if(improved_quant)
{
@@ -418,11 +418,15 @@ static void invert_quant(int improved_quant, short *quant,
t = 1 + (1<<(16+l))/d;
*quant = (short)(t - (1<<16));
*shift = l;
+ /* use multiplication and constant shift by 16 */
+ *shift = 1 << (16 - *shift);
}
else
{
*quant = (1 << 16) / d;
*shift = 0;
+ /* use multiplication and constant shift by 16 */
+ *shift = 1 << (16 - *shift);
}
}
@@ -587,20 +591,20 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
#define ZBIN_EXTRA_Y \
(( cpi->common.Y1dequant[QIndex][1] * \
- ( cpi->zbin_over_quant + \
- cpi->zbin_mode_boost + \
+ ( x->zbin_over_quant + \
+ x->zbin_mode_boost + \
x->act_zbin_adj ) ) >> 7)
#define ZBIN_EXTRA_UV \
(( cpi->common.UVdequant[QIndex][1] * \
- ( cpi->zbin_over_quant + \
- cpi->zbin_mode_boost + \
+ ( x->zbin_over_quant + \
+ x->zbin_mode_boost + \
x->act_zbin_adj ) ) >> 7)
#define ZBIN_EXTRA_Y2 \
(( cpi->common.Y2dequant[QIndex][1] * \
- ( (cpi->zbin_over_quant / 2) + \
- cpi->zbin_mode_boost + \
+ ( (x->zbin_over_quant / 2) + \
+ x->zbin_mode_boost + \
x->act_zbin_adj ) ) >> 7)
void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
@@ -702,15 +706,15 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
/* save this macroblock QIndex for vp8_update_zbin_extra() */
x->q_index = QIndex;
- cpi->last_zbin_over_quant = cpi->zbin_over_quant;
- cpi->last_zbin_mode_boost = cpi->zbin_mode_boost;
+ x->last_zbin_over_quant = x->zbin_over_quant;
+ x->last_zbin_mode_boost = x->zbin_mode_boost;
x->last_act_zbin_adj = x->act_zbin_adj;
}
- else if(cpi->last_zbin_over_quant != cpi->zbin_over_quant
- || cpi->last_zbin_mode_boost != cpi->zbin_mode_boost
+ else if(x->last_zbin_over_quant != x->zbin_over_quant
+ || x->last_zbin_mode_boost != x->zbin_mode_boost
|| x->last_act_zbin_adj != x->act_zbin_adj)
{
/* Y */
@@ -729,8 +733,8 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
zbin_extra = ZBIN_EXTRA_Y2;
x->block[24].zbin_extra = (short)zbin_extra;
- cpi->last_zbin_over_quant = cpi->zbin_over_quant;
- cpi->last_zbin_mode_boost = cpi->zbin_mode_boost;
+ x->last_zbin_over_quant = x->zbin_over_quant;
+ x->last_zbin_mode_boost = x->zbin_mode_boost;
x->last_act_zbin_adj = x->act_zbin_adj;
}
}
@@ -764,7 +768,7 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
{
/* Clear Zbin mode boost for default case */
- cpi->zbin_mode_boost = 0;
+ cpi->mb.zbin_mode_boost = 0;
/* MB level quantizer setup */
vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
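
The quantize.c hunks replace a data-dependent right shift with a multiply by
a precomputed factor followed by a constant shift of 16, which is why
invert_quant() now stores 1 << (16 - l) and quant_shift becomes short. The
change is exact: for v >= 0 and 1 <= s <= 16,
v >> s == (v * (1 << (16 - s))) >> 16. A self-contained check of that
identity (the vectorization benefit of the constant shift is our inference,
not stated in the diff):

    #include <assert.h>

    static int shift_form(int v, int s)      { return v >> s; }
    static int multiply_form(int v, int m16) { return (v * m16) >> 16; }

    int main(void)
    {
        int s, v;
        for (s = 1; s <= 16; s++)
            for (v = 0; v < (1 << 14); v++)  /* products stay below 2^31 */
                assert(shift_form(v, s) ==
                       multiply_form(v, 1 << (16 - s)));
        return 0;
    }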
diff --git a/libvpx/vp8/encoder/ratectrl.c b/libvpx/vp8/encoder/ratectrl.c
index 77c1c5a..1e8259c 100644
--- a/libvpx/vp8/encoder/ratectrl.c
+++ b/libvpx/vp8/encoder/ratectrl.c
@@ -234,7 +234,7 @@ void vp8_save_coding_context(VP8_COMP *cpi)
cc->frames_since_key = cpi->frames_since_key;
cc->filter_level = cpi->common.filter_level;
cc->frames_till_gf_update_due = cpi->frames_till_gf_update_due;
- cc->frames_since_golden = cpi->common.frames_since_golden;
+ cc->frames_since_golden = cpi->frames_since_golden;
vp8_copy(cc->mvc, cpi->common.fc.mvc);
vp8_copy(cc->mvcosts, cpi->rd_costs.mvcosts);
@@ -271,7 +271,7 @@ void vp8_restore_coding_context(VP8_COMP *cpi)
cpi->frames_since_key = cc->frames_since_key;
cpi->common.filter_level = cc->filter_level;
cpi->frames_till_gf_update_due = cc->frames_till_gf_update_due;
- cpi->common.frames_since_golden = cc->frames_since_golden;
+ cpi->frames_since_golden = cc->frames_since_golden;
vp8_copy(cpi->common.fc.mvc, cc->mvc);
@@ -388,7 +388,7 @@ static void calc_iframe_target_size(VP8_COMP *cpi)
int initial_boost = 32; /* |3.0 * per_frame_bandwidth| */
/* Boost depends somewhat on frame rate: only used for 1 layer case. */
if (cpi->oxcf.number_of_layers == 1) {
- kf_boost = MAX(initial_boost, (int)(2 * cpi->output_frame_rate - 16));
+ kf_boost = MAX(initial_boost, (int)(2 * cpi->output_framerate - 16));
}
else {
/* Initial factor: set target size to: |3.0 * per_frame_bandwidth|. */
@@ -399,9 +399,9 @@ static void calc_iframe_target_size(VP8_COMP *cpi)
kf_boost = kf_boost * kf_boost_qadjustment[Q] / 100;
/* frame separation adjustment ( down) */
- if (cpi->frames_since_key < cpi->output_frame_rate / 2)
+ if (cpi->frames_since_key < cpi->output_framerate / 2)
kf_boost = (int)(kf_boost
- * cpi->frames_since_key / (cpi->output_frame_rate / 2));
+ * cpi->frames_since_key / (cpi->output_framerate / 2));
/* Minimal target size is |2* per_frame_bandwidth|. */
if (kf_boost < 16)
@@ -614,7 +614,6 @@ static void calc_gf_params(VP8_COMP *cpi)
static void calc_pframe_target_size(VP8_COMP *cpi)
{
int min_frame_target;
- int Adjustment;
int old_per_frame_bandwidth = cpi->per_frame_bandwidth;
if ( cpi->current_layer > 0)
@@ -658,6 +657,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
/* 1 pass */
else
{
+ int Adjustment;
/* Make rate adjustment to recover bits spent in key frame
* Test to see if the key frame inter data rate correction
* should still be in force
@@ -688,7 +688,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
*/
if ((cpi->gf_overspend_bits > 0) && (cpi->this_frame_target > min_frame_target))
{
- int Adjustment = (cpi->non_gf_bitrate_adjustment <= cpi->gf_overspend_bits) ? cpi->non_gf_bitrate_adjustment : cpi->gf_overspend_bits;
+ Adjustment = (cpi->non_gf_bitrate_adjustment <= cpi->gf_overspend_bits) ? cpi->non_gf_bitrate_adjustment : cpi->gf_overspend_bits;
if (Adjustment > (cpi->this_frame_target - min_frame_target))
Adjustment = (cpi->this_frame_target - min_frame_target);
@@ -715,7 +715,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
if (Adjustment > (cpi->this_frame_target - min_frame_target))
Adjustment = (cpi->this_frame_target - min_frame_target);
- if (cpi->common.frames_since_golden == (cpi->current_gf_interval >> 1))
+ if (cpi->frames_since_golden == (cpi->current_gf_interval >> 1))
cpi->this_frame_target += ((cpi->current_gf_interval - 1) * Adjustment);
else
cpi->this_frame_target -= Adjustment;
@@ -1109,7 +1109,9 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
}
else
{
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
+ if (cpi->oxcf.number_of_layers == 1 &&
+ (cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame))
rate_correction_factor = cpi->gf_rate_correction_factor;
else
rate_correction_factor = cpi->rate_correction_factor;
@@ -1122,9 +1124,9 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
projected_size_based_on_q = (int)(((.5 + rate_correction_factor * vp8_bits_per_mb[cpi->common.frame_type][Q]) * cpi->common.MBs) / (1 << BPER_MB_NORMBITS));
/* Make some allowance for cpi->zbin_over_quant */
- if (cpi->zbin_over_quant > 0)
+ if (cpi->mb.zbin_over_quant > 0)
{
- int Z = cpi->zbin_over_quant;
+ int Z = cpi->mb.zbin_over_quant;
double Factor = 0.99;
double factor_adjustment = 0.01 / 256.0;
@@ -1186,7 +1188,9 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
cpi->key_frame_rate_correction_factor = rate_correction_factor;
else
{
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
+ if (cpi->oxcf.number_of_layers == 1 &&
+ (cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame))
cpi->gf_rate_correction_factor = rate_correction_factor;
else
cpi->rate_correction_factor = rate_correction_factor;
@@ -1199,7 +1203,7 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
int Q = cpi->active_worst_quality;
/* Reset Zbin OQ value */
- cpi->zbin_over_quant = 0;
+ cpi->mb.zbin_over_quant = 0;
if (cpi->oxcf.fixed_q >= 0)
{
@@ -1209,11 +1213,13 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
{
Q = cpi->oxcf.key_q;
}
- else if (cpi->common.refresh_alt_ref_frame)
+ else if (cpi->oxcf.number_of_layers == 1 &&
+ cpi->common.refresh_alt_ref_frame)
{
Q = cpi->oxcf.alt_q;
}
- else if (cpi->common.refresh_golden_frame)
+ else if (cpi->oxcf.number_of_layers == 1 &&
+ cpi->common.refresh_golden_frame)
{
Q = cpi->oxcf.gold_q;
}
@@ -1232,7 +1238,9 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
correction_factor = cpi->key_frame_rate_correction_factor;
else
{
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
+ if (cpi->oxcf.number_of_layers == 1 &&
+ (cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame))
correction_factor = cpi->gf_rate_correction_factor;
else
correction_factor = cpi->rate_correction_factor;
@@ -1281,7 +1289,10 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
if (cpi->common.frame_type == KEY_FRAME)
zbin_oqmax = 0;
- else if (cpi->common.refresh_alt_ref_frame || (cpi->common.refresh_golden_frame && !cpi->source_alt_ref_active))
+ else if (cpi->oxcf.number_of_layers == 1 &&
+ (cpi->common.refresh_alt_ref_frame ||
+ (cpi->common.refresh_golden_frame &&
+ !cpi->source_alt_ref_active)))
zbin_oqmax = 16;
else
zbin_oqmax = ZBIN_OQ_MAX;
@@ -1307,12 +1318,12 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
* normal maximum by expanding the zero bin and hence
* decreasing the number of low magnitude non zero coefficients.
*/
- while (cpi->zbin_over_quant < zbin_oqmax)
+ while (cpi->mb.zbin_over_quant < zbin_oqmax)
{
- cpi->zbin_over_quant ++;
+ cpi->mb.zbin_over_quant ++;
- if (cpi->zbin_over_quant > zbin_oqmax)
- cpi->zbin_over_quant = zbin_oqmax;
+ if (cpi->mb.zbin_over_quant > zbin_oqmax)
+ cpi->mb.zbin_over_quant = zbin_oqmax;
/* Adjust bits_per_mb_at_this_q estimate */
bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q);
@@ -1349,10 +1360,10 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi)
* whichever is smaller.
*/
int key_freq = cpi->oxcf.key_freq>0 ? cpi->oxcf.key_freq : 1;
- av_key_frame_frequency = (int)cpi->output_frame_rate * 2;
+ av_key_frame_frequency = 1 + (int)cpi->output_framerate * 2;
if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
- av_key_frame_frequency = cpi->oxcf.key_freq;
+ av_key_frame_frequency = key_freq;
cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1]
= av_key_frame_frequency;
@@ -1382,6 +1393,10 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi)
av_key_frame_frequency /= total_weight;
}
+ // TODO (marpan): Given the checks above, |av_key_frame_frequency|
+ // should always be above 0. But for now we keep the sanity check in.
+ if (av_key_frame_frequency == 0)
+ av_key_frame_frequency = 1;
return av_key_frame_frequency;
}
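
The ratectrl.c hunks gate every use of the GF/ARF rate correction factor and
of alt_q/gold_q behind number_of_layers == 1. Our reading, not stated in the
diff: with temporal layers, golden and alt-ref refreshes are part of the
layer pattern rather than genuine boosted frames, so the plain inter factor
applies. The selection logic, condensed:

    /* Pick the rate correction factor after the change above. */
    static double pick_rc_factor(const VP8_COMP *cpi)
    {
        if (cpi->common.frame_type == KEY_FRAME)
            return cpi->key_frame_rate_correction_factor;
        if (cpi->oxcf.number_of_layers == 1 &&
            (cpi->common.refresh_alt_ref_frame ||
             cpi->common.refresh_golden_frame))
            return cpi->gf_rate_correction_factor;
        return cpi->rate_correction_factor;
    }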
diff --git a/libvpx/vp8/encoder/rdopt.c b/libvpx/vp8/encoder/rdopt.c
index 7d80606..521e84f 100644
--- a/libvpx/vp8/encoder/rdopt.c
+++ b/libvpx/vp8/encoder/rdopt.c
@@ -14,7 +14,7 @@
#include <limits.h>
#include <assert.h>
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/pragmas.h"
#include "tokenize.h"
#include "treewriter.h"
@@ -223,7 +223,7 @@ void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex)
cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex];
}
-void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
+void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue)
{
int q;
int i;
@@ -238,15 +238,15 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
cpi->RDMULT = (int)(rdconst * (capped_q * capped_q));
/* Extend rate multiplier along side quantizer zbin increases */
- if (cpi->zbin_over_quant > 0)
+ if (cpi->mb.zbin_over_quant > 0)
{
double oq_factor;
double modq;
/* Experimental code using the same basic equation as used for Q above
- * The units of cpi->zbin_over_quant are 1/128 of Q bin size
+ * The units of cpi->mb.zbin_over_quant are 1/128 of Q bin size
*/
- oq_factor = 1.0 + ((double)0.0015625 * cpi->zbin_over_quant);
+ oq_factor = 1.0 + ((double)0.0015625 * cpi->mb.zbin_over_quant);
modq = (int)((double)capped_q * oq_factor);
cpi->RDMULT = (int)(rdconst * (modq * modq));
}
@@ -265,6 +265,11 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
vp8_set_speed_features(cpi);
+ for (i = 0; i < MAX_MODES; i++)
+ {
+ x->mode_test_hit_counts[i] = 0;
+ }
+
q = (int)pow(Qvalue, 1.25);
if (q < 8)
@@ -279,14 +284,14 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
{
if (cpi->sf.thresh_mult[i] < INT_MAX)
{
- cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
+ x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
}
else
{
- cpi->rd_threshes[i] = INT_MAX;
+ x->rd_threshes[i] = INT_MAX;
}
- cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
+ cpi->rd_baseline_thresh[i] = x->rd_threshes[i];
}
}
else
@@ -297,14 +302,14 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
{
if (cpi->sf.thresh_mult[i] < (INT_MAX / q))
{
- cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
+ x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
}
else
{
- cpi->rd_threshes[i] = INT_MAX;
+ x->rd_threshes[i] = INT_MAX;
}
- cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
+ cpi->rd_baseline_thresh[i] = x->rd_threshes[i];
}
}
@@ -336,7 +341,7 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
void vp8_auto_select_speed(VP8_COMP *cpi)
{
- int milliseconds_for_compress = (int)(1000000 / cpi->frame_rate);
+ int milliseconds_for_compress = (int)(1000000 / cpi->framerate);
milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;
@@ -879,8 +884,8 @@ static void rd_pick_intra_mbuv_mode(MACROBLOCK *x, int *rate,
for (mode = DC_PRED; mode <= TM_PRED; mode++)
{
- int rate;
- int distortion;
+ int this_rate;
+ int this_distortion;
int this_rd;
xd->mode_info_context->mbmi.uv_mode = mode;
@@ -902,17 +907,17 @@ static void rd_pick_intra_mbuv_mode(MACROBLOCK *x, int *rate,
vp8_quantize_mbuv(x);
rate_to = rd_cost_mbuv(x);
- rate = rate_to + x->intra_uv_mode_cost[xd->frame_type][xd->mode_info_context->mbmi.uv_mode];
+ this_rate = rate_to + x->intra_uv_mode_cost[xd->frame_type][xd->mode_info_context->mbmi.uv_mode];
- distortion = vp8_mbuverror(x) / 4;
+ this_distortion = vp8_mbuverror(x) / 4;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+ this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
if (this_rd < best_rd)
{
best_rd = this_rd;
- d = distortion;
- r = rate;
+ d = this_distortion;
+ r = this_rate;
*rate_tokenonly = rate_to;
mode_selected = mode;
}
@@ -1289,12 +1294,11 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
if (bestsme < INT_MAX)
{
- int distortion;
+ int disto;
unsigned int sse;
cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
bsi->ref_mv, x->errorperbit, v_fn_ptr, x->mvcost,
- &distortion, &sse);
-
+ &disto, &sse);
}
} /* NEW4X4 */
@@ -1728,7 +1732,7 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
}
}
-static void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv)
+static void rd_update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv)
{
if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
{
@@ -2010,7 +2014,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
*returnintra = INT_MAX;
/* Count of the number of MBs tested so far this frame */
- cpi->mbs_tested_so_far++;
+ x->mbs_tested_so_far++;
x->skip = 0;
@@ -2022,7 +2026,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
/* Test best rd so far against threshold for trying this mode. */
- if (best_mode.rd <= cpi->rd_threshes[mode_index])
+ if (best_mode.rd <= x->rd_threshes[mode_index])
continue;
if (this_ref_frame < 0)
@@ -2068,19 +2072,21 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
* max If so then prevent it from being tested and increase the
* threshold for its testing
*/
- if (cpi->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
+ if (x->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
{
- if (cpi->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] * cpi->mode_test_hit_counts[mode_index])
+ if (x->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] * x->mode_test_hit_counts[mode_index])
{
/* Increase the threshold for coding this mode to make it
* less likely to be chosen
*/
- cpi->rd_thresh_mult[mode_index] += 4;
+ x->rd_thresh_mult[mode_index] += 4;
- if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
- cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
- cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ x->rd_threshes[mode_index] =
+ (cpi->rd_baseline_thresh[mode_index] >> 7) *
+ x->rd_thresh_mult[mode_index];
continue;
}
@@ -2090,28 +2096,28 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
* current mode so increment the counter for the number of times
* it has been tested
*/
- cpi->mode_test_hit_counts[mode_index] ++;
+ x->mode_test_hit_counts[mode_index] ++;
/* Experimental code. Special case for gf and arf zeromv modes.
* Increase zbin size to suppress noise
*/
- if (cpi->zbin_mode_boost_enabled)
+ if (x->zbin_mode_boost_enabled)
{
if ( this_ref_frame == INTRA_FRAME )
- cpi->zbin_mode_boost = 0;
+ x->zbin_mode_boost = 0;
else
{
if (vp8_mode_order[mode_index] == ZEROMV)
{
if (this_ref_frame != LAST_FRAME)
- cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
- cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
}
else if (vp8_mode_order[mode_index] == SPLITMV)
- cpi->zbin_mode_boost = 0;
+ x->zbin_mode_boost = 0;
else
- cpi->zbin_mode_boost = MV_ZBIN_BOOST;
+ x->zbin_mode_boost = MV_ZBIN_BOOST;
}
vp8_update_zbin_extra(cpi, x);
@@ -2170,8 +2176,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int this_rd_thresh;
int distortion;
- this_rd_thresh = (vp8_ref_frame_order[mode_index] == 1) ? cpi->rd_threshes[THR_NEW1] : cpi->rd_threshes[THR_NEW3];
- this_rd_thresh = (vp8_ref_frame_order[mode_index] == 2) ? cpi->rd_threshes[THR_NEW2] : this_rd_thresh;
+ this_rd_thresh = (vp8_ref_frame_order[mode_index] == 1) ?
+ x->rd_threshes[THR_NEW1] : x->rd_threshes[THR_NEW3];
+ this_rd_thresh = (vp8_ref_frame_order[mode_index] == 2) ?
+ x->rd_threshes[THR_NEW2] : this_rd_thresh;
tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
best_mode.yrd, mdcounts,
@@ -2464,8 +2472,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Testing this mode gave rise to an improvement in best error
* score. Lower threshold a bit for next time
*/
- cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
- cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ x->rd_thresh_mult[mode_index] =
+ (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
+ x->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
}
/* If the mode did not help improve the best error case then raise
@@ -2473,13 +2482,14 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
*/
else
{
- cpi->rd_thresh_mult[mode_index] += 4;
-
- if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
- cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ x->rd_thresh_mult[mode_index] += 4;
- cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
}
+ x->rd_threshes[mode_index] =
+ (cpi->rd_baseline_thresh[mode_index] >> 7) *
+ x->rd_thresh_mult[mode_index];
if (x->skip)
break;
@@ -2489,15 +2499,18 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Reduce the activation RD thresholds for the best choice mode */
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
{
- int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
-
- cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
- cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
+ int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 2);
+
+ x->rd_thresh_mult[best_mode_index] =
+ (x->rd_thresh_mult[best_mode_index] >=
+ (MIN_THRESHMULT + best_adjustment)) ?
+ x->rd_thresh_mult[best_mode_index] - best_adjustment :
+ MIN_THRESHMULT;
+ x->rd_threshes[best_mode_index] =
+ (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
+ x->rd_thresh_mult[best_mode_index];
}
- /* Note how often each mode chosen as best */
- cpi->mode_chosen_counts[best_mode_index] ++;
-
#if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity)
{
@@ -2591,7 +2604,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
!= cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame])
best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
- rd_update_mvcount(cpi, x, &best_ref_mv);
+ rd_update_mvcount(x, &best_ref_mv);
}
void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate_)
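
vp8_initialize_rd_consts() now takes the MACROBLOCK so the per-mode
thresholds and hit counts it seeds live there. It also still extends RDMULT
when the zero bin is widened; condensed from the hunk above into a standalone
form (the hunk's comment gives zbin_over_quant in 1/128ths of a Q bin):

    /* Grow the RD multiplier to match the effectively coarser quantizer
     * implied by a widened zero bin. */
    double extended_rdmult(double rdconst, int capped_q,
                           int zbin_over_quant)
    {
        double oq_factor = 1.0 + 0.0015625 * zbin_over_quant;
        int modq = (int)((double)capped_q * oq_factor);
        return rdconst * ((double)modq * (double)modq);
    }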
diff --git a/libvpx/vp8/encoder/rdopt.h b/libvpx/vp8/encoder/rdopt.h
index d7b0442..1e11fa7 100644
--- a/libvpx/vp8/encoder/rdopt.h
+++ b/libvpx/vp8/encoder/rdopt.h
@@ -65,7 +65,7 @@ static void insertsortsad(int arr[],int idx[], int len)
}
}
-extern void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue);
+extern void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue);
extern void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra);
extern void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate);
diff --git a/libvpx/vp8/encoder/temporal_filter.c b/libvpx/vp8/encoder/temporal_filter.c
index b83ae89..7e3af71 100644
--- a/libvpx/vp8/encoder/temporal_filter.c
+++ b/libvpx/vp8/encoder/temporal_filter.c
@@ -17,7 +17,7 @@
#include "mcomp.h"
#include "firstpass.h"
#include "psnr.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/extend.h"
#include "ratectrl.h"
#include "vp8/common/quant_common.h"
diff --git a/libvpx/vp8/encoder/tokenize.c b/libvpx/vp8/encoder/tokenize.c
index 3b5268b..11559a7 100644
--- a/libvpx/vp8/encoder/tokenize.c
+++ b/libvpx/vp8/encoder/tokenize.c
@@ -20,7 +20,7 @@
/* Global event counters used for accumulating statistics across several
compressions, then generating context.c = initial stats. */
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
_int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) ;
@@ -413,7 +413,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
void init_context_counters(void)
{
diff --git a/libvpx/vp8/encoder/tokenize.h b/libvpx/vp8/encoder/tokenize.h
index c2d1438..1e6cea1 100644
--- a/libvpx/vp8/encoder/tokenize.h
+++ b/libvpx/vp8/encoder/tokenize.h
@@ -33,7 +33,7 @@ typedef struct
int rd_cost_mby(MACROBLOCKD *);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
void init_context_counters();
void print_context_counters();
diff --git a/libvpx/vp8/encoder/asm_enc_offsets.c b/libvpx/vp8/encoder/vp8_asm_enc_offsets.c
index a4169b3..a4169b3 100644
--- a/libvpx/vp8/encoder/asm_enc_offsets.c
+++ b/libvpx/vp8/encoder/vp8_asm_enc_offsets.c
diff --git a/libvpx/vp8/encoder/x86/dct_sse2.asm b/libvpx/vp8/encoder/x86/dct_sse2.asm
index d880ce0..d06bca5 100644
--- a/libvpx/vp8/encoder/x86/dct_sse2.asm
+++ b/libvpx/vp8/encoder/x86/dct_sse2.asm
@@ -29,7 +29,7 @@
movsxd rax, dword ptr arg(2)
lea rcx, [rsi + rax*2]
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
%define input rcx
%define output rdx
%define pitch r8
@@ -53,7 +53,7 @@
RESTORE_GOT
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
RESTORE_XMM
%endif
%endif
diff --git a/libvpx/vp8/encoder/x86/denoising_sse2.c b/libvpx/vp8/encoder/x86/denoising_sse2.c
index c1ac6c1..cceb826 100644
--- a/libvpx/vp8/encoder/x86/denoising_sse2.c
+++ b/libvpx/vp8/encoder/x86/denoising_sse2.c
@@ -12,9 +12,10 @@
#include "vp8/common/reconinter.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include <emmintrin.h>
+#include "vpx_ports/emmintrin_compat.h"
union sum_union {
__m128i v;
diff --git a/libvpx/vp8/encoder/x86/quantize_sse2.asm b/libvpx/vp8/encoder/x86/quantize_sse2.asm
deleted file mode 100644
index 724e54c..0000000
--- a/libvpx/vp8/encoder/x86/quantize_sse2.asm
+++ /dev/null
@@ -1,386 +0,0 @@
-;
-; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-; Use of this source code is governed by a BSD-style license and patent
-; grant that can be found in the LICENSE file in the root of the source
-; tree. All contributing project authors may be found in the AUTHORS
-; file in the root of the source tree.
-;
-
-
-%include "vpx_ports/x86_abi_support.asm"
-%include "asm_enc_offsets.asm"
-
-
-; void vp8_regular_quantize_b_sse2 | arg
-; (BLOCK *b, | 0
-; BLOCKD *d) | 1
-
-global sym(vp8_regular_quantize_b_sse2) PRIVATE
-sym(vp8_regular_quantize_b_sse2):
- push rbp
- mov rbp, rsp
- SAVE_XMM 7
- GET_GOT rbx
-
-%if ABI_IS_32BIT
- push rdi
- push rsi
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- push rdi
- push rsi
- %endif
-%endif
-
- ALIGN_STACK 16, rax
- %define zrun_zbin_boost 0 ; 8
- %define abs_minus_zbin 8 ; 32
- %define temp_qcoeff 40 ; 32
- %define qcoeff 72 ; 32
- %define stack_size 104
- sub rsp, stack_size
- ; end prolog
-
-%if ABI_IS_32BIT
- mov rdi, arg(0) ; BLOCK *b
- mov rsi, arg(1) ; BLOCKD *d
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- mov rdi, rcx ; BLOCK *b
- mov rsi, rdx ; BLOCKD *d
- %else
- ;mov rdi, rdi ; BLOCK *b
- ;mov rsi, rsi ; BLOCKD *d
- %endif
-%endif
-
- mov rdx, [rdi + vp8_block_coeff] ; coeff_ptr
- mov rcx, [rdi + vp8_block_zbin] ; zbin_ptr
- movd xmm7, [rdi + vp8_block_zbin_extra] ; zbin_oq_value
-
- ; z
- movdqa xmm0, [rdx]
- movdqa xmm4, [rdx + 16]
- mov rdx, [rdi + vp8_block_round] ; round_ptr
-
- pshuflw xmm7, xmm7, 0
- punpcklwd xmm7, xmm7 ; duplicated zbin_oq_value
-
- movdqa xmm1, xmm0
- movdqa xmm5, xmm4
-
- ; sz
- psraw xmm0, 15
- psraw xmm4, 15
-
- ; (z ^ sz)
- pxor xmm1, xmm0
- pxor xmm5, xmm4
-
- ; x = abs(z)
- psubw xmm1, xmm0
- psubw xmm5, xmm4
-
- movdqa xmm2, [rcx]
- movdqa xmm3, [rcx + 16]
- mov rcx, [rdi + vp8_block_quant] ; quant_ptr
-
- ; *zbin_ptr + zbin_oq_value
- paddw xmm2, xmm7
- paddw xmm3, xmm7
-
- ; x - (*zbin_ptr + zbin_oq_value)
- psubw xmm1, xmm2
- psubw xmm5, xmm3
- movdqa [rsp + abs_minus_zbin], xmm1
- movdqa [rsp + abs_minus_zbin + 16], xmm5
-
- ; add (zbin_ptr + zbin_oq_value) back
- paddw xmm1, xmm2
- paddw xmm5, xmm3
-
- movdqa xmm2, [rdx]
- movdqa xmm6, [rdx + 16]
-
- movdqa xmm3, [rcx]
- movdqa xmm7, [rcx + 16]
-
- ; x + round
- paddw xmm1, xmm2
- paddw xmm5, xmm6
-
- ; y = x * quant_ptr >> 16
- pmulhw xmm3, xmm1
- pmulhw xmm7, xmm5
-
- ; y += x
- paddw xmm1, xmm3
- paddw xmm5, xmm7
-
- movdqa [rsp + temp_qcoeff], xmm1
- movdqa [rsp + temp_qcoeff + 16], xmm5
-
- pxor xmm6, xmm6
- ; zero qcoeff
- movdqa [rsp + qcoeff], xmm6
- movdqa [rsp + qcoeff + 16], xmm6
-
- mov rdx, [rdi + vp8_block_zrun_zbin_boost] ; zbin_boost_ptr
- mov rax, [rdi + vp8_block_quant_shift] ; quant_shift_ptr
- mov [rsp + zrun_zbin_boost], rdx
-
-%macro ZIGZAG_LOOP 1
- ; x
- movsx ecx, WORD PTR[rsp + abs_minus_zbin + %1 * 2]
-
- ; if (x >= zbin)
- sub cx, WORD PTR[rdx] ; x - zbin
- lea rdx, [rdx + 2] ; zbin_boost_ptr++
- jl .rq_zigzag_loop_%1 ; x < zbin
-
- movsx edi, WORD PTR[rsp + temp_qcoeff + %1 * 2]
-
- ; downshift by quant_shift[rc]
- movsx cx, BYTE PTR[rax + %1] ; quant_shift_ptr[rc]
- sar edi, cl ; also sets Z bit
- je .rq_zigzag_loop_%1 ; !y
- mov WORD PTR[rsp + qcoeff + %1 * 2], di ;qcoeff_ptr[rc] = temp_qcoeff[rc]
- mov rdx, [rsp + zrun_zbin_boost] ; reset to b->zrun_zbin_boost
-.rq_zigzag_loop_%1:
-%endmacro
-; in vp8_default_zig_zag1d order: see vp8/common/entropy.c
-ZIGZAG_LOOP 0
-ZIGZAG_LOOP 1
-ZIGZAG_LOOP 4
-ZIGZAG_LOOP 8
-ZIGZAG_LOOP 5
-ZIGZAG_LOOP 2
-ZIGZAG_LOOP 3
-ZIGZAG_LOOP 6
-ZIGZAG_LOOP 9
-ZIGZAG_LOOP 12
-ZIGZAG_LOOP 13
-ZIGZAG_LOOP 10
-ZIGZAG_LOOP 7
-ZIGZAG_LOOP 11
-ZIGZAG_LOOP 14
-ZIGZAG_LOOP 15
-
- movdqa xmm2, [rsp + qcoeff]
- movdqa xmm3, [rsp + qcoeff + 16]
-
- mov rcx, [rsi + vp8_blockd_dequant] ; dequant_ptr
- mov rdi, [rsi + vp8_blockd_dqcoeff] ; dqcoeff_ptr
-
- ; y ^ sz
- pxor xmm2, xmm0
- pxor xmm3, xmm4
- ; x = (y ^ sz) - sz
- psubw xmm2, xmm0
- psubw xmm3, xmm4
-
- ; dequant
- movdqa xmm0, [rcx]
- movdqa xmm1, [rcx + 16]
-
- mov rcx, [rsi + vp8_blockd_qcoeff] ; qcoeff_ptr
-
- pmullw xmm0, xmm2
- pmullw xmm1, xmm3
-
- movdqa [rcx], xmm2 ; store qcoeff
- movdqa [rcx + 16], xmm3
- movdqa [rdi], xmm0 ; store dqcoeff
- movdqa [rdi + 16], xmm1
-
- mov rcx, [rsi + vp8_blockd_eob]
-
- ; select the last value (in zig_zag order) for EOB
- pcmpeqw xmm2, xmm6
- pcmpeqw xmm3, xmm6
- ; !
- pcmpeqw xmm6, xmm6
- pxor xmm2, xmm6
- pxor xmm3, xmm6
- ; mask inv_zig_zag
- pand xmm2, [GLOBAL(inv_zig_zag)]
- pand xmm3, [GLOBAL(inv_zig_zag + 16)]
- ; select the max value
- pmaxsw xmm2, xmm3
- pshufd xmm3, xmm2, 00001110b
- pmaxsw xmm2, xmm3
- pshuflw xmm3, xmm2, 00001110b
- pmaxsw xmm2, xmm3
- pshuflw xmm3, xmm2, 00000001b
- pmaxsw xmm2, xmm3
- movd eax, xmm2
- and eax, 0xff
-
- mov BYTE PTR [rcx], al ; store eob
-
- ; begin epilog
- add rsp, stack_size
- pop rsp
-%if ABI_IS_32BIT
- pop rsi
- pop rdi
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- pop rsi
- pop rdi
- %endif
-%endif
- RESTORE_GOT
- RESTORE_XMM
- pop rbp
- ret
-
-; void vp8_fast_quantize_b_sse2 | arg
-; (BLOCK *b, | 0
-; BLOCKD *d) | 1
-
-global sym(vp8_fast_quantize_b_sse2) PRIVATE
-sym(vp8_fast_quantize_b_sse2):
- push rbp
- mov rbp, rsp
- GET_GOT rbx
-
-%if ABI_IS_32BIT
- push rdi
- push rsi
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- push rdi
- push rsi
- %else
- ; these registers are used for passing arguments
- %endif
-%endif
-
- ; end prolog
-
-%if ABI_IS_32BIT
- mov rdi, arg(0) ; BLOCK *b
- mov rsi, arg(1) ; BLOCKD *d
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- mov rdi, rcx ; BLOCK *b
- mov rsi, rdx ; BLOCKD *d
- %else
- ;mov rdi, rdi ; BLOCK *b
- ;mov rsi, rsi ; BLOCKD *d
- %endif
-%endif
-
- mov rax, [rdi + vp8_block_coeff]
- mov rcx, [rdi + vp8_block_round]
- mov rdx, [rdi + vp8_block_quant_fast]
-
- ; z = coeff
- movdqa xmm0, [rax]
- movdqa xmm4, [rax + 16]
-
- ; dup z so we can save sz
- movdqa xmm1, xmm0
- movdqa xmm5, xmm4
-
- ; sz = z >> 15
- psraw xmm0, 15
- psraw xmm4, 15
-
- ; x = abs(z) = (z ^ sz) - sz
- pxor xmm1, xmm0
- pxor xmm5, xmm4
- psubw xmm1, xmm0
- psubw xmm5, xmm4
-
- ; x += round
- paddw xmm1, [rcx]
- paddw xmm5, [rcx + 16]
-
- mov rax, [rsi + vp8_blockd_qcoeff]
- mov rcx, [rsi + vp8_blockd_dequant]
- mov rdi, [rsi + vp8_blockd_dqcoeff]
-
- ; y = x * quant >> 16
- pmulhw xmm1, [rdx]
- pmulhw xmm5, [rdx + 16]
-
- ; x = (y ^ sz) - sz
- pxor xmm1, xmm0
- pxor xmm5, xmm4
- psubw xmm1, xmm0
- psubw xmm5, xmm4
-
- ; qcoeff = x
- movdqa [rax], xmm1
- movdqa [rax + 16], xmm5
-
- ; x * dequant
- movdqa xmm2, xmm1
- movdqa xmm3, xmm5
- pmullw xmm2, [rcx]
- pmullw xmm3, [rcx + 16]
-
- ; dqcoeff = x * dequant
- movdqa [rdi], xmm2
- movdqa [rdi + 16], xmm3
-
- pxor xmm4, xmm4 ;clear all bits
- pcmpeqw xmm1, xmm4
- pcmpeqw xmm5, xmm4
-
- pcmpeqw xmm4, xmm4 ;set all bits
- pxor xmm1, xmm4
- pxor xmm5, xmm4
-
- pand xmm1, [GLOBAL(inv_zig_zag)]
- pand xmm5, [GLOBAL(inv_zig_zag + 16)]
-
- pmaxsw xmm1, xmm5
-
- mov rcx, [rsi + vp8_blockd_eob]
-
- ; now down to 8
- pshufd xmm5, xmm1, 00001110b
-
- pmaxsw xmm1, xmm5
-
- ; only 4 left
- pshuflw xmm5, xmm1, 00001110b
-
- pmaxsw xmm1, xmm5
-
- ; okay, just 2!
- pshuflw xmm5, xmm1, 00000001b
-
- pmaxsw xmm1, xmm5
-
- movd eax, xmm1
- and eax, 0xff
-
- mov BYTE PTR [rcx], al ; store eob
-
- ; begin epilog
-%if ABI_IS_32BIT
- pop rsi
- pop rdi
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- pop rsi
- pop rdi
- %endif
-%endif
-
- RESTORE_GOT
- pop rbp
- ret
-
-SECTION_RODATA
-align 16
-inv_zig_zag:
- dw 0x0001, 0x0002, 0x0006, 0x0007
- dw 0x0003, 0x0005, 0x0008, 0x000d
- dw 0x0004, 0x0009, 0x000c, 0x000e
- dw 0x000a, 0x000b, 0x000f, 0x0010
diff --git a/libvpx/vp8/encoder/x86/quantize_sse2.c b/libvpx/vp8/encoder/x86/quantize_sse2.c
new file mode 100644
index 0000000..f495bf2
--- /dev/null
+++ b/libvpx/vp8/encoder/x86/quantize_sse2.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx_ports/x86.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/encoder/block.h"
+#include "vp8/common/entropy.h" /* vp8_default_inv_zig_zag */
+
+#include <mmintrin.h> /* MMX */
+#include <xmmintrin.h> /* SSE */
+#include <emmintrin.h> /* SSE2 */
+
+#define SELECT_EOB(i, z) \
+ do { \
+ short boost = *zbin_boost_ptr; \
+ int cmp = (x[z] < boost) | (y[z] == 0); \
+ zbin_boost_ptr++; \
+ if (cmp) \
+ goto select_eob_end_##i; \
+ qcoeff_ptr[z] = y[z]; \
+ eob = i; \
+ zbin_boost_ptr = b->zrun_zbin_boost; \
+ select_eob_end_##i:; \
+ } while (0)
+
+void vp8_regular_quantize_b_sse2(BLOCK *b, BLOCKD *d)
+{
+ char eob = 0;
+ short *zbin_boost_ptr = b->zrun_zbin_boost;
+ short *qcoeff_ptr = d->qcoeff;
+ DECLARE_ALIGNED_ARRAY(16, short, x, 16);
+ DECLARE_ALIGNED_ARRAY(16, short, y, 16);
+
+ __m128i sz0, x0, sz1, x1, y0, y1, x_minus_zbin0, x_minus_zbin1;
+ __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift));
+ __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8));
+ __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
+ __m128i z1 = _mm_load_si128((__m128i *)(b->coeff+8));
+ __m128i zbin_extra = _mm_cvtsi32_si128(b->zbin_extra);
+ __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));
+ __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));
+ __m128i round0 = _mm_load_si128((__m128i *)(b->round));
+ __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
+ __m128i quant0 = _mm_load_si128((__m128i *)(b->quant));
+ __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8));
+ __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
+ __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
+
+ vpx_memset(qcoeff_ptr, 0, 32);
+
+ /* Duplicate to all lanes. */
+ zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0);
+ zbin_extra = _mm_unpacklo_epi16(zbin_extra, zbin_extra);
+
+ /* Sign of z: z >> 15 */
+ sz0 = _mm_srai_epi16(z0, 15);
+ sz1 = _mm_srai_epi16(z1, 15);
+
+ /* x = abs(z): (z ^ sz) - sz */
+ x0 = _mm_xor_si128(z0, sz0);
+ x1 = _mm_xor_si128(z1, sz1);
+ x0 = _mm_sub_epi16(x0, sz0);
+ x1 = _mm_sub_epi16(x1, sz1);
+
+ /* zbin[] + zbin_extra */
+ zbin0 = _mm_add_epi16(zbin0, zbin_extra);
+ zbin1 = _mm_add_epi16(zbin1, zbin_extra);
+
+ /* In C x is compared to zbin where zbin = zbin[] + boost + extra. Rebalance
+ * the equation because boost is the only value which can change:
+ * x - (zbin[] + extra) >= boost */
+ x_minus_zbin0 = _mm_sub_epi16(x0, zbin0);
+ x_minus_zbin1 = _mm_sub_epi16(x1, zbin1);
+
+ _mm_store_si128((__m128i *)(x), x_minus_zbin0);
+ _mm_store_si128((__m128i *)(x + 8), x_minus_zbin1);
+
+ /* All the remaining calculations are valid whether they are done now with
+ * simd or later inside the loop one at a time. */
+ x0 = _mm_add_epi16(x0, round0);
+ x1 = _mm_add_epi16(x1, round1);
+
+ y0 = _mm_mulhi_epi16(x0, quant0);
+ y1 = _mm_mulhi_epi16(x1, quant1);
+
+ y0 = _mm_add_epi16(y0, x0);
+ y1 = _mm_add_epi16(y1, x1);
+
+ /* Instead of shifting each value independently we convert the scaling
+ * factor with 1 << (16 - shift) so we can use multiply/return high half. */
+ y0 = _mm_mulhi_epi16(y0, quant_shift0);
+ y1 = _mm_mulhi_epi16(y1, quant_shift1);
+
+ /* Return the sign: (y ^ sz) - sz */
+ y0 = _mm_xor_si128(y0, sz0);
+ y1 = _mm_xor_si128(y1, sz1);
+ y0 = _mm_sub_epi16(y0, sz0);
+ y1 = _mm_sub_epi16(y1, sz1);
+
+ _mm_store_si128((__m128i *)(y), y0);
+ _mm_store_si128((__m128i *)(y + 8), y1);
+
+ zbin_boost_ptr = b->zrun_zbin_boost;
+
+ /* The loop gets unrolled anyway. Avoid the vp8_default_zig_zag1d lookup. */
+ SELECT_EOB(1, 0);
+ SELECT_EOB(2, 1);
+ SELECT_EOB(3, 4);
+ SELECT_EOB(4, 8);
+ SELECT_EOB(5, 5);
+ SELECT_EOB(6, 2);
+ SELECT_EOB(7, 3);
+ SELECT_EOB(8, 6);
+ SELECT_EOB(9, 9);
+ SELECT_EOB(10, 12);
+ SELECT_EOB(11, 13);
+ SELECT_EOB(12, 10);
+ SELECT_EOB(13, 7);
+ SELECT_EOB(14, 11);
+ SELECT_EOB(15, 14);
+ SELECT_EOB(16, 15);
+
+ y0 = _mm_load_si128((__m128i *)(d->qcoeff));
+ y1 = _mm_load_si128((__m128i *)(d->qcoeff + 8));
+
+ /* dqcoeff = qcoeff * dequant */
+ y0 = _mm_mullo_epi16(y0, dequant0);
+ y1 = _mm_mullo_epi16(y1, dequant1);
+
+ _mm_store_si128((__m128i *)(d->dqcoeff), y0);
+ _mm_store_si128((__m128i *)(d->dqcoeff + 8), y1);
+
+ *d->eob = eob;
+}
+
+void vp8_fast_quantize_b_sse2(BLOCK *b, BLOCKD *d)
+{
+ __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
+ __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8));
+ __m128i round0 = _mm_load_si128((__m128i *)(b->round));
+ __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
+ __m128i quant_fast0 = _mm_load_si128((__m128i *)(b->quant_fast));
+ __m128i quant_fast1 = _mm_load_si128((__m128i *)(b->quant_fast + 8));
+ __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
+ __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
+ __m128i inv_zig_zag0 = _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag));
+ __m128i inv_zig_zag1 = _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag + 8));
+
+ __m128i sz0, sz1, x0, x1, y0, y1, xdq0, xdq1, zeros, ones;
+
+ /* sign of z: z >> 15 */
+ sz0 = _mm_srai_epi16(z0, 15);
+ sz1 = _mm_srai_epi16(z1, 15);
+
+ /* x = abs(z): (z ^ sz) - sz */
+ x0 = _mm_xor_si128(z0, sz0);
+ x1 = _mm_xor_si128(z1, sz1);
+ x0 = _mm_sub_epi16(x0, sz0);
+ x1 = _mm_sub_epi16(x1, sz1);
+
+ /* x += round */
+ x0 = _mm_add_epi16(x0, round0);
+ x1 = _mm_add_epi16(x1, round1);
+
+ /* y = (x * quant) >> 16 */
+ y0 = _mm_mulhi_epi16(x0, quant_fast0);
+ y1 = _mm_mulhi_epi16(x1, quant_fast1);
+
+ /* x = abs(y) = (y ^ sz) - sz */
+ y0 = _mm_xor_si128(y0, sz0);
+ y1 = _mm_xor_si128(y1, sz1);
+ x0 = _mm_sub_epi16(y0, sz0);
+ x1 = _mm_sub_epi16(y1, sz1);
+
+ /* qcoeff = x */
+ _mm_store_si128((__m128i *)(d->qcoeff), x0);
+ _mm_store_si128((__m128i *)(d->qcoeff + 8), x1);
+
+ /* x * dequant */
+ xdq0 = _mm_mullo_epi16(x0, dequant0);
+ xdq1 = _mm_mullo_epi16(x1, dequant1);
+
+ /* dqcoeff = x * dequant */
+ _mm_store_si128((__m128i *)(d->dqcoeff), xdq0);
+ _mm_store_si128((__m128i *)(d->dqcoeff + 8), xdq1);
+
+ /* build a mask for the zig zag */
+ zeros = _mm_setzero_si128();
+
+ x0 = _mm_cmpeq_epi16(x0, zeros);
+ x1 = _mm_cmpeq_epi16(x1, zeros);
+
+ ones = _mm_cmpeq_epi16(zeros, zeros);
+
+ x0 = _mm_xor_si128(x0, ones);
+ x1 = _mm_xor_si128(x1, ones);
+
+ x0 = _mm_and_si128(x0, inv_zig_zag0);
+ x1 = _mm_and_si128(x1, inv_zig_zag1);
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ /* now down to 8 */
+ x1 = _mm_shuffle_epi32(x0, 0xE); // 0b00001110
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ /* only 4 left */
+ x1 = _mm_shufflelo_epi16(x0, 0xE); // 0b00001110
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ /* okay, just 2! */
+ x1 = _mm_shufflelo_epi16(x0, 0x1); // 0b00000001
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ *d->eob = 0xFF & _mm_cvtsi128_si32(x0);
+}
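[Editor's note: for reference, a hedged scalar model of what the SSE2 intrinsics above compute, mirroring the BLOCK/BLOCKD field names used in the diff. The zigzag table stands in for vp8_default_zig_zag1d, and quant_shift is the pre-scaled 1 << (16 - shift) table mentioned in the comments.]

    #include <string.h>

    /* Scalar sketch of vp8_regular_quantize_b_sse2 above. */
    static void regular_quantize_b_sketch(BLOCK *b, BLOCKD *d,
                                          const int zigzag[16])
    {
        int i, eob = 0;
        const short *zbin_boost_ptr = b->zrun_zbin_boost;

        memset(d->qcoeff, 0, 32);

        for (i = 0; i < 16; i++)
        {
            int rc = zigzag[i];
            int z  = b->coeff[rc];
            int sz = (z < 0) ? -1 : 0;
            int x  = (z ^ sz) - sz;                        /* abs(z) */
            int zbin = b->zbin[rc] + *zbin_boost_ptr++ + b->zbin_extra;

            if (x >= zbin)
            {
                int xr = x + b->round[rc];
                int y  = (xr * b->quant[rc]) >> 16;
                y = ((y + xr) * b->quant_shift[rc]) >> 16; /* pre-scaled */
                if (y)
                {
                    d->qcoeff[rc]  = (short)((y ^ sz) - sz);
                    d->dqcoeff[rc] = (short)(d->qcoeff[rc] * d->dequant[rc]);
                    eob = i + 1;                         /* 1-based, as SELECT_EOB */
                    zbin_boost_ptr = b->zrun_zbin_boost; /* zero run ended */
                }
            }
        }
        *d->eob = (char)eob;
    }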
diff --git a/libvpx/vp8/encoder/x86/quantize_sse4.asm b/libvpx/vp8/encoder/x86/quantize_sse4.asm
index f0e5d40..dbd171b 100644
--- a/libvpx/vp8/encoder/x86/quantize_sse4.asm
+++ b/libvpx/vp8/encoder/x86/quantize_sse4.asm
@@ -9,7 +9,7 @@
%include "vpx_ports/x86_abi_support.asm"
-%include "asm_enc_offsets.asm"
+%include "vp8_asm_enc_offsets.asm"
; void vp8_regular_quantize_b_sse4 | arg
@@ -31,7 +31,7 @@ sym(vp8_regular_quantize_b_sse4):
%define stack_size 32
sub rsp, stack_size
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 8, u
push rdi
push rsi
@@ -43,7 +43,7 @@ sym(vp8_regular_quantize_b_sse4):
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
@@ -240,7 +240,7 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
pop rbp
%else
%undef xmm5
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
RESTORE_XMM
diff --git a/libvpx/vp8/encoder/x86/quantize_ssse3.asm b/libvpx/vp8/encoder/x86/quantize_ssse3.asm
index dd526f4..7b1dc11 100644
--- a/libvpx/vp8/encoder/x86/quantize_ssse3.asm
+++ b/libvpx/vp8/encoder/x86/quantize_ssse3.asm
@@ -9,7 +9,7 @@
%include "vpx_ports/x86_abi_support.asm"
-%include "asm_enc_offsets.asm"
+%include "vp8_asm_enc_offsets.asm"
; void vp8_fast_quantize_b_ssse3 | arg
@@ -27,7 +27,7 @@ sym(vp8_fast_quantize_b_ssse3):
push rdi
push rsi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
push rdi
push rsi
%endif
@@ -38,7 +38,7 @@ sym(vp8_fast_quantize_b_ssse3):
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
@@ -122,7 +122,7 @@ sym(vp8_fast_quantize_b_ssse3):
pop rsi
pop rdi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
%endif
diff --git a/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm b/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm
index ce9d983..bd92b39 100644
--- a/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm
+++ b/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm
@@ -50,7 +50,7 @@ sym(vp8_temporal_filter_apply_sse2):
; 0x8000 >> (16 - strength)
mov rdx, 16
sub rdx, arg(4) ; 16 - strength
- movd xmm4, rdx ; can't use rdx w/ shift
+ movq xmm4, rdx ; can't use rdx w/ shift
movdqa xmm5, [GLOBAL(_const_top_bit)]
psrlw xmm5, xmm4
movdqa [rsp + rounding_bit], xmm5
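[Editor's note: the movd-to-movq change above is needed because rdx is a 64-bit register, and movq is the mnemonic for moving a 64-bit general register into an XMM register. The constant being built is the temporal filter's rounding bit, which in scalar terms is simply:]

    /* Assuming 1 <= strength <= 16, as in vp8_temporal_filter_apply_sse2: */
    unsigned int strength = 6;                              /* example value */
    unsigned int rounding_bit = 0x8000u >> (16 - strength); /* 1u << (strength - 1) */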
diff --git a/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c b/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c
index da25f52..cf3d8ca 100644
--- a/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c
+++ b/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_ports/x86.h"
#include "vp8/encoder/block.h"
diff --git a/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c b/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c
index 68db815..3dfbee3 100644
--- a/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c
+++ b/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_ports/x86.h"
#include "vp8/encoder/block.h"
diff --git a/libvpx/vp8/vp8_common.mk b/libvpx/vp8/vp8_common.mk
index a328f46..f98eb31 100644
--- a/libvpx/vp8/vp8_common.mk
+++ b/libvpx/vp8/vp8_common.mk
@@ -14,7 +14,6 @@ VP8_COMMON_SRCS-yes += common/ppflags.h
VP8_COMMON_SRCS-yes += common/onyx.h
VP8_COMMON_SRCS-yes += common/onyxd.h
VP8_COMMON_SRCS-yes += common/alloccommon.c
-VP8_COMMON_SRCS-yes += common/asm_com_offsets.c
VP8_COMMON_SRCS-yes += common/blockd.c
VP8_COMMON_SRCS-yes += common/coefupdateprobs.h
VP8_COMMON_SRCS-yes += common/debugmodes.c
@@ -191,3 +190,5 @@ VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/variance_neon$(ASM)
VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp8_subpixelvariance8x8_neon$(ASM)
VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp8_subpixelvariance16x16_neon$(ASM)
VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp8_subpixelvariance16x16s_neon$(ASM)
+
+$(eval $(call rtcd_h_template,vp8_rtcd,vp8/common/rtcd_defs.sh))
diff --git a/libvpx/vp8/vp8_cx_iface.c b/libvpx/vp8/vp8_cx_iface.c
index eeac3a8..19e9d27 100644
--- a/libvpx/vp8/vp8_cx_iface.c
+++ b/libvpx/vp8/vp8_cx_iface.c
@@ -9,7 +9,7 @@
*/
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/vpx_codec.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "vpx_version.h"
@@ -153,7 +153,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
#else
RANGE_CHECK_HI(cfg, g_lag_in_frames, 25);
#endif
- RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_CQ);
+ RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
RANGE_CHECK_HI(cfg, rc_undershoot_pct, 1000);
RANGE_CHECK_HI(cfg, rc_overshoot_pct, 1000);
RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
@@ -204,7 +204,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
RANGE_CHECK_HI(vp8_cfg, arnr_strength, 6);
RANGE_CHECK(vp8_cfg, arnr_type, 1, 3);
RANGE_CHECK(vp8_cfg, cq_level, 0, 63);
- if(finalize && cfg->rc_end_usage == VPX_CQ)
+ if (finalize && (cfg->rc_end_usage == VPX_CQ || cfg->rc_end_usage == VPX_Q))
RANGE_CHECK(vp8_cfg, cq_level,
cfg->rc_min_quantizer, cfg->rc_max_quantizer);
@@ -327,17 +327,14 @@ static vpx_codec_err_t set_vp8e_config(VP8_CONFIG *oxcf,
oxcf->resample_up_water_mark = cfg.rc_resize_up_thresh;
oxcf->resample_down_water_mark = cfg.rc_resize_down_thresh;
- if (cfg.rc_end_usage == VPX_VBR)
- {
- oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK;
- }
- else if (cfg.rc_end_usage == VPX_CBR)
- {
- oxcf->end_usage = USAGE_STREAM_FROM_SERVER;
- }
- else if (cfg.rc_end_usage == VPX_CQ)
- {
- oxcf->end_usage = USAGE_CONSTRAINED_QUALITY;
+ if (cfg.rc_end_usage == VPX_VBR) {
+ oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK;
+ } else if (cfg.rc_end_usage == VPX_CBR) {
+ oxcf->end_usage = USAGE_STREAM_FROM_SERVER;
+ } else if (cfg.rc_end_usage == VPX_CQ) {
+ oxcf->end_usage = USAGE_CONSTRAINED_QUALITY;
+ } else if (cfg.rc_end_usage == VPX_Q) {
+ oxcf->end_usage = USAGE_CONSTANT_QUALITY;
}
oxcf->target_bandwidth = cfg.rc_target_bitrate;
@@ -582,7 +579,7 @@ static vpx_codec_err_t vp8e_init(vpx_codec_ctx_t *ctx,
struct VP8_COMP *optr;
- vpx_rtcd();
+ vp8_rtcd();
if (!ctx->priv)
{
@@ -684,6 +681,8 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
yv12->u_buffer = img->planes[VPX_PLANE_U];
yv12->v_buffer = img->planes[VPX_PLANE_V];
+ yv12->y_crop_width = img->d_w;
+ yv12->y_crop_height = img->d_h;
yv12->y_width = img->d_w;
yv12->y_height = img->d_h;
yv12->uv_width = (1 + yv12->y_width) / 2;
@@ -693,7 +692,6 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
yv12->uv_stride = img->stride[VPX_PLANE_U];
yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
- yv12->clrtype = (img->fmt == VPX_IMG_FMT_VPXI420 || img->fmt == VPX_IMG_FMT_VPXYV12);
return res;
}
@@ -1077,11 +1075,7 @@ static vpx_image_t *vp8e_get_preview(vpx_codec_alg_priv_t *ctx)
ctx->preview_img.planes[VPX_PLANE_U] = sd.u_buffer;
ctx->preview_img.planes[VPX_PLANE_V] = sd.v_buffer;
- if (sd.clrtype == REG_YUV)
- ctx->preview_img.fmt = VPX_IMG_FMT_I420;
- else
- ctx->preview_img.fmt = VPX_IMG_FMT_VPXI420;
-
+ ctx->preview_img.fmt = VPX_IMG_FMT_I420;
ctx->preview_img.x_chroma_shift = 1;
ctx->preview_img.y_chroma_shift = 1;
@@ -1178,7 +1172,9 @@ static vpx_codec_err_t vp8e_set_scalemode(vpx_codec_alg_priv_t *ctx,
{
int res;
vpx_scaling_mode_t scalemode = *(vpx_scaling_mode_t *)data ;
- res = vp8_set_internal_size(ctx->cpi, scalemode.h_scaling_mode, scalemode.v_scaling_mode);
+ res = vp8_set_internal_size(ctx->cpi,
+ (VPX_SCALING)scalemode.h_scaling_mode,
+ (VPX_SCALING)scalemode.v_scaling_mode);
if (!res)
{
@@ -1273,7 +1269,7 @@ static vpx_codec_enc_cfg_map_t vp8e_usage_cfg_map[] =
1, /* g_delete_first_pass_file */
"vp8.fpf" /* first pass filename */
#endif
-
+ VPX_SS_DEFAULT_LAYERS, /* ss_number_layers */
1, /* ts_number_layers */
{0}, /* ts_target_bitrate */
{0}, /* ts_rate_decimator */
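[Editor's note: the iface changes above add VPX_Q as a fourth rate-control mode, mapped to USAGE_CONSTANT_QUALITY and validated against the quantizer range. A hedged usage sketch follows; the control ID is from vpx/vp8cx.h, and the 4/56/20 values are arbitrary examples:]

    #include "vpx/vpx_encoder.h"
    #include "vpx/vp8cx.h"

    static int init_constant_quality_encoder(vpx_codec_ctx_t *codec,
                                             unsigned int w, unsigned int h)
    {
        vpx_codec_enc_cfg_t cfg;
        if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, 0))
            return -1;
        cfg.g_w = w;
        cfg.g_h = h;
        cfg.rc_end_usage = VPX_Q;       /* new mode: USAGE_CONSTANT_QUALITY */
        cfg.rc_min_quantizer = 4;
        cfg.rc_max_quantizer = 56;
        if (vpx_codec_enc_init(codec, vpx_codec_vp8_cx(), &cfg, 0))
            return -1;
        /* validate_config() above requires cq_level to fall inside
         * [rc_min_quantizer, rc_max_quantizer] for VPX_CQ and VPX_Q. */
        return vpx_codec_control(codec, VP8E_SET_CQ_LEVEL, 20);
    }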
diff --git a/libvpx/vp8/vp8_dx_iface.c b/libvpx/vp8/vp8_dx_iface.c
index c13d697..871b8d3 100644
--- a/libvpx/vp8/vp8_dx_iface.c
+++ b/libvpx/vp8/vp8_dx_iface.c
@@ -11,7 +11,7 @@
#include <stdlib.h>
#include <string.h>
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"
#include "vpx/internal/vpx_codec_internal.h"
@@ -41,15 +41,6 @@ typedef enum
static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si, vpx_codec_flags_t);
-typedef struct
-{
- unsigned int id;
- unsigned long sz;
- unsigned int align;
- unsigned int flags;
- unsigned long(*calc_sz)(const vpx_codec_dec_cfg_t *, vpx_codec_flags_t);
-} mem_req_t;
-
static const mem_req_t vp8_mem_req_segs[] =
{
{VP8_SEG_ALG_PRIV, 0, 8, VPX_CODEC_MEM_ZERO, vp8_priv_sz},
@@ -64,7 +55,6 @@ struct vpx_codec_alg_priv
vp8_stream_info_t si;
int defer_alloc;
int decoder_init;
- struct VP8D_COMP *pbi;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
#if CONFIG_POSTPROC_VISUALIZER
@@ -74,9 +64,13 @@ struct vpx_codec_alg_priv
int dbg_color_b_modes_flag;
int dbg_display_mv_flag;
#endif
+ vp8_decrypt_cb *decrypt_cb;
+ void *decrypt_state;
vpx_image_t img;
int img_setup;
+ struct frame_buffers yv12_frame_buffers;
void *user_priv;
+ FRAGMENT_DATA fragments;
};
static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si, vpx_codec_flags_t flags)
@@ -90,65 +84,6 @@ static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si, vpx_codec_flags_
return sizeof(vpx_codec_alg_priv_t);
}
-
-static void vp8_mmap_dtor(vpx_codec_mmap_t *mmap)
-{
- free(mmap->priv);
-}
-
-static vpx_codec_err_t vp8_mmap_alloc(vpx_codec_mmap_t *mmap)
-{
- vpx_codec_err_t res;
- unsigned int align;
-
- align = mmap->align ? mmap->align - 1 : 0;
-
- if (mmap->flags & VPX_CODEC_MEM_ZERO)
- mmap->priv = calloc(1, mmap->sz + align);
- else
- mmap->priv = malloc(mmap->sz + align);
-
- res = (mmap->priv) ? VPX_CODEC_OK : VPX_CODEC_MEM_ERROR;
- mmap->base = (void *)((((uintptr_t)mmap->priv) + align) & ~(uintptr_t)align);
- mmap->dtor = vp8_mmap_dtor;
- return res;
-}
-
-static vpx_codec_err_t vp8_validate_mmaps(const vp8_stream_info_t *si,
- const vpx_codec_mmap_t *mmaps,
- vpx_codec_flags_t init_flags)
-{
- int i;
- vpx_codec_err_t res = VPX_CODEC_OK;
-
- for (i = 0; i < NELEMENTS(vp8_mem_req_segs) - 1; i++)
- {
- /* Ensure the segment has been allocated */
- if (!mmaps[i].base)
- {
- res = VPX_CODEC_MEM_ERROR;
- break;
- }
-
- /* Verify variable size segment is big enough for the current si. */
- if (vp8_mem_req_segs[i].calc_sz)
- {
- vpx_codec_dec_cfg_t cfg;
-
- cfg.w = si->w;
- cfg.h = si->h;
-
- if (mmaps[i].sz < vp8_mem_req_segs[i].calc_sz(&cfg, init_flags))
- {
- res = VPX_CODEC_MEM_ERROR;
- break;
- }
- }
- }
-
- return res;
-}
-
static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap)
{
int i;
@@ -163,6 +98,8 @@ static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap)
ctx->priv->alg_priv->mmaps[0] = *mmap;
ctx->priv->alg_priv->si.sz = sizeof(ctx->priv->alg_priv->si);
+ ctx->priv->alg_priv->decrypt_cb = NULL;
+ ctx->priv->alg_priv->decrypt_state = NULL;
ctx->priv->init_flags = ctx->init_flags;
if (ctx->config.dec)
@@ -173,16 +110,6 @@ static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap)
}
}
-static void *mmap_lkup(vpx_codec_alg_priv_t *ctx, unsigned int id)
-{
- int i;
-
- for (i = 0; i < NELEMENTS(ctx->mmaps); i++)
- if (ctx->mmaps[i].id == id)
- return ctx->mmaps[i].base;
-
- return NULL;
-}
static void vp8_finalize_mmaps(vpx_codec_alg_priv_t *ctx)
{
/* nothing to clean up */
@@ -194,7 +121,7 @@ static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
vpx_codec_err_t res = VPX_CODEC_OK;
(void) data;
- vpx_rtcd();
+ vp8_rtcd();
/* This function only allocates space for the vpx_codec_alg_priv_t
* structure. More memory may be required at the time the stream
@@ -209,15 +136,38 @@ static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
mmap.align = vp8_mem_req_segs[0].align;
mmap.flags = vp8_mem_req_segs[0].flags;
- res = vp8_mmap_alloc(&mmap);
+ res = vpx_mmap_alloc(&mmap);
+ if (res != VPX_CODEC_OK) return res;
- if (!res)
- {
- vp8_init_ctx(ctx, &mmap);
+ vp8_init_ctx(ctx, &mmap);
- ctx->priv->alg_priv->defer_alloc = 1;
- /*post processing level initialized to do nothing */
- }
+ /* initialize number of fragments to zero */
+ ctx->priv->alg_priv->fragments.count = 0;
+ /* is input fragments enabled? */
+ ctx->priv->alg_priv->fragments.enabled =
+ (ctx->priv->alg_priv->base.init_flags &
+ VPX_CODEC_USE_INPUT_FRAGMENTS);
+
+ ctx->priv->alg_priv->defer_alloc = 1;
+ /*post processing level initialized to do nothing */
+ }
+
+ ctx->priv->alg_priv->yv12_frame_buffers.use_frame_threads =
+ (ctx->priv->alg_priv->base.init_flags &
+ VPX_CODEC_USE_FRAME_THREADING);
+
+ /* for now, disable frame threading */
+ ctx->priv->alg_priv->yv12_frame_buffers.use_frame_threads = 0;
+
+ if(ctx->priv->alg_priv->yv12_frame_buffers.use_frame_threads &&
+ (( ctx->priv->alg_priv->base.init_flags &
+ VPX_CODEC_USE_ERROR_CONCEALMENT)
+ || ( ctx->priv->alg_priv->base.init_flags &
+ VPX_CODEC_USE_INPUT_FRAGMENTS) ) )
+ {
+ /* row-based threading, error concealment, and input fragments will
+ * not be supported when using frame-based threading */
+ res = VPX_CODEC_INVALID_PARAM;
}
return res;
@@ -227,7 +177,7 @@ static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx)
{
int i;
- vp8dx_remove_decompressor(ctx->pbi);
+ vp8_remove_decoder_instances(&ctx->yv12_frame_buffers);
for (i = NELEMENTS(ctx->mmaps) - 1; i >= 0; i--)
{
@@ -238,14 +188,18 @@ static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx)
return VPX_CODEC_OK;
}
-static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si)
+static vpx_codec_err_t vp8_peek_si_internal(const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si,
+ vp8_decrypt_cb *decrypt_cb,
+ void *decrypt_state)
{
vpx_codec_err_t res = VPX_CODEC_OK;
if(data + data_sz <= data)
+ {
res = VPX_CODEC_INVALID_PARAM;
+ }
else
{
/* Parse uncompressed part of key frame header.
@@ -254,30 +208,44 @@ static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
* 4 bytes:- including image width and height in the lowest 14 bits
* of each 2-byte value.
*/
+ uint8_t clear_buffer[10];
+ const uint8_t *clear = data;
+ if (decrypt_cb)
+ {
+ int n = data_sz > 10 ? 10 : data_sz;
+ decrypt_cb(decrypt_state, data, clear_buffer, n);
+ clear = clear_buffer;
+ }
si->is_kf = 0;
- if (data_sz >= 10 && !(data[0] & 0x01)) /* I-Frame */
+ if (data_sz >= 10 && !(clear[0] & 0x01)) /* I-Frame */
{
- const uint8_t *c = data + 3;
si->is_kf = 1;
/* vet via sync code */
- if (c[0] != 0x9d || c[1] != 0x01 || c[2] != 0x2a)
+ if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a)
res = VPX_CODEC_UNSUP_BITSTREAM;
- si->w = (c[3] | (c[4] << 8)) & 0x3fff;
- si->h = (c[5] | (c[6] << 8)) & 0x3fff;
+ si->w = (clear[6] | (clear[7] << 8)) & 0x3fff;
+ si->h = (clear[8] | (clear[9] << 8)) & 0x3fff;
/*printf("w=%d, h=%d\n", si->w, si->h);*/
if (!(si->h | si->w))
res = VPX_CODEC_UNSUP_BITSTREAM;
}
else
+ {
res = VPX_CODEC_UNSUP_BITSTREAM;
+ }
}
return res;
+}
+static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si) {
+ return vp8_peek_si_internal(data, data_sz, si, NULL, NULL);
}
static vpx_codec_err_t vp8_get_si(vpx_codec_alg_priv_t *ctx,
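[Editor's note: the rewritten peek function above reads the whole 10-byte uncompressed header through one `clear` pointer so it can operate on a decrypted copy. A standalone sketch of the layout it parses, per the comments in the hunk:]

    #include <stdint.h>

    /* 3-byte frame tag, 3-byte sync code, then width/height in the low
     * 14 bits of two little-endian 16-bit values. */
    static int peek_vp8_keyframe_dims(const uint8_t *clear, unsigned int sz,
                                      unsigned int *w, unsigned int *h)
    {
        if (sz < 10 || (clear[0] & 0x01))      /* inter frame */
            return 0;
        if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a)
            return 0;                          /* bad sync code */
        *w = (clear[6] | (clear[7] << 8)) & 0x3fff;
        *h = (clear[8] | (clear[9] << 8)) & 0x3fff;
        return 1;
    }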
@@ -320,8 +288,7 @@ static void yuvconfig2image(vpx_image_t *img,
* the Y, U, and V planes, nor other alignment adjustments that
* might be representable by a YV12_BUFFER_CONFIG, so we just
* initialize all the fields.*/
- img->fmt = yv12->clrtype == REG_YUV ?
- VPX_IMG_FMT_I420 : VPX_IMG_FMT_VPXI420;
+ img->fmt = VPX_IMG_FMT_I420;
img->w = yv12->y_stride;
img->h = (yv12->y_height + 2 * VP8BORDERINPIXELS + 15) & ~15;
img->d_w = yv12->y_width;
@@ -343,6 +310,47 @@ static void yuvconfig2image(vpx_image_t *img,
img->self_allocd = 0;
}
+static int
+update_fragments(vpx_codec_alg_priv_t *ctx,
+ const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_err_t *res)
+{
+ *res = VPX_CODEC_OK;
+
+ if (ctx->fragments.count == 0)
+ {
+ /* New frame, reset fragment pointers and sizes */
+ vpx_memset((void*)ctx->fragments.ptrs, 0, sizeof(ctx->fragments.ptrs));
+ vpx_memset(ctx->fragments.sizes, 0, sizeof(ctx->fragments.sizes));
+ }
+ if (ctx->fragments.enabled && !(data == NULL && data_sz == 0))
+ {
+ /* Store a pointer to this fragment and return. We haven't
+             * received the complete frame yet, so decoding is deferred.
+ */
+ ctx->fragments.ptrs[ctx->fragments.count] = data;
+ ctx->fragments.sizes[ctx->fragments.count] = data_sz;
+ ctx->fragments.count++;
+ if (ctx->fragments.count > (1 << EIGHT_PARTITION) + 1)
+ {
+ ctx->fragments.count = 0;
+ *res = VPX_CODEC_INVALID_PARAM;
+ return -1;
+ }
+ return 0;
+ }
+
+ if (!ctx->fragments.enabled)
+ {
+ ctx->fragments.ptrs[0] = data;
+ ctx->fragments.sizes[0] = data_sz;
+ ctx->fragments.count = 1;
+ }
+
+ return 1;
+}
+
static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data,
unsigned int data_sz,
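[Editor's note: update_fragments() above buffers partition pointers until a sentinel call arrives. A hedged sketch of the calling convention it implies, assuming the decoder was initialized with VPX_CODEC_USE_INPUT_FRAGMENTS:]

    #include <stdint.h>
    #include "vpx/vpx_decoder.h"

    static vpx_codec_err_t decode_fragmented_frame(vpx_codec_ctx_t *decoder,
                                                   const uint8_t **frags,
                                                   const unsigned int *sizes,
                                                   int n_frags)
    {
        int i;
        for (i = 0; i < n_frags; i++) {
            vpx_codec_err_t res =
                vpx_codec_decode(decoder, frags[i], sizes[i], NULL, 0);
            if (res != VPX_CODEC_OK)
                return res;
        }
        /* data == NULL && data_sz == 0 tells update_fragments() the frame
         * is complete, so decoding actually runs on this call. */
        return vpx_codec_decode(decoder, NULL, 0, NULL, 0);
    }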
@@ -353,6 +361,11 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
unsigned int resolution_change = 0;
unsigned int w, h;
+
+ /* Update the input fragment data */
+ if(update_fragments(ctx, data, data_sz, &res) <= 0)
+ return res;
+
/* Determine the stream parameters. Note that we rely on peek_si to
* validate that we have a buffer that does not wrap around the top
* of the heap.
@@ -360,7 +373,8 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
w = ctx->si.w;
h = ctx->si.h;
- res = ctx->base.iface->dec.peek_si(data, data_sz, &ctx->si);
+ res = vp8_peek_si_internal(ctx->fragments.ptrs[0], ctx->fragments.sizes[0],
+ &ctx->si, ctx->decrypt_cb, ctx->decrypt_state);
if((res == VPX_CODEC_UNSUP_BITSTREAM) && !ctx->si.is_kf)
{
@@ -395,7 +409,7 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
ctx->mmaps[i].sz = vp8_mem_req_segs[i].calc_sz(&cfg,
ctx->base.init_flags);
- res = vp8_mmap_alloc(&ctx->mmaps[i]);
+ res = vpx_mmap_alloc(&ctx->mmaps[i]);
}
if (!res)
@@ -407,12 +421,13 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
/* Initialize the decoder instance on the first frame*/
if (!res && !ctx->decoder_init)
{
- res = vp8_validate_mmaps(&ctx->si, ctx->mmaps, ctx->base.init_flags);
+ res = vpx_validate_mmaps(&ctx->si, ctx->mmaps,
+ vp8_mem_req_segs, NELEMENTS(vp8_mem_req_segs),
+ ctx->base.init_flags);
if (!res)
{
VP8D_CONFIG oxcf;
- struct VP8D_COMP* optr;
oxcf.Width = ctx->si.w;
oxcf.Height = ctx->si.h;
@@ -421,10 +436,6 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
oxcf.max_threads = ctx->cfg.threads;
oxcf.error_concealment =
(ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
- oxcf.input_fragments =
- (ctx->base.init_flags & VPX_CODEC_USE_INPUT_FRAGMENTS);
-
- optr = vp8dx_create_decompressor(&oxcf);
/* If postprocessing was enabled by the application and a
* configuration has not been provided, default it.
@@ -438,20 +449,19 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
ctx->postproc_cfg.noise_level = 0;
}
- if (!optr)
- res = VPX_CODEC_ERROR;
- else
- ctx->pbi = optr;
+ res = vp8_create_decoder_instances(&ctx->yv12_frame_buffers, &oxcf);
+ ctx->yv12_frame_buffers.pbi[0]->decrypt_cb = ctx->decrypt_cb;
+ ctx->yv12_frame_buffers.pbi[0]->decrypt_state = ctx->decrypt_state;
}
ctx->decoder_init = 1;
}
- if (!res && ctx->pbi)
+ if (!res)
{
+ VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0];
if(resolution_change)
{
- VP8D_COMP *pbi = ctx->pbi;
VP8_COMMON *const pc = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
#if CONFIG_MULTITHREAD
@@ -541,15 +551,20 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
pbi->common.error.setjmp = 0;
/* required to get past the first get_free_fb() call */
- ctx->pbi->common.fb_idx_ref_cnt[0] = 0;
+ pbi->common.fb_idx_ref_cnt[0] = 0;
}
+ /* update the pbi fragment data */
+ pbi->fragments = ctx->fragments;
+
ctx->user_priv = user_priv;
- if (vp8dx_receive_compressed_data(ctx->pbi, data_sz, data, deadline))
+ if (vp8dx_receive_compressed_data(pbi, data_sz, data, deadline))
{
- VP8D_COMP *pbi = (VP8D_COMP *)ctx->pbi;
res = update_error_state(ctx, &pbi->common.error);
}
+
+ /* get ready for the next series of fragments */
+ ctx->fragments.count = 0;
}
return res;
@@ -563,7 +578,7 @@ static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx,
/* iter acts as a flip flop, so an image is only returned on the first
* call to get_frame.
*/
- if (!(*iter))
+ if (!(*iter) && ctx->yv12_frame_buffers.pbi[0])
{
YV12_BUFFER_CONFIG sd;
int64_t time_stamp = 0, time_end_stamp = 0;
@@ -590,7 +605,8 @@ static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx,
#endif
}
- if (0 == vp8dx_get_raw_frame(ctx->pbi, &sd, &time_stamp, &time_end_stamp, &flags))
+ if (0 == vp8dx_get_raw_frame(ctx->yv12_frame_buffers.pbi[0], &sd,
+ &time_stamp, &time_end_stamp, &flags))
{
yuvconfig2image(&ctx->img, &sd, ctx->user_priv);
@@ -693,6 +709,8 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
yv12->u_buffer = img->planes[VPX_PLANE_U];
yv12->v_buffer = img->planes[VPX_PLANE_V];
+ yv12->y_crop_width = img->d_w;
+ yv12->y_crop_height = img->d_h;
yv12->y_width = img->d_w;
yv12->y_height = img->d_h;
yv12->uv_width = yv12->y_width / 2;
@@ -702,8 +720,6 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
yv12->uv_stride = img->stride[VPX_PLANE_U];
yv12->border = (img->stride[VPX_PLANE_Y] - img->d_w) / 2;
- yv12->clrtype = (img->fmt == VPX_IMG_FMT_VPXI420 || img->fmt == VPX_IMG_FMT_VPXYV12);
-
return res;
}
@@ -715,14 +731,15 @@ static vpx_codec_err_t vp8_set_reference(vpx_codec_alg_priv_t *ctx,
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
- if (data)
+ if (data && !ctx->yv12_frame_buffers.use_frame_threads)
{
vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
- return vp8dx_set_reference(ctx->pbi, frame->frame_type, &sd);
+ return vp8dx_set_reference(ctx->yv12_frame_buffers.pbi[0],
+ frame->frame_type, &sd);
}
else
return VPX_CODEC_INVALID_PARAM;
@@ -736,14 +753,15 @@ static vpx_codec_err_t vp8_get_reference(vpx_codec_alg_priv_t *ctx,
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
- if (data)
+ if (data && !ctx->yv12_frame_buffers.use_frame_threads)
{
vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
- return vp8dx_get_reference(ctx->pbi, frame->frame_type, &sd);
+ return vp8dx_get_reference(ctx->yv12_frame_buffers.pbi[0],
+ frame->frame_type, &sd);
}
else
return VPX_CODEC_INVALID_PARAM;
@@ -799,10 +817,11 @@ static vpx_codec_err_t vp8_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
va_list args)
{
int *update_info = va_arg(args, int *);
- VP8D_COMP *pbi = (VP8D_COMP *)ctx->pbi;
- if (update_info)
+ if (update_info && !ctx->yv12_frame_buffers.use_frame_threads)
{
+ VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
+
*update_info = pbi->common.refresh_alt_ref_frame * (int) VP8_ALTR_FRAME
+ pbi->common.refresh_golden_frame * (int) VP8_GOLD_FRAME
+ pbi->common.refresh_last_frame * (int) VP8_LAST_FRAME;
@@ -819,11 +838,11 @@ static vpx_codec_err_t vp8_get_last_ref_frame(vpx_codec_alg_priv_t *ctx,
va_list args)
{
int *ref_info = va_arg(args, int *);
- VP8D_COMP *pbi = (VP8D_COMP *)ctx->pbi;
- VP8_COMMON *oci = &pbi->common;
- if (ref_info)
+ if (ref_info && !ctx->yv12_frame_buffers.use_frame_threads)
{
+ VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
+ VP8_COMMON *oci = &pbi->common;
*ref_info =
(vp8dx_references_buffer( oci, ALTREF_FRAME )?VP8_ALTR_FRAME:0) |
(vp8dx_references_buffer( oci, GOLDEN_FRAME )?VP8_GOLD_FRAME:0) |
@@ -841,10 +860,10 @@ static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
{
int *corrupted = va_arg(args, int *);
+ VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
- if (corrupted)
+ if (corrupted && pbi)
{
- VP8D_COMP *pbi = (VP8D_COMP *)ctx->pbi;
*corrupted = pbi->common.frame_to_show->corrupted;
return VPX_CODEC_OK;
@@ -854,6 +873,25 @@ static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
}
+static vpx_codec_err_t vp8_set_decryptor(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args)
+{
+ vp8_decrypt_init *init = va_arg(args, vp8_decrypt_init *);
+
+ if (init)
+ {
+ ctx->decrypt_cb = init->decrypt_cb;
+ ctx->decrypt_state = init->decrypt_state;
+ }
+ else
+ {
+ ctx->decrypt_cb = NULL;
+ ctx->decrypt_state = NULL;
+ }
+ return VPX_CODEC_OK;
+}
+
vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] =
{
{VP8_SET_REFERENCE, vp8_set_reference},
@@ -866,6 +904,7 @@ vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] =
{VP8D_GET_LAST_REF_UPDATES, vp8_get_last_ref_updates},
{VP8D_GET_FRAME_CORRUPTED, vp8_get_frame_corrupted},
{VP8D_GET_LAST_REF_USED, vp8_get_last_ref_frame},
+ {VP8D_SET_DECRYPTOR, vp8_set_decryptor},
{ -1, NULL},
};
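[Editor's note: with the new VP8D_SET_DECRYPTOR control registered above, an application hands the decoder a callback of the shape used by vp8_peek_si_internal(): (state, input, output, count). A minimal sketch; xor_decrypt and the 16-byte key are illustrative stand-ins, not a real cipher:]

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static void xor_decrypt(void *state, const unsigned char *in,
                            unsigned char *out, int count)
    {
        const unsigned char *key = (const unsigned char *)state;
        int i;
        for (i = 0; i < count; i++)
            out[i] = in[i] ^ key[i & 15];
    }

    static void attach_decryptor(vpx_codec_ctx_t *decoder,
                                 unsigned char *key16)
    {
        vp8_decrypt_init init;
        init.decrypt_cb = xor_decrypt;
        init.decrypt_state = key16;
        vpx_codec_control(decoder, VP8D_SET_DECRYPTOR, &init);
    }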
diff --git a/libvpx/vp8/vp8cx.mk b/libvpx/vp8/vp8cx.mk
index 0ae2f10..cd091f3 100644
--- a/libvpx/vp8/vp8cx.mk
+++ b/libvpx/vp8/vp8cx.mk
@@ -9,8 +9,6 @@
##
-include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8_common.mk
-
VP8_CX_EXPORTS += exports_enc
VP8_CX_SRCS-yes += $(VP8_COMMON_SRCS-yes)
@@ -26,7 +24,6 @@ VP8_CX_SRCS-yes += vp8cx.mk
VP8_CX_SRCS-yes += vp8_cx_iface.c
-VP8_CX_SRCS-yes += encoder/asm_enc_offsets.c
VP8_CX_SRCS-yes += encoder/defaultcoefcounts.h
VP8_CX_SRCS-yes += encoder/bitstream.c
VP8_CX_SRCS-yes += encoder/boolhuff.c
@@ -80,6 +77,7 @@ VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
VP8_CX_SRCS-yes += encoder/temporal_filter.c
VP8_CX_SRCS-$(CONFIG_MULTI_RES_ENCODING) += encoder/mr_dissim.c
VP8_CX_SRCS-$(CONFIG_MULTI_RES_ENCODING) += encoder/mr_dissim.h
+VP8_CX_SRCS-yes += encoder/vp8_asm_enc_offsets.c
ifeq ($(CONFIG_REALTIME_ONLY),yes)
VP8_CX_SRCS_REMOVE-yes += encoder/firstpass.c
@@ -91,13 +89,10 @@ VP8_CX_SRCS-$(HAVE_MMX) += encoder/x86/subtract_mmx.asm
VP8_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp8_enc_stubs_mmx.c
VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.asm
VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/fwalsh_sse2.asm
-VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.asm
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
ifeq ($(CONFIG_TEMPORAL_DENOISING),yes)
VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/denoising_sse2.c
-ifeq ($(HAVE_SSE2),yes)
-vp8/encoder/x86/denoising_sse2.c.o: CFLAGS += -msse2
-endif
endif
VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/subtract_sse2.asm
@@ -113,5 +108,7 @@ ifeq ($(CONFIG_REALTIME_ONLY),yes)
VP8_CX_SRCS_REMOVE-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
endif
-
VP8_CX_SRCS-yes := $(filter-out $(VP8_CX_SRCS_REMOVE-yes),$(VP8_CX_SRCS-yes))
+
+$(eval $(call asm_offsets_template,\
+ vp8_asm_enc_offsets.asm, $(VP8_PREFIX)encoder/vp8_asm_enc_offsets.c))
diff --git a/libvpx/vp8/vp8dx.mk b/libvpx/vp8/vp8dx.mk
index dd39190..4a8f467 100644
--- a/libvpx/vp8/vp8dx.mk
+++ b/libvpx/vp8/vp8dx.mk
@@ -9,8 +9,6 @@
##
-include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8_common.mk
-
VP8_DX_EXPORTS += exports_dec
VP8_DX_SRCS-yes += $(VP8_COMMON_SRCS-yes)
@@ -22,31 +20,6 @@ VP8_DX_SRCS-yes += vp8dx.mk
VP8_DX_SRCS-yes += vp8_dx_iface.c
-# common
-#define ARM
-#define DISABLE_THREAD
-
-#INCLUDES += algo/vpx_common/vpx_mem/include
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += decoder
-
-
-
-# decoder
-#define ARM
-#define DISABLE_THREAD
-
-#INCLUDES += algo/vpx_common/vpx_mem/include
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += decoder
-
-VP8_DX_SRCS-yes += decoder/asm_dec_offsets.c
VP8_DX_SRCS-yes += decoder/dboolhuff.c
VP8_DX_SRCS-yes += decoder/decodemv.c
VP8_DX_SRCS-yes += decoder/decodframe.c
diff --git a/libvpx/vp8_multi_resolution_encoder.c b/libvpx/vp8_multi_resolution_encoder.c
index eae36a4..4c29056 100644
--- a/libvpx/vp8_multi_resolution_encoder.c
+++ b/libvpx/vp8_multi_resolution_encoder.c
@@ -216,7 +216,7 @@ int main(int argc, char **argv)
* If target bitrate for highest-resolution level is set to 0,
* (i.e. target_bitrate[0]=0), we skip encoding at that level.
*/
- unsigned int target_bitrate[NUM_ENCODERS]={1400, 500, 100};
+ unsigned int target_bitrate[NUM_ENCODERS]={1000, 500, 100};
/* Enter the frame rate of the input video */
int framerate = 30;
/* Set down-sampling factor for each resolution level.
@@ -351,27 +351,26 @@ int main(int argc, char **argv)
if(vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, speed))
die_codec(&codec[i], "Failed to set cpu_used");
}
- /* Set static thresh for highest-resolution encoder. Set it to 1000 for
- * better performance. */
- {
- unsigned int static_thresh = 1000;
- if(vpx_codec_control(&codec[0], VP8E_SET_STATIC_THRESHOLD, static_thresh))
- die_codec(&codec[0], "Failed to set static threshold");
- }
- /* Set static thresh = 0 for other encoders for better quality */
- for ( i=1; i<NUM_ENCODERS; i++)
+
+ /* Set static threshold. */
+ for ( i=0; i<NUM_ENCODERS; i++)
{
- unsigned int static_thresh = 0;
+ unsigned int static_thresh = 1;
if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, static_thresh))
die_codec(&codec[i], "Failed to set static threshold");
}
+
/* Set NOISE_SENSITIVITY to do TEMPORAL_DENOISING */
- for ( i=0; i< NUM_ENCODERS; i++)
+ /* Enable denoising for the highest-resolution encoder. */
+ if(vpx_codec_control(&codec[0], VP8E_SET_NOISE_SENSITIVITY, 1))
+ die_codec(&codec[0], "Failed to set noise_sensitivity");
+ for ( i=1; i< NUM_ENCODERS; i++)
{
if(vpx_codec_control(&codec[i], VP8E_SET_NOISE_SENSITIVITY, 0))
die_codec(&codec[i], "Failed to set noise_sensitivity");
}
+
frame_avail = 1;
got_data = 0;
diff --git a/libvpx/vp9/common/arm/neon/vp9_avg_neon.asm b/libvpx/vp9/common/arm/neon/vp9_avg_neon.asm
new file mode 100644
index 0000000..7d24530
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_avg_neon.asm
@@ -0,0 +1,116 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_convolve_avg_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+|vp9_convolve_avg_neon| PROC
+ push {r4-r6, lr}
+ ldrd r4, r5, [sp, #32]
+ mov r6, r2
+
+ cmp r4, #32
+ bgt avg64
+ beq avg32
+ cmp r4, #8
+ bgt avg16
+ beq avg8
+ b avg4
+
+avg64
+ sub lr, r1, #32
+ sub r4, r3, #32
+avg64_h
+ pld [r0, r1, lsl #1]
+ vld1.8 {q0-q1}, [r0]!
+ vld1.8 {q2-q3}, [r0], lr
+ pld [r2, r3]
+ vld1.8 {q8-q9}, [r6@128]!
+ vld1.8 {q10-q11}, [r6@128], r4
+ vrhadd.u8 q0, q0, q8
+ vrhadd.u8 q1, q1, q9
+ vrhadd.u8 q2, q2, q10
+ vrhadd.u8 q3, q3, q11
+ vst1.8 {q0-q1}, [r2@128]!
+ vst1.8 {q2-q3}, [r2@128], r4
+ subs r5, r5, #1
+ bgt avg64_h
+ pop {r4-r6, pc}
+
+avg32
+ vld1.8 {q0-q1}, [r0], r1
+ vld1.8 {q2-q3}, [r0], r1
+ vld1.8 {q8-q9}, [r6@128], r3
+ vld1.8 {q10-q11}, [r6@128], r3
+ pld [r0]
+ vrhadd.u8 q0, q0, q8
+ pld [r0, r1]
+ vrhadd.u8 q1, q1, q9
+ pld [r6]
+ vrhadd.u8 q2, q2, q10
+ pld [r6, r3]
+ vrhadd.u8 q3, q3, q11
+ vst1.8 {q0-q1}, [r2@128], r3
+ vst1.8 {q2-q3}, [r2@128], r3
+ subs r5, r5, #2
+ bgt avg32
+ pop {r4-r6, pc}
+
+avg16
+ vld1.8 {q0}, [r0], r1
+ vld1.8 {q1}, [r0], r1
+ vld1.8 {q2}, [r6@128], r3
+ vld1.8 {q3}, [r6@128], r3
+ pld [r0]
+ pld [r0, r1]
+ vrhadd.u8 q0, q0, q2
+ pld [r6]
+ pld [r6, r3]
+ vrhadd.u8 q1, q1, q3
+ vst1.8 {q0}, [r2@128], r3
+ vst1.8 {q1}, [r2@128], r3
+ subs r5, r5, #2
+ bgt avg16
+ pop {r4-r6, pc}
+
+avg8
+ vld1.8 {d0}, [r0], r1
+ vld1.8 {d1}, [r0], r1
+ vld1.8 {d2}, [r6@64], r3
+ vld1.8 {d3}, [r6@64], r3
+ pld [r0]
+ pld [r0, r1]
+ vrhadd.u8 q0, q0, q1
+ pld [r6]
+ pld [r6, r3]
+ vst1.8 {d0}, [r2@64], r3
+ vst1.8 {d1}, [r2@64], r3
+ subs r5, r5, #2
+ bgt avg8
+ pop {r4-r6, pc}
+
+avg4
+ vld1.32 {d0[0]}, [r0], r1
+ vld1.32 {d0[1]}, [r0], r1
+ vld1.32 {d2[0]}, [r6@32], r3
+ vld1.32 {d2[1]}, [r6@32], r3
+ vrhadd.u8 d0, d0, d2
+ vst1.32 {d0[0]}, [r2@32], r3
+ vst1.32 {d0[1]}, [r2@32], r3
+ subs r5, r5, #2
+ bgt avg4
+ pop {r4-r6, pc}
+ ENDP
+
+ END
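[Editor's note: vrhadd.u8 in the kernel above is a lane-wise rounded average, so the whole function reduces to this scalar model:]

    #include <stdint.h>

    /* C sketch of vp9_convolve_avg_neon: average src into dst with
     * rounding, as vrhadd.u8 does per lane. */
    static void convolve_avg_sketch(const uint8_t *src, int src_stride,
                                    uint8_t *dst, int dst_stride,
                                    int w, int h)
    {
        int x, y;
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++)
                dst[x] = (uint8_t)((dst[x] + src[x] + 1) >> 1);
            src += src_stride;
            dst += dst_stride;
        }
    }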
diff --git a/libvpx/vp9/common/arm/neon/vp9_convolve8_avg_neon.asm b/libvpx/vp9/common/arm/neon/vp9_convolve8_avg_neon.asm
new file mode 100644
index 0000000..6b20cb9
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_convolve8_avg_neon.asm
@@ -0,0 +1,302 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+ ; These functions are only valid when:
+ ; x_step_q4 == 16
+ ; w%4 == 0
+ ; h%4 == 0
+ ; taps == 8
+ ; VP9_FILTER_WEIGHT == 128
+ ; VP9_FILTER_SHIFT == 7
+
+ EXPORT |vp9_convolve8_avg_horiz_neon|
+ EXPORT |vp9_convolve8_avg_vert_neon|
+ IMPORT |vp9_convolve8_avg_horiz_c|
+ IMPORT |vp9_convolve8_avg_vert_c|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+ ; Multiply and accumulate by q0
+ MACRO
+ MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
+ vmull.s16 $dst, $src0, d0[0]
+ vmlal.s16 $dst, $src1, d0[1]
+ vmlal.s16 $dst, $src2, d0[2]
+ vmlal.s16 $dst, $src3, d0[3]
+ vmlal.s16 $dst, $src4, d1[0]
+ vmlal.s16 $dst, $src5, d1[1]
+ vmlal.s16 $dst, $src6, d1[2]
+ vmlal.s16 $dst, $src7, d1[3]
+ MEND
+
+; r0 const uint8_t *src
+; r1 int src_stride
+; r2 uint8_t *dst
+; r3 int dst_stride
+; sp[]const int16_t *filter_x
+; sp[]int x_step_q4
+; sp[]const int16_t *filter_y ; unused
+; sp[]int y_step_q4 ; unused
+; sp[]int w
+; sp[]int h
+
+|vp9_convolve8_avg_horiz_neon| PROC
+ ldr r12, [sp, #4] ; x_step_q4
+ cmp r12, #16
+ bne vp9_convolve8_avg_horiz_c
+
+ push {r4-r10, lr}
+
+ sub r0, r0, #3 ; adjust for taps
+
+ ldr r5, [sp, #32] ; filter_x
+ ldr r6, [sp, #48] ; w
+ ldr r7, [sp, #52] ; h
+
+ vld1.s16 {q0}, [r5] ; filter_x
+
+ sub r8, r1, r1, lsl #2 ; -src_stride * 3
+ add r8, r8, #4 ; -src_stride * 3 + 4
+
+ sub r4, r3, r3, lsl #2 ; -dst_stride * 3
+ add r4, r4, #4 ; -dst_stride * 3 + 4
+
+ rsb r9, r6, r1, lsl #2 ; reset src for outer loop
+ sub r9, r9, #7
+ rsb r12, r6, r3, lsl #2 ; reset dst for outer loop
+
+ mov r10, r6 ; w loop counter
+
+loop_horiz_v
+ vld1.8 {d24}, [r0], r1
+ vld1.8 {d25}, [r0], r1
+ vld1.8 {d26}, [r0], r1
+ vld1.8 {d27}, [r0], r8
+
+ vtrn.16 q12, q13
+ vtrn.8 d24, d25
+ vtrn.8 d26, d27
+
+ pld [r0, r1, lsl #2]
+
+ vmovl.u8 q8, d24
+ vmovl.u8 q9, d25
+ vmovl.u8 q10, d26
+ vmovl.u8 q11, d27
+
+ ; save a few instructions in the inner loop
+ vswp d17, d18
+ vmov d23, d21
+
+ add r0, r0, #3
+
+loop_horiz
+ add r5, r0, #64
+
+ vld1.32 {d28[]}, [r0], r1
+ vld1.32 {d29[]}, [r0], r1
+ vld1.32 {d31[]}, [r0], r1
+ vld1.32 {d30[]}, [r0], r8
+
+ pld [r5]
+
+ vtrn.16 d28, d31
+ vtrn.16 d29, d30
+ vtrn.8 d28, d29
+ vtrn.8 d31, d30
+
+ pld [r5, r1]
+
+ ; extract to s16
+ vtrn.32 q14, q15
+ vmovl.u8 q12, d28
+ vmovl.u8 q13, d29
+
+ pld [r5, r1, lsl #1]
+
+ ; slightly out of order load to match the existing data
+ vld1.u32 {d6[0]}, [r2], r3
+ vld1.u32 {d7[0]}, [r2], r3
+ vld1.u32 {d6[1]}, [r2], r3
+ vld1.u32 {d7[1]}, [r2], r3
+
+ sub r2, r2, r3, lsl #2 ; reset for store
+
+ ; src[] * filter_x
+ MULTIPLY_BY_Q0 q1, d16, d17, d20, d22, d18, d19, d23, d24
+ MULTIPLY_BY_Q0 q2, d17, d20, d22, d18, d19, d23, d24, d26
+ MULTIPLY_BY_Q0 q14, d20, d22, d18, d19, d23, d24, d26, d27
+ MULTIPLY_BY_Q0 q15, d22, d18, d19, d23, d24, d26, d27, d25
+
+ pld [r5, -r8]
+
+ ; += 64 >> 7
+ vqrshrun.s32 d2, q1, #7
+ vqrshrun.s32 d3, q2, #7
+ vqrshrun.s32 d4, q14, #7
+ vqrshrun.s32 d5, q15, #7
+
+ ; saturate
+ vqmovn.u16 d2, q1
+ vqmovn.u16 d3, q2
+
+ ; transpose
+ vtrn.16 d2, d3
+ vtrn.32 d2, d3
+ vtrn.8 d2, d3
+
+ ; average the new value and the dst value
+ vrhadd.u8 q1, q1, q3
+
+ vst1.u32 {d2[0]}, [r2@32], r3
+ vst1.u32 {d3[0]}, [r2@32], r3
+ vst1.u32 {d2[1]}, [r2@32], r3
+ vst1.u32 {d3[1]}, [r2@32], r4
+
+ vmov q8, q9
+ vmov d20, d23
+ vmov q11, q12
+ vmov q9, q13
+
+ subs r6, r6, #4 ; w -= 4
+ bgt loop_horiz
+
+ ; outer loop
+ mov r6, r10 ; restore w counter
+ add r0, r0, r9 ; src += src_stride * 4 - w
+ add r2, r2, r12 ; dst += dst_stride * 4 - w
+ subs r7, r7, #4 ; h -= 4
+ bgt loop_horiz_v
+
+ pop {r4-r10, pc}
+
+ ENDP
+
+|vp9_convolve8_avg_vert_neon| PROC
+ ldr r12, [sp, #12]
+ cmp r12, #16
+ bne vp9_convolve8_avg_vert_c
+
+ push {r4-r8, lr}
+
+ ; adjust for taps
+ sub r0, r0, r1
+ sub r0, r0, r1, lsl #1
+
+ ldr r4, [sp, #32] ; filter_y
+ ldr r6, [sp, #40] ; w
+ ldr lr, [sp, #44] ; h
+
+ vld1.s16 {q0}, [r4] ; filter_y
+
+ lsl r1, r1, #1
+ lsl r3, r3, #1
+
+loop_vert_h
+ mov r4, r0
+ add r7, r0, r1, asr #1
+ mov r5, r2
+ add r8, r2, r3, asr #1
+ mov r12, lr ; h loop counter
+
+ vld1.u32 {d16[0]}, [r4], r1
+ vld1.u32 {d16[1]}, [r7], r1
+ vld1.u32 {d18[0]}, [r4], r1
+ vld1.u32 {d18[1]}, [r7], r1
+ vld1.u32 {d20[0]}, [r4], r1
+ vld1.u32 {d20[1]}, [r7], r1
+ vld1.u32 {d22[0]}, [r4], r1
+
+ vmovl.u8 q8, d16
+ vmovl.u8 q9, d18
+ vmovl.u8 q10, d20
+ vmovl.u8 q11, d22
+
+loop_vert
+ ; always process a 4x4 block at a time
+ vld1.u32 {d24[0]}, [r7], r1
+ vld1.u32 {d26[0]}, [r4], r1
+ vld1.u32 {d26[1]}, [r7], r1
+ vld1.u32 {d24[1]}, [r4], r1
+
+ ; extract to s16
+ vmovl.u8 q12, d24
+ vmovl.u8 q13, d26
+
+ vld1.u32 {d6[0]}, [r5@32], r3
+ vld1.u32 {d6[1]}, [r8@32], r3
+ vld1.u32 {d7[0]}, [r5@32], r3
+ vld1.u32 {d7[1]}, [r8@32], r3
+
+ pld [r7]
+ pld [r4]
+
+ ; src[] * filter_y
+ MULTIPLY_BY_Q0 q1, d16, d17, d18, d19, d20, d21, d22, d24
+
+ pld [r7, r1]
+ pld [r4, r1]
+
+ MULTIPLY_BY_Q0 q2, d17, d18, d19, d20, d21, d22, d24, d26
+
+ pld [r5]
+ pld [r8]
+
+ MULTIPLY_BY_Q0 q14, d18, d19, d20, d21, d22, d24, d26, d27
+
+ pld [r5, r3]
+ pld [r8, r3]
+
+ MULTIPLY_BY_Q0 q15, d19, d20, d21, d22, d24, d26, d27, d25
+
+ ; += 64 >> 7
+ vqrshrun.s32 d2, q1, #7
+ vqrshrun.s32 d3, q2, #7
+ vqrshrun.s32 d4, q14, #7
+ vqrshrun.s32 d5, q15, #7
+
+ ; saturate
+ vqmovn.u16 d2, q1
+ vqmovn.u16 d3, q2
+
+ ; average the new value and the dst value
+ vrhadd.u8 q1, q1, q3
+
+ sub r5, r5, r3, lsl #1 ; reset for store
+ sub r8, r8, r3, lsl #1
+
+ vst1.u32 {d2[0]}, [r5@32], r3
+ vst1.u32 {d2[1]}, [r8@32], r3
+ vst1.u32 {d3[0]}, [r5@32], r3
+ vst1.u32 {d3[1]}, [r8@32], r3
+
+ vmov q8, q10
+ vmov d18, d22
+ vmov d19, d24
+ vmov q10, q13
+ vmov d22, d25
+
+ subs r12, r12, #4 ; h -= 4
+ bgt loop_vert
+
+ ; outer loop
+ add r0, r0, #4
+ add r2, r2, #4
+ subs r6, r6, #4 ; w -= 4
+ bgt loop_vert_h
+
+ pop {r4-r8, pc}
+
+ ENDP
+ END
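[Editor's note: per the "+= 64 >> 7" and saturation comments, each output pixel in these kernels is an 8-tap filter in Q7 (VP9_FILTER_WEIGHT 128, VP9_FILTER_SHIFT 7). A scalar model of one pixel; the _avg variants above additionally round-average the result with the existing dst pixel, as in the convolve_avg sketch earlier:]

    #include <stdint.h>

    static uint8_t filter8_px(const uint8_t *src, const int16_t *filter)
    {
        int k, sum = 0;
        for (k = 0; k < 8; k++)
            sum += src[k] * filter[k];
        sum = (sum + 64) >> 7;        /* += 64 >> 7, as commented */
        if (sum < 0) sum = 0;
        if (sum > 255) sum = 255;     /* vqmovn.u16 saturation */
        return (uint8_t)sum;
    }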
diff --git a/libvpx/vp9/common/arm/neon/vp9_convolve8_neon.asm b/libvpx/vp9/common/arm/neon/vp9_convolve8_neon.asm
new file mode 100644
index 0000000..4525845
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_convolve8_neon.asm
@@ -0,0 +1,280 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+ ; These functions are only valid when:
+ ; x_step_q4 == 16
+ ; w%4 == 0
+ ; h%4 == 0
+ ; taps == 8
+ ; VP9_FILTER_WEIGHT == 128
+ ; VP9_FILTER_SHIFT == 7
+
+ EXPORT |vp9_convolve8_horiz_neon|
+ EXPORT |vp9_convolve8_vert_neon|
+ IMPORT |vp9_convolve8_horiz_c|
+ IMPORT |vp9_convolve8_vert_c|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+ ; Multiply and accumulate by q0
+ MACRO
+ MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
+ vmull.s16 $dst, $src0, d0[0]
+ vmlal.s16 $dst, $src1, d0[1]
+ vmlal.s16 $dst, $src2, d0[2]
+ vmlal.s16 $dst, $src3, d0[3]
+ vmlal.s16 $dst, $src4, d1[0]
+ vmlal.s16 $dst, $src5, d1[1]
+ vmlal.s16 $dst, $src6, d1[2]
+ vmlal.s16 $dst, $src7, d1[3]
+ MEND
+
+; r0 const uint8_t *src
+; r1 int src_stride
+; r2 uint8_t *dst
+; r3 int dst_stride
+; sp[]const int16_t *filter_x
+; sp[]int x_step_q4
+; sp[]const int16_t *filter_y ; unused
+; sp[]int y_step_q4 ; unused
+; sp[]int w
+; sp[]int h
+
+|vp9_convolve8_horiz_neon| PROC
+ ldr r12, [sp, #4] ; x_step_q4
+ cmp r12, #16
+ bne vp9_convolve8_horiz_c
+
+ push {r4-r10, lr}
+
+ sub r0, r0, #3 ; adjust for taps
+
+ ldr r5, [sp, #32] ; filter_x
+ ldr r6, [sp, #48] ; w
+ ldr r7, [sp, #52] ; h
+
+ vld1.s16 {q0}, [r5] ; filter_x
+
+ sub r8, r1, r1, lsl #2 ; -src_stride * 3
+ add r8, r8, #4 ; -src_stride * 3 + 4
+
+ sub r4, r3, r3, lsl #2 ; -dst_stride * 3
+ add r4, r4, #4 ; -dst_stride * 3 + 4
+
+ rsb r9, r6, r1, lsl #2 ; reset src for outer loop
+ sub r9, r9, #7
+ rsb r12, r6, r3, lsl #2 ; reset dst for outer loop
+
+ mov r10, r6 ; w loop counter
+
+loop_horiz_v
+ vld1.8 {d24}, [r0], r1
+ vld1.8 {d25}, [r0], r1
+ vld1.8 {d26}, [r0], r1
+ vld1.8 {d27}, [r0], r8
+
+ vtrn.16 q12, q13
+ vtrn.8 d24, d25
+ vtrn.8 d26, d27
+
+ pld [r0, r1, lsl #2]
+
+ vmovl.u8 q8, d24
+ vmovl.u8 q9, d25
+ vmovl.u8 q10, d26
+ vmovl.u8 q11, d27
+
+ ; save a few instructions in the inner loop
+ vswp d17, d18
+ vmov d23, d21
+
+ add r0, r0, #3
+
+loop_horiz
+ add r5, r0, #64
+
+ vld1.32 {d28[]}, [r0], r1
+ vld1.32 {d29[]}, [r0], r1
+ vld1.32 {d31[]}, [r0], r1
+ vld1.32 {d30[]}, [r0], r8
+
+ pld [r5]
+
+ vtrn.16 d28, d31
+ vtrn.16 d29, d30
+ vtrn.8 d28, d29
+ vtrn.8 d31, d30
+
+ pld [r5, r1]
+
+ ; extract to s16
+ vtrn.32 q14, q15
+ vmovl.u8 q12, d28
+ vmovl.u8 q13, d29
+
+ pld [r5, r1, lsl #1]
+
+ ; src[] * filter_x
+ MULTIPLY_BY_Q0 q1, d16, d17, d20, d22, d18, d19, d23, d24
+ MULTIPLY_BY_Q0 q2, d17, d20, d22, d18, d19, d23, d24, d26
+ MULTIPLY_BY_Q0 q14, d20, d22, d18, d19, d23, d24, d26, d27
+ MULTIPLY_BY_Q0 q15, d22, d18, d19, d23, d24, d26, d27, d25
+
+ pld [r5, -r8]
+
+ ; round and shift: (sum + 64) >> 7
+ vqrshrun.s32 d2, q1, #7
+ vqrshrun.s32 d3, q2, #7
+ vqrshrun.s32 d4, q14, #7
+ vqrshrun.s32 d5, q15, #7
+
+ ; saturate
+ vqmovn.u16 d2, q1
+ vqmovn.u16 d3, q2
+
+ ; transpose
+ vtrn.16 d2, d3
+ vtrn.32 d2, d3
+ vtrn.8 d2, d3
+
+ vst1.u32 {d2[0]}, [r2@32], r3
+ vst1.u32 {d3[0]}, [r2@32], r3
+ vst1.u32 {d2[1]}, [r2@32], r3
+ vst1.u32 {d3[1]}, [r2@32], r4
+
+ vmov q8, q9
+ vmov d20, d23
+ vmov q11, q12
+ vmov q9, q13
+
+ subs r6, r6, #4 ; w -= 4
+ bgt loop_horiz
+
+ ; outer loop
+ mov r6, r10 ; restore w counter
+ add r0, r0, r9 ; src += src_stride * 4 - w
+ add r2, r2, r12 ; dst += dst_stride * 4 - w
+ subs r7, r7, #4 ; h -= 4
+ bgt loop_horiz_v
+
+ pop {r4-r10, pc}
+
+ ENDP
+
+|vp9_convolve8_vert_neon| PROC
+ ldr r12, [sp, #12] ; y_step_q4
+ cmp r12, #16
+ bne vp9_convolve8_vert_c
+
+ push {r4-r8, lr}
+
+ ; adjust for taps
+ sub r0, r0, r1
+ sub r0, r0, r1, lsl #1
+
+ ldr r4, [sp, #32] ; filter_y
+ ldr r6, [sp, #40] ; w
+ ldr lr, [sp, #44] ; h
+
+ vld1.s16 {q0}, [r4] ; filter_y
+
+ lsl r1, r1, #1 ; double src_stride: two rows are handled per pointer
+ lsl r3, r3, #1 ; double dst_stride
+
+loop_vert_h
+ mov r4, r0
+ add r7, r0, r1, asr #1
+ mov r5, r2
+ add r8, r2, r3, asr #1
+ mov r12, lr ; h loop counter
+
+ vld1.u32 {d16[0]}, [r4], r1
+ vld1.u32 {d16[1]}, [r7], r1
+ vld1.u32 {d18[0]}, [r4], r1
+ vld1.u32 {d18[1]}, [r7], r1
+ vld1.u32 {d20[0]}, [r4], r1
+ vld1.u32 {d20[1]}, [r7], r1
+ vld1.u32 {d22[0]}, [r4], r1
+
+ vmovl.u8 q8, d16
+ vmovl.u8 q9, d18
+ vmovl.u8 q10, d20
+ vmovl.u8 q11, d22
+
+loop_vert
+ ; always process a 4x4 block at a time
+ vld1.u32 {d24[0]}, [r7], r1
+ vld1.u32 {d26[0]}, [r4], r1
+ vld1.u32 {d26[1]}, [r7], r1
+ vld1.u32 {d24[1]}, [r4], r1
+
+ ; extract to s16
+ vmovl.u8 q12, d24
+ vmovl.u8 q13, d26
+
+ pld [r5]
+ pld [r8]
+
+ ; src[] * filter_y
+ MULTIPLY_BY_Q0 q1, d16, d17, d18, d19, d20, d21, d22, d24
+
+ pld [r5, r3]
+ pld [r8, r3]
+
+ MULTIPLY_BY_Q0 q2, d17, d18, d19, d20, d21, d22, d24, d26
+
+ pld [r7]
+ pld [r4]
+
+ MULTIPLY_BY_Q0 q14, d18, d19, d20, d21, d22, d24, d26, d27
+
+ pld [r7, r1]
+ pld [r4, r1]
+
+ MULTIPLY_BY_Q0 q15, d19, d20, d21, d22, d24, d26, d27, d25
+
+ ; round and shift: (sum + 64) >> 7
+ vqrshrun.s32 d2, q1, #7
+ vqrshrun.s32 d3, q2, #7
+ vqrshrun.s32 d4, q14, #7
+ vqrshrun.s32 d5, q15, #7
+
+ ; saturate
+ vqmovn.u16 d2, q1
+ vqmovn.u16 d3, q2
+
+ vst1.u32 {d2[0]}, [r5@32], r3
+ vst1.u32 {d2[1]}, [r8@32], r3
+ vst1.u32 {d3[0]}, [r5@32], r3
+ vst1.u32 {d3[1]}, [r8@32], r3
+
+ vmov q8, q10
+ vmov d18, d22
+ vmov d19, d24
+ vmov q10, q13
+ vmov d22, d25
+
+ subs r12, r12, #4 ; h -= 4
+ bgt loop_vert
+
+ ; outer loop
+ add r0, r0, #4
+ add r2, r2, #4
+ subs r6, r6, #4 ; w -= 4
+ bgt loop_vert_h
+
+ pop {r4-r8, pc}
+
+ ENDP
+ END
diff --git a/libvpx/vp9/common/arm/neon/vp9_convolve_neon.c b/libvpx/vp9/common/arm/neon/vp9_convolve_neon.c
new file mode 100644
index 0000000..d8b24bf
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_convolve_neon.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+#include "vpx_ports/mem.h"
+
+void vp9_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ /* Given our constraints: w <= 64, h <= 64, taps == 8, we can reduce the
+ * maximum buffer size to 64 * (64 + 7) (rounded up one row so the height is
+ * divisible by 4), i.e. 64 * 72.
+ */
+ DECLARE_ALIGNED_ARRAY(8, uint8_t, temp, 64 * 72);
+
+ // Account for the vertical phase needing 3 lines prior and 4 lines post
+ int intermediate_height = h + 7;
+
+ if (x_step_q4 != 16 || y_step_q4 != 16)
+ return vp9_convolve8_c(src, src_stride,
+ dst, dst_stride,
+ filter_x, x_step_q4,
+ filter_y, y_step_q4,
+ w, h);
+
+ /* Filter starting 3 lines back. The NEON implementation will ignore the
+ * given height and filter a multiple of 4 lines. Since this goes into the
+ * temp buffer, which has lots of extra room and is subsequently discarded,
+ * this is safe if somewhat less than ideal.
+ */
+ vp9_convolve8_horiz_neon(src - src_stride * 3, src_stride,
+ temp, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, intermediate_height);
+
+ /* Step into the temp buffer 3 lines to get the actual frame data */
+ vp9_convolve8_vert_neon(temp + 64 * 3, 64,
+ dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+}
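+
+/* For reference, the two-pass flow above is equivalent to this scalar
+ * sketch (illustrative only; the helper names are placeholders, not
+ * upstream functions):
+ *
+ *   uint8_t temp[64 * 72];                  // 64 wide, h + 7 rows used
+ *   horiz_pass(src - 3 * src_stride, src_stride, temp, 64, w, h + 7);
+ *   vert_pass(temp + 64 * 3, 64, dst, dst_stride, w, h);
+ */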
+
+void vp9_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ DECLARE_ALIGNED_ARRAY(8, uint8_t, temp, 64 * 72);
+ int intermediate_height = h + 7;
+
+ if (x_step_q4 != 16 || y_step_q4 != 16)
+ return vp9_convolve8_avg_c(src, src_stride,
+ dst, dst_stride,
+ filter_x, x_step_q4,
+ filter_y, y_step_q4,
+ w, h);
+
+ /* This implementation has the same issues as above. In addition, we only want
+ * to average the values after both passes.
+ */
+ vp9_convolve8_horiz_neon(src - src_stride * 3, src_stride,
+ temp, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, intermediate_height);
+ vp9_convolve8_avg_vert_neon(temp + 64 * 3, 64,
+ dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+}
diff --git a/libvpx/vp9/common/arm/neon/vp9_copy_neon.asm b/libvpx/vp9/common/arm/neon/vp9_copy_neon.asm
new file mode 100644
index 0000000..a0bd04a
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_copy_neon.asm
@@ -0,0 +1,84 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_convolve_copy_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+|vp9_convolve_copy_neon| PROC
+ push {r4-r5, lr}
+ ldrd r4, r5, [sp, #28]
+
+ cmp r4, #32
+ bgt copy64
+ beq copy32
+ cmp r4, #8
+ bgt copy16
+ beq copy8
+ b copy4
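+
+ ; The branches above dispatch on block width (r4 = w, r5 = h, loaded from
+ ; the stack): w of 64, 32, 16, 8 or 4 each gets a dedicated copy loop.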
+
+copy64
+ sub lr, r1, #32
+ sub r3, r3, #32
+copy64_h
+ pld [r0, r1, lsl #1]
+ vld1.8 {q0-q1}, [r0]!
+ vld1.8 {q2-q3}, [r0], lr
+ vst1.8 {q0-q1}, [r2@128]!
+ vst1.8 {q2-q3}, [r2@128], r3
+ subs r5, r5, #1
+ bgt copy64_h
+ pop {r4-r5, pc}
+
+copy32
+ pld [r0, r1, lsl #1]
+ vld1.8 {q0-q1}, [r0], r1
+ pld [r0, r1, lsl #1]
+ vld1.8 {q2-q3}, [r0], r1
+ vst1.8 {q0-q1}, [r2@128], r3
+ vst1.8 {q2-q3}, [r2@128], r3
+ subs r5, r5, #2
+ bgt copy32
+ pop {r4-r5, pc}
+
+copy16
+ pld [r0, r1, lsl #1]
+ vld1.8 {q0}, [r0], r1
+ pld [r0, r1, lsl #1]
+ vld1.8 {q1}, [r0], r1
+ vst1.8 {q0}, [r2@128], r3
+ vst1.8 {q1}, [r2@128], r3
+ subs r5, r5, #2
+ bgt copy16
+ pop {r4-r5, pc}
+
+copy8
+ pld [r0, r1, lsl #1]
+ vld1.8 {d0}, [r0], r1
+ pld [r0, r1, lsl #1]
+ vld1.8 {d2}, [r0], r1
+ vst1.8 {d0}, [r2@64], r3
+ vst1.8 {d2}, [r2@64], r3
+ subs r5, r5, #2
+ bgt copy8
+ pop {r4-r5, pc}
+
+copy4
+ ldr r12, [r0], r1
+ str r12, [r2], r3
+ subs r5, r5, #1
+ bgt copy4
+ pop {r4-r5, pc}
+ ENDP
+
+ END
diff --git a/libvpx/vp9/common/arm/neon/vp9_dc_only_idct_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_dc_only_idct_add_neon.asm
new file mode 100644
index 0000000..60a0d98
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_dc_only_idct_add_neon.asm
@@ -0,0 +1,69 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license and patent
+; grant that can be found in the LICENSE file in the root of the source
+; tree. All contributing project authors may be found in the AUTHORS
+; file in the root of the source tree.
+;
+
+
+ EXPORT |vp9_dc_only_idct_add_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+;void vp9_dc_only_idct_add_neon(int input_dc, uint8_t *pred_ptr,
+; uint8_t *dst_ptr, int pitch, int stride)
+;
+; r0 int input_dc
+; r1 uint8_t *pred_ptr
+; r2 uint8_t *dst_ptr
+; r3 int pitch
+; sp int stride
+
+|vp9_dc_only_idct_add_neon| PROC
+
+ ; generate cospi_16_64 = 11585
+ mov r12, #0x2d00
+ add r12, #0x41
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ mul r0, r0, r12 ; input_dc * cospi_16_64
+ add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
+ asr r0, r0, #14 ; >> DCT_CONST_BITS
+
+ ; dct_const_round_shift(out * cospi_16_64)
+ mul r0, r0, r12 ; out * cospi_16_64
+ add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
+ asr r0, r0, #14 ; >> DCT_CONST_BITS
+
+ ; ROUND_POWER_OF_TWO(out, 4)
+ add r0, r0, #8 ; + (1 <<((4) - 1))
+ asr r0, r0, #4 ; >> 4
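+
+ ; Equivalently, in scalar form (a reference sketch; cospi_16_64 == 11585
+ ; and DCT_CONST_BITS == 14 in the C code):
+ ;   out = (input_dc * 11585 + (1 << 13)) >> 14
+ ;   out = (out * 11585 + (1 << 13)) >> 14
+ ;   a1  = (out + 8) >> 4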
+
+ vdup.16 q0, r0 ; duplicate a1
+ ldr r12, [sp] ; load stride
+
+ vld1.32 {d2[0]}, [r1], r3
+ vld1.32 {d2[1]}, [r1], r3
+ vld1.32 {d4[0]}, [r1], r3
+ vld1.32 {d4[1]}, [r1]
+
+ vaddw.u8 q1, q0, d2 ; a1 + pred_ptr[c]
+ vaddw.u8 q2, q0, d4
+
+ vqmovun.s16 d2, q1 ; clip_pixel
+ vqmovun.s16 d4, q2
+
+ vst1.32 {d2[0]}, [r2], r12
+ vst1.32 {d2[1]}, [r2], r12
+ vst1.32 {d4[0]}, [r2], r12
+ vst1.32 {d4[1]}, [r2]
+
+ bx lr
+ ENDP ; |vp9_dc_only_idct_add_neon|
+
+ END
diff --git a/libvpx/vp9/common/arm/neon/vp9_idct16x16_neon.c b/libvpx/vp9/common/arm/neon/vp9_idct16x16_neon.c
new file mode 100644
index 0000000..3e3e400
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_idct16x16_neon.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+
+extern void vp9_short_idct16x16_add_neon_pass1(int16_t *input,
+ int16_t *output,
+ int output_stride);
+extern void vp9_short_idct16x16_add_neon_pass2(int16_t *src,
+ int16_t *output,
+ int16_t *pass1Output,
+ int16_t skip_adding,
+ uint8_t *dest,
+ int dest_stride);
+extern void vp9_short_idct10_16x16_add_neon_pass1(int16_t *input,
+ int16_t *output,
+ int output_stride);
+extern void vp9_short_idct10_16x16_add_neon_pass2(int16_t *src,
+ int16_t *output,
+ int16_t *pass1Output,
+ int16_t skip_adding,
+ uint8_t *dest,
+ int dest_stride);
+extern void save_neon_registers(void);
+extern void restore_neon_registers(void);
+
+
+void vp9_short_idct16x16_add_neon(int16_t *input,
+ uint8_t *dest, int dest_stride) {
+ int16_t pass1_output[16*16] = {0};
+ int16_t row_idct_output[16*16] = {0};
+
+ // save d8-d15 register values.
+ save_neon_registers();
+
+ /* Parallel idct on the upper 8 rows */
+ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
+ // the stage 6 result in pass1_output.
+ vp9_short_idct16x16_add_neon_pass1(input, pass1_output, 8);
+
+ // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
+ // them with the pass 1 result (pass1_output) to calculate the final result
+ // of stage 7, which is saved into row_idct_output.
+ vp9_short_idct16x16_add_neon_pass2(input+1,
+ row_idct_output,
+ pass1_output,
+ 0,
+ dest,
+ dest_stride);
+
+ /* Parallel idct on the lower 8 rows */
+ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
+ // the stage 6 result in pass1_output.
+ vp9_short_idct16x16_add_neon_pass1(input+8*16, pass1_output, 8);
+
+ // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
+ // them with the pass 1 result (pass1_output) to calculate the final result
+ // of stage 7, which is saved into row_idct_output.
+ vp9_short_idct16x16_add_neon_pass2(input+8*16+1,
+ row_idct_output+8,
+ pass1_output,
+ 0,
+ dest,
+ dest_stride);
+
+ /* Parallel idct on the left 8 columns */
+ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
+ // the stage 6 result in pass1_output.
+ vp9_short_idct16x16_add_neon_pass1(row_idct_output, pass1_output, 8);
+
+ // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
+ // them with the pass 1 result (pass1_output) to calculate the final result
+ // of stage 7, then adds that result to the destination data.
+ vp9_short_idct16x16_add_neon_pass2(row_idct_output+1,
+ row_idct_output,
+ pass1_output,
+ 1,
+ dest,
+ dest_stride);
+
+ /* Parallel idct on the right 8 columns */
+ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
+ // the stage 6 result in pass1_output.
+ vp9_short_idct16x16_add_neon_pass1(row_idct_output+8*16, pass1_output, 8);
+
+ // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
+ // them with the pass 1 result (pass1_output) to calculate the final result
+ // of stage 7, then adds that result to the destination data.
+ vp9_short_idct16x16_add_neon_pass2(row_idct_output+8*16+1,
+ row_idct_output+8,
+ pass1_output,
+ 1,
+ dest+8,
+ dest_stride);
+
+ // restore d8-d15 register values.
+ restore_neon_registers();
+
+ return;
+}
+
+void vp9_short_idct10_16x16_add_neon(int16_t *input,
+ uint8_t *dest, int dest_stride) {
+ int16_t pass1_output[16*16] = {0};
+ int16_t row_idct_output[16*16] = {0};
+
+ // save d8-d15 register values.
+ save_neon_registers();
+
+ /* Parallel idct on the upper 8 rows */
+ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
+ // the stage 6 result in pass1_output.
+ vp9_short_idct10_16x16_add_neon_pass1(input, pass1_output, 8);
+
+ // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
+ // them with the pass 1 result (pass1_output) to calculate the final result
+ // of stage 7, which is saved into row_idct_output.
+ vp9_short_idct10_16x16_add_neon_pass2(input+1,
+ row_idct_output,
+ pass1_output,
+ 0,
+ dest,
+ dest_stride);
+
+ /* Skip the parallel idct on the lower 8 rows as they are all 0s. */
+
+ /* Parallel idct on the left 8 columns */
+ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
+ // the stage 6 result in pass1_output.
+ vp9_short_idct16x16_add_neon_pass1(row_idct_output, pass1_output, 8);
+
+ // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
+ // them with the pass 1 result (pass1_output) to calculate the final result
+ // of stage 7, then adds that result to the destination data.
+ vp9_short_idct16x16_add_neon_pass2(row_idct_output+1,
+ row_idct_output,
+ pass1_output,
+ 1,
+ dest,
+ dest_stride);
+
+ /* Parallel idct on the right 8 columns */
+ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and saves
+ // the stage 6 result in pass1_output.
+ vp9_short_idct16x16_add_neon_pass1(row_idct_output+8*16, pass1_output, 8);
+
+ // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
+ // them with the pass 1 result (pass1_output) to calculate the final result
+ // of stage 7, then adds that result to the destination data.
+ vp9_short_idct16x16_add_neon_pass2(row_idct_output+8*16+1,
+ row_idct_output+8,
+ pass1_output,
+ 1,
+ dest+8,
+ dest_stride);
+
+ // restore d8-d15 register values.
+ restore_neon_registers();
+
+ return;
+}
diff --git a/libvpx/vp9/common/arm/neon/vp9_idct32x32_neon.c b/libvpx/vp9/common/arm/neon/vp9_idct32x32_neon.c
new file mode 100644
index 0000000..ceecd6f
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_idct32x32_neon.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_common.h"
+
+// defined in vp9/common/arm/neon/vp9_short_idct32x32_add_neon.asm
+extern void idct32_transpose_and_transform(int16_t *transpose_buffer,
+ int16_t *output, int16_t *input);
+extern void idct32_combine_add(uint8_t *dest, int16_t *out, int dest_stride);
+
+
+// defined in vp9/common/arm/neon/vp9_short_idct16x16_add_neon.asm
+extern void save_neon_registers(void);
+extern void restore_neon_registers(void);
+
+void vp9_short_idct32x32_add_neon(int16_t *input, uint8_t *dest,
+ int dest_stride) {
+ // TODO(cd): move the creation of these buffers within the ASM file
+ // internal buffer into which 8 lines are transposed before being transformed
+ int16_t transpose_buffer[32 * 8];
+ // results of the first pass (transpose and transform rows)
+ int16_t pass1[32 * 32];
+ // results of the second pass (transpose and transform columns)
+ int16_t pass2[32 * 32];
+
+ // save register we need to preserve
+ save_neon_registers();
+ // process rows
+ idct32_transpose_and_transform(transpose_buffer, pass1, input);
+ // process columns
+ // TODO(cd): do these two steps/passes within the ASM file
+ idct32_transpose_and_transform(transpose_buffer, pass2, pass1);
+ // combine and add to dest
+ // TODO(cd): integrate this within the last storage step of the second pass
+ idct32_combine_add(dest, pass2, dest_stride);
+ // restore register we need to preserve
+ restore_neon_registers();
+}
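+
+// For reference (a sketch of the structure above, not additional upstream
+// code): the 2-D inverse transform is two identical 1-D passes with a
+// transpose in between, followed by a clipped add into the destination:
+//   pass1 = idct_rows(transpose(input))
+//   pass2 = idct_rows(transpose(pass1))
+//   dest[r][c] = clip_pixel(dest[r][c] + pass2[r][c])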
+
+// TODO(cd): Eliminate this file altogether when everything is in the ASM file.
diff --git a/libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.asm b/libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.asm
new file mode 100644
index 0000000..8b4fe5d
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.asm
@@ -0,0 +1,708 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_loop_filter_horizontal_edge_neon|
+ EXPORT |vp9_loop_filter_vertical_edge_neon|
+ EXPORT |vp9_mbloop_filter_horizontal_edge_neon|
+ EXPORT |vp9_mbloop_filter_vertical_edge_neon|
+ ARM
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; Currently vp9 only works on 8 iterations at a time. The vp8 loop filter
+; works on 16 iterations at a time.
+; TODO(fgalligan): See about removing the count code as this function is only
+; called with a count of 1.
+;
+; void vp9_loop_filter_horizontal_edge_neon(uint8_t *s,
+; int p /* pitch */,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+;
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_loop_filter_horizontal_edge_neon| PROC
+ push {lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #8] ; load count
+ ldr r2, [sp, #4] ; load thresh
+ add r1, r1, r1 ; double pitch
+
+ cmp r12, #0
+ beq end_vp9_lf_h_edge
+
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+ vld1.8 {d2[]}, [r2] ; duplicate *thresh
+
+count_lf_h_loop
+ sub r2, r0, r1, lsl #1 ; move src pointer down by 4 lines
+ add r3, r2, r1, lsr #1 ; set to 3 lines down
+
+ vld1.u8 {d3}, [r2@64], r1 ; p3
+ vld1.u8 {d4}, [r3@64], r1 ; p2
+ vld1.u8 {d5}, [r2@64], r1 ; p1
+ vld1.u8 {d6}, [r3@64], r1 ; p0
+ vld1.u8 {d7}, [r2@64], r1 ; q0
+ vld1.u8 {d16}, [r3@64], r1 ; q1
+ vld1.u8 {d17}, [r2@64] ; q2
+ vld1.u8 {d18}, [r3@64] ; q3
+
+ sub r2, r2, r1, lsl #1
+ sub r3, r3, r1, lsl #1
+
+ bl vp9_loop_filter_neon
+
+ vst1.u8 {d4}, [r2@64], r1 ; store op1
+ vst1.u8 {d5}, [r3@64], r1 ; store op0
+ vst1.u8 {d6}, [r2@64], r1 ; store oq0
+ vst1.u8 {d7}, [r3@64], r1 ; store oq1
+
+ add r0, r0, #8
+ subs r12, r12, #1
+ bne count_lf_h_loop
+
+end_vp9_lf_h_edge
+ pop {pc}
+ ENDP ; |vp9_loop_filter_horizontal_edge_neon|
+
+; Currently vp9 only works on 8 iterations at a time. The vp8 loop filter
+; works on 16 iterations at a time.
+; TODO(fgalligan): See about removing the count code as this function is only
+; called with a count of 1.
+;
+; void vp9_loop_filter_vertical_edge_neon(uint8_t *s,
+; int p /* pitch */,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+;
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_loop_filter_vertical_edge_neon| PROC
+ push {lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #8] ; load count
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+
+ ldr r3, [sp, #4] ; load thresh
+ sub r2, r0, #4 ; move s pointer down by 4 columns
+ cmp r12, #0
+ beq end_vp9_lf_v_edge
+
+ vld1.8 {d2[]}, [r3] ; duplicate *thresh
+
+count_lf_v_loop
+ vld1.u8 {d3}, [r2], r1 ; load s data
+ vld1.u8 {d4}, [r2], r1
+ vld1.u8 {d5}, [r2], r1
+ vld1.u8 {d6}, [r2], r1
+ vld1.u8 {d7}, [r2], r1
+ vld1.u8 {d16}, [r2], r1
+ vld1.u8 {d17}, [r2], r1
+ vld1.u8 {d18}, [r2]
+
+ ;transpose to 8x8 matrix
+ vtrn.32 d3, d7
+ vtrn.32 d4, d16
+ vtrn.32 d5, d17
+ vtrn.32 d6, d18
+
+ vtrn.16 d3, d5
+ vtrn.16 d4, d6
+ vtrn.16 d7, d17
+ vtrn.16 d16, d18
+
+ vtrn.8 d3, d4
+ vtrn.8 d5, d6
+ vtrn.8 d7, d16
+ vtrn.8 d17, d18
+
+ bl vp9_loop_filter_neon
+
+ sub r0, r0, #2
+
+ ;store op1, op0, oq0, oq1
+ vst4.8 {d4[0], d5[0], d6[0], d7[0]}, [r0], r1
+ vst4.8 {d4[1], d5[1], d6[1], d7[1]}, [r0], r1
+ vst4.8 {d4[2], d5[2], d6[2], d7[2]}, [r0], r1
+ vst4.8 {d4[3], d5[3], d6[3], d7[3]}, [r0], r1
+ vst4.8 {d4[4], d5[4], d6[4], d7[4]}, [r0], r1
+ vst4.8 {d4[5], d5[5], d6[5], d7[5]}, [r0], r1
+ vst4.8 {d4[6], d5[6], d6[6], d7[6]}, [r0], r1
+ vst4.8 {d4[7], d5[7], d6[7], d7[7]}, [r0]
+
+ add r0, r0, r1, lsl #3 ; s += pitch * 8
+ subs r12, r12, #1
+ subne r2, r0, #4 ; move s pointer down by 4 columns
+ bne count_lf_v_loop
+
+end_vp9_lf_v_edge
+ pop {pc}
+ ENDP ; |vp9_loop_filter_vertical_edge_neon|
+
+; void vp9_loop_filter_neon();
+; This is a helper function for the loopfilters. The individual functions do the
+; necessary load, transpose (if necessary) and store. The function does not use
+; registers d8-d15.
+;
+; Inputs:
+; r0-r3, r12 PRESERVE
+; d0 blimit
+; d1 limit
+; d2 thresh
+; d3 p3
+; d4 p2
+; d5 p1
+; d6 p0
+; d7 q0
+; d16 q1
+; d17 q2
+; d18 q3
+;
+; Outputs:
+; d4 op1
+; d5 op0
+; d6 oq0
+; d7 oq1
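+;
+; Scalar reference for the filter computed below (a sketch that mirrors the
+; vector code rather than defining it):
+;   filter  = clamp(ps1 - qs1) & hev
+;   filter  = clamp(filter + 3 * (qs0 - ps0)) & mask
+;   filter1 = clamp(filter + 4) >> 3,  filter2 = clamp(filter + 3) >> 3
+;   op0 = clamp(ps0 + filter2),  oq0 = clamp(qs0 - filter1)
+;   filter  = ((filter1 + 1) >> 1) & ~hev
+;   op1 = clamp(ps1 + filter),   oq1 = clamp(qs1 - filter)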
+|vp9_loop_filter_neon| PROC
+ ; filter_mask
+ vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
+ vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
+ vabd.u8 d21, d5, d6 ; m3 = abs(p1 - p0)
+ vabd.u8 d22, d16, d7 ; m4 = abs(q1 - q0)
+ vabd.u8 d3, d17, d16 ; m5 = abs(q2 - q1)
+ vabd.u8 d4, d18, d17 ; m6 = abs(q3 - q2)
+
+ ; only compare the largest value to limit
+ vmax.u8 d19, d19, d20 ; m1 = max(m1, m2)
+ vmax.u8 d20, d21, d22 ; m2 = max(m3, m4)
+
+ vabd.u8 d17, d6, d7 ; abs(p0 - q0)
+
+ vmax.u8 d3, d3, d4 ; m3 = max(m5, m6)
+
+ vmov.u8 d18, #0x80
+
+ vmax.u8 d23, d19, d20 ; m1 = max(m1, m2)
+
+ ; hevmask
+ vcgt.u8 d21, d21, d2 ; (abs(p1 - p0) > thresh)*-1
+ vcgt.u8 d22, d22, d2 ; (abs(q1 - q0) > thresh)*-1
+ vmax.u8 d23, d23, d3 ; m1 = max(m1, m3)
+
+ vabd.u8 d28, d5, d16 ; a = abs(p1 - q1)
+ vqadd.u8 d17, d17, d17 ; b = abs(p0 - q0) * 2
+
+ veor d7, d7, d18 ; qs0
+
+ vcge.u8 d23, d1, d23 ; abs(m1) > limit
+
+ ; filter() function
+ ; convert to signed
+
+ vshr.u8 d28, d28, #1 ; a = a / 2
+ veor d6, d6, d18 ; ps0
+
+ veor d5, d5, d18 ; ps1
+ vqadd.u8 d17, d17, d28 ; a = b + a
+
+ veor d16, d16, d18 ; qs1
+
+ vmov.u8 d19, #3
+
+ vsub.s8 d28, d7, d6 ; ( qs0 - ps0)
+
+ vcge.u8 d17, d0, d17 ; a > blimit
+
+ vqsub.s8 d27, d5, d16 ; filter = clamp(ps1-qs1)
+ vorr d22, d21, d22 ; hevmask
+
+ vmull.s8 q12, d28, d19 ; 3 * ( qs0 - ps0)
+
+ vand d27, d27, d22 ; filter &= hev
+ vand d23, d23, d17 ; filter_mask
+
+ vaddw.s8 q12, q12, d27 ; filter + 3 * (qs0 - ps0)
+
+ vmov.u8 d17, #4
+
+ ; filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d27, q12
+
+ vand d27, d27, d23 ; filter &= mask
+
+ vqadd.s8 d28, d27, d19 ; filter2 = clamp(filter+3)
+ vqadd.s8 d27, d27, d17 ; filter1 = clamp(filter+4)
+ vshr.s8 d28, d28, #3 ; filter2 >>= 3
+ vshr.s8 d27, d27, #3 ; filter1 >>= 3
+
+ vqadd.s8 d19, d6, d28 ; u = clamp(ps0 + filter2)
+ vqsub.s8 d26, d7, d27 ; u = clamp(qs0 - filter1)
+
+ ; outer tap adjustments
+ vrshr.s8 d27, d27, #1 ; filter = ++filter1 >> 1
+
+ veor d6, d26, d18 ; *oq0 = u^0x80
+
+ vbic d27, d27, d22 ; filter &= ~hev
+
+ vqadd.s8 d21, d5, d27 ; u = clamp(ps1 + filter)
+ vqsub.s8 d20, d16, d27 ; u = clamp(qs1 - filter)
+
+ veor d5, d19, d18 ; *op0 = u^0x80
+ veor d4, d21, d18 ; *op1 = u^0x80
+ veor d7, d20, d18 ; *oq1 = u^0x80
+
+ bx lr
+ ENDP ; |vp9_loop_filter_neon|
+
+; void vp9_mbloop_filter_horizontal_edge_neon(uint8_t *s, int p,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_mbloop_filter_horizontal_edge_neon| PROC
+ push {r4-r5, lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #16] ; load count
+ ldr r2, [sp, #12] ; load thresh
+ add r1, r1, r1 ; double pitch
+
+ cmp r12, #0
+ beq end_vp9_mblf_h_edge
+
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+ vld1.8 {d2[]}, [r2] ; duplicate *thresh
+
+count_mblf_h_loop
+ sub r3, r0, r1, lsl #1 ; move src pointer down by 4 lines
+ add r2, r3, r1, lsr #1 ; set to 3 lines down
+
+ vld1.u8 {d3}, [r3@64], r1 ; p3
+ vld1.u8 {d4}, [r2@64], r1 ; p2
+ vld1.u8 {d5}, [r3@64], r1 ; p1
+ vld1.u8 {d6}, [r2@64], r1 ; p0
+ vld1.u8 {d7}, [r3@64], r1 ; q0
+ vld1.u8 {d16}, [r2@64], r1 ; q1
+ vld1.u8 {d17}, [r3@64] ; q2
+ vld1.u8 {d18}, [r2@64], r1 ; q3
+
+ sub r3, r3, r1, lsl #1
+ sub r2, r2, r1, lsl #2
+
+ bl vp9_mbloop_filter_neon
+
+ vst1.u8 {d0}, [r2@64], r1 ; store op2
+ vst1.u8 {d1}, [r3@64], r1 ; store op1
+ vst1.u8 {d2}, [r2@64], r1 ; store op0
+ vst1.u8 {d3}, [r3@64], r1 ; store oq0
+ vst1.u8 {d4}, [r2@64], r1 ; store oq1
+ vst1.u8 {d5}, [r3@64], r1 ; store oq2
+
+ add r0, r0, #8
+ subs r12, r12, #1
+ bne count_mblf_h_loop
+
+end_vp9_mblf_h_edge
+ pop {r4-r5, pc}
+
+ ENDP ; |vp9_mbloop_filter_horizontal_edge_neon|
+
+; void vp9_mbloop_filter_vertical_edge_neon(uint8_t *s,
+; int pitch,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+;
+; r0 uint8_t *s,
+; r1 int pitch,
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_mbloop_filter_vertical_edge_neon| PROC
+ push {r4-r5, lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #16] ; load count
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+
+ ldr r3, [sp, #12] ; load thresh
+ sub r2, r0, #4 ; move s pointer down by 4 columns
+ cmp r12, #0
+ beq end_vp9_mblf_v_edge
+
+ vld1.8 {d2[]}, [r3] ; duplicate *thresh
+
+count_mblf_v_loop
+ vld1.u8 {d3}, [r2], r1 ; load s data
+ vld1.u8 {d4}, [r2], r1
+ vld1.u8 {d5}, [r2], r1
+ vld1.u8 {d6}, [r2], r1
+ vld1.u8 {d7}, [r2], r1
+ vld1.u8 {d16}, [r2], r1
+ vld1.u8 {d17}, [r2], r1
+ vld1.u8 {d18}, [r2]
+
+ ;transpose to 8x8 matrix
+ vtrn.32 d3, d7
+ vtrn.32 d4, d16
+ vtrn.32 d5, d17
+ vtrn.32 d6, d18
+
+ vtrn.16 d3, d5
+ vtrn.16 d4, d6
+ vtrn.16 d7, d17
+ vtrn.16 d16, d18
+
+ vtrn.8 d3, d4
+ vtrn.8 d5, d6
+ vtrn.8 d7, d16
+ vtrn.8 d17, d18
+
+ sub r2, r0, #3
+ add r3, r0, #1
+
+ bl vp9_mbloop_filter_neon
+
+ ;store op2, op1, op0, oq0
+ vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r2], r1
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r2], r1
+ vst4.8 {d0[2], d1[2], d2[2], d3[2]}, [r2], r1
+ vst4.8 {d0[3], d1[3], d2[3], d3[3]}, [r2], r1
+ vst4.8 {d0[4], d1[4], d2[4], d3[4]}, [r2], r1
+ vst4.8 {d0[5], d1[5], d2[5], d3[5]}, [r2], r1
+ vst4.8 {d0[6], d1[6], d2[6], d3[6]}, [r2], r1
+ vst4.8 {d0[7], d1[7], d2[7], d3[7]}, [r2]
+
+ ;store oq1, oq2
+ vst2.8 {d4[0], d5[0]}, [r3], r1
+ vst2.8 {d4[1], d5[1]}, [r3], r1
+ vst2.8 {d4[2], d5[2]}, [r3], r1
+ vst2.8 {d4[3], d5[3]}, [r3], r1
+ vst2.8 {d4[4], d5[4]}, [r3], r1
+ vst2.8 {d4[5], d5[5]}, [r3], r1
+ vst2.8 {d4[6], d5[6]}, [r3], r1
+ vst2.8 {d4[7], d5[7]}, [r3]
+
+ add r0, r0, r1, lsl #3 ; s += pitch * 8
+ subs r12, r12, #1
+ subne r2, r0, #4 ; move s pointer down by 4 columns
+ bne count_mblf_v_loop
+
+end_vp9_mblf_v_edge
+ pop {r4-r5, pc}
+ ENDP ; |vp9_mbloop_filter_vertical_edge_neon|
+
+; void vp9_mbloop_filter_neon();
+; This is a helper function for the loopfilters. The individual functions do the
+; necessary load, transpose (if necessary) and store. The function does not use
+; registers d8-d15.
+;
+; Inputs:
+; r0-r3, r12 PRESERVE
+; d0 blimit
+; d1 limit
+; d2 thresh
+; d3 p3
+; d4 p2
+; d5 p1
+; d6 p0
+; d7 q0
+; d16 q1
+; d17 q2
+; d18 q3
+;
+; Outputs:
+; d0 op2
+; d1 op1
+; d2 op0
+; d3 oq0
+; d4 oq1
+; d5 oq2
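+;
+; Reference for the flat ("power") branch below (a sketch only): each output
+; is a rounded 3-bit weighted average, e.g.
+;   op2 = ROUND_POWER_OF_TWO(p3 * 3 + p2 * 2 + p1 + p0 + q0, 3)
+; and each subsequent tap slides the window by subtracting the oldest
+; samples and adding the newest before rounding again.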
+|vp9_mbloop_filter_neon| PROC
+ ; filter_mask
+ vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
+ vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
+ vabd.u8 d21, d5, d6 ; m3 = abs(p1 - p0)
+ vabd.u8 d22, d16, d7 ; m4 = abs(q1 - q0)
+ vabd.u8 d23, d17, d16 ; m5 = abs(q2 - q1)
+ vabd.u8 d24, d18, d17 ; m6 = abs(q3 - q2)
+
+ ; only compare the largest value to limit
+ vmax.u8 d19, d19, d20 ; m1 = max(m1, m2)
+ vmax.u8 d20, d21, d22 ; m2 = max(m3, m4)
+
+ vabd.u8 d25, d6, d4 ; m7 = abs(p0 - p2)
+
+ vmax.u8 d23, d23, d24 ; m3 = max(m5, m6)
+
+ vabd.u8 d26, d7, d17 ; m8 = abs(q0 - q2)
+
+ vmax.u8 d19, d19, d20
+
+ vabd.u8 d24, d6, d7 ; m9 = abs(p0 - q0)
+ vabd.u8 d27, d3, d6 ; m10 = abs(p3 - p0)
+ vabd.u8 d28, d18, d7 ; m11 = abs(q3 - q0)
+
+ vmax.u8 d19, d19, d23
+
+ vabd.u8 d23, d5, d16 ; a = abs(p1 - q1)
+ vqadd.u8 d24, d24, d24 ; b = abs(p0 - q0) * 2
+
+ ; abs () > limit
+ vcge.u8 d19, d1, d19
+
+ ; only compare the largest value to thresh
+ vmax.u8 d25, d25, d26 ; m4 = max(m7, m8)
+ vmax.u8 d26, d27, d28 ; m5 = max(m10, m11)
+
+ vshr.u8 d23, d23, #1 ; a = a / 2
+
+ vmax.u8 d25, d25, d26 ; m4 = max(m4, m5)
+
+ vqadd.u8 d24, d24, d23 ; a = b + a
+
+ vmax.u8 d20, d20, d25 ; m2 = max(m2, m4)
+
+ vmov.u8 d23, #1
+ vcge.u8 d24, d0, d24 ; a > blimit
+
+ vcgt.u8 d21, d21, d2 ; (abs(p1 - p0) > thresh)*-1
+
+ vcge.u8 d20, d23, d20 ; flat
+
+ vand d19, d19, d24 ; mask
+
+ vcgt.u8 d23, d22, d2 ; (abs(q1 - q0) > thresh)*-1
+
+ vand d20, d20, d19 ; flat & mask
+
+ vmov.u8 d22, #0x80
+
+ vorr d23, d21, d23 ; hev
+
+ ; This instruction truncates the "flat & mask" masks down to 4 bits each
+ ; so that they fit into one 32-bit ARM register. The narrowed values land
+ ; in d30 and are moved into r4 below.
+ vshrn.u16 d30, q10, #4
+ vmov.u32 r4, d30[0] ; flat & mask 4bits
+
+ adds r5, r4, #1 ; Check for all 1's
+
+ ; If mask and flat are 1's for all vectors, then we only need to execute
+ ; the power branch for all vectors.
+ beq power_branch_only
+
+ cmp r4, #0 ; Check for 0, set flag for later
+
+ ; mbfilter() function
+ ; filter() function
+ ; convert to signed
+ veor d21, d7, d22 ; qs0
+ veor d24, d6, d22 ; ps0
+ veor d25, d5, d22 ; ps1
+ veor d26, d16, d22 ; qs1
+
+ vmov.u8 d27, #3
+
+ vsub.s8 d28, d21, d24 ; ( qs0 - ps0)
+
+ vqsub.s8 d29, d25, d26 ; filter = clamp(ps1-qs1)
+
+ vmull.s8 q15, d28, d27 ; 3 * ( qs0 - ps0)
+
+ vand d29, d29, d23 ; filter &= hev
+
+ vaddw.s8 q15, q15, d29 ; filter + 3 * (qs0 - ps0)
+
+ vmov.u8 d29, #4
+
+ ; filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d28, q15
+
+ vand d28, d28, d19 ; filter &= mask
+
+ vqadd.s8 d30, d28, d27 ; filter2 = clamp(filter+3)
+ vqadd.s8 d29, d28, d29 ; filter1 = clamp(filter+4)
+ vshr.s8 d30, d30, #3 ; filter2 >>= 3
+ vshr.s8 d29, d29, #3 ; filter1 >>= 3
+
+ vqadd.s8 d24, d24, d30 ; op0 = clamp(ps0 + filter2)
+ vqsub.s8 d21, d21, d29 ; oq0 = clamp(qs0 - filter1)
+
+ ; outer tap adjustments: ++filter1 >> 1
+ vrshr.s8 d29, d29, #1
+ vbic d29, d29, d23 ; filter &= ~hev
+
+ vqadd.s8 d25, d25, d29 ; op1 = clamp(ps1 + filter)
+ vqsub.s8 d26, d26, d29 ; oq1 = clamp(qs1 - filter)
+
+ ; If mask and flat are 0's for all vectors, then we only need to execute
+ ; the filter branch for all vectors.
+ beq filter_branch_only
+
+ ; If mask and flat are mixed then we must perform both branches and
+ ; combine the data.
+ veor d24, d24, d22 ; *f_op0 = u^0x80
+ veor d21, d21, d22 ; *f_oq0 = u^0x80
+ veor d25, d25, d22 ; *f_op1 = u^0x80
+ veor d26, d26, d22 ; *f_oq1 = u^0x80
+
+ ; At this point we have already executed the filter branch. The filter
+ ; branch does not set op2 or oq2, so use p2 and q2. Execute the power
+ ; branch and combine the data.
+ vmov.u8 d23, #2
+ vaddl.u8 q14, d6, d7 ; r_op2 = p0 + q0
+ vmlal.u8 q14, d3, d27 ; r_op2 += p3 * 3
+ vmlal.u8 q14, d4, d23 ; r_op2 += p2 * 2
+
+ vbif d0, d4, d20 ; op2 |= p2 & ~(flat & mask)
+
+ vaddw.u8 q14, d5 ; r_op2 += p1
+
+ vbif d1, d25, d20 ; op1 |= f_op1 & ~(flat & mask)
+
+ vqrshrn.u16 d30, q14, #3 ; r_op2
+
+ vsubw.u8 q14, d3 ; r_op1 = r_op2 - p3
+ vsubw.u8 q14, d4 ; r_op1 -= p2
+ vaddw.u8 q14, d5 ; r_op1 += p1
+ vaddw.u8 q14, d16 ; r_op1 += q1
+
+ vbif d2, d24, d20 ; op0 |= f_op0 & ~(flat & mask)
+
+ vqrshrn.u16 d31, q14, #3 ; r_op1
+
+ vsubw.u8 q14, d3 ; r_op0 = r_op1 - p3
+ vsubw.u8 q14, d5 ; r_op0 -= p1
+ vaddw.u8 q14, d6 ; r_op0 += p0
+ vaddw.u8 q14, d17 ; r_op0 += q2
+
+ vbit d0, d30, d20 ; op2 |= r_op2 & (flat & mask)
+
+ vqrshrn.u16 d23, q14, #3 ; r_op0
+
+ vsubw.u8 q14, d3 ; r_oq0 = r_op0 - p3
+ vsubw.u8 q14, d6 ; r_oq0 -= p0
+ vaddw.u8 q14, d7 ; r_oq0 += q0
+
+ vbit d1, d31, d20 ; op1 |= r_op1 & (flat & mask)
+
+ vaddw.u8 q14, d18 ; oq0 += q3
+
+ vbit d2, d23, d20 ; op0 |= r_op0 & (flat & mask)
+
+ vqrshrn.u16 d22, q14, #3 ; r_oq0
+
+ vsubw.u8 q14, d4 ; r_oq1 = r_oq0 - p2
+ vsubw.u8 q14, d7 ; r_oq1 -= q0
+ vaddw.u8 q14, d16 ; r_oq1 += q1
+
+ vbif d3, d21, d20 ; oq0 |= f_oq0 & ~(flat & mask)
+
+ vaddw.u8 q14, d18 ; r_oq1 += q3
+
+ vbif d4, d26, d20 ; oq1 |= f_oq1 & ~(flat & mask)
+
+ vqrshrn.u16 d6, q14, #3 ; r_oq1
+
+ vsubw.u8 q14, d5 ; r_oq2 = r_oq1 - p1
+ vsubw.u8 q14, d16 ; r_oq2 -= q1
+ vaddw.u8 q14, d17 ; r_oq2 += q2
+ vaddw.u8 q14, d18 ; r_oq2 += q3
+
+ vbif d5, d17, d20 ; oq2 |= q2 & ~(flat & mask)
+
+ vqrshrn.u16 d7, q14, #3 ; r_oq2
+
+ vbit d3, d22, d20 ; oq0 |= r_oq0 & (flat & mask)
+ vbit d4, d6, d20 ; oq1 |= r_oq1 & (flat & mask)
+ vbit d5, d7, d20 ; oq2 |= r_oq2 & (flat & mask)
+
+ bx lr
+
+power_branch_only
+ vmov.u8 d27, #3
+ vmov.u8 d21, #2
+ vaddl.u8 q14, d6, d7 ; op2 = p0 + q0
+ vmlal.u8 q14, d3, d27 ; op2 += p3 * 3
+ vmlal.u8 q14, d4, d21 ; op2 += p2 * 2
+ vaddw.u8 q14, d5 ; op2 += p1
+ vqrshrn.u16 d0, q14, #3 ; op2
+
+ vsubw.u8 q14, d3 ; op1 = op2 - p3
+ vsubw.u8 q14, d4 ; op1 -= p2
+ vaddw.u8 q14, d5 ; op1 += p1
+ vaddw.u8 q14, d16 ; op1 += q1
+ vqrshrn.u16 d1, q14, #3 ; op1
+
+ vsubw.u8 q14, d3 ; op0 = op1 - p3
+ vsubw.u8 q14, d5 ; op0 -= p1
+ vaddw.u8 q14, d6 ; op0 += p0
+ vaddw.u8 q14, d17 ; op0 += q2
+ vqrshrn.u16 d2, q14, #3 ; op0
+
+ vsubw.u8 q14, d3 ; oq0 = op0 - p3
+ vsubw.u8 q14, d6 ; oq0 -= p0
+ vaddw.u8 q14, d7 ; oq0 += q0
+ vaddw.u8 q14, d18 ; oq0 += q3
+ vqrshrn.u16 d3, q14, #3 ; oq0
+
+ vsubw.u8 q14, d4 ; oq1 = oq0 - p2
+ vsubw.u8 q14, d7 ; oq1 -= q0
+ vaddw.u8 q14, d16 ; oq1 += q1
+ vaddw.u8 q14, d18 ; oq1 += q3
+ vqrshrn.u16 d4, q14, #3 ; oq1
+
+ vsubw.u8 q14, d5 ; oq2 = oq1 - p1
+ vsubw.u8 q14, d16 ; oq2 -= q1
+ vaddw.u8 q14, d17 ; oq2 += q2
+ vaddw.u8 q14, d18 ; oq2 += q3
+ vqrshrn.u16 d5, q14, #3 ; oq2
+
+ bx lr
+
+filter_branch_only
+ ; TODO(fgalligan): See if we can rearrange registers so we do not need to
+ ; do the two vswp instructions.
+ vswp d0, d4 ; op2
+ vswp d5, d17 ; oq2
+ veor d2, d24, d22 ; *op0 = u^0x80
+ veor d3, d21, d22 ; *oq0 = u^0x80
+ veor d1, d25, d22 ; *op1 = u^0x80
+ veor d4, d26, d22 ; *oq1 = u^0x80
+
+ bx lr
+
+ ENDP ; |vp9_mbloop_filter_neon|
+
+ END
diff --git a/libvpx/vp9/common/arm/neon/vp9_mb_lpf_neon.asm b/libvpx/vp9/common/arm/neon/vp9_mb_lpf_neon.asm
new file mode 100644
index 0000000..2e8001b
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_mb_lpf_neon.asm
@@ -0,0 +1,603 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_mb_lpf_horizontal_edge_w_neon|
+ EXPORT |vp9_mb_lpf_vertical_edge_w_neon|
+ ARM
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; void vp9_mb_lpf_horizontal_edge_w_neon(uint8_t *s, int p,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_mb_lpf_horizontal_edge_w_neon| PROC
+ push {r4-r8, lr}
+ vpush {d8-d15}
+ ldr r4, [sp, #88] ; load thresh
+ ldr r12, [sp, #92] ; load count
+
+h_count
+ vld1.8 {d16[]}, [r2] ; load *blimit
+ vld1.8 {d17[]}, [r3] ; load *limit
+ vld1.8 {d18[]}, [r4] ; load *thresh
+
+ sub r8, r0, r1, lsl #3 ; move src pointer down by 8 lines
+
+ vld1.u8 {d0}, [r8@64], r1 ; p7
+ vld1.u8 {d1}, [r8@64], r1 ; p6
+ vld1.u8 {d2}, [r8@64], r1 ; p5
+ vld1.u8 {d3}, [r8@64], r1 ; p4
+ vld1.u8 {d4}, [r8@64], r1 ; p3
+ vld1.u8 {d5}, [r8@64], r1 ; p2
+ vld1.u8 {d6}, [r8@64], r1 ; p1
+ vld1.u8 {d7}, [r8@64], r1 ; p0
+ vld1.u8 {d8}, [r8@64], r1 ; q0
+ vld1.u8 {d9}, [r8@64], r1 ; q1
+ vld1.u8 {d10}, [r8@64], r1 ; q2
+ vld1.u8 {d11}, [r8@64], r1 ; q3
+ vld1.u8 {d12}, [r8@64], r1 ; q4
+ vld1.u8 {d13}, [r8@64], r1 ; q5
+ vld1.u8 {d14}, [r8@64], r1 ; q6
+ vld1.u8 {d15}, [r8@64], r1 ; q7
+
+ bl vp9_wide_mbfilter_neon
+
+ tst r7, #1
+ beq h_mbfilter
+
+ ; flat && mask were not set for any of the channels. Just store the values
+ ; from filter.
+ sub r8, r0, r1, lsl #1
+
+ vst1.u8 {d25}, [r8@64], r1 ; store op1
+ vst1.u8 {d24}, [r8@64], r1 ; store op0
+ vst1.u8 {d23}, [r8@64], r1 ; store oq0
+ vst1.u8 {d26}, [r8@64], r1 ; store oq1
+
+ b h_next
+
+h_mbfilter
+ tst r7, #2
+ beq h_wide_mbfilter
+
+ ; flat2 was not set for any of the channels. Just store the values from
+ ; mbfilter.
+ sub r8, r0, r1, lsl #1
+ sub r8, r8, r1
+
+ vst1.u8 {d18}, [r8@64], r1 ; store op2
+ vst1.u8 {d19}, [r8@64], r1 ; store op1
+ vst1.u8 {d20}, [r8@64], r1 ; store op0
+ vst1.u8 {d21}, [r8@64], r1 ; store oq0
+ vst1.u8 {d22}, [r8@64], r1 ; store oq1
+ vst1.u8 {d23}, [r8@64], r1 ; store oq2
+
+ b h_next
+
+h_wide_mbfilter
+ sub r8, r0, r1, lsl #3
+ add r8, r8, r1
+
+ vst1.u8 {d16}, [r8@64], r1 ; store op6
+ vst1.u8 {d24}, [r8@64], r1 ; store op5
+ vst1.u8 {d25}, [r8@64], r1 ; store op4
+ vst1.u8 {d26}, [r8@64], r1 ; store op3
+ vst1.u8 {d27}, [r8@64], r1 ; store op2
+ vst1.u8 {d18}, [r8@64], r1 ; store op1
+ vst1.u8 {d19}, [r8@64], r1 ; store op0
+ vst1.u8 {d20}, [r8@64], r1 ; store oq0
+ vst1.u8 {d21}, [r8@64], r1 ; store oq1
+ vst1.u8 {d22}, [r8@64], r1 ; store oq2
+ vst1.u8 {d23}, [r8@64], r1 ; store oq3
+ vst1.u8 {d1}, [r8@64], r1 ; store oq4
+ vst1.u8 {d2}, [r8@64], r1 ; store oq5
+ vst1.u8 {d3}, [r8@64], r1 ; store oq6
+
+h_next
+ add r0, r0, #8
+ subs r12, r12, #1
+ bne h_count
+
+ vpop {d8-d15}
+ pop {r4-r8, pc}
+
+ ENDP ; |vp9_mb_lpf_horizontal_edge_w_neon|
+
+; void vp9_mb_lpf_vertical_edge_w_neon(uint8_t *s, int p,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh)
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+|vp9_mb_lpf_vertical_edge_w_neon| PROC
+ push {r4-r8, lr}
+ vpush {d8-d15}
+ ldr r4, [sp, #88] ; load thresh
+
+ vld1.8 {d16[]}, [r2] ; load *blimit
+ vld1.8 {d17[]}, [r3] ; load *limit
+ vld1.8 {d18[]}, [r4] ; load *thresh
+
+ sub r8, r0, #8
+
+ vld1.8 {d0}, [r8@64], r1
+ vld1.8 {d8}, [r0@64], r1
+ vld1.8 {d1}, [r8@64], r1
+ vld1.8 {d9}, [r0@64], r1
+ vld1.8 {d2}, [r8@64], r1
+ vld1.8 {d10}, [r0@64], r1
+ vld1.8 {d3}, [r8@64], r1
+ vld1.8 {d11}, [r0@64], r1
+ vld1.8 {d4}, [r8@64], r1
+ vld1.8 {d12}, [r0@64], r1
+ vld1.8 {d5}, [r8@64], r1
+ vld1.8 {d13}, [r0@64], r1
+ vld1.8 {d6}, [r8@64], r1
+ vld1.8 {d14}, [r0@64], r1
+ vld1.8 {d7}, [r8@64], r1
+ vld1.8 {d15}, [r0@64], r1
+
+ sub r0, r0, r1, lsl #3
+
+ vtrn.32 q0, q2
+ vtrn.32 q1, q3
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+
+ vtrn.16 q0, q1
+ vtrn.16 q2, q3
+ vtrn.16 q4, q5
+ vtrn.16 q6, q7
+
+ vtrn.8 d0, d1
+ vtrn.8 d2, d3
+ vtrn.8 d4, d5
+ vtrn.8 d6, d7
+
+ vtrn.8 d8, d9
+ vtrn.8 d10, d11
+ vtrn.8 d12, d13
+ vtrn.8 d14, d15
+
+ bl vp9_wide_mbfilter_neon
+
+ tst r7, #1
+ beq v_mbfilter
+
+ ; flat && mask were not set for any of the channels. Just store the values
+ ; from filter.
+ sub r8, r0, #2
+
+ vswp d23, d25
+
+ vst4.8 {d23[0], d24[0], d25[0], d26[0]}, [r8], r1
+ vst4.8 {d23[1], d24[1], d25[1], d26[1]}, [r8], r1
+ vst4.8 {d23[2], d24[2], d25[2], d26[2]}, [r8], r1
+ vst4.8 {d23[3], d24[3], d25[3], d26[3]}, [r8], r1
+ vst4.8 {d23[4], d24[4], d25[4], d26[4]}, [r8], r1
+ vst4.8 {d23[5], d24[5], d25[5], d26[5]}, [r8], r1
+ vst4.8 {d23[6], d24[6], d25[6], d26[6]}, [r8], r1
+ vst4.8 {d23[7], d24[7], d25[7], d26[7]}, [r8], r1
+
+ b v_end
+
+v_mbfilter
+ tst r7, #2
+ beq v_wide_mbfilter
+
+ ; flat2 was not set for any of the channels. Just store the values from
+ ; mbfilter.
+ sub r8, r0, #3
+
+ vst3.8 {d18[0], d19[0], d20[0]}, [r8], r1
+ vst3.8 {d21[0], d22[0], d23[0]}, [r0], r1
+ vst3.8 {d18[1], d19[1], d20[1]}, [r8], r1
+ vst3.8 {d21[1], d22[1], d23[1]}, [r0], r1
+ vst3.8 {d18[2], d19[2], d20[2]}, [r8], r1
+ vst3.8 {d21[2], d22[2], d23[2]}, [r0], r1
+ vst3.8 {d18[3], d19[3], d20[3]}, [r8], r1
+ vst3.8 {d21[3], d22[3], d23[3]}, [r0], r1
+ vst3.8 {d18[4], d19[4], d20[4]}, [r8], r1
+ vst3.8 {d21[4], d22[4], d23[4]}, [r0], r1
+ vst3.8 {d18[5], d19[5], d20[5]}, [r8], r1
+ vst3.8 {d21[5], d22[5], d23[5]}, [r0], r1
+ vst3.8 {d18[6], d19[6], d20[6]}, [r8], r1
+ vst3.8 {d21[6], d22[6], d23[6]}, [r0], r1
+ vst3.8 {d18[7], d19[7], d20[7]}, [r8], r1
+ vst3.8 {d21[7], d22[7], d23[7]}, [r0], r1
+
+ b v_end
+
+v_wide_mbfilter
+ sub r8, r0, #8
+
+ vtrn.32 d0, d26
+ vtrn.32 d16, d27
+ vtrn.32 d24, d18
+ vtrn.32 d25, d19
+
+ vtrn.16 d0, d24
+ vtrn.16 d16, d25
+ vtrn.16 d26, d18
+ vtrn.16 d27, d19
+
+ vtrn.8 d0, d16
+ vtrn.8 d24, d25
+ vtrn.8 d26, d27
+ vtrn.8 d18, d19
+
+ vtrn.32 d20, d1
+ vtrn.32 d21, d2
+ vtrn.32 d22, d3
+ vtrn.32 d23, d15
+
+ vtrn.16 d20, d22
+ vtrn.16 d21, d23
+ vtrn.16 d1, d3
+ vtrn.16 d2, d15
+
+ vtrn.8 d20, d21
+ vtrn.8 d22, d23
+ vtrn.8 d1, d2
+ vtrn.8 d3, d15
+
+ vst1.8 {d0}, [r8@64], r1
+ vst1.8 {d20}, [r0@64], r1
+ vst1.8 {d16}, [r8@64], r1
+ vst1.8 {d21}, [r0@64], r1
+ vst1.8 {d24}, [r8@64], r1
+ vst1.8 {d22}, [r0@64], r1
+ vst1.8 {d25}, [r8@64], r1
+ vst1.8 {d23}, [r0@64], r1
+ vst1.8 {d26}, [r8@64], r1
+ vst1.8 {d1}, [r0@64], r1
+ vst1.8 {d27}, [r8@64], r1
+ vst1.8 {d2}, [r0@64], r1
+ vst1.8 {d18}, [r8@64], r1
+ vst1.8 {d3}, [r0@64], r1
+ vst1.8 {d19}, [r8@64], r1
+ vst1.8 {d15}, [r0@64], r1
+
+v_end
+ vpop {d8-d15}
+ pop {r4-r8, pc}
+
+ ENDP ; |vp9_mb_lpf_vertical_edge_w_neon|
+
+; void vp9_wide_mbfilter_neon();
+; This is a helper function for the loopfilters. The individual functions do the
+; necessary load, transpose (if necessary) and store.
+;
+; r0-r3 PRESERVE
+; d16 blimit
+; d17 limit
+; d18 thresh
+; d0 p7
+; d1 p6
+; d2 p5
+; d3 p4
+; d4 p3
+; d5 p2
+; d6 p1
+; d7 p0
+; d8 q0
+; d9 q1
+; d10 q2
+; d11 q3
+; d12 q4
+; d13 q5
+; d14 q6
+; d15 q7
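+;
+; Branch summary (mirrors the tst checks used below and by the callers):
+;   r7 bit 0 set -> flat && mask was zero everywhere; only the 4-tap filter
+;                   outputs (d23-d26) are valid
+;   r7 bit 1 set -> flat2 && flat && mask was zero; the 7-tap mbfilter
+;                   outputs (d18-d23) are valid
+;   neither set  -> the full 15-tap wide filter outputs are valid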
+|vp9_wide_mbfilter_neon| PROC
+ mov r7, #0
+
+ ; filter_mask
+ vabd.u8 d19, d4, d5 ; abs(p3 - p2)
+ vabd.u8 d20, d5, d6 ; abs(p2 - p1)
+ vabd.u8 d21, d6, d7 ; abs(p1 - p0)
+ vabd.u8 d22, d9, d8 ; abs(q1 - q0)
+ vabd.u8 d23, d10, d9 ; abs(q2 - q1)
+ vabd.u8 d24, d11, d10 ; abs(q3 - q2)
+
+ ; only compare the largest value to limit
+ vmax.u8 d19, d19, d20 ; max(abs(p3 - p2), abs(p2 - p1))
+ vmax.u8 d20, d21, d22 ; max(abs(p1 - p0), abs(q1 - q0))
+ vmax.u8 d23, d23, d24 ; max(abs(q2 - q1), abs(q3 - q2))
+ vmax.u8 d19, d19, d20
+
+ vabd.u8 d24, d7, d8 ; abs(p0 - q0)
+
+ vmax.u8 d19, d19, d23
+
+ vabd.u8 d23, d6, d9 ; a = abs(p1 - q1)
+ vqadd.u8 d24, d24, d24 ; b = abs(p0 - q0) * 2
+
+ ; abs () > limit
+ vcge.u8 d19, d17, d19
+
+ ; flatmask4
+ vabd.u8 d25, d7, d5 ; abs(p0 - p2)
+ vabd.u8 d26, d8, d10 ; abs(q0 - q2)
+ vabd.u8 d27, d4, d7 ; abs(p3 - p0)
+ vabd.u8 d28, d11, d8 ; abs(q3 - q0)
+
+ ; only compare the largest value to thresh
+ vmax.u8 d25, d25, d26 ; max(abs(p0 - p2), abs(q0 - q2))
+ vmax.u8 d26, d27, d28 ; max(abs(p3 - p0), abs(q3 - q0))
+ vmax.u8 d25, d25, d26
+ vmax.u8 d20, d20, d25
+
+ vshr.u8 d23, d23, #1 ; a = a / 2
+ vqadd.u8 d24, d24, d23 ; a = b + a
+
+ vmov.u8 d30, #1
+ vcge.u8 d24, d16, d24 ; (a > blimit * 2 + limit) * -1
+
+ vcge.u8 d20, d30, d20 ; flat
+
+ vand d19, d19, d24 ; mask
+
+ ; hevmask
+ vcgt.u8 d21, d21, d18 ; (abs(p1 - p0) > thresh)*-1
+ vcgt.u8 d22, d22, d18 ; (abs(q1 - q0) > thresh)*-1
+ vorr d21, d21, d22 ; hev
+
+ vand d16, d20, d19 ; flat && mask
+ vmov r5, r6, d16
+
+ ; flatmask5(1, p7, p6, p5, p4, p0, q0, q4, q5, q6, q7)
+ vabd.u8 d22, d3, d7 ; abs(p4 - p0)
+ vabd.u8 d23, d12, d8 ; abs(q4 - q0)
+ vabd.u8 d24, d7, d2 ; abs(p0 - p5)
+ vabd.u8 d25, d8, d13 ; abs(q0 - q5)
+ vabd.u8 d26, d1, d7 ; abs(p6 - p0)
+ vabd.u8 d27, d14, d8 ; abs(q6 - q0)
+ vabd.u8 d28, d0, d7 ; abs(p7 - p0)
+ vabd.u8 d29, d15, d8 ; abs(q7 - q0)
+
+ ; only compare the largest value to thresh
+ vmax.u8 d22, d22, d23 ; max(abs(p4 - p0), abs(q4 - q0))
+ vmax.u8 d23, d24, d25 ; max(abs(p0 - p5), abs(q0 - q5))
+ vmax.u8 d24, d26, d27 ; max(abs(p6 - p0), abs(q6 - q0))
+ vmax.u8 d25, d28, d29 ; max(abs(p7 - p0), abs(q7 - q0))
+
+ vmax.u8 d26, d22, d23
+ vmax.u8 d27, d24, d25
+ vmax.u8 d23, d26, d27
+
+ vcge.u8 d18, d30, d23 ; flat2
+
+ vmov.u8 d22, #0x80
+
+ orrs r5, r5, r6 ; Check for 0
+ orreq r7, r7, #1 ; Only do filter branch
+
+ vand d17, d18, d16 ; flat2 && flat && mask
+ vmov r5, r6, d17
+
+ ; mbfilter() function
+
+ ; filter() function
+ ; convert to signed
+ veor d23, d8, d22 ; qs0
+ veor d24, d7, d22 ; ps0
+ veor d25, d6, d22 ; ps1
+ veor d26, d9, d22 ; qs1
+
+ vmov.u8 d27, #3
+
+ vsub.s8 d28, d23, d24 ; ( qs0 - ps0)
+ vqsub.s8 d29, d25, d26 ; filter = clamp(ps1-qs1)
+ vmull.s8 q15, d28, d27 ; 3 * ( qs0 - ps0)
+ vand d29, d29, d21 ; filter &= hev
+ vaddw.s8 q15, q15, d29 ; filter + 3 * (qs0 - ps0)
+ vmov.u8 d29, #4
+
+ ; filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d28, q15
+
+ vand d28, d28, d19 ; filter &= mask
+
+ vqadd.s8 d30, d28, d27 ; filter2 = clamp(filter+3)
+ vqadd.s8 d29, d28, d29 ; filter1 = clamp(filter+4)
+ vshr.s8 d30, d30, #3 ; filter2 >>= 3
+ vshr.s8 d29, d29, #3 ; filter1 >>= 3
+
+
+ vqadd.s8 d24, d24, d30 ; op0 = clamp(ps0 + filter2)
+ vqsub.s8 d23, d23, d29 ; oq0 = clamp(qs0 - filter1)
+
+ ; outer tap adjustments: ++filter1 >> 1
+ vrshr.s8 d29, d29, #1
+ vbic d29, d29, d21 ; filter &= ~hev
+
+ vqadd.s8 d25, d25, d29 ; op1 = clamp(ps1 + filter)
+ vqsub.s8 d26, d26, d29 ; oq1 = clamp(qs1 - filter)
+
+ veor d24, d24, d22 ; *f_op0 = u^0x80
+ veor d23, d23, d22 ; *f_oq0 = u^0x80
+ veor d25, d25, d22 ; *f_op1 = u^0x80
+ veor d26, d26, d22 ; *f_oq1 = u^0x80
+
+ tst r7, #1
+ bxne lr
+
+ ; mbfilter flat && mask branch
+ ; TODO(fgalligan): Can I decrease the cycles by shifting to consecutive
+ ; d registers and using vbit on the q registers?
+ vmov.u8 d29, #2
+ vaddl.u8 q15, d7, d8 ; op2 = p0 + q0
+ vmlal.u8 q15, d4, d27 ; op2 = p0 + q0 + p3 * 3
+ vmlal.u8 q15, d5, d29 ; op2 = p0 + q0 + p3 * 3 + p2 * 2
+ vaddl.u8 q10, d4, d5
+ vaddw.u8 q15, d6 ; op2 = p1 + p0 + q0 + p3 * 3 + p2 * 2
+ vaddl.u8 q14, d6, d9
+ vqrshrn.u16 d18, q15, #3 ; r_op2
+
+ vsub.i16 q15, q10
+ vaddl.u8 q10, d4, d6
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d7, d10
+ vqrshrn.u16 d19, q15, #3 ; r_op1
+
+ vsub.i16 q15, q10
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d8, d11
+ vqrshrn.u16 d20, q15, #3 ; r_op0
+
+ vsubw.u8 q15, d4 ; oq0 = op0 - p3
+ vsubw.u8 q15, d7 ; oq0 -= p0
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d9, d11
+ vqrshrn.u16 d21, q15, #3 ; r_oq0
+
+ vsubw.u8 q15, d5 ; oq1 = oq0 - p2
+ vsubw.u8 q15, d8 ; oq1 -= q0
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d10, d11
+ vqrshrn.u16 d22, q15, #3 ; r_oq1
+
+ vsubw.u8 q15, d6 ; oq2 = oq0 - p1
+ vsubw.u8 q15, d9 ; oq2 -= q1
+ vadd.i16 q15, q14
+ vqrshrn.u16 d27, q15, #3 ; r_oq2
+
+ ; Filter does not set op2 or oq2, so use p2 and q2.
+ vbif d18, d5, d16 ; t_op2 |= p2 & ~(flat & mask)
+ vbif d19, d25, d16 ; t_op1 |= f_op1 & ~(flat & mask)
+ vbif d20, d24, d16 ; t_op0 |= f_op0 & ~(flat & mask)
+ vbif d21, d23, d16 ; t_oq0 |= f_oq0 & ~(flat & mask)
+ vbif d22, d26, d16 ; t_oq1 |= f_oq1 & ~(flat & mask)
+
+ vbit d23, d27, d16 ; t_oq2 |= r_oq2 & (flat & mask)
+ vbif d23, d10, d16 ; t_oq2 |= q2 & ~(flat & mask)
+
+ tst r7, #2
+ bxne lr
+
+ ; wide_mbfilter flat2 && flat && mask branch
+ vmov.u8 d16, #7
+ vaddl.u8 q15, d7, d8 ; op6 = p0 + q0
+ vaddl.u8 q12, d2, d3
+ vaddl.u8 q13, d4, d5
+ vaddl.u8 q14, d1, d6
+ vmlal.u8 q15, d0, d16 ; op6 += p7 * 7
+ vadd.i16 q12, q13
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d2, d9
+ vadd.i16 q15, q12
+ vaddl.u8 q12, d0, d1
+ vaddw.u8 q15, d1
+ vaddl.u8 q13, d0, d2
+ vadd.i16 q14, q15, q14
+ vqrshrn.u16 d16, q15, #4 ; w_op6
+
+ vsub.i16 q15, q14, q12
+ vaddl.u8 q14, d3, d10
+ vqrshrn.u16 d24, q15, #4 ; w_op5
+
+ vsub.i16 q15, q13
+ vaddl.u8 q13, d0, d3
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d4, d11
+ vqrshrn.u16 d25, q15, #4 ; w_op4
+
+ vadd.i16 q15, q14
+ vaddl.u8 q14, d0, d4
+ vsub.i16 q15, q13
+ vsub.i16 q14, q15, q14
+ vqrshrn.u16 d26, q15, #4 ; w_op3
+
+ vaddw.u8 q15, q14, d5 ; op2 += p2
+ vaddl.u8 q14, d0, d5
+ vaddw.u8 q15, d12 ; op2 += q4
+ vbif d26, d4, d17 ; op3 |= p3 & ~(f2 & f & m)
+ vqrshrn.u16 d27, q15, #4 ; w_op2
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d0, d6
+ vaddw.u8 q15, d6 ; op1 += p1
+ vaddw.u8 q15, d13 ; op1 += q5
+ vbif d27, d18, d17 ; op2 |= t_op2 & ~(f2 & f & m)
+ vqrshrn.u16 d18, q15, #4 ; w_op1
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d0, d7
+ vaddw.u8 q15, d7 ; op0 += p0
+ vaddw.u8 q15, d14 ; op0 += q6
+ vbif d18, d19, d17 ; op1 |= t_op1 & ~(f2 & f & m)
+ vqrshrn.u16 d19, q15, #4 ; w_op0
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d1, d8
+ vaddw.u8 q15, d8 ; oq0 += q0
+ vaddw.u8 q15, d15 ; oq0 += q7
+ vbif d19, d20, d17 ; op0 |= t_op0 & ~(f2 & f & m)
+ vqrshrn.u16 d20, q15, #4 ; w_oq0
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d2, d9
+ vaddw.u8 q15, d9 ; oq1 += q1
+ vaddl.u8 q4, d10, d15
+ vaddw.u8 q15, d15 ; oq1 += q7
+ vbif d20, d21, d17 ; oq0 |= t_oq0 & ~(f2 & f & m)
+ vqrshrn.u16 d21, q15, #4 ; w_oq1
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d3, d10
+ vadd.i16 q15, q4
+ vaddl.u8 q4, d11, d15
+ vbif d21, d22, d17 ; oq1 |= t_oq1 & ~(f2 & f & m)
+ vqrshrn.u16 d22, q15, #4 ; w_oq2
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d4, d11
+ vadd.i16 q15, q4
+ vaddl.u8 q4, d12, d15
+ vbif d22, d23, d17 ; oq2 |= t_oq2 & ~(f2 & f & m)
+ vqrshrn.u16 d23, q15, #4 ; w_oq3
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d5, d12
+ vadd.i16 q15, q4
+ vaddl.u8 q4, d13, d15
+ vbif d16, d1, d17 ; op6 |= p6 & ~(f2 & f & m)
+ vqrshrn.u16 d1, q15, #4 ; w_oq4
+
+ vsub.i16 q15, q14
+ vaddl.u8 q14, d6, d13
+ vadd.i16 q15, q4
+ vaddl.u8 q4, d14, d15
+ vbif d24, d2, d17 ; op5 |= p5 & ~(f2 & f & m)
+ vqrshrn.u16 d2, q15, #4 ; w_oq5
+
+ vsub.i16 q15, q14
+ vbif d25, d3, d17 ; op4 |= p4 & ~(f2 & f & m)
+ vadd.i16 q15, q4
+ vbif d23, d11, d17 ; oq3 |= q3 & ~(f2 & f & m)
+ vqrshrn.u16 d3, q15, #4 ; w_oq6
+ vbif d1, d12, d17 ; oq4 |= q4 & ~(f2 & f & m)
+ vbif d2, d13, d17 ; oq5 |= q5 & ~(f2 & f & m)
+ vbif d3, d14, d17 ; oq6 |= q6 & ~(f2 & f & m)
+
+ bx lr
+ ENDP ; |vp9_wide_mbfilter_neon|
+
+ END
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_1_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_1_add_neon.asm
new file mode 100644
index 0000000..cf5c8f7
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_1_add_neon.asm
@@ -0,0 +1,198 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license and patent
+; grant that can be found in the LICENSE file in the root of the source
+; tree. All contributing project authors may be found in the AUTHORS
+; file in the root of the source tree.
+;
+
+
+ EXPORT |vp9_short_idct16x16_1_add_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+;void vp9_short_idct16x16_1_add_neon(int16_t *input, uint8_t *dest,
+; int dest_stride)
+;
+; r0 int16_t *input
+; r1 uint8_t *dest
+; r2 int dest_stride
+
+|vp9_short_idct16x16_1_add_neon| PROC
+ ldrsh r0, [r0]
+
+ ; generate cospi_16_64 = 11585
+ mov r12, #0x2d00
+ add r12, #0x41
+
+ ; out = dct_const_round_shift(input[0] * cospi_16_64)
+ mul r0, r0, r12 ; input[0] * cospi_16_64
+ add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
+ asr r0, r0, #14 ; >> DCT_CONST_BITS
+
+ ; out = dct_const_round_shift(out * cospi_16_64)
+ mul r0, r0, r12 ; out * cospi_16_64
+ mov r12, r1 ; save dest
+ add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
+ asr r0, r0, #14 ; >> DCT_CONST_BITS
+
+ ; a1 = ROUND_POWER_OF_TWO(out, 6)
+ add r0, r0, #32 ; + (1 <<((6) - 1))
+ asr r0, r0, #6 ; >> 6
+
+ vdup.s16 q0, r0 ; duplicate a1
+ mov r0, #8
+ sub r2, #8
+
+ ; load destination data row0 - row3
+ vld1.64 {d2}, [r1], r0
+ vld1.64 {d3}, [r1], r2
+ vld1.64 {d4}, [r1], r0
+ vld1.64 {d5}, [r1], r2
+ vld1.64 {d6}, [r1], r0
+ vld1.64 {d7}, [r1], r2
+ vld1.64 {d16}, [r1], r0
+ vld1.64 {d17}, [r1], r2
+
+ vaddw.u8 q9, q0, d2 ; dest[x] + a1
+ vaddw.u8 q10, q0, d3 ; dest[x] + a1
+ vaddw.u8 q11, q0, d4 ; dest[x] + a1
+ vaddw.u8 q12, q0, d5 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r0
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r0
+ vst1.64 {d31}, [r12], r2
+
+ vaddw.u8 q9, q0, d6 ; dest[x] + a1
+ vaddw.u8 q10, q0, d7 ; dest[x] + a1
+ vaddw.u8 q11, q0, d16 ; dest[x] + a1
+ vaddw.u8 q12, q0, d17 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r0
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r0
+ vst1.64 {d31}, [r12], r2
+
+ ; load destination data row4 - row7
+ vld1.64 {d2}, [r1], r0
+ vld1.64 {d3}, [r1], r2
+ vld1.64 {d4}, [r1], r0
+ vld1.64 {d5}, [r1], r2
+ vld1.64 {d6}, [r1], r0
+ vld1.64 {d7}, [r1], r2
+ vld1.64 {d16}, [r1], r0
+ vld1.64 {d17}, [r1], r2
+
+ vaddw.u8 q9, q0, d2 ; dest[x] + a1
+ vaddw.u8 q10, q0, d3 ; dest[x] + a1
+ vaddw.u8 q11, q0, d4 ; dest[x] + a1
+ vaddw.u8 q12, q0, d5 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r0
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r0
+ vst1.64 {d31}, [r12], r2
+
+ vaddw.u8 q9, q0, d6 ; dest[x] + a1
+ vaddw.u8 q10, q0, d7 ; dest[x] + a1
+ vaddw.u8 q11, q0, d16 ; dest[x] + a1
+ vaddw.u8 q12, q0, d17 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r0
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r0
+ vst1.64 {d31}, [r12], r2
+
+ ; load destination data row8 - row11
+ vld1.64 {d2}, [r1], r0
+ vld1.64 {d3}, [r1], r2
+ vld1.64 {d4}, [r1], r0
+ vld1.64 {d5}, [r1], r2
+ vld1.64 {d6}, [r1], r0
+ vld1.64 {d7}, [r1], r2
+ vld1.64 {d16}, [r1], r0
+ vld1.64 {d17}, [r1], r2
+
+ vaddw.u8 q9, q0, d2 ; dest[x] + a1
+ vaddw.u8 q10, q0, d3 ; dest[x] + a1
+ vaddw.u8 q11, q0, d4 ; dest[x] + a1
+ vaddw.u8 q12, q0, d5 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r0
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r0
+ vst1.64 {d31}, [r12], r2
+
+ vaddw.u8 q9, q0, d6 ; dest[x] + a1
+ vaddw.u8 q10, q0, d7 ; dest[x] + a1
+ vaddw.u8 q11, q0, d16 ; dest[x] + a1
+ vaddw.u8 q12, q0, d17 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r0
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r0
+ vst1.64 {d31}, [r12], r2
+
+ ; load destination data row12 - row15
+ vld1.64 {d2}, [r1], r0
+ vld1.64 {d3}, [r1], r2
+ vld1.64 {d4}, [r1], r0
+ vld1.64 {d5}, [r1], r2
+ vld1.64 {d6}, [r1], r0
+ vld1.64 {d7}, [r1], r2
+ vld1.64 {d16}, [r1], r0
+ vld1.64 {d17}, [r1], r2
+
+ vaddw.u8 q9, q0, d2 ; dest[x] + a1
+ vaddw.u8 q10, q0, d3 ; dest[x] + a1
+ vaddw.u8 q11, q0, d4 ; dest[x] + a1
+ vaddw.u8 q12, q0, d5 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r0
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r0
+ vst1.64 {d31}, [r12], r2
+
+ vaddw.u8 q9, q0, d6 ; dest[x] + a1
+ vaddw.u8 q10, q0, d7 ; dest[x] + a1
+ vaddw.u8 q11, q0, d16 ; dest[x] + a1
+ vaddw.u8 q12, q0, d17 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r0
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r0
+ vst1.64 {d31}, [r12], r2
+
+ bx lr
+ ENDP ; |vp9_short_idct16x16_1_add_neon|
+
+ END
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_add_neon.asm
new file mode 100644
index 0000000..7464e80
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_short_idct16x16_add_neon.asm
@@ -0,0 +1,1191 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_short_idct16x16_add_neon_pass1|
+ EXPORT |vp9_short_idct16x16_add_neon_pass2|
+ EXPORT |vp9_short_idct10_16x16_add_neon_pass1|
+ EXPORT |vp9_short_idct10_16x16_add_neon_pass2|
+ EXPORT |save_neon_registers|
+ EXPORT |restore_neon_registers|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+ ; Transpose an 8x8 16bit data matrix. Data is loaded in q8-q15.
+ MACRO
+ TRANSPOSE8X8
+ vswp d17, d24
+ vswp d23, d30
+ vswp d21, d28
+ vswp d19, d26
+ vtrn.32 q8, q10
+ vtrn.32 q9, q11
+ vtrn.32 q12, q14
+ vtrn.32 q13, q15
+ vtrn.16 q8, q9
+ vtrn.16 q10, q11
+ vtrn.16 q12, q13
+ vtrn.16 q14, q15
+ MEND
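+ ; The transpose proceeds in three levels: the vswp's exchange d-register
+ ; halves (swapping the off-diagonal 4x4 quadrants), vtrn.32 transposes
+ ; 2x2 blocks of 32-bit pairs, and vtrn.16 transposes adjacent 16-bit
+ ; elements.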
+
+ AREA Block, CODE, READONLY ; name this block of code
+;void |vp9_short_idct16x16_add_neon_pass1|(int16_t *input,
+; int16_t *output, int output_stride)
+;
+; r0 int16_t *input
+; r1 int16_t *output
+; r2 int output_stride
+
+; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
+; will be stored back into q8-q15 registers. This function will touch q0-q7
+; registers and use them as buffer during calculation.
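+; In outline (inferred from the code below): pass1 consumes the even input
+; rows and stores the intermediate step2[0..7] block; pass2 consumes the odd
+; rows, finishes stages 1-7 and, depending on skip_adding, either stores a
+; second intermediate block or adds the final residual into dest.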
+|vp9_short_idct16x16_add_neon_pass1| PROC
+
+ ; TODO(hkuang): Find a better way to load the elements.
+ ; load elements of 0, 2, 4, 6, 8, 10, 12, 14 into q8 - q15
+ vld2.s16 {q8,q9}, [r0]!
+ vld2.s16 {q9,q10}, [r0]!
+ vld2.s16 {q10,q11}, [r0]!
+ vld2.s16 {q11,q12}, [r0]!
+ vld2.s16 {q12,q13}, [r0]!
+ vld2.s16 {q13,q14}, [r0]!
+ vld2.s16 {q14,q15}, [r0]!
+ vld2.s16 {q1,q2}, [r0]!
+ vmov.s16 q15, q1
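+
+ ; (each vld2 deinterleaves 16 lanes into an even/odd register pair; only
+ ; the even half survives, as the next overlapping load overwrites the odd
+ ; register, leaving elements 0,2,...,14 in q8-q15. The last pair needs the
+ ; vmov to move its even half into q15.)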
+
+ ; generate cospi_28_64 = 3196
+ mov r3, #0xc00
+ add r3, #0x7c
+
+ ; generate cospi_4_64 = 16069
+ mov r12, #0x3e00
+ add r12, #0xc5
+
+ ; transpose the input data
+ TRANSPOSE8X8
+
+ ; stage 3
+ vdup.16 d0, r3 ; duplicate cospi_28_64
+ vdup.16 d1, r12 ; duplicate cospi_4_64
+
+ ; preloading to avoid stall
+ ; generate cospi_12_64 = 13623
+ mov r3, #0x3500
+ add r3, #0x37
+
+ ; generate cospi_20_64 = 9102
+ mov r12, #0x2300
+ add r12, #0x8e
+
+ ; step2[4] * cospi_28_64
+ vmull.s16 q2, d18, d0
+ vmull.s16 q3, d19, d0
+
+ ; step2[4] * cospi_4_64
+ vmull.s16 q5, d18, d1
+ vmull.s16 q6, d19, d1
+
+ ; temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64
+ vmlsl.s16 q2, d30, d1
+ vmlsl.s16 q3, d31, d1
+
+ ; temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64
+ vmlal.s16 q5, d30, d0
+ vmlal.s16 q6, d31, d0
+
+ vdup.16 d2, r3 ; duplicate cospi_12_64
+ vdup.16 d3, r12 ; duplicate cospi_20_64
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d8, q2, #14 ; >> 14
+ vqrshrn.s32 d9, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d14, q5, #14 ; >> 14
+ vqrshrn.s32 d15, q6, #14 ; >> 14
+
+ ; preloading to avoid stall
+ ; generate cospi_16_64 = 11585
+ mov r3, #0x2d00
+ add r3, #0x41
+
+ ; generate cospi_24_64 = 6270
+ mov r12, #0x1800
+ add r12, #0x7e
+
+ ; step2[5] * cospi_12_64
+ vmull.s16 q2, d26, d2
+ vmull.s16 q3, d27, d2
+
+ ; step2[5] * cospi_20_64
+ vmull.s16 q9, d26, d3
+ vmull.s16 q15, d27, d3
+
+ ; temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64
+ vmlsl.s16 q2, d22, d3
+ vmlsl.s16 q3, d23, d3
+
+ ; temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64
+ vmlal.s16 q9, d22, d2
+ vmlal.s16 q15, d23, d2
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d10, q2, #14 ; >> 14
+ vqrshrn.s32 d11, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d12, q9, #14 ; >> 14
+ vqrshrn.s32 d13, q15, #14 ; >> 14
+
+ ; stage 4
+ vdup.16 d30, r3 ; cospi_16_64
+
+ ; step1[0] * cospi_16_64
+ vmull.s16 q2, d16, d30
+ vmull.s16 q11, d17, d30
+
+ ; step1[1] * cospi_16_64
+ vmull.s16 q0, d24, d30
+ vmull.s16 q1, d25, d30
+
+ ; generate cospi_8_64 = 15137
+ mov r3, #0x3b00
+ add r3, #0x21
+
+ vdup.16 d30, r12 ; duplicate cospi_24_64
+ vdup.16 d31, r3 ; duplicate cospi_8_64
+
+ ; temp1 = (step1[0] + step1[1]) * cospi_16_64
+ vadd.s32 q3, q2, q0
+ vadd.s32 q12, q11, q1
+
+ ; temp2 = (step1[0] - step1[1]) * cospi_16_64
+ vsub.s32 q13, q2, q0
+ vsub.s32 q1, q11, q1
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d16, q3, #14 ; >> 14
+ vqrshrn.s32 d17, q12, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d18, q13, #14 ; >> 14
+ vqrshrn.s32 d19, q1, #14 ; >> 14
+
+ ; step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
+ ; step1[2] * cospi_8_64
+ vmull.s16 q0, d20, d31
+ vmull.s16 q1, d21, d31
+
+ ; step1[2] * cospi_24_64
+ vmull.s16 q12, d20, d30
+ vmull.s16 q13, d21, d30
+
+ ; temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64
+ vmlal.s16 q0, d28, d30
+ vmlal.s16 q1, d29, d30
+
+ ; temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64
+ vmlsl.s16 q12, d28, d31
+ vmlsl.s16 q13, d29, d31
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d22, q0, #14 ; >> 14
+ vqrshrn.s32 d23, q1, #14 ; >> 14
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d20, q12, #14 ; >> 14
+ vqrshrn.s32 d21, q13, #14 ; >> 14
+
+ vsub.s16 q13, q4, q5 ; step2[5] = step1[4] - step1[5];
+ vadd.s16 q4, q4, q5 ; step2[4] = step1[4] + step1[5];
+ vsub.s16 q14, q7, q6 ; step2[6] = -step1[6] + step1[7];
+ vadd.s16 q15, q6, q7 ; step2[7] = step1[6] + step1[7];
+
+ ; generate cospi_16_64 = 11585
+ mov r3, #0x2d00
+ add r3, #0x41
+
+ ; stage 5
+ vadd.s16 q0, q8, q11 ; step1[0] = step2[0] + step2[3];
+ vadd.s16 q1, q9, q10 ; step1[1] = step2[1] + step2[2];
+ vsub.s16 q2, q9, q10 ; step1[2] = step2[1] - step2[2];
+ vsub.s16 q3, q8, q11 ; step1[3] = step2[0] - step2[3];
+
+ vdup.16 d16, r3 ; duplicate cospi_16_64
+
+ ; step2[5] * cospi_16_64
+ vmull.s16 q11, d26, d16
+ vmull.s16 q12, d27, d16
+
+ ; step2[6] * cospi_16_64
+ vmull.s16 q9, d28, d16
+ vmull.s16 q10, d29, d16
+
+ ; temp1 = (step2[6] - step2[5]) * cospi_16_64
+ vsub.s32 q6, q9, q11
+ vsub.s32 q13, q10, q12
+
+ ; temp2 = (step2[5] + step2[6]) * cospi_16_64
+ vadd.s32 q9, q9, q11
+ vadd.s32 q10, q10, q12
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d10, q6, #14 ; >> 14
+ vqrshrn.s32 d11, q13, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d12, q9, #14 ; >> 14
+ vqrshrn.s32 d13, q10, #14 ; >> 14
+
+ ; stage 6
+ vadd.s16 q8, q0, q15 ; step2[0] = step1[0] + step1[7];
+ vadd.s16 q9, q1, q6 ; step2[1] = step1[1] + step1[6];
+ vadd.s16 q10, q2, q5 ; step2[2] = step1[2] + step1[5];
+ vadd.s16 q11, q3, q4 ; step2[3] = step1[3] + step1[4];
+ vsub.s16 q12, q3, q4 ; step2[4] = step1[3] - step1[4];
+ vsub.s16 q13, q2, q5 ; step2[5] = step1[2] - step1[5];
+ vsub.s16 q14, q1, q6 ; step2[6] = step1[1] - step1[6];
+ vsub.s16 q15, q0, q15 ; step2[7] = step1[0] - step1[7];
+
+ ; store the data
+ vst1.64 {d16}, [r1], r2
+ vst1.64 {d17}, [r1], r2
+ vst1.64 {d18}, [r1], r2
+ vst1.64 {d19}, [r1], r2
+ vst1.64 {d20}, [r1], r2
+ vst1.64 {d21}, [r1], r2
+ vst1.64 {d22}, [r1], r2
+ vst1.64 {d23}, [r1], r2
+ vst1.64 {d24}, [r1], r2
+ vst1.64 {d25}, [r1], r2
+ vst1.64 {d26}, [r1], r2
+ vst1.64 {d27}, [r1], r2
+ vst1.64 {d28}, [r1], r2
+ vst1.64 {d29}, [r1], r2
+ vst1.64 {d30}, [r1], r2
+ vst1.64 {d31}, [r1], r2
+
+ bx lr
+ ENDP ; |vp9_short_idct16x16_add_neon_pass1|
+
+;void vp9_short_idct16x16_add_neon_pass2(int16_t *src,
+; int16_t *output,
+; int16_t *pass1Output,
+; int16_t skip_adding,
+; uint8_t *dest,
+; int dest_stride)
+;
+; r0 int16_t *src
+; r1 int16_t *output,
+; r2 int16_t *pass1Output,
+; r3 int16_t skip_adding,
+; r4 uint8_t *dest,
+; r5 int dest_stride)
+
+; idct16 stage1 - stage7 on all the elements loaded in q8-q15. The output
+; will be stored back into q8-q15 registers. This function will touch q0-q7
+; registers and use them as buffer during calculation.
+|vp9_short_idct16x16_add_neon_pass2| PROC
+ push {r3-r9}
+
+ ; TODO(hkuang): Find a better way to load the elements.
+ ; load elements of 1, 3, 5, 7, 9, 11, 13, 15 into q8 - q15
+ vld2.s16 {q8,q9}, [r0]!
+ vld2.s16 {q9,q10}, [r0]!
+ vld2.s16 {q10,q11}, [r0]!
+ vld2.s16 {q11,q12}, [r0]!
+ vld2.s16 {q12,q13}, [r0]!
+ vld2.s16 {q13,q14}, [r0]!
+ vld2.s16 {q14,q15}, [r0]!
+ vld2.s16 {q0,q1}, [r0]!
+ vmov.s16 q15, q0;
+
+ ; generate cospi_30_64 = 1606
+ mov r3, #0x0600
+ add r3, #0x46
+
+ ; generate cospi_2_64 = 16305
+ mov r12, #0x3f00
+ add r12, #0xb1
+
+ ; transpose the input data
+ TRANSPOSE8X8
+
+ ; stage 3
+ vdup.16 d12, r3 ; duplicate cospi_30_64
+ vdup.16 d13, r12 ; duplicate cospi_2_64
+
+ ; preloading to avoid stall
+ ; generate cospi_14_64 = 12665
+ mov r3, #0x3100
+ add r3, #0x79
+
+ ; generate cospi_18_64 = 10394
+ mov r12, #0x2800
+ add r12, #0x9a
+
+ ; step1[8] * cospi_30_64
+ vmull.s16 q2, d16, d12
+ vmull.s16 q3, d17, d12
+
+ ; step1[8] * cospi_2_64
+ vmull.s16 q1, d16, d13
+ vmull.s16 q4, d17, d13
+
+ ; temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64
+ vmlsl.s16 q2, d30, d13
+ vmlsl.s16 q3, d31, d13
+
+ ; temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64
+ vmlal.s16 q1, d30, d12
+ vmlal.s16 q4, d31, d12
+
+ vdup.16 d30, r3 ; duplicate cospi_14_64
+ vdup.16 d31, r12 ; duplicate cospi_18_64
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d0, q2, #14 ; >> 14
+ vqrshrn.s32 d1, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d14, q1, #14 ; >> 14
+ vqrshrn.s32 d15, q4, #14 ; >> 14
+
+ ; preloading to avoid stall
+ ; generate cospi_22_64 = 7723
+ mov r3, #0x1e00
+ add r3, #0x2b
+
+ ; generate cospi_10_64 = 14449
+ mov r12, #0x3800
+ add r12, #0x71
+
+ ; step1[9] * cospi_14_64
+ vmull.s16 q2, d24, d30
+ vmull.s16 q3, d25, d30
+
+ ; step1[9] * cospi_18_64
+ vmull.s16 q4, d24, d31
+ vmull.s16 q5, d25, d31
+
+ ; temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64
+ vmlsl.s16 q2, d22, d31
+ vmlsl.s16 q3, d23, d31
+
+ ; temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64
+ vmlal.s16 q4, d22, d30
+ vmlal.s16 q5, d23, d30
+
+ vdup.16 d30, r3 ; duplicate cospi_22_64
+ vdup.16 d31, r12 ; duplicate cospi_10_64
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d2, q2, #14 ; >> 14
+ vqrshrn.s32 d3, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d12, q4, #14 ; >> 14
+ vqrshrn.s32 d13, q5, #14 ; >> 14
+
+ ; step1[10] * cospi_22_64
+ vmull.s16 q11, d20, d30
+ vmull.s16 q12, d21, d30
+
+ ; step1[10] * cospi_10_64
+ vmull.s16 q4, d20, d31
+ vmull.s16 q5, d21, d31
+
+ ; temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64
+ vmlsl.s16 q11, d26, d31
+ vmlsl.s16 q12, d27, d31
+
+ ; temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64
+ vmlal.s16 q4, d26, d30
+ vmlal.s16 q5, d27, d30
+
+ ; preloading to avoid stall
+ ; generate cospi_6_64 = 15679
+ mov r3, #0x3d00
+ add r3, #0x3f
+
+ ; generate cospi_26_64 = 4756
+ mov r12, #0x1200
+ add r12, #0x94
+
+ vdup.16 d30, r3 ; duplicate cospi_6_64
+ vdup.16 d31, r12 ; duplicate cospi_26_64
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d4, q11, #14 ; >> 14
+ vqrshrn.s32 d5, q12, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d11, q5, #14 ; >> 14
+ vqrshrn.s32 d10, q4, #14 ; >> 14
+
+ ; step1[11] * cospi_6_64
+ vmull.s16 q10, d28, d30
+ vmull.s16 q11, d29, d30
+
+ ; step1[11] * cospi_26_64
+ vmull.s16 q12, d28, d31
+ vmull.s16 q13, d29, d31
+
+ ; temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64
+ vmlsl.s16 q10, d18, d31
+ vmlsl.s16 q11, d19, d31
+
+ ; temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64
+ vmlal.s16 q12, d18, d30
+ vmlal.s16 q13, d19, d30
+
+ vsub.s16 q9, q0, q1 ; step1[9]=step2[8]-step2[9]
+ vadd.s16 q0, q0, q1 ; step1[8]=step2[8]+step2[9]
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d6, q10, #14 ; >> 14
+ vqrshrn.s32 d7, q11, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d8, q12, #14 ; >> 14
+ vqrshrn.s32 d9, q13, #14 ; >> 14
+
+ ; stage 3
+ vsub.s16 q10, q3, q2 ; step1[10]=-step2[10]+step2[11]
+ vadd.s16 q11, q2, q3 ; step1[11]=step2[10]+step2[11]
+ vadd.s16 q12, q4, q5 ; step1[12]=step2[12]+step2[13]
+ vsub.s16 q13, q4, q5 ; step1[13]=step2[12]-step2[13]
+ vsub.s16 q14, q7, q6 ; step1[14]=-step2[14]+step2[15]
+ vadd.s16 q7, q6, q7 ; step1[15]=step2[14]+step2[15]
+
+ ; stage 4
+ ; generate cospi_24_64 = 6270
+ mov r3, #0x1800
+ add r3, #0x7e
+
+ ; generate cospi_8_64 = 15137
+ mov r12, #0x3b00
+ add r12, #0x21
+
+ ; -step1[9] * cospi_8_64 + step1[14] * cospi_24_64
+ vdup.16 d30, r12 ; duplicate cospi_8_64
+ vdup.16 d31, r3 ; duplicate cospi_24_64
+
+ ; step1[9] * cospi_24_64
+ vmull.s16 q2, d18, d31
+ vmull.s16 q3, d19, d31
+
+ ; step1[14] * cospi_24_64
+ vmull.s16 q4, d28, d31
+ vmull.s16 q5, d29, d31
+
+ ; temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64
+ vmlal.s16 q2, d28, d30
+ vmlal.s16 q3, d29, d30
+
+ ; temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64
+ vmlsl.s16 q4, d18, d30
+ vmlsl.s16 q5, d19, d30
+
+ rsb r12, #0
+ vdup.16 d30, r12 ; duplicate -cospi_8_64
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d12, q2, #14 ; >> 14
+ vqrshrn.s32 d13, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d2, q4, #14 ; >> 14
+ vqrshrn.s32 d3, q5, #14 ; >> 14
+
+ vmov.s16 q3, q11
+ vmov.s16 q4, q12
+
+ ; - step1[13] * cospi_8_64
+ vmull.s16 q11, d26, d30
+ vmull.s16 q12, d27, d30
+
+ ; -step1[10] * cospi_8_64
+ vmull.s16 q8, d20, d30
+ vmull.s16 q9, d21, d30
+
+ ; temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64
+ vmlsl.s16 q11, d20, d31
+ vmlsl.s16 q12, d21, d31
+
+ ; temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64
+ vmlal.s16 q8, d26, d31
+ vmlal.s16 q9, d27, d31
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d4, q11, #14 ; >> 14
+ vqrshrn.s32 d5, q12, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d10, q8, #14 ; >> 14
+ vqrshrn.s32 d11, q9, #14 ; >> 14
+
+ ; stage 5
+ vadd.s16 q8, q0, q3 ; step1[8] = step2[8]+step2[11];
+ vadd.s16 q9, q1, q2 ; step1[9] = step2[9]+step2[10];
+ vsub.s16 q10, q1, q2 ; step1[10] = step2[9]-step2[10];
+ vsub.s16 q11, q0, q3 ; step1[11] = step2[8]-step2[11];
+ vsub.s16 q12, q7, q4 ; step1[12] =-step2[12]+step2[15];
+ vsub.s16 q13, q6, q5 ; step1[13] =-step2[13]+step2[14];
+ vadd.s16 q14, q6, q5 ; step1[14] =step2[13]+step2[14];
+ vadd.s16 q15, q7, q4 ; step1[15] =step2[12]+step2[15];
+
+ ; stage 6.
+ ; generate cospi_16_64 = 11585
+ mov r12, #0x2d00
+ add r12, #0x41
+
+ vdup.16 d14, r12 ; duplicate cospi_16_64
+
+ ; step1[13] * cospi_16_64
+ vmull.s16 q3, d26, d14
+ vmull.s16 q4, d27, d14
+
+ ; step1[10] * cospi_16_64
+ vmull.s16 q0, d20, d14
+ vmull.s16 q1, d21, d14
+
+ ; temp1 = (-step1[10] + step1[13]) * cospi_16_64
+ vsub.s32 q5, q3, q0
+ vsub.s32 q6, q4, q1
+
+ ; temp2 = (step1[10] + step1[13]) * cospi_16_64
+ vadd.s32 q10, q3, q0
+ vadd.s32 q4, q4, q1
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d4, q5, #14 ; >> 14
+ vqrshrn.s32 d5, q6, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d10, q10, #14 ; >> 14
+ vqrshrn.s32 d11, q4, #14 ; >> 14
+
+ ; step1[11] * cospi_16_64
+ vmull.s16 q0, d22, d14
+ vmull.s16 q1, d23, d14
+
+ ; step1[12] * cospi_16_64
+ vmull.s16 q13, d24, d14
+ vmull.s16 q6, d25, d14
+
+ ; temp1 = (-step1[11] + step1[12]) * cospi_16_64
+ vsub.s32 q10, q13, q0
+ vsub.s32 q4, q6, q1
+
+ ; temp2 = (step1[11] + step1[12]) * cospi_16_64
+ vadd.s32 q13, q13, q0
+ vadd.s32 q6, q6, q1
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d6, q10, #14 ; >> 14
+ vqrshrn.s32 d7, q4, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d8, q13, #14 ; >> 14
+ vqrshrn.s32 d9, q6, #14 ; >> 14
+
+ mov r4, #16 ; pass1Output stride
+ ldr r3, [sp] ; load skip_adding
+ cmp r3, #0 ; check whether to add into dest
+ beq skip_adding_dest
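+
+ ; Two exit paths: with skip_adding non-zero this is the final pass, so the
+ ; stage-7 results are rounded and added into dest below; otherwise they are
+ ; stored to output as intermediate data for the next pass (skip_adding_dest).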
+
+ ldr r7, [sp, #28] ; dest used to save element 0-7
+ mov r9, r7 ; save dest pointer for later use
+ ldr r8, [sp, #32] ; load dest_stride
+
+ ; stage 7
+ ; load the data in pass1
+ vld1.s16 {q0}, [r2], r4 ; load data step2[0]
+ vld1.s16 {q1}, [r2], r4 ; load data step2[1]
+ vld1.s16 {q10}, [r2], r4 ; load data step2[2]
+ vld1.s16 {q11}, [r2], r4 ; load data step2[3]
+ vld1.64 {d12}, [r7], r8 ; load destination data
+ vld1.64 {d13}, [r7], r8 ; load destination data
+ vadd.s16 q12, q0, q15 ; step2[0] + step2[15]
+ vadd.s16 q13, q1, q14 ; step2[1] + step2[14]
+ vrshr.s16 q12, q12, #6 ; ROUND_POWER_OF_TWO
+ vrshr.s16 q13, q13, #6 ; ROUND_POWER_OF_TWO
+ vaddw.u8 q12, q12, d12 ; + dest[j * dest_stride + i]
+ vaddw.u8 q13, q13, d13 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d12, q12 ; clip pixel
+ vqmovun.s16 d13, q13 ; clip pixel
+ vst1.64 {d12}, [r9], r8 ; store the data
+ vst1.64 {d13}, [r9], r8 ; store the data
+ vsub.s16 q14, q1, q14 ; step2[1] - step2[14]
+ vsub.s16 q15, q0, q15 ; step2[0] - step2[15]
+ vld1.64 {d12}, [r7], r8 ; load destination data
+ vld1.64 {d13}, [r7], r8 ; load destination data
+ vadd.s16 q12, q10, q5 ; step2[2] + step2[13]
+ vadd.s16 q13, q11, q4 ; step2[3] + step2[12]
+ vrshr.s16 q12, q12, #6 ; ROUND_POWER_OF_TWO
+ vrshr.s16 q13, q13, #6 ; ROUND_POWER_OF_TWO
+ vaddw.u8 q12, q12, d12 ; + dest[j * dest_stride + i]
+ vaddw.u8 q13, q13, d13 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d12, q12 ; clip pixel
+ vqmovun.s16 d13, q13 ; clip pixel
+ vst1.64 {d12}, [r9], r8 ; store the data
+ vst1.64 {d13}, [r9], r8 ; store the data
+ vsub.s16 q4, q11, q4 ; step2[3] - step2[12]
+ vsub.s16 q5, q10, q5 ; step2[2] - step2[13]
+ vld1.s16 {q0}, [r2], r4 ; load data step2[4]
+ vld1.s16 {q1}, [r2], r4 ; load data step2[5]
+ vld1.s16 {q10}, [r2], r4 ; load data step2[6]
+ vld1.s16 {q11}, [r2], r4 ; load data step2[7]
+ vld1.64 {d12}, [r7], r8 ; load destination data
+ vld1.64 {d13}, [r7], r8 ; load destination data
+ vadd.s16 q12, q0, q3 ; step2[4] + step2[11]
+ vadd.s16 q13, q1, q2 ; step2[5] + step2[10]
+ vrshr.s16 q12, q12, #6 ; ROUND_POWER_OF_TWO
+ vrshr.s16 q13, q13, #6 ; ROUND_POWER_OF_TWO
+ vaddw.u8 q12, q12, d12 ; + dest[j * dest_stride + i]
+ vaddw.u8 q13, q13, d13 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d12, q12 ; clip pixel
+ vqmovun.s16 d13, q13 ; clip pixel
+ vst1.64 {d12}, [r9], r8 ; store the data
+ vst1.64 {d13}, [r9], r8 ; store the data
+ vsub.s16 q2, q1, q2 ; step2[5] - step2[10]
+ vsub.s16 q3, q0, q3 ; step2[4] - step2[11]
+ vld1.64 {d12}, [r7], r8 ; load destination data
+ vld1.64 {d13}, [r7], r8 ; load destination data
+ vadd.s16 q12, q10, q9 ; step2[6] + step2[9]
+ vadd.s16 q13, q11, q8 ; step2[7] + step2[8]
+ vrshr.s16 q12, q12, #6 ; ROUND_POWER_OF_TWO
+ vrshr.s16 q13, q13, #6 ; ROUND_POWER_OF_TWO
+ vaddw.u8 q12, q12, d12 ; + dest[j * dest_stride + i]
+ vaddw.u8 q13, q13, d13 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d12, q12 ; clip pixel
+ vqmovun.s16 d13, q13 ; clip pixel
+ vst1.64 {d12}, [r9], r8 ; store the data
+ vst1.64 {d13}, [r9], r8 ; store the data
+ vld1.64 {d12}, [r7], r8 ; load destination data
+ vld1.64 {d13}, [r7], r8 ; load destination data
+ vsub.s16 q8, q11, q8 ; step2[7] - step2[8]
+ vsub.s16 q9, q10, q9 ; step2[6] - step2[9]
+
+ ; store the data output 8,9,10,11,12,13,14,15
+ vrshr.s16 q8, q8, #6 ; ROUND_POWER_OF_TWO
+ vaddw.u8 q8, q8, d12 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d12, q8 ; clip pixel
+ vst1.64 {d12}, [r9], r8 ; store the data
+ vld1.64 {d12}, [r7], r8 ; load destination data
+ vrshr.s16 q9, q9, #6
+ vaddw.u8 q9, q9, d13 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d13, q9 ; clip pixel
+ vst1.64 {d13}, [r9], r8 ; store the data
+ vld1.64 {d13}, [r7], r8 ; load destination data
+ vrshr.s16 q2, q2, #6
+ vaddw.u8 q2, q2, d12 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d12, q2 ; clip pixel
+ vst1.64 {d12}, [r9], r8 ; store the data
+ vld1.64 {d12}, [r7], r8 ; load destination data
+ vrshr.s16 q3, q3, #6
+ vaddw.u8 q3, q3, d13 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d13, q3 ; clip pixel
+ vst1.64 {d13}, [r9], r8 ; store the data
+ vld1.64 {d13}, [r7], r8 ; load destination data
+ vrshr.s16 q4, q4, #6
+ vaddw.u8 q4, q4, d12 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d12, q4 ; clip pixel
+ vst1.64 {d12}, [r9], r8 ; store the data
+ vld1.64 {d12}, [r7], r8 ; load destination data
+ vrshr.s16 q5, q5, #6
+ vaddw.u8 q5, q5, d13 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d13, q5 ; clip pixel
+ vst1.64 {d13}, [r9], r8 ; store the data
+ vld1.64 {d13}, [r7], r8 ; load destination data
+ vrshr.s16 q14, q14, #6
+ vaddw.u8 q14, q14, d12 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d12, q14 ; clip pixel
+ vst1.64 {d12}, [r9], r8 ; store the data
+ vld1.64 {d12}, [r7], r8 ; load destination data
+ vrshr.s16 q15, q15, #6
+ vaddw.u8 q15, q15, d13 ; + dest[j * dest_stride + i]
+ vqmovun.s16 d13, q15 ; clip pixel
+ vst1.64 {d13}, [r9], r8 ; store the data
+ b end_idct16x16_pass2
+
+skip_adding_dest
+ ; stage 7
+ ; load the data in pass1
+ mov r5, #24
+ mov r3, #8
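+
+ ; (each q store below is split into two d stores whose post-increments,
+ ; r3 = 8 and r5 = 24 bytes, add up to the 32-byte output row stride)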
+
+ vld1.s16 {q0}, [r2], r4 ; load data step2[0]
+ vld1.s16 {q1}, [r2], r4 ; load data step2[1]
+ vadd.s16 q12, q0, q15 ; step2[0] + step2[15]
+ vadd.s16 q13, q1, q14 ; step2[1] + step2[14]
+ vld1.s16 {q10}, [r2], r4 ; load data step2[2]
+ vld1.s16 {q11}, [r2], r4 ; load data step2[3]
+ vst1.64 {d24}, [r1], r3 ; store output[0]
+ vst1.64 {d25}, [r1], r5
+ vst1.64 {d26}, [r1], r3 ; store output[1]
+ vst1.64 {d27}, [r1], r5
+ vadd.s16 q12, q10, q5 ; step2[2] + step2[13]
+ vadd.s16 q13, q11, q4 ; step2[3] + step2[12]
+ vsub.s16 q14, q1, q14 ; step2[1] - step2[14]
+ vsub.s16 q15, q0, q15 ; step2[0] - step2[15]
+ vst1.64 {d24}, [r1], r3 ; store output[2]
+ vst1.64 {d25}, [r1], r5
+ vst1.64 {d26}, [r1], r3 ; store output[3]
+ vst1.64 {d27}, [r1], r5
+ vsub.s16 q4, q11, q4 ; step2[3] - step2[12]
+ vsub.s16 q5, q10, q5 ; step2[2] - step2[13]
+ vld1.s16 {q0}, [r2], r4 ; load data step2[4]
+ vld1.s16 {q1}, [r2], r4 ; load data step2[5]
+ vadd.s16 q12, q0, q3 ; step2[4] + step2[11]
+ vadd.s16 q13, q1, q2 ; step2[5] + step2[10]
+ vld1.s16 {q10}, [r2], r4 ; load data step2[6]
+ vld1.s16 {q11}, [r2], r4 ; load data step2[7]
+ vst1.64 {d24}, [r1], r3 ; store output[4]
+ vst1.64 {d25}, [r1], r5
+ vst1.64 {d26}, [r1], r3 ; store output[5]
+ vst1.64 {d27}, [r1], r5
+ vadd.s16 q12, q10, q9 ; step2[6] + step2[9]
+ vadd.s16 q13, q11, q8 ; step2[7] + step2[8]
+ vsub.s16 q2, q1, q2 ; step2[5] - step2[10]
+ vsub.s16 q3, q0, q3 ; step2[4] - step2[11]
+ vsub.s16 q8, q11, q8 ; step2[7] - step2[8]
+ vsub.s16 q9, q10, q9 ; step2[6] - step2[9]
+ vst1.64 {d24}, [r1], r3 ; store output[6]
+ vst1.64 {d25}, [r1], r5
+ vst1.64 {d26}, [r1], r3 ; store output[7]
+ vst1.64 {d27}, [r1], r5
+
+ ; store the data output 8,9,10,11,12,13,14,15
+ vst1.64 {d16}, [r1], r3
+ vst1.64 {d17}, [r1], r5
+ vst1.64 {d18}, [r1], r3
+ vst1.64 {d19}, [r1], r5
+ vst1.64 {d4}, [r1], r3
+ vst1.64 {d5}, [r1], r5
+ vst1.64 {d6}, [r1], r3
+ vst1.64 {d7}, [r1], r5
+ vst1.64 {d8}, [r1], r3
+ vst1.64 {d9}, [r1], r5
+ vst1.64 {d10}, [r1], r3
+ vst1.64 {d11}, [r1], r5
+ vst1.64 {d28}, [r1], r3
+ vst1.64 {d29}, [r1], r5
+ vst1.64 {d30}, [r1], r3
+ vst1.64 {d31}, [r1], r5
+end_idct16x16_pass2
+ pop {r3-r9}
+ bx lr
+ ENDP ; |vp9_short_idct16x16_add_neon_pass2|
+
+;void |vp9_short_idct10_16x16_add_neon_pass1|(int16_t *input,
+; int16_t *output, int output_stride)
+;
+; r0 int16_t *input
+; r1 int16_t *output
+; r2 int output_stride
+
+; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
+; will be stored back into q8-q15 registers. This function will touch q0-q7
+; registers and use them as buffer during calculation.
+|vp9_short_idct10_16x16_add_neon_pass1| PROC
+
+ ; TODO(hkuang): Find a better way to load the elements.
+ ; load elements of 0, 2, 4, 6, 8, 10, 12, 14 into q8 - q15
+ vld2.s16 {q8,q9}, [r0]!
+ vld2.s16 {q9,q10}, [r0]!
+ vld2.s16 {q10,q11}, [r0]!
+ vld2.s16 {q11,q12}, [r0]!
+ vld2.s16 {q12,q13}, [r0]!
+ vld2.s16 {q13,q14}, [r0]!
+ vld2.s16 {q14,q15}, [r0]!
+ vld2.s16 {q1,q2}, [r0]!
+ vmov.s16 q15, q1
+
+ ; generate cospi_28_64*2 = 6392
+ mov r3, #0x1800
+ add r3, #0xf8
+
+ ; generate cospi_4_64*2 = 32138
+ mov r12, #0x7d00
+ add r12, #0x8a
+
+ ; transpose the input data
+ TRANSPOSE8X8
+
+ ; stage 3
+ vdup.16 q0, r3 ; duplicate cospi_28_64*2
+ vdup.16 q1, r12 ; duplicate cospi_4_64*2
+
+ ; The following instructions use vqrdmulh to do the
+ ; dct_const_round_shift(step2[4] * cospi_28_64). vqrdmulh will multiply,
+ ; double, and return the high 16 bits, effectively giving >> 15. Doubling
+ ; the constant will change this to >> 14.
+ ; dct_const_round_shift(step2[4] * cospi_28_64);
+ vqrdmulh.s16 q4, q9, q0
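+
+ ; (a sketch of the equivalence, ignoring saturation: vqrdmulh.s16 computes
+ ; sat16((2*a*b + (1 << 15)) >> 16), so with b = 2*c this is
+ ; ((int32_t)a*c + (1 << 13)) >> 14, i.e. dct_const_round_shift(a * c))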
+
+ ; preloading to avoid stall
+ ; generate cospi_16_64*2 = 23170
+ mov r3, #0x5a00
+ add r3, #0x82
+
+ ; dct_const_round_shift(step2[4] * cospi_4_64);
+ vqrdmulh.s16 q7, q9, q1
+
+ ; stage 4
+ vdup.16 q1, r3 ; cospi_16_64*2
+
+ ; generate cospi_16_64 = 11585
+ mov r3, #0x2d00
+ add r3, #0x41
+
+ vdup.16 d4, r3 ; duplicate cospi_16_64
+
+ ; dct_const_round_shift(step1[0] * cospi_16_64)
+ vqrdmulh.s16 q8, q8, q1
+
+ ; step2[6] * cospi_16_64
+ vmull.s16 q9, d14, d4
+ vmull.s16 q10, d15, d4
+
+ ; step2[5] * cospi_16_64
+ vmull.s16 q12, d9, d4
+ vmull.s16 q11, d8, d4
+
+ ; temp1 = (step2[6] - step2[5]) * cospi_16_64
+ vsub.s32 q15, q10, q12
+ vsub.s32 q6, q9, q11
+
+ ; temp2 = (step2[5] + step2[6]) * cospi_16_64
+ vadd.s32 q9, q9, q11
+ vadd.s32 q10, q10, q12
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d11, q15, #14 ; >> 14
+ vqrshrn.s32 d10, q6, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d12, q9, #14 ; >> 14
+ vqrshrn.s32 d13, q10, #14 ; >> 14
+
+ ; stage 6
+ vadd.s16 q2, q8, q7 ; step2[0] = step1[0] + step1[7];
+ vadd.s16 q10, q8, q5 ; step2[2] = step1[2] + step1[5];
+ vadd.s16 q11, q8, q4 ; step2[3] = step1[3] + step1[4];
+ vadd.s16 q9, q8, q6 ; step2[1] = step1[1] + step1[6];
+ vsub.s16 q12, q8, q4 ; step2[4] = step1[3] - step1[4];
+ vsub.s16 q13, q8, q5 ; step2[5] = step1[2] - step1[5];
+ vsub.s16 q14, q8, q6 ; step2[6] = step1[1] - step1[6];
+ vsub.s16 q15, q8, q7 ; step2[7] = step1[0] - step1[7];
+
+ ; store the data
+ vst1.64 {d4}, [r1], r2
+ vst1.64 {d5}, [r1], r2
+ vst1.64 {d18}, [r1], r2
+ vst1.64 {d19}, [r1], r2
+ vst1.64 {d20}, [r1], r2
+ vst1.64 {d21}, [r1], r2
+ vst1.64 {d22}, [r1], r2
+ vst1.64 {d23}, [r1], r2
+ vst1.64 {d24}, [r1], r2
+ vst1.64 {d25}, [r1], r2
+ vst1.64 {d26}, [r1], r2
+ vst1.64 {d27}, [r1], r2
+ vst1.64 {d28}, [r1], r2
+ vst1.64 {d29}, [r1], r2
+ vst1.64 {d30}, [r1], r2
+ vst1.64 {d31}, [r1], r2
+
+ bx lr
+ ENDP ; |vp9_short_idct10_16x16_add_neon_pass1|
+
+;void vp9_short_idct10_16x16_add_neon_pass2(int16_t *src,
+; int16_t *output,
+; int16_t *pass1Output,
+; int16_t skip_adding,
+; uint8_t *dest,
+; int dest_stride)
+;
+; r0 int16_t *src
+; r1 int16_t *output,
+; r2 int16_t *pass1Output,
+; r3 int16_t skip_adding,
+; r4 uint8_t *dest,
+; r5 int dest_stride)
+
+; idct16 stage1 - stage7 on all the elements loaded in q8-q15. The output
+; will be stored back into q8-q15 registers. This function will touch q0-q7
+; registers and use them as buffer during calculation.
+|vp9_short_idct10_16x16_add_neon_pass2| PROC
+ push {r3-r9}
+
+ ; TODO(hkuang): Find a better way to load the elements.
+ ; load elements of 1, 3, 5, 7, 9, 11, 13, 15 into q8 - q15
+ vld2.s16 {q8,q9}, [r0]!
+ vld2.s16 {q9,q10}, [r0]!
+ vld2.s16 {q10,q11}, [r0]!
+ vld2.s16 {q11,q12}, [r0]!
+ vld2.s16 {q12,q13}, [r0]!
+ vld2.s16 {q13,q14}, [r0]!
+ vld2.s16 {q14,q15}, [r0]!
+ vld2.s16 {q0,q1}, [r0]!
+ vmov.s16 q15, q0;
+
+ ; generate 2*cospi_30_64 = 3212
+ mov r3, #0xc00
+ add r3, #0x8c
+
+ ; generate 2*cospi_2_64 = 32610
+ mov r12, #0x7f00
+ add r12, #0x62
+
+ ; transpose the input data
+ TRANSPOSE8X8
+
+ ; stage 3
+ vdup.16 q6, r3 ; duplicate 2*cospi_30_64
+
+ ; dct_const_round_shift(step1[8] * cospi_30_64)
+ vqrdmulh.s16 q0, q8, q6
+
+ vdup.16 q6, r12 ; duplicate 2*cospi_2_64
+
+ ; dct_const_round_shift(step1[8] * cospi_2_64)
+ vqrdmulh.s16 q7, q8, q6
+
+ ; preloading to avoid stall
+ ; generate 2*cospi_26_64 = 9512
+ mov r12, #0x2500
+ add r12, #0x28
+ rsb r12, #0
+ vdup.16 q15, r12 ; duplicate -2*cospi_26_64
+
+ ; generate 2*cospi_6_64 = 31358
+ mov r3, #0x7a00
+ add r3, #0x7e
+ vdup.16 q14, r3 ; duplicate 2*cospi_6_64
+
+ ; dct_const_round_shift(- step1[12] * cospi_26_64)
+ vqrdmulh.s16 q3, q9, q15
+
+ ; dct_const_round_shift(step1[12] * cospi_6_64)
+ vqrdmulh.s16 q4, q9, q14
+
+ ; stage 4
+ ; generate cospi_24_64 = 6270
+ mov r3, #0x1800
+ add r3, #0x7e
+ vdup.16 d31, r3 ; duplicate cospi_24_64
+
+ ; generate cospi_8_64 = 15137
+ mov r12, #0x3b00
+ add r12, #0x21
+ vdup.16 d30, r12 ; duplicate cospi_8_64
+
+ ; step1[14] * cospi_24_64
+ vmull.s16 q12, d14, d31
+ vmull.s16 q5, d15, d31
+
+ ; step1[9] * cospi_24_64
+ vmull.s16 q2, d0, d31
+ vmull.s16 q11, d1, d31
+
+ ; temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64
+ vmlsl.s16 q12, d0, d30
+ vmlsl.s16 q5, d1, d30
+
+ ; temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64
+ vmlal.s16 q2, d14, d30
+ vmlal.s16 q11, d15, d30
+
+ rsb r12, #0
+ vdup.16 d30, r12 ; duplicate -cospi_8_64
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d2, q12, #14 ; >> 14
+ vqrshrn.s32 d3, q5, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d12, q2, #14 ; >> 14
+ vqrshrn.s32 d13, q11, #14 ; >> 14
+
+ ; - step1[13] * cospi_8_64
+ vmull.s16 q10, d8, d30
+ vmull.s16 q13, d9, d30
+
+ ; -step1[10] * cospi_8_64
+ vmull.s16 q8, d6, d30
+ vmull.s16 q9, d7, d30
+
+ ; temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64
+ vmlsl.s16 q10, d6, d31
+ vmlsl.s16 q13, d7, d31
+
+ ; temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64
+ vmlal.s16 q8, d8, d31
+ vmlal.s16 q9, d9, d31
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d4, q10, #14 ; >> 14
+ vqrshrn.s32 d5, q13, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d10, q8, #14 ; >> 14
+ vqrshrn.s32 d11, q9, #14 ; >> 14
+
+ ; stage 5
+ vadd.s16 q8, q0, q3 ; step1[8] = step2[8]+step2[11];
+ vadd.s16 q9, q1, q2 ; step1[9] = step2[9]+step2[10];
+ vsub.s16 q10, q1, q2 ; step1[10] = step2[9]-step2[10];
+ vsub.s16 q11, q0, q3 ; step1[11] = step2[8]-step2[11];
+ vsub.s16 q12, q7, q4 ; step1[12] =-step2[12]+step2[15];
+ vsub.s16 q13, q6, q5 ; step1[13] =-step2[13]+step2[14];
+ vadd.s16 q14, q6, q5 ; step1[14] =step2[13]+step2[14];
+ vadd.s16 q15, q7, q4 ; step1[15] =step2[12]+step2[15];
+
+ ; stage 6.
+ ; generate cospi_16_64 = 11585
+ mov r12, #0x2d00
+ add r12, #0x41
+
+ vdup.16 d14, r12 ; duplicate cospi_16_64
+
+ ; step1[13] * cospi_16_64
+ vmull.s16 q3, d26, d14
+ vmull.s16 q4, d27, d14
+
+ ; step1[10] * cospi_16_64
+ vmull.s16 q0, d20, d14
+ vmull.s16 q1, d21, d14
+
+ ; temp1 = (-step1[10] + step1[13]) * cospi_16_64
+ vsub.s32 q5, q3, q0
+ vsub.s32 q6, q4, q1
+
+ ; temp2 = (step1[10] + step1[13]) * cospi_16_64
+ vadd.s32 q0, q3, q0
+ vadd.s32 q1, q4, q1
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d4, q5, #14 ; >> 14
+ vqrshrn.s32 d5, q6, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d10, q0, #14 ; >> 14
+ vqrshrn.s32 d11, q1, #14 ; >> 14
+
+ ; step1[11] * cospi_16_64
+ vmull.s16 q0, d22, d14
+ vmull.s16 q1, d23, d14
+
+ ; step1[12] * cospi_16_64
+ vmull.s16 q13, d24, d14
+ vmull.s16 q6, d25, d14
+
+ ; temp1 = (-step1[11] + step1[12]) * cospi_16_64
+ vsub.s32 q10, q13, q0
+ vsub.s32 q4, q6, q1
+
+ ; temp2 = (step1[11] + step1[12]) * cospi_16_64
+ vadd.s32 q13, q13, q0
+ vadd.s32 q6, q6, q1
+
+ ; dct_const_round_shift(temp1)
+ vqrshrn.s32 d6, q10, #14 ; >> 14
+ vqrshrn.s32 d7, q4, #14 ; >> 14
+
+ ; dct_const_round_shift(temp2)
+ vqrshrn.s32 d8, q13, #14 ; >> 14
+ vqrshrn.s32 d9, q6, #14 ; >> 14
+
+ mov r4, #16 ; pass1Output stride
+ ldr r3, [sp] ; load skip_adding
+
+ ; stage 7
+ ; load the data in pass1
+ mov r5, #24
+ mov r3, #8
+
+ vld1.s16 {q0}, [r2], r4 ; load data step2[0]
+ vld1.s16 {q1}, [r2], r4 ; load data step2[1]
+ vadd.s16 q12, q0, q15 ; step2[0] + step2[15]
+ vadd.s16 q13, q1, q14 ; step2[1] + step2[14]
+ vld1.s16 {q10}, [r2], r4 ; load data step2[2]
+ vld1.s16 {q11}, [r2], r4 ; load data step2[3]
+ vst1.64 {d24}, [r1], r3 ; store output[0]
+ vst1.64 {d25}, [r1], r5
+ vst1.64 {d26}, [r1], r3 ; store output[1]
+ vst1.64 {d27}, [r1], r5
+ vadd.s16 q12, q10, q5 ; step2[2] + step2[13]
+ vadd.s16 q13, q11, q4 ; step2[3] + step2[12]
+ vsub.s16 q14, q1, q14 ; step2[1] - step2[14]
+ vsub.s16 q15, q0, q15 ; step2[0] - step2[15]
+ vst1.64 {d24}, [r1], r3 ; store output[2]
+ vst1.64 {d25}, [r1], r5
+ vst1.64 {d26}, [r1], r3 ; store output[3]
+ vst1.64 {d27}, [r1], r5
+ vsub.s16 q4, q11, q4 ; step2[3] - step2[12]
+ vsub.s16 q5, q10, q5 ; step2[2] - step2[13]
+ vld1.s16 {q0}, [r2], r4 ; load data step2[4]
+ vld1.s16 {q1}, [r2], r4 ; load data step2[5]
+ vadd.s16 q12, q0, q3 ; step2[4] + step2[11]
+ vadd.s16 q13, q1, q2 ; step2[5] + step2[10]
+ vld1.s16 {q10}, [r2], r4 ; load data step2[6]
+ vld1.s16 {q11}, [r2], r4 ; load data step2[7]
+ vst1.64 {d24}, [r1], r3 ; store output[4]
+ vst1.64 {d25}, [r1], r5
+ vst1.64 {d26}, [r1], r3 ; store output[5]
+ vst1.64 {d27}, [r1], r5
+ vadd.s16 q12, q10, q9 ; step2[6] + step2[9]
+ vadd.s16 q13, q11, q8 ; step2[7] + step2[8]
+ vsub.s16 q2, q1, q2 ; step2[5] - step2[10]
+ vsub.s16 q3, q0, q3 ; step2[4] - step2[11]
+ vsub.s16 q8, q11, q8 ; step2[7] - step2[8]
+ vsub.s16 q9, q10, q9 ; step2[6] - step2[9]
+ vst1.64 {d24}, [r1], r3 ; store output[6]
+ vst1.64 {d25}, [r1], r5
+ vst1.64 {d26}, [r1], r3 ; store output[7]
+ vst1.64 {d27}, [r1], r5
+
+ ; store the data output 8,9,10,11,12,13,14,15
+ vst1.64 {d16}, [r1], r3
+ vst1.64 {d17}, [r1], r5
+ vst1.64 {d18}, [r1], r3
+ vst1.64 {d19}, [r1], r5
+ vst1.64 {d4}, [r1], r3
+ vst1.64 {d5}, [r1], r5
+ vst1.64 {d6}, [r1], r3
+ vst1.64 {d7}, [r1], r5
+ vst1.64 {d8}, [r1], r3
+ vst1.64 {d9}, [r1], r5
+ vst1.64 {d10}, [r1], r3
+ vst1.64 {d11}, [r1], r5
+ vst1.64 {d28}, [r1], r3
+ vst1.64 {d29}, [r1], r5
+ vst1.64 {d30}, [r1], r3
+ vst1.64 {d31}, [r1], r5
+end_idct10_16x16_pass2
+ pop {r3-r9}
+ bx lr
+ ENDP ; |vp9_short_idct10_16x16_add_neon_pass2|
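+; d8-d15 (q4-q7) are callee-saved under the ARM AAPCS, and the routines above
+; use them as scratch, so callers are expected to bracket the transform with
+; the two helpers below.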
+;void |save_neon_registers|()
+|save_neon_registers| PROC
+ vpush {d8-d15}
+ bx lr
+ ENDP ; |save_neon_registers|
+;void |restore_neon_registers|()
+|restore_neon_registers| PROC
+ vpop {d8-d15}
+ bx lr
+ ENDP ; |restore_neon_registers|
+ END
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct32x32_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_short_idct32x32_add_neon.asm
new file mode 100644
index 0000000..5c097cc
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_short_idct32x32_add_neon.asm
@@ -0,0 +1,1013 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+;TODO(cd): adjust these constants to be able to use vqdmulh for faster
+; dct_const_round_shift(a * b) within butterfly calculations.
+cospi_1_64 EQU 16364
+cospi_2_64 EQU 16305
+cospi_3_64 EQU 16207
+cospi_4_64 EQU 16069
+cospi_5_64 EQU 15893
+cospi_6_64 EQU 15679
+cospi_7_64 EQU 15426
+cospi_8_64 EQU 15137
+cospi_9_64 EQU 14811
+cospi_10_64 EQU 14449
+cospi_11_64 EQU 14053
+cospi_12_64 EQU 13623
+cospi_13_64 EQU 13160
+cospi_14_64 EQU 12665
+cospi_15_64 EQU 12140
+cospi_16_64 EQU 11585
+cospi_17_64 EQU 11003
+cospi_18_64 EQU 10394
+cospi_19_64 EQU 9760
+cospi_20_64 EQU 9102
+cospi_21_64 EQU 8423
+cospi_22_64 EQU 7723
+cospi_23_64 EQU 7005
+cospi_24_64 EQU 6270
+cospi_25_64 EQU 5520
+cospi_26_64 EQU 4756
+cospi_27_64 EQU 3981
+cospi_28_64 EQU 3196
+cospi_29_64 EQU 2404
+cospi_30_64 EQU 1606
+cospi_31_64 EQU 804
+
+
+ EXPORT |idct32_transpose_and_transform|
+ EXPORT |idct32_combine_add|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+ AREA Block, CODE, READONLY
+
+ ; --------------------------------------------------------------------------
+ ; Load from transposed_buffer
+ ; q14 = transposed_buffer[first_offset]
+ ; q13 = transposed_buffer[second_offset]
+ ; for proper address calculation, the last offset used when manipulating
+ ; transposed_buffer must be passed in. use 0 for first use.
+ MACRO
+ LOAD_FROM_TRANSPOSED $prev_offset, $first_offset, $second_offset
+ ; address calculation with proper stride and loading
+ add r0, #($first_offset - $prev_offset )*8*2
+ vld1.s16 {q14}, [r0]
+ add r0, #($second_offset - $first_offset)*8*2
+ vld1.s16 {q13}, [r0]
+ ; (used) two registers (q14, q13)
+ MEND
+ ; --------------------------------------------------------------------------
+ ; Load from output (used as temporary storage)
+ ; reg1 = output[first_offset]
+ ; reg2 = output[second_offset]
+ ; for proper address calculation, the last offset used when manipulating
+ ; output (whether reading or storing) must be passed in. Use 0 for first
+ ; use.
+ MACRO
+ LOAD_FROM_OUTPUT $prev_offset, $first_offset, $second_offset, $reg1, $reg2
+ ; address calculation with proper stride and loading
+ add r1, #($first_offset - $prev_offset )*32*2
+ vld1.s16 {$reg1}, [r1]
+ add r1, #($second_offset - $first_offset)*32*2
+ vld1.s16 {$reg2}, [r1]
+ ; (used) two registers ($reg1, $reg2)
+ MEND
+ ; --------------------------------------------------------------------------
+ ; Store into output (sometimes as temporary storage)
+ ; output[first_offset] = reg1
+ ; output[second_offset] = reg2
+ ; for proper address calculation, the last offset used when manipulating
+ ; output (whether reading or storing) must be passed in. Use 0 for first
+ ; use.
+ MACRO
+ STORE_IN_OUTPUT $prev_offset, $first_offset, $second_offset, $reg1, $reg2
+ ; address calculation with proper stride and storing
+ add r1, #($first_offset - $prev_offset )*32*2
+ vst1.16 {$reg1}, [r1]
+ add r1, #($second_offset - $first_offset)*32*2
+ vst1.16 {$reg2}, [r1]
+ MEND
+ ; --------------------------------------------------------------------------
+ ; Touches q8-q12, q15 (q13-q14 are preserved)
+ ; valid output registers are anything but q8-q11
+ MACRO
+ DO_BUTTERFLY $regC, $regD, $regA, $regB, $first_constant, $second_constant, $reg1, $reg2, $reg3, $reg4
+ ; TODO(cd): have special case to re-use constants when they are similar for
+ ; consecutive butterflies
+ ; TODO(cd): have special case when both constants are the same, do the
+ ; additions/subtractions before the multiplies.
+ ; generate the constants
+ ; generate scalar constants
+ mov r3, #$first_constant & 0xFF00
+ add r3, #$first_constant & 0x00FF
+ mov r12, #$second_constant & 0xFF00
+ add r12, #$second_constant & 0x00FF
+ ; generate vector constants
+ vdup.16 d30, r3
+ vdup.16 d31, r12
+ ; (used) two for inputs (regA-regD), one for constants (q15)
+ ; do some multiplications (ordered for maximum latency hiding)
+ vmull.s16 q8, $regC, d30
+ vmull.s16 q10, $regA, d31
+ vmull.s16 q9, $regD, d30
+ vmull.s16 q11, $regB, d31
+ vmull.s16 q12, $regC, d31
+ ; (used) five for intermediate (q8-q12), one for constants (q15)
+ ; do some additions/subtractions (to get back to two registers)
+ vsub.s32 q8, q8, q10
+ vsub.s32 q9, q9, q11
+ ; do more multiplications (ordered for maximum latency hiding)
+ vmull.s16 q10, $regD, d31
+ vmull.s16 q11, $regA, d30
+ vmull.s16 q15, $regB, d30
+ ; (used) six for intermediate (q8-q12, q15)
+ ; do more additions/subtractions
+ vadd.s32 q11, q12, q11
+ vadd.s32 q10, q10, q15
+ ; (used) four for intermediate (q8-q11)
+ ; dct_const_round_shift
+ vqrshrn.s32 $reg1, q8, #14
+ vqrshrn.s32 $reg2, q9, #14
+ vqrshrn.s32 $reg3, q11, #14
+ vqrshrn.s32 $reg4, q10, #14
+ ; (used) two q registers for results, i.e. four d registers
+ MEND
+ ; --------------------------------------------------------------------------
+ ; Touches q8-q12, q15 (q13-q14 are preserved)
+ ; valid output registers are anything but q8-q11
+ MACRO
+ DO_BUTTERFLY_STD $first_constant, $second_constant, $reg1, $reg2, $reg3, $reg4
+ DO_BUTTERFLY d28, d29, d26, d27, $first_constant, $second_constant, $reg1, $reg2, $reg3, $reg4
+ MEND
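+ ; In C terms (a sketch of the math only), with x in {$regC,$regD} and
+ ; y in {$regA,$regB} (q14 and q13 for DO_BUTTERFLY_STD):
+ ;   {$reg1,$reg2} = dct_const_round_shift(x * $first_constant - y * $second_constant)
+ ;   {$reg3,$reg4} = dct_const_round_shift(x * $second_constant + y * $first_constant)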
+ ; --------------------------------------------------------------------------
+
+;void idct32_transpose_and_transform(int16_t *transpose_buffer, int16_t *output, int16_t *input);
+;
+; r0 int16_t *transpose_buffer
+; r1 int16_t *output
+; r2 int16_t *input
+; TODO(cd): have more logical parameter ordering but this issue will disappear
+; when functions are combined.
+
+|idct32_transpose_and_transform| PROC
+ ; This function does one pass of idct32x32 transform.
+ ;
+ ; This is done by transposing the input and then doing a 1d transform on
+ ; columns. In the first pass, the transposed columns are the original
+ ; rows. In the second pass, after the transposition, the columns are the
+ ; original columns.
+ ; The 1d transform is done by looping over bands of eight columns (the
+ ; idct32_bands loop). For each band, the transform input transposition
+ ; is done on demand, one band of four 8x8 matrices at a time. The four
+ ; matrices are transposed by pairs (the idct32_transpose_pair loop).
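+ ;
+ ; A rough C-level sketch of this pass (hypothetical helper names):
+ ;   for (band = 0; band < 4; ++band) {           /* idct32_bands_loop */
+ ;     transpose_band(input, transpose_buffer);   /* pairs of 8x8 */
+ ;     idct32_1d_8cols(transpose_buffer, output);
+ ;   }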
+ push {r4}
+ mov r4, #0 ; initialize bands loop counter
+idct32_bands_loop
+ ; TODO(cd) get rid of these push/pop by properly adjusting register
+ ; content at end of loop
+ push {r0}
+ push {r1}
+ push {r2}
+ mov r3, #0 ; initialize transpose loop counter
+idct32_transpose_pair_loop
+ ; Load two horizontally consecutive 8x8 16bit data matrices. The first one
+ ; into q0-q7 and the second one into q8-q15. There is a stride of 64,
+ ; adjusted to 32 because of the two post-increments.
+ vld1.s16 {q8}, [r2]!
+ vld1.s16 {q0}, [r2]!
+ add r2, #32
+ vld1.s16 {q9}, [r2]!
+ vld1.s16 {q1}, [r2]!
+ add r2, #32
+ vld1.s16 {q10}, [r2]!
+ vld1.s16 {q2}, [r2]!
+ add r2, #32
+ vld1.s16 {q11}, [r2]!
+ vld1.s16 {q3}, [r2]!
+ add r2, #32
+ vld1.s16 {q12}, [r2]!
+ vld1.s16 {q4}, [r2]!
+ add r2, #32
+ vld1.s16 {q13}, [r2]!
+ vld1.s16 {q5}, [r2]!
+ add r2, #32
+ vld1.s16 {q14}, [r2]!
+ vld1.s16 {q6}, [r2]!
+ add r2, #32
+ vld1.s16 {q15}, [r2]!
+ vld1.s16 {q7}, [r2]!
+
+ ; Transpose the two 8x8 16bit data matrices.
+ vswp d17, d24
+ vswp d23, d30
+ vswp d21, d28
+ vswp d19, d26
+ vswp d1, d8
+ vswp d7, d14
+ vswp d5, d12
+ vswp d3, d10
+ vtrn.32 q8, q10
+ vtrn.32 q9, q11
+ vtrn.32 q12, q14
+ vtrn.32 q13, q15
+ vtrn.32 q0, q2
+ vtrn.32 q1, q3
+ vtrn.32 q4, q6
+ vtrn.32 q5, q7
+ vtrn.16 q8, q9
+ vtrn.16 q10, q11
+ vtrn.16 q12, q13
+ vtrn.16 q14, q15
+ vtrn.16 q0, q1
+ vtrn.16 q2, q3
+ vtrn.16 q4, q5
+ vtrn.16 q6, q7
+
+ ; Store both matrices after each other. There is a stride of 32, which
+ ; adjusts to nothing because of the post-increments.
+ vst1.16 {q8}, [r0]!
+ vst1.16 {q9}, [r0]!
+ vst1.16 {q10}, [r0]!
+ vst1.16 {q11}, [r0]!
+ vst1.16 {q12}, [r0]!
+ vst1.16 {q13}, [r0]!
+ vst1.16 {q14}, [r0]!
+ vst1.16 {q15}, [r0]!
+ vst1.16 {q0}, [r0]!
+ vst1.16 {q1}, [r0]!
+ vst1.16 {q2}, [r0]!
+ vst1.16 {q3}, [r0]!
+ vst1.16 {q4}, [r0]!
+ vst1.16 {q5}, [r0]!
+ vst1.16 {q6}, [r0]!
+ vst1.16 {q7}, [r0]!
+
+ ; increment pointers by adjusted stride (not necessary for r0/out)
+ sub r2, r2, #8*32*2-32-16*2
+ ; transpose pair loop processing
+ add r3, r3, #1
+ cmp r3, #1
+ BLE idct32_transpose_pair_loop
+
+ ; restore r0/input to its original value
+ sub r0, r0, #32*8*2
+
+ ; Instead of doing the transforms stage by stage, it is done by loading
+ ; some input values and doing as many stages as possible to minimize the
+ ; storing/loading of intermediate results. To fit within registers, the
+ ; final coefficients are cut into four blocks:
+ ; BLOCK A: 16-19,28-31
+ ; BLOCK B: 20-23,24-27
+ ; BLOCK C: 8-10,11-15
+ ; BLOCK D: 0-3,4-7
+ ; Blocks A and C are straight calculation through the various stages. In
+ ; block B, further calculations are performed using the results from
+ ; block A. In block D, further calculations are performed using the results
+ ; from block C and then the final calculations are done using results from
+ ; block A and B which have been combined at the end of block B.
+
+ ; --------------------------------------------------------------------------
+ ; BLOCK A: 16-19,28-31
+ ; --------------------------------------------------------------------------
+ ; generate 16,17,30,31
+ ; --------------------------------------------------------------------------
+ ; part of stage 1
+ ;temp1 = input[1 * 32] * cospi_31_64 - input[31 * 32] * cospi_1_64;
+ ;temp2 = input[1 * 32] * cospi_1_64 + input[31 * 32] * cospi_31_64;
+ ;step1b[16][i] = dct_const_round_shift(temp1);
+ ;step1b[31][i] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 0, 1, 31
+ DO_BUTTERFLY_STD cospi_31_64, cospi_1_64, d0, d1, d4, d5
+ ; --------------------------------------------------------------------------
+ ; part of stage 1
+ ;temp1 = input[17 * 32] * cospi_15_64 - input[15 * 32] * cospi_17_64;
+ ;temp2 = input[17 * 32] * cospi_17_64 + input[15 * 32] * cospi_15_64;
+ ;step1b[17][i] = dct_const_round_shift(temp1);
+ ;step1b[30][i] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 31, 17, 15
+ DO_BUTTERFLY_STD cospi_15_64, cospi_17_64, d2, d3, d6, d7
+ ; --------------------------------------------------------------------------
+ ; part of stage 2
+ ;step2[16] = step1b[16][i] + step1b[17][i];
+ ;step2[17] = step1b[16][i] - step1b[17][i];
+ ;step2[30] = -step1b[30][i] + step1b[31][i];
+ ;step2[31] = step1b[30][i] + step1b[31][i];
+ vadd.s16 q4, q0, q1
+ vsub.s16 q13, q0, q1
+ vadd.s16 q6, q2, q3
+ vsub.s16 q14, q2, q3
+ ; --------------------------------------------------------------------------
+ ; part of stage 3
+ ;temp1 = step1b[30][i] * cospi_28_64 - step1b[17][i] * cospi_4_64;
+ ;temp2 = step1b[30][i] * cospi_4_64 + step1b[17][i] * cospi_28_64;
+ ;step3[17] = dct_const_round_shift(temp1);
+ ;step3[30] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_28_64, cospi_4_64, d10, d11, d14, d15
+ ; --------------------------------------------------------------------------
+ ; generate 18,19,28,29
+ ; --------------------------------------------------------------------------
+ ; part of stage 1
+ ;temp1 = input[9 * 32] * cospi_23_64 - input[23 * 32] * cospi_9_64;
+ ;temp2 = input[9 * 32] * cospi_9_64 + input[23 * 32] * cospi_23_64;
+ ;step1b[18][i] = dct_const_round_shift(temp1);
+ ;step1b[29][i] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 15, 9, 23
+ DO_BUTTERFLY_STD cospi_23_64, cospi_9_64, d0, d1, d4, d5
+ ; --------------------------------------------------------------------------
+ ; part of stage 1
+ ;temp1 = input[25 * 32] * cospi_7_64 - input[7 * 32] * cospi_25_64;
+ ;temp2 = input[25 * 32] * cospi_25_64 + input[7 * 32] * cospi_7_64;
+ ;step1b[19][i] = dct_const_round_shift(temp1);
+ ;step1b[28][i] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 23, 25, 7
+ DO_BUTTERFLY_STD cospi_7_64, cospi_25_64, d2, d3, d6, d7
+ ; --------------------------------------------------------------------------
+ ; part of stage 2
+ ;step2[18] = -step1b[18][i] + step1b[19][i];
+ ;step2[19] = step1b[18][i] + step1b[19][i];
+ ;step2[28] = step1b[28][i] + step1b[29][i];
+ ;step2[29] = step1b[28][i] - step1b[29][i];
+ vsub.s16 q13, q3, q2
+ vadd.s16 q3, q3, q2
+ vsub.s16 q14, q1, q0
+ vadd.s16 q2, q1, q0
+ ; --------------------------------------------------------------------------
+ ; part of stage 3
+ ;temp1 = step1b[18][i] * (-cospi_4_64) - step1b[29][i] * (-cospi_28_64);
+ ;temp2 = step1b[18][i] * (-cospi_28_64) + step1b[29][i] * (-cospi_4_64);
+ ;step3[29] = dct_const_round_shift(temp1);
+ ;step3[18] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD (-cospi_4_64), (-cospi_28_64), d2, d3, d0, d1
+ ; --------------------------------------------------------------------------
+ ; combine 16-19,28-31
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;step1[16] = step1b[16][i] + step1b[19][i];
+ ;step1[17] = step1b[17][i] + step1b[18][i];
+ ;step1[18] = step1b[17][i] - step1b[18][i];
+ ;step1[29] = step1b[30][i] - step1b[29][i];
+ ;step1[30] = step1b[30][i] + step1b[29][i];
+ ;step1[31] = step1b[31][i] + step1b[28][i];
+ vadd.s16 q8, q4, q2
+ vadd.s16 q9, q5, q0
+ vadd.s16 q10, q7, q1
+ vadd.s16 q15, q6, q3
+ vsub.s16 q13, q5, q0
+ vsub.s16 q14, q7, q1
+ STORE_IN_OUTPUT 0, 16, 31, q8, q15
+ STORE_IN_OUTPUT 31, 17, 30, q9, q10
+ ; --------------------------------------------------------------------------
+ ; part of stage 5
+ ;temp1 = step1b[29][i] * cospi_24_64 - step1b[18][i] * cospi_8_64;
+ ;temp2 = step1b[29][i] * cospi_8_64 + step1b[18][i] * cospi_24_64;
+ ;step2[18] = dct_const_round_shift(temp1);
+ ;step2[29] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_24_64, cospi_8_64, d0, d1, d2, d3
+ STORE_IN_OUTPUT 30, 29, 18, q1, q0
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;step1[19] = step1b[16][i] - step1b[19][i];
+ ;step1[28] = step1b[31][i] - step1b[28][i];
+ vsub.s16 q13, q4, q2
+ vsub.s16 q14, q6, q3
+ ; --------------------------------------------------------------------------
+ ; part of stage 5
+ ;temp1 = step1b[28][i] * cospi_24_64 - step1b[19][i] * cospi_8_64;
+ ;temp2 = step1b[28][i] * cospi_8_64 + step1b[19][i] * cospi_24_64;
+ ;step2[19] = dct_const_round_shift(temp1);
+ ;step2[28] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_24_64, cospi_8_64, d8, d9, d12, d13
+ STORE_IN_OUTPUT 18, 19, 28, q4, q6
+ ; --------------------------------------------------------------------------
+
+
+ ; --------------------------------------------------------------------------
+ ; BLOCK B: 20-23,24-27
+ ; --------------------------------------------------------------------------
+ ; generate 20,21,26,27
+ ; --------------------------------------------------------------------------
+ ; part of stage 1
+ ;temp1 = input[5 * 32] * cospi_27_64 - input[27 * 32] * cospi_5_64;
+ ;temp2 = input[5 * 32] * cospi_5_64 + input[27 * 32] * cospi_27_64;
+ ;step1b[20][i] = dct_const_round_shift(temp1);
+ ;step1b[27][i] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 7, 5, 27
+ DO_BUTTERFLY_STD cospi_27_64, cospi_5_64, d0, d1, d4, d5
+ ; --------------------------------------------------------------------------
+ ; part of stage 1
+ ;temp1 = input[21 * 32] * cospi_11_64 - input[11 * 32] * cospi_21_64;
+ ;temp2 = input[21 * 32] * cospi_21_64 + input[11 * 32] * cospi_11_64;
+ ;step1b[21][i] = dct_const_round_shift(temp1);
+ ;step1b[26][i] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 27, 21, 11
+ DO_BUTTERFLY_STD cospi_11_64, cospi_21_64, d2, d3, d6, d7
+ ; --------------------------------------------------------------------------
+ ; part of stage 2
+ ;step2[20] = step1b[20][i] + step1b[21][i];
+ ;step2[21] = step1b[20][i] - step1b[21][i];
+ ;step2[26] = -step1b[26][i] + step1b[27][i];
+ ;step2[27] = step1b[26][i] + step1b[27][i];
+ vsub.s16 q13, q0, q1
+ vadd.s16 q0, q0, q1
+ vsub.s16 q14, q2, q3
+ vadd.s16 q2, q2, q3
+ ; --------------------------------------------------------------------------
+ ; part of stage 3
+ ;temp1 = step1b[26][i] * cospi_12_64 - step1b[21][i] * cospi_20_64;
+ ;temp2 = step1b[26][i] * cospi_20_64 + step1b[21][i] * cospi_12_64;
+ ;step3[21] = dct_const_round_shift(temp1);
+ ;step3[26] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_12_64, cospi_20_64, d2, d3, d6, d7
+ ; --------------------------------------------------------------------------
+ ; generate 22,23,24,25
+ ; --------------------------------------------------------------------------
+ ; part of stage 1
+ ;temp1 = input[13 * 32] * cospi_19_64 - input[19 * 32] * cospi_13_64;
+ ;temp2 = input[13 * 32] * cospi_13_64 + input[19 * 32] * cospi_19_64;
+ ;step1b[22][i] = dct_const_round_shift(temp1);
+ ;step1b[25][i] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 11, 13, 19
+ DO_BUTTERFLY_STD cospi_19_64, cospi_13_64, d10, d11, d14, d15
+ ; --------------------------------------------------------------------------
+ ; part of stage 1
+ ;temp1 = input[29 * 32] * cospi_3_64 - input[3 * 32] * cospi_29_64;
+ ;temp2 = input[29 * 32] * cospi_29_64 + input[3 * 32] * cospi_3_64;
+ ;step1b[23][i] = dct_const_round_shift(temp1);
+ ;step1b[24][i] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 19, 29, 3
+ DO_BUTTERFLY_STD cospi_3_64, cospi_29_64, d8, d9, d12, d13
+ ; --------------------------------------------------------------------------
+ ; part of stage 2
+ ;step2[22] = -step1b[22][i] + step1b[23][i];
+ ;step2[23] = step1b[22][i] + step1b[23][i];
+ ;step2[24] = step1b[24][i] + step1b[25][i];
+ ;step2[25] = step1b[24][i] - step1b[25][i];
+ vsub.s16 q14, q4, q5
+ vadd.s16 q5, q4, q5
+ vsub.s16 q13, q6, q7
+ vadd.s16 q6, q6, q7
+ ; --------------------------------------------------------------------------
+ ; part of stage 3
+ ;temp1 = step1b[22][i] * (-cospi_20_64) - step1b[25][i] * (-cospi_12_64);
+ ;temp2 = step1b[22][i] * (-cospi_12_64) + step1b[25][i] * (-cospi_20_64);
+ ;step3[25] = dct_const_round_shift(temp1);
+ ;step3[22] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD (-cospi_20_64), (-cospi_12_64), d8, d9, d14, d15
+ ; --------------------------------------------------------------------------
+ ; combine 20-23,24-27
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;step1[22] = step1b[22][i] + step1b[21][i];
+ ;step1[23] = step1b[23][i] + step1b[20][i];
+ vadd.s16 q10, q7, q1
+ vadd.s16 q11, q5, q0
+ ;step1[24] = step1b[24][i] + step1b[27][i];
+ ;step1[25] = step1b[25][i] + step1b[26][i];
+ vadd.s16 q12, q6, q2
+ vadd.s16 q15, q4, q3
+ ; --------------------------------------------------------------------------
+ ; part of stage 6
+ ;step3[16] = step1b[16][i] + step1b[23][i];
+ ;step3[17] = step1b[17][i] + step1b[22][i];
+ ;step3[22] = step1b[17][i] - step1b[22][i];
+ ;step3[23] = step1b[16][i] - step1b[23][i];
+ LOAD_FROM_OUTPUT 28, 16, 17, q14, q13
+ vadd.s16 q8, q14, q11
+ vadd.s16 q9, q13, q10
+ vsub.s16 q13, q13, q10
+ vsub.s16 q11, q14, q11
+ STORE_IN_OUTPUT 17, 17, 16, q9, q8
+ ; --------------------------------------------------------------------------
+ ; part of stage 6
+ ;step3[24] = step1b[31][i] - step1b[24][i];
+ ;step3[25] = step1b[30][i] - step1b[25][i];
+ ;step3[30] = step1b[30][i] + step1b[25][i];
+ ;step3[31] = step1b[31][i] + step1b[24][i];
+ LOAD_FROM_OUTPUT 16, 30, 31, q14, q9
+ vsub.s16 q8, q9, q12
+ vadd.s16 q10, q14, q15
+ vsub.s16 q14, q14, q15
+ vadd.s16 q12, q9, q12
+ STORE_IN_OUTPUT 31, 30, 31, q10, q12
+ ; --------------------------------------------------------------------------
+ ; TODO(cd) do some register allocation change to remove these push/pop
+ vpush {q8} ; [24]
+ vpush {q11} ; [23]
+ ; --------------------------------------------------------------------------
+ ; part of stage 7
+ ;temp1 = (step1b[25][i] - step1b[22][i]) * cospi_16_64;
+ ;temp2 = (step1b[25][i] + step1b[22][i]) * cospi_16_64;
+ ;step1[22] = dct_const_round_shift(temp1);
+ ;step1[25] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d26, d27, d28, d29
+ STORE_IN_OUTPUT 31, 25, 22, q14, q13
+ ; --------------------------------------------------------------------------
+ ; part of stage 7
+ ;temp1 = (step1b[24][i] - step1b[23][i]) * cospi_16_64;
+ ;temp2 = (step1b[24][i] + step1b[23][i]) * cospi_16_64;
+ ;step1[23] = dct_const_round_shift(temp1);
+ ;step1[24] = dct_const_round_shift(temp2);
+ ; TODO(cd) do some register allocation change to remove these push/pop
+ vpop {q13} ; [23]
+ vpop {q14} ; [24]
+ DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d26, d27, d28, d29
+ STORE_IN_OUTPUT 22, 24, 23, q14, q13
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;step1[20] = step1b[23][i] - step1b[20][i];
+ ;step1[27] = step1b[24][i] - step1b[27][i];
+ vsub.s16 q14, q5, q0
+ vsub.s16 q13, q6, q2
+ ; --------------------------------------------------------------------------
+ ; part of stage 5
+ ;temp1 = step1b[20][i] * (-cospi_8_64) - step1b[27][i] * (-cospi_24_64);
+ ;temp2 = step1b[20][i] * (-cospi_24_64) + step1b[27][i] * (-cospi_8_64);
+ ;step2[27] = dct_const_round_shift(temp1);
+ ;step2[20] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD (-cospi_8_64), (-cospi_24_64), d10, d11, d12, d13
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;step1[21] = step1b[22][i] - step1b[21][i];
+ ;step1[26] = step1b[25][i] - step1b[26][i];
+ vsub.s16 q14, q7, q1
+ vsub.s16 q13, q4, q3
+ ; --------------------------------------------------------------------------
+ ; part of stage 5
+ ;temp1 = step1b[21][i] * (-cospi_8_64) - step1b[26][i] * (-cospi_24_64);
+ ;temp2 = step1b[21][i] * (-cospi_24_64) + step1b[26][i] * (-cospi_8_64);
+ ;step2[26] = dct_const_round_shift(temp1);
+ ;step2[21] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD (-cospi_8_64), (-cospi_24_64), d0, d1, d2, d3
+ ; --------------------------------------------------------------------------
+ ; part of stage 6
+ ;step3[18] = step1b[18][i] + step1b[21][i];
+ ;step3[19] = step1b[19][i] + step1b[20][i];
+ ;step3[20] = step1b[19][i] - step1b[20][i];
+ ;step3[21] = step1b[18][i] - step1b[21][i];
+ LOAD_FROM_OUTPUT 23, 18, 19, q14, q13
+ vadd.s16 q8, q14, q1
+ vadd.s16 q9, q13, q6
+ vsub.s16 q13, q13, q6
+ vsub.s16 q1, q14, q1
+ STORE_IN_OUTPUT 19, 18, 19, q8, q9
+ ; --------------------------------------------------------------------------
+ ; part of stage 6
+ ;step3[27] = step1b[28][i] - step1b[27][i];
+ ;step3[28] = step1b[28][i] + step1b[27][i];
+ ;step3[29] = step1b[29][i] + step1b[26][i];
+ ;step3[26] = step1b[29][i] - step1b[26][i];
+ LOAD_FROM_OUTPUT 19, 28, 29, q8, q9
+ vsub.s16 q14, q8, q5
+ vadd.s16 q10, q8, q5
+ vadd.s16 q11, q9, q0
+ vsub.s16 q0, q9, q0
+ STORE_IN_OUTPUT 29, 28, 29, q10, q11
+ ; --------------------------------------------------------------------------
+ ; part of stage 7
+ ;temp1 = (step1b[27][i] - step1b[20][i]) * cospi_16_64;
+ ;temp2 = (step1b[27][i] + step1b[20][i]) * cospi_16_64;
+ ;step1[20] = dct_const_round_shift(temp1);
+ ;step1[27] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d26, d27, d28, d29
+ STORE_IN_OUTPUT 29, 20, 27, q13, q14
+ ; --------------------------------------------------------------------------
+ ; part of stage 7
+ ;temp1 = (step1b[26][i] - step1b[21][i]) * cospi_16_64;
+ ;temp2 = (step1b[26][i] + step1b[21][i]) * cospi_16_64;
+ ;step1[21] = dct_const_round_shift(temp1);
+ ;step1[26] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY d0, d1, d2, d3, cospi_16_64, cospi_16_64, d2, d3, d0, d1
+ STORE_IN_OUTPUT 27, 21, 26, q1, q0
+ ; --------------------------------------------------------------------------
+
+
+ ; --------------------------------------------------------------------------
+ ; BLOCK C: 8-10,11-15
+ ; --------------------------------------------------------------------------
+ ; generate 8,9,14,15
+ ; --------------------------------------------------------------------------
+ ; part of stage 2
+ ;temp1 = input[2 * 32] * cospi_30_64 - input[30 * 32] * cospi_2_64;
+ ;temp2 = input[2 * 32] * cospi_2_64 + input[30 * 32] * cospi_30_64;
+ ;step2[8] = dct_const_round_shift(temp1);
+ ;step2[15] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 3, 2, 30
+ DO_BUTTERFLY_STD cospi_30_64, cospi_2_64, d0, d1, d4, d5
+ ; --------------------------------------------------------------------------
+ ; part of stage 2
+ ;temp1 = input[18 * 32] * cospi_14_64 - input[14 * 32] * cospi_18_64;
+ ;temp2 = input[18 * 32] * cospi_18_64 + input[14 * 32] * cospi_14_64;
+ ;step2[9] = dct_const_round_shift(temp1);
+ ;step2[14] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 30, 18, 14
+ DO_BUTTERFLY_STD cospi_14_64, cospi_18_64, d2, d3, d6, d7
+ ; --------------------------------------------------------------------------
+ ; part of stage 3
+ ;step3[8] = step1b[8][i] + step1b[9][i];
+ ;step3[9] = step1b[8][i] - step1b[9][i];
+ ;step3[14] = step1b[15][i] - step1b[14][i];
+ ;step3[15] = step1b[15][i] + step1b[14][i];
+ vsub.s16 q13, q0, q1
+ vadd.s16 q0, q0, q1
+ vsub.s16 q14, q2, q3
+ vadd.s16 q2, q2, q3
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;temp1 = step1b[14][i] * cospi_24_64 - step1b[9][i] * cospi_8_64;
+ ;temp2 = step1b[14][i] * cospi_8_64 + step1b[9][i] * cospi_24_64;
+ ;step1[9] = dct_const_round_shift(temp1);
+ ;step1[14] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_24_64, cospi_8_64, d2, d3, d6, d7
+ ; --------------------------------------------------------------------------
+ ; generate 10,11,12,13
+ ; --------------------------------------------------------------------------
+ ; part of stage 2
+ ;temp1 = input[10 * 32] * cospi_22_64 - input[22 * 32] * cospi_10_64;
+ ;temp2 = input[10 * 32] * cospi_10_64 + input[22 * 32] * cospi_22_64;
+ ;step2[10] = dct_const_round_shift(temp1);
+ ;step2[13] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 14, 10, 22
+ DO_BUTTERFLY_STD cospi_22_64, cospi_10_64, d10, d11, d14, d15
+ ; --------------------------------------------------------------------------
+ ; part of stage 2
+ ;temp1 = input[26 * 32] * cospi_6_64 - input[6 * 32] * cospi_26_64;
+ ;temp2 = input[26 * 32] * cospi_26_64 + input[6 * 32] * cospi_6_64;
+ ;step2[11] = dct_const_round_shift(temp1);
+ ;step2[12] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 22, 26, 6
+ DO_BUTTERFLY_STD cospi_6_64, cospi_26_64, d8, d9, d12, d13
+ ; --------------------------------------------------------------------------
+ ; part of stage 3
+ ;step3[10] = step1b[11][i] - step1b[10][i];
+ ;step3[11] = step1b[11][i] + step1b[10][i];
+ ;step3[12] = step1b[12][i] + step1b[13][i];
+ ;step3[13] = step1b[12][i] - step1b[13][i];
+ vsub.s16 q14, q4, q5
+ vadd.s16 q5, q4, q5
+ vsub.s16 q13, q6, q7
+ vadd.s16 q6, q6, q7
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;temp1 = step1b[10][i] * (-cospi_8_64) - step1b[13][i] * (-cospi_24_64);
+ ;temp2 = step1b[10][i] * (-cospi_24_64) + step1b[13][i] * (-cospi_8_64);
+ ;step1[13] = dct_const_round_shift(temp1);
+ ;step1[10] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD (-cospi_8_64), (-cospi_24_64), d8, d9, d14, d15
+ ; --------------------------------------------------------------------------
+ ; combine 8-10,11-15
+ ; --------------------------------------------------------------------------
+ ; part of stage 5
+ ;step2[8] = step1b[8][i] + step1b[11][i];
+ ;step2[9] = step1b[9][i] + step1b[10][i];
+ ;step2[10] = step1b[9][i] - step1b[10][i];
+ vadd.s16 q8, q0, q5
+ vadd.s16 q9, q1, q7
+ vsub.s16 q13, q1, q7
+ ;step2[13] = step1b[14][i] - step1b[13][i];
+ ;step2[14] = step1b[14][i] + step1b[13][i];
+ ;step2[15] = step1b[15][i] + step1b[12][i];
+ vsub.s16 q14, q3, q4
+ vadd.s16 q10, q3, q4
+ vadd.s16 q15, q2, q6
+ STORE_IN_OUTPUT 26, 8, 15, q8, q15
+ STORE_IN_OUTPUT 15, 9, 14, q9, q10
+ ; --------------------------------------------------------------------------
+ ; part of stage 6
+ ;temp1 = (step1b[13][i] - step1b[10][i]) * cospi_16_64;
+ ;temp2 = (step1b[13][i] + step1b[10][i]) * cospi_16_64;
+ ;step3[10] = dct_const_round_shift(temp1);
+ ;step3[13] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d2, d3, d6, d7
+ STORE_IN_OUTPUT 14, 13, 10, q3, q1
+ ; --------------------------------------------------------------------------
+ ; part of stage 5
+ ;step2[11] = step1b[8][i] - step1b[11][i];
+ ;step2[12] = step1b[15][i] - step1b[12][i];
+ vsub.s16 q13, q0, q5
+ vsub.s16 q14, q2, q6
+ ; --------------------------------------------------------------------------
+ ; part of stage 6
+ ;temp1 = (step1b[12][i] - step1b[11][i]) * cospi_16_64;
+ ;temp2 = (step1b[12][i] + step1b[11][i]) * cospi_16_64;
+ ;step3[11] = dct_const_round_shift(temp1);
+ ;step3[12] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d2, d3, d6, d7
+ STORE_IN_OUTPUT 10, 11, 12, q1, q3
+ ; --------------------------------------------------------------------------
+
+
+ ; --------------------------------------------------------------------------
+ ; BLOCK D: 0-3,4-7
+ ; --------------------------------------------------------------------------
+ ; generate 4,5,6,7
+ ; --------------------------------------------------------------------------
+ ; part of stage 3
+ ;temp1 = input[4 * 32] * cospi_28_64 - input[28 * 32] * cospi_4_64;
+ ;temp2 = input[4 * 32] * cospi_4_64 + input[28 * 32] * cospi_28_64;
+ ;step3[4] = dct_const_round_shift(temp1);
+ ;step3[7] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 6, 4, 28
+ DO_BUTTERFLY_STD cospi_28_64, cospi_4_64, d0, d1, d4, d5
+ ; --------------------------------------------------------------------------
+ ; part of stage 3
+ ;temp1 = input[20 * 32] * cospi_12_64 - input[12 * 32] * cospi_20_64;
+ ;temp2 = input[20 * 32] * cospi_20_64 + input[12 * 32] * cospi_12_64;
+ ;step3[5] = dct_const_round_shift(temp1);
+ ;step3[6] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 28, 20, 12
+ DO_BUTTERFLY_STD cospi_12_64, cospi_20_64, d2, d3, d6, d7
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;step1[4] = step1b[4][i] + step1b[5][i];
+ ;step1[5] = step1b[4][i] - step1b[5][i];
+ ;step1[6] = step1b[7][i] - step1b[6][i];
+ ;step1[7] = step1b[7][i] + step1b[6][i];
+ vsub.s16 q13, q0, q1
+ vadd.s16 q0, q0, q1
+ vsub.s16 q14, q2, q3
+ vadd.s16 q2, q2, q3
+ ; --------------------------------------------------------------------------
+ ; part of stage 5
+ ;temp1 = (step1b[6][i] - step1b[5][i]) * cospi_16_64;
+ ;temp2 = (step1b[5][i] + step1b[6][i]) * cospi_16_64;
+ ;step2[5] = dct_const_round_shift(temp1);
+ ;step2[6] = dct_const_round_shift(temp2);
+ DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d2, d3, d6, d7
+ ; --------------------------------------------------------------------------
+ ; generate 0,1,2,3
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;temp1 = (input[0 * 32] - input[16 * 32]) * cospi_16_64;
+ ;temp2 = (input[0 * 32] + input[16 * 32]) * cospi_16_64;
+ ;step1[1] = dct_const_round_shift(temp1);
+ ;step1[0] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 12, 0, 16
+ DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d10, d11, d14, d15
+ ; --------------------------------------------------------------------------
+ ; part of stage 4
+ ;temp1 = input[8 * 32] * cospi_24_64 - input[24 * 32] * cospi_8_64;
+ ;temp2 = input[8 * 32] * cospi_8_64 + input[24 * 32] * cospi_24_64;
+ ;step1[2] = dct_const_round_shift(temp1);
+ ;step1[3] = dct_const_round_shift(temp2);
+ LOAD_FROM_TRANSPOSED 16, 8, 24
+ DO_BUTTERFLY_STD cospi_24_64, cospi_8_64, d28, d29, d12, d13
+ ; --------------------------------------------------------------------------
+ ; part of stage 5
+ ;step2[0] = step1b[0][i] + step1b[3][i];
+ ;step2[1] = step1b[1][i] + step1b[2][i];
+ ;step2[2] = step1b[1][i] - step1b[2][i];
+ ;step2[3] = step1b[0][i] - step1b[3][i];
+ vadd.s16 q4, q7, q6
+ vsub.s16 q7, q7, q6
+ vsub.s16 q6, q5, q14
+ vadd.s16 q5, q5, q14
+ ; --------------------------------------------------------------------------
+ ; combine 0-3,4-7
+ ; --------------------------------------------------------------------------
+ ; part of stage 6
+ ;step3[0] = step1b[0][i] + step1b[7][i];
+ ;step3[1] = step1b[1][i] + step1b[6][i];
+ ;step3[2] = step1b[2][i] + step1b[5][i];
+ ;step3[3] = step1b[3][i] + step1b[4][i];
+ vadd.s16 q8, q4, q2
+ vadd.s16 q9, q5, q3
+ vadd.s16 q10, q6, q1
+ vadd.s16 q11, q7, q0
+ ;step3[4] = step1b[3][i] - step1b[4][i];
+ ;step3[5] = step1b[2][i] - step1b[5][i];
+ ;step3[6] = step1b[1][i] - step1b[6][i];
+ ;step3[7] = step1b[0][i] - step1b[7][i];
+ vsub.s16 q12, q7, q0
+ vsub.s16 q13, q6, q1
+ vsub.s16 q14, q5, q3
+ vsub.s16 q15, q4, q2
+ ; --------------------------------------------------------------------------
+ ; part of stage 7
+ ;step1[0] = step1b[0][i] + step1b[15][i];
+ ;step1[1] = step1b[1][i] + step1b[14][i];
+ ;step1[14] = step1b[1][i] - step1b[14][i];
+ ;step1[15] = step1b[0][i] - step1b[15][i];
+ LOAD_FROM_OUTPUT 12, 14, 15, q0, q1
+ vadd.s16 q2, q8, q1
+ vadd.s16 q3, q9, q0
+ vsub.s16 q4, q9, q0
+ vsub.s16 q5, q8, q1
+ ; --------------------------------------------------------------------------
+ ; part of final stage
+ ;output[14 * 32] = step1b[14][i] + step1b[17][i];
+ ;output[15 * 32] = step1b[15][i] + step1b[16][i];
+ ;output[16 * 32] = step1b[15][i] - step1b[16][i];
+ ;output[17 * 32] = step1b[14][i] - step1b[17][i];
+ LOAD_FROM_OUTPUT 15, 16, 17, q0, q1
+ vadd.s16 q8, q4, q1
+ vadd.s16 q9, q5, q0
+ vsub.s16 q6, q5, q0
+ vsub.s16 q7, q4, q1
+ STORE_IN_OUTPUT 17, 17, 16, q7, q6
+ STORE_IN_OUTPUT 16, 15, 14, q9, q8
+ ; --------------------------------------------------------------------------
+ ; part of final stage
+ ;output[ 0 * 32] = step1b[0][i] + step1b[31][i];
+ ;output[ 1 * 32] = step1b[1][i] + step1b[30][i];
+ ;output[30 * 32] = step1b[1][i] - step1b[30][i];
+ ;output[31 * 32] = step1b[0][i] - step1b[31][i];
+ LOAD_FROM_OUTPUT 14, 30, 31, q0, q1
+ vadd.s16 q4, q2, q1
+ vadd.s16 q5, q3, q0
+ vsub.s16 q6, q3, q0
+ vsub.s16 q7, q2, q1
+ STORE_IN_OUTPUT 31, 31, 30, q7, q6
+ STORE_IN_OUTPUT 30, 0, 1, q4, q5
+ ; --------------------------------------------------------------------------
+ ; part of stage 7
+ ;step1[2] = step1b[2][i] + step1b[13][i];
+ ;step1[3] = step1b[3][i] + step1b[12][i];
+ ;step1[12] = step1b[3][i] - step1b[12][i];
+ ;step1[13] = step1b[2][i] - step1b[13][i];
+ LOAD_FROM_OUTPUT 1, 12, 13, q0, q1
+ vadd.s16 q2, q10, q1
+ vadd.s16 q3, q11, q0
+ vsub.s16 q4, q11, q0
+ vsub.s16 q5, q10, q1
+ ; --------------------------------------------------------------------------
+ ; part of final stage
+ ;output[12 * 32] = step1b[12][i] + step1b[19][i];
+ ;output[13 * 32] = step1b[13][i] + step1b[18][i];
+ ;output[18 * 32] = step1b[13][i] - step1b[18][i];
+ ;output[19 * 32] = step1b[12][i] - step1b[19][i];
+ LOAD_FROM_OUTPUT 13, 18, 19, q0, q1
+ vadd.s16 q6, q4, q1
+ vadd.s16 q7, q5, q0
+ vsub.s16 q8, q5, q0
+ vsub.s16 q9, q4, q1
+ STORE_IN_OUTPUT 19, 19, 18, q9, q8
+ STORE_IN_OUTPUT 18, 13, 12, q7, q6
+ ; --------------------------------------------------------------------------
+ ; part of final stage
+ ;output[ 2 * 32] = step1b[2][i] + step1b[29][i];
+ ;output[ 3 * 32] = step1b[3][i] + step1b[28][i];
+ ;output[28 * 32] = step1b[3][i] - step1b[28][i];
+ ;output[29 * 32] = step1b[2][i] - step1b[29][i];
+ LOAD_FROM_OUTPUT 12, 28, 29, q0, q1
+ vadd.s16 q4, q2, q1
+ vadd.s16 q5, q3, q0
+ vsub.s16 q6, q3, q0
+ vsub.s16 q7, q2, q1
+ STORE_IN_OUTPUT 29, 29, 28, q7, q6
+ STORE_IN_OUTPUT 28, 2, 3, q4, q5
+ ; --------------------------------------------------------------------------
+ ; part of stage 7
+ ;step1[4] = step1b[4][i] + step1b[11][i];
+ ;step1[5] = step1b[5][i] + step1b[10][i];
+ ;step1[10] = step1b[5][i] - step1b[10][i];
+ ;step1[11] = step1b[4][i] - step1b[11][i];
+ LOAD_FROM_OUTPUT 3, 10, 11, q0, q1
+ vadd.s16 q2, q12, q1
+ vadd.s16 q3, q13, q0
+ vsub.s16 q4, q13, q0
+ vsub.s16 q5, q12, q1
+ ; --------------------------------------------------------------------------
+ ; part of final stage
+ ;output[10 * 32] = step1b[10][i] + step1b[21][i];
+ ;output[11 * 32] = step1b[11][i] + step1b[20][i];
+ ;output[20 * 32] = step1b[11][i] - step1b[20][i];
+ ;output[21 * 32] = step1b[10][i] - step1b[21][i];
+ LOAD_FROM_OUTPUT 11, 20, 21, q0, q1
+ vadd.s16 q6, q4, q1
+ vadd.s16 q7, q5, q0
+ vsub.s16 q8, q5, q0
+ vsub.s16 q9, q4, q1
+ STORE_IN_OUTPUT 21, 21, 20, q9, q8
+ STORE_IN_OUTPUT 20, 11, 10, q7, q6
+ ; --------------------------------------------------------------------------
+ ; part of final stage
+ ;output[ 4 * 32] = step1b[4][i] + step1b[27][i];
+ ;output[ 5 * 32] = step1b[5][i] + step1b[26][i];
+ ;output[26 * 32] = step1b[5][i] - step1b[26][i];
+ ;output[27 * 32] = step1b[4][i] - step1b[27][i];
+ LOAD_FROM_OUTPUT 10, 26, 27, q0, q1
+ vadd.s16 q4, q2, q1
+ vadd.s16 q5, q3, q0
+ vsub.s16 q6, q3, q0
+ vsub.s16 q7, q2, q1
+ STORE_IN_OUTPUT 27, 27, 26, q7, q6
+ STORE_IN_OUTPUT 26, 4, 5, q4, q5
+ ; --------------------------------------------------------------------------
+ ; part of stage 7
+ ;step1[6] = step1b[6][i] + step1b[9][i];
+ ;step1[7] = step1b[7][i] + step1b[8][i];
+ ;step1[8] = step1b[7][i] - step1b[8][i];
+ ;step1[9] = step1b[6][i] - step1b[9][i];
+ LOAD_FROM_OUTPUT 5, 8, 9, q0, q1
+ vadd.s16 q2, q14, q1
+ vadd.s16 q3, q15, q0
+ vsub.s16 q4, q15, q0
+ vsub.s16 q5, q14, q1
+ ; --------------------------------------------------------------------------
+ ; part of final stage
+ ;output[ 8 * 32] = step1b[8][i] + step1b[23][i];
+ ;output[ 9 * 32] = step1b[9][i] + step1b[22][i];
+ ;output[22 * 32] = step1b[9][i] - step1b[22][i];
+ ;output[23 * 32] = step1b[8][i] - step1b[23][i];
+ LOAD_FROM_OUTPUT 9, 22, 23, q0, q1
+ vadd.s16 q6, q4, q1
+ vadd.s16 q7, q5, q0
+ vsub.s16 q8, q5, q0
+ vsub.s16 q9, q4, q1
+ STORE_IN_OUTPUT 23, 23, 22, q9, q8
+ STORE_IN_OUTPUT 22, 9, 8, q7, q6
+ ; --------------------------------------------------------------------------
+ ; part of final stage
+ ;output[ 6 * 32] = step1b[6][i] + step1b[25][i];
+ ;output[ 7 * 32] = step1b[7][i] + step1b[24][i];
+ ;output[24 * 32] = step1b[7][i] - step1b[24][i];
+ ;output[25 * 32] = step1b[6][i] - step1b[25][i];
+ LOAD_FROM_OUTPUT 8, 24, 25, q0, q1
+ vadd.s16 q4, q2, q1
+ vadd.s16 q5, q3, q0
+ vsub.s16 q6, q3, q0
+ vsub.s16 q7, q2, q1
+ STORE_IN_OUTPUT 25, 25, 24, q7, q6
+ STORE_IN_OUTPUT 24, 6, 7, q4, q5
+ ; --------------------------------------------------------------------------
+
+ ; TODO(cd) get rid of these push/pop by properly adjusting register
+ ; content at end of loop
+ pop {r2}
+ pop {r1}
+ pop {r0}
+ add r1, r1, #8*2
+ add r2, r2, #8*32*2
+
+ ; bands loop processing
+ add r4, r4, #1
+ cmp r4, #3
+ BLE idct32_bands_loop
+
+ pop {r4}
+ bx lr
+ ENDP ; |idct32_transpose_and_transform|
+
+;void idct32_combine_add(uint8_t *dest, int16_t *out, int dest_stride);
+;
+; r0 uint8_t *dest
+; r1 int16_t *out
+; r2  int dest_stride
+
+|idct32_combine_add| PROC
+
+ mov r12, r0 ; dest pointer used for stores
+ sub r2, r2, #32 ; adjust the stride (remove the post-increments)
+ mov r3, #0 ; initialize loop counter
+
+idct32_combine_add_loop
+ ; load out[j * 32 + 0-31]
+ vld1.s16 {q12}, [r1]!
+ vld1.s16 {q13}, [r1]!
+ vld1.s16 {q14}, [r1]!
+ vld1.s16 {q15}, [r1]!
+ ; load dest[j * dest_stride + 0-31]
+ vld1.s16 {q6}, [r0]!
+ vld1.s16 {q7}, [r0]!
+ ; ROUND_POWER_OF_TWO
+ vrshr.s16 q12, q12, #6
+ vrshr.s16 q13, q13, #6
+ vrshr.s16 q14, q14, #6
+ vrshr.s16 q15, q15, #6
+ ; add to dest[j * dest_stride + 0-31]
+ vaddw.u8 q12, q12, d12
+ vaddw.u8 q13, q13, d13
+ vaddw.u8 q14, q14, d14
+ vaddw.u8 q15, q15, d15
+ ; clip pixel
+ vqmovun.s16 d12, q12
+ vqmovun.s16 d13, q13
+ vqmovun.s16 d14, q14
+ vqmovun.s16 d15, q15
+ ; store back into dest[j * dest_stride + 0-31]
+ vst1.16 {q6}, [r12]!
+ vst1.16 {q7}, [r12]!
+ ; increment pointers by adjusted stride (not necessary for r1/out)
+ add r0, r0, r2
+ add r12, r12, r2
+ ; loop processing
+ add r3, r3, #1
+ cmp r3, #31
+ BLE idct32_combine_add_loop
+
+ bx lr
+ ENDP  ; |idct32_combine_add|
+
+ END
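
The comments in the file above lean on three helpers from the VP9 C code:
dct_const_round_shift, ROUND_POWER_OF_TWO and clip_pixel. The sketch below
(paraphrased C, not the verbatim library source; idct32_combine_add_model is
our name) shows what they and the idct32_combine_add loop compute:

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

    /* Modeled in the asm by vqrshrn.s32 #14 (narrowing round-shift). */
    static int16_t dct_const_round_shift(int32_t input) {
      return (int16_t)ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
    }

    /* Modeled in the asm by vqmovun.s16 (saturate to unsigned bytes). */
    static uint8_t clip_pixel(int val) {
      return (uint8_t)(val < 0 ? 0 : val > 255 ? 255 : val);
    }

    /* Scalar model of idct32_combine_add: round each 16-bit residual by
       6 bits (vrshr.s16 #6), add it to the destination pixel (vaddw.u8)
       and saturate back to 8 bits (vqmovun.s16). */
    static void idct32_combine_add_model(uint8_t *dest, const int16_t *out,
                                         int dest_stride) {
      for (int j = 0; j < 32; ++j)
        for (int i = 0; i < 32; ++i)
          dest[j * dest_stride + i] =
              clip_pixel(dest[j * dest_stride + i] +
                         ROUND_POWER_OF_TWO(out[j * 32 + i], 6));
    }
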
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_1_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_1_add_neon.asm
new file mode 100644
index 0000000..869ee5f
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_1_add_neon.asm
@@ -0,0 +1,68 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license and patent
+; grant that can be found in the LICENSE file in the root of the source
+; tree. All contributing project authors may be found in the AUTHORS
+; file in the root of the source tree.
+;
+
+
+ EXPORT |vp9_short_idct4x4_1_add_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+;void vp9_short_idct4x4_1_add_neon(int16_t *input, uint8_t *dest,
+; int dest_stride)
+;
+; r0  int16_t *input
+; r1  uint8_t *dest
+; r2  int dest_stride
+
+|vp9_short_idct4x4_1_add_neon| PROC
+ ldrsh r0, [r0]
+
+ ; generate cospi_16_64 = 11585
+ mov r12, #0x2d00
+ add r12, #0x41
+
+ ; out = dct_const_round_shift(input[0] * cospi_16_64)
+ mul r0, r0, r12 ; input[0] * cospi_16_64
+ add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
+ asr r0, r0, #14 ; >> DCT_CONST_BITS
+
+ ; out = dct_const_round_shift(out * cospi_16_64)
+ mul r0, r0, r12 ; out * cospi_16_64
+ mov r12, r1 ; save dest
+ add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
+ asr r0, r0, #14 ; >> DCT_CONST_BITS
+
+ ; a1 = ROUND_POWER_OF_TWO(out, 4)
+ add r0, r0, #8 ; + (1 <<((4) - 1))
+ asr r0, r0, #4 ; >> 4
+
+ vdup.s16 q0, r0 ; duplicate a1
+
+ vld1.32 {d2[0]}, [r1], r2
+ vld1.32 {d2[1]}, [r1], r2
+ vld1.32 {d4[0]}, [r1], r2
+ vld1.32 {d4[1]}, [r1]
+
+ vaddw.u8 q8, q0, d2 ; dest[x] + a1
+ vaddw.u8 q9, q0, d4
+
+ vqmovun.s16 d6, q8 ; clip_pixel
+ vqmovun.s16 d7, q9
+
+ vst1.32 {d6[0]}, [r12], r2
+ vst1.32 {d6[1]}, [r12], r2
+ vst1.32 {d7[0]}, [r12], r2
+ vst1.32 {d7[1]}, [r12]
+
+ bx lr
+ ENDP ; |vp9_short_idct4x4_1_add_neon|
+
+ END
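
In scalar C the DC-only path above amounts to the sketch below, reusing the
helpers modeled after the 32x32 file (idct4x4_1_add_model is our name, not a
libvpx symbol): input[0] is scaled twice by cospi_16_64 with 14-bit rounding,
rounded by 4 bits, then added to every pixel of the 4x4 block.

    static void idct4x4_1_add_model(const int16_t *input, uint8_t *dest,
                                    int dest_stride) {
      const int cospi_16_64 = 11585;  /* 0x2d41, built in r12 above */
      int out = dct_const_round_shift(input[0] * cospi_16_64);
      out = dct_const_round_shift(out * cospi_16_64);
      const int a1 = ROUND_POWER_OF_TWO(out, 4);
      for (int j = 0; j < 4; ++j, dest += dest_stride)
        for (int i = 0; i < 4; ++i)
          dest[i] = clip_pixel(dest[i] + a1);  /* vaddw.u8 + vqmovun.s16 */
    }
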
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_add_neon.asm
new file mode 100644
index 0000000..640fb93
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_short_idct4x4_add_neon.asm
@@ -0,0 +1,190 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_short_idct4x4_add_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+ AREA Block, CODE, READONLY ; name this block of code
+;void vp9_short_idct4x4_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;
+; r0  int16_t *input
+; r1  uint8_t *dest
+; r2  int dest_stride
+
+|vp9_short_idct4x4_add_neon| PROC
+
+ ; The 2D transform is done in two passes that are actually quite
+ ; similar. We first transform the rows: transpose the input, do a
+ ; SIMD column transform (the columns are the transposed rows) and
+ ; then transpose the results so that they go back to their
+ ; normal/row positions. Then we transform the columns by doing
+ ; another SIMD column transform.
+ ; So: two passes, each a transpose followed by a column transform.
+
+ ; load the inputs into q8-q9, d16-d19
+ vld1.s16 {q8,q9}, [r0]!
+
+ ; generate scalar constants
+ ; cospi_8_64 = 15137 = 0x3b21
+ mov r0, #0x3b00
+ add r0, #0x21
+ ; cospi_16_64 = 11585 = 0x2d41
+ mov r3, #0x2d00
+ add r3, #0x41
+ ; cospi_24_64 = 6270 = 0x187e
+ mov r12, #0x1800
+ add r12, #0x7e
+
+ ; transpose the input data
+ ; 00 01 02 03 d16
+ ; 10 11 12 13 d17
+ ; 20 21 22 23 d18
+ ; 30 31 32 33 d19
+ vtrn.16 d16, d17
+ vtrn.16 d18, d19
+
+ ; generate constant vectors
+ vdup.16 d20, r0 ; replicate cospi_8_64
+ vdup.16 d21, r3 ; replicate cospi_16_64
+
+ ; 00 10 02 12 d16
+ ; 01 11 03 13 d17
+ ; 20 30 22 32 d18
+ ; 21 31 23 33 d19
+ vtrn.32 q8, q9
+ ; 00 10 20 30 d16
+ ; 01 11 21 31 d17
+ ; 02 12 22 32 d18
+ ; 03 13 23 33 d19
+
+ vdup.16 d22, r12 ; replicate cospi_24_64
+
+ ; do the transform on transposed rows
+
+ ; stage 1
+ vadd.s16 d23, d16, d18 ; (input[0] + input[2])
+ vsub.s16 d24, d16, d18 ; (input[0] - input[2])
+
+ vmull.s16 q15, d17, d22 ; input[1] * cospi_24_64
+ vmull.s16 q1, d17, d20 ; input[1] * cospi_8_64
+
+ ; (input[0] + input[2]) * cospi_16_64;
+ ; (input[0] - input[2]) * cospi_16_64;
+ vmull.s16 q13, d23, d21
+ vmull.s16 q14, d24, d21
+
+ ; input[1] * cospi_24_64 - input[3] * cospi_8_64;
+ ; input[1] * cospi_8_64 + input[3] * cospi_24_64;
+ vmlsl.s16 q15, d19, d20
+ vmlal.s16 q1, d19, d22
+
+ ; dct_const_round_shift
+ vqrshrn.s32 d26, q13, #14
+ vqrshrn.s32 d27, q14, #14
+ vqrshrn.s32 d29, q15, #14
+ vqrshrn.s32 d28, q1, #14
+
+ ; stage 2
+ ; output[0] = step[0] + step[3];
+ ; output[1] = step[1] + step[2];
+ ; output[3] = step[0] - step[3];
+ ; output[2] = step[1] - step[2];
+ vadd.s16 q8, q13, q14
+ vsub.s16 q9, q13, q14
+ vswp d18, d19
+
+ ; transpose the results
+ ; 00 01 02 03 d16
+ ; 10 11 12 13 d17
+ ; 20 21 22 23 d18
+ ; 30 31 32 33 d19
+ vtrn.16 d16, d17
+ vtrn.16 d18, d19
+ ; 00 10 02 12 d16
+ ; 01 11 03 13 d17
+ ; 20 30 22 32 d18
+ ; 21 31 23 33 d19
+ vtrn.32 q8, q9
+ ; 00 10 20 30 d16
+ ; 01 11 21 31 d17
+ ; 02 12 22 32 d18
+ ; 03 13 23 33 d19
+
+ ; do the transform on columns
+
+ ; stage 1
+ vadd.s16 d23, d16, d18 ; (input[0] + input[2])
+ vsub.s16 d24, d16, d18 ; (input[0] - input[2])
+
+ vmull.s16 q15, d17, d22 ; input[1] * cospi_24_64
+ vmull.s16 q1, d17, d20 ; input[1] * cospi_8_64
+
+ ; (input[0] + input[2]) * cospi_16_64;
+ ; (input[0] - input[2]) * cospi_16_64;
+ vmull.s16 q13, d23, d21
+ vmull.s16 q14, d24, d21
+
+ ; input[1] * cospi_24_64 - input[3] * cospi_8_64;
+ ; input[1] * cospi_8_64 + input[3] * cospi_24_64;
+ vmlsl.s16 q15, d19, d20
+ vmlal.s16 q1, d19, d22
+
+ ; dct_const_round_shift
+ vqrshrn.s32 d26, q13, #14
+ vqrshrn.s32 d27, q14, #14
+ vqrshrn.s32 d29, q15, #14
+ vqrshrn.s32 d28, q1, #14
+
+ ; stage 2
+ ; output[0] = step[0] + step[3];
+ ; output[1] = step[1] + step[2];
+ ; output[3] = step[0] - step[3];
+ ; output[2] = step[1] - step[2];
+ vadd.s16 q8, q13, q14
+ vsub.s16 q9, q13, q14
+
+ ; The results are in two registers, one of them being swapped. This will
+ ; be taken care of by loading the 'dest' value in a swapped fashion and
+ ; also storing them in the same swapped fashion.
+ ; temp_out[0, 1] = d16, d17 = q8
+ ; temp_out[2, 3] = d19, d18 = q9 swapped
+
+ ; ROUND_POWER_OF_TWO(temp_out[j], 4)
+ vrshr.s16 q8, q8, #4
+ vrshr.s16 q9, q9, #4
+
+ vld1.32 {d26[0]}, [r1], r2
+ vld1.32 {d26[1]}, [r1], r2
+ vld1.32 {d27[1]}, [r1], r2
+ vld1.32 {d27[0]}, [r1] ; no post-increment
+
+ ; ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]
+ vaddw.u8 q8, q8, d26
+ vaddw.u8 q9, q9, d27
+
+ ; clip_pixel
+ vqmovun.s16 d26, q8
+ vqmovun.s16 d27, q9
+
+ ; do the stores in reverse order with negative post-increment, by changing
+ ; the sign of the stride
+ rsb r2, r2, #0
+ vst1.32 {d27[0]}, [r1], r2
+ vst1.32 {d27[1]}, [r1], r2
+ vst1.32 {d26[1]}, [r1], r2
+ vst1.32 {d26[0]}, [r1] ; no post-increment
+ bx lr
+ ENDP ; |vp9_short_idct4x4_add_neon|
+
+ END
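
Each pass of the 4x4 transform above is a 4-point 1D IDCT applied to all four
columns at once. Per column it computes roughly the following (a sketch
assembled from the stage comments, not the verbatim library code); the
vswp d18, d19 in the asm exists because out[2] and out[3] land swapped
within q9:

    /* One 4-point column IDCT, following the stage 1/stage 2 comments. */
    static void idct4_1d_model(const int16_t *in, int16_t *out) {
      const int cospi_8_64 = 15137, cospi_16_64 = 11585, cospi_24_64 = 6270;
      /* stage 1 */
      const int16_t s0 = dct_const_round_shift((in[0] + in[2]) * cospi_16_64);
      const int16_t s1 = dct_const_round_shift((in[0] - in[2]) * cospi_16_64);
      const int16_t s2 = dct_const_round_shift(in[1] * cospi_24_64 -
                                               in[3] * cospi_8_64);
      const int16_t s3 = dct_const_round_shift(in[1] * cospi_8_64 +
                                               in[3] * cospi_24_64);
      /* stage 2 */
      out[0] = s0 + s3;
      out[1] = s1 + s2;
      out[2] = s1 - s2;
      out[3] = s0 - s3;
    }
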
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_1_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_1_add_neon.asm
new file mode 100644
index 0000000..923804f
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_1_add_neon.asm
@@ -0,0 +1,88 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license and patent
+; grant that can be found in the LICENSE file in the root of the source
+; tree. All contributing project authors may be found in the AUTHORS
+; file in the root of the source tree.
+;
+
+
+ EXPORT |vp9_short_idct8x8_1_add_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+;void vp9_short_idct8x8_1_add_neon(int16_t *input, uint8_t *dest,
+; int dest_stride)
+;
+; r0  int16_t *input
+; r1  uint8_t *dest
+; r2  int dest_stride
+
+|vp9_short_idct8x8_1_add_neon| PROC
+ ldrsh r0, [r0]
+
+ ; generate cospi_16_64 = 11585
+ mov r12, #0x2d00
+ add r12, #0x41
+
+ ; out = dct_const_round_shift(input[0] * cospi_16_64)
+ mul r0, r0, r12 ; input[0] * cospi_16_64
+ add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
+ asr r0, r0, #14 ; >> DCT_CONST_BITS
+
+ ; out = dct_const_round_shift(out * cospi_16_64)
+ mul r0, r0, r12 ; out * cospi_16_64
+ mov r12, r1 ; save dest
+ add r0, r0, #0x2000 ; +(1 << ((DCT_CONST_BITS) - 1))
+ asr r0, r0, #14 ; >> DCT_CONST_BITS
+
+ ; a1 = ROUND_POWER_OF_TWO(out, 5)
+ add r0, r0, #16 ; + (1 <<((5) - 1))
+ asr r0, r0, #5 ; >> 5
+
+ vdup.s16 q0, r0 ; duplicate a1
+
+ ; load destination data
+ vld1.64 {d2}, [r1], r2
+ vld1.64 {d3}, [r1], r2
+ vld1.64 {d4}, [r1], r2
+ vld1.64 {d5}, [r1], r2
+ vld1.64 {d6}, [r1], r2
+ vld1.64 {d7}, [r1], r2
+ vld1.64 {d16}, [r1], r2
+ vld1.64 {d17}, [r1]
+
+ vaddw.u8 q9, q0, d2 ; dest[x] + a1
+ vaddw.u8 q10, q0, d3 ; dest[x] + a1
+ vaddw.u8 q11, q0, d4 ; dest[x] + a1
+ vaddw.u8 q12, q0, d5 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r2
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r2
+ vst1.64 {d31}, [r12], r2
+
+ vaddw.u8 q9, q0, d6 ; dest[x] + a1
+ vaddw.u8 q10, q0, d7 ; dest[x] + a1
+ vaddw.u8 q11, q0, d16 ; dest[x] + a1
+ vaddw.u8 q12, q0, d17 ; dest[x] + a1
+ vqmovun.s16 d2, q9 ; clip_pixel
+ vqmovun.s16 d3, q10 ; clip_pixel
+ vqmovun.s16 d30, q11 ; clip_pixel
+ vqmovun.s16 d31, q12 ; clip_pixel
+ vst1.64 {d2}, [r12], r2
+ vst1.64 {d3}, [r12], r2
+ vst1.64 {d30}, [r12], r2
+ vst1.64 {d31}, [r12], r2
+
+ bx lr
+ ENDP ; |vp9_short_idct8x8_1_add_neon|
+
+ END
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_add_neon.asm
new file mode 100644
index 0000000..a744f59
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_short_idct8x8_add_neon.asm
@@ -0,0 +1,519 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_short_idct8x8_add_neon|
+ EXPORT |vp9_short_idct10_8x8_add_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+ ; Parallel 1D IDCT on all the columns of an 8x8 16-bit data matrix, which
+ ; are loaded in q8-q15. The output will be stored back into q8-q15
+ ; registers. This macro will touch the q0-q7 registers and use them as a
+ ; buffer during calculation.
+ MACRO
+ IDCT8x8_1D
+ ; stage 1
+ vdup.16 d0, r3 ; duplicate cospi_28_64
+ vdup.16 d1, r4 ; duplicate cospi_4_64
+ vdup.16 d2, r5 ; duplicate cospi_12_64
+ vdup.16 d3, r6 ; duplicate cospi_20_64
+
+ ; input[1] * cospi_28_64
+ vmull.s16 q2, d18, d0
+ vmull.s16 q3, d19, d0
+
+ ; input[5] * cospi_12_64
+ vmull.s16 q5, d26, d2
+ vmull.s16 q6, d27, d2
+
+ ; input[1]*cospi_28_64-input[7]*cospi_4_64
+ vmlsl.s16 q2, d30, d1
+ vmlsl.s16 q3, d31, d1
+
+ ; input[5] * cospi_12_64 - input[3] * cospi_20_64
+ vmlsl.s16 q5, d22, d3
+ vmlsl.s16 q6, d23, d3
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d8, q2, #14 ; >> 14
+ vqrshrn.s32 d9, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d10, q5, #14 ; >> 14
+ vqrshrn.s32 d11, q6, #14 ; >> 14
+
+ ; input[1] * cospi_4_64
+ vmull.s16 q2, d18, d1
+ vmull.s16 q3, d19, d1
+
+ ; input[5] * cospi_20_64
+ vmull.s16 q9, d26, d3
+ vmull.s16 q13, d27, d3
+
+ ; input[1]*cospi_4_64+input[7]*cospi_28_64
+ vmlal.s16 q2, d30, d0
+ vmlal.s16 q3, d31, d0
+
+ ; input[5] * cospi_20_64 + input[3] * cospi_12_64
+ vmlal.s16 q9, d22, d2
+ vmlal.s16 q13, d23, d2
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d14, q2, #14 ; >> 14
+ vqrshrn.s32 d15, q3, #14 ; >> 14
+
+ ; stage 2 & stage 3 - even half
+ vdup.16 d0, r7 ; duplicate cospi_16_64
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d12, q9, #14 ; >> 14
+ vqrshrn.s32 d13, q13, #14 ; >> 14
+
+ ; input[0] * cospi_16_64
+ vmull.s16 q2, d16, d0
+ vmull.s16 q3, d17, d0
+
+ ; input[0] * cospi_16_64
+ vmull.s16 q13, d16, d0
+ vmull.s16 q15, d17, d0
+
+ ; (input[0] + input[2]) * cospi_16_64
+ vmlal.s16 q2, d24, d0
+ vmlal.s16 q3, d25, d0
+
+ ; (input[0] - input[2]) * cospi_16_64
+ vmlsl.s16 q13, d24, d0
+ vmlsl.s16 q15, d25, d0
+
+ vdup.16 d0, r8 ; duplicate cospi_24_64
+ vdup.16 d1, r9 ; duplicate cospi_8_64
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d18, q2, #14 ; >> 14
+ vqrshrn.s32 d19, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d22, q13, #14 ; >> 14
+ vqrshrn.s32 d23, q15, #14 ; >> 14
+
+ ; input[1] * cospi_24_64
+ vmull.s16 q2, d20, d0
+ vmull.s16 q3, d21, d0
+
+ ; input[1] * cospi_8_64
+ vmull.s16 q8, d20, d1
+ vmull.s16 q12, d21, d1
+
+ ; input[1] * cospi_24_64 - input[3] * cospi_8_64
+ vmlsl.s16 q2, d28, d1
+ vmlsl.s16 q3, d29, d1
+
+ ; input[1] * cospi_8_64 + input[3] * cospi_24_64
+ vmlal.s16 q8, d28, d0
+ vmlal.s16 q12, d29, d0
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d26, q2, #14 ; >> 14
+ vqrshrn.s32 d27, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d30, q8, #14 ; >> 14
+ vqrshrn.s32 d31, q12, #14 ; >> 14
+
+ vadd.s16 q0, q9, q15 ; output[0] = step[0] + step[3]
+ vadd.s16 q1, q11, q13 ; output[1] = step[1] + step[2]
+ vsub.s16 q2, q11, q13 ; output[2] = step[1] - step[2]
+ vsub.s16 q3, q9, q15 ; output[3] = step[0] - step[3]
+
+ ; stage 3 -odd half
+ vdup.16 d16, r7 ; duplicate cospi_16_64
+
+ ; stage 2 - odd half
+ vsub.s16 q13, q4, q5 ; step2[5] = step1[4] - step1[5]
+ vadd.s16 q4, q4, q5 ; step2[4] = step1[4] + step1[5]
+ vsub.s16 q14, q7, q6 ; step2[6] = -step1[6] + step1[7]
+ vadd.s16 q7, q7, q6 ; step2[7] = step1[6] + step1[7]
+
+ ; step2[6] * cospi_16_64
+ vmull.s16 q9, d28, d16
+ vmull.s16 q10, d29, d16
+
+ ; step2[6] * cospi_16_64
+ vmull.s16 q11, d28, d16
+ vmull.s16 q12, d29, d16
+
+ ; (step2[6] - step2[5]) * cospi_16_64
+ vmlsl.s16 q9, d26, d16
+ vmlsl.s16 q10, d27, d16
+
+ ; (step2[5] + step2[6]) * cospi_16_64
+ vmlal.s16 q11, d26, d16
+ vmlal.s16 q12, d27, d16
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d10, q9, #14 ; >> 14
+ vqrshrn.s32 d11, q10, #14 ; >> 14
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d12, q11, #14 ; >> 14
+ vqrshrn.s32 d13, q12, #14 ; >> 14
+
+ ; stage 4
+ vadd.s16 q8, q0, q7 ; output[0] = step1[0] + step1[7];
+ vadd.s16 q9, q1, q6 ; output[1] = step1[1] + step1[6];
+ vadd.s16 q10, q2, q5 ; output[2] = step1[2] + step1[5];
+ vadd.s16 q11, q3, q4 ; output[3] = step1[3] + step1[4];
+ vsub.s16 q12, q3, q4 ; output[4] = step1[3] - step1[4];
+ vsub.s16 q13, q2, q5 ; output[5] = step1[2] - step1[5];
+ vsub.s16 q14, q1, q6 ; output[6] = step1[1] - step1[6];
+ vsub.s16 q15, q0, q7 ; output[7] = step1[0] - step1[7];
+ MEND
+
+ ; Transpose an 8x8 16-bit data matrix. Data are loaded in q8-q15.
+ MACRO
+ TRANSPOSE8X8
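+ ; Swapping the odd d registers exchanges the off-diagonal 4x4 blocks
+ ; between the top rows (q8-q11) and the bottom rows (q12-q15); vtrn.32
+ ; then transposes 2x2 blocks of 32-bit pairs, and vtrn.16 finishes the
+ ; transpose within each pair of 16-bit elements.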
+ vswp d17, d24
+ vswp d23, d30
+ vswp d21, d28
+ vswp d19, d26
+ vtrn.32 q8, q10
+ vtrn.32 q9, q11
+ vtrn.32 q12, q14
+ vtrn.32 q13, q15
+ vtrn.16 q8, q9
+ vtrn.16 q10, q11
+ vtrn.16 q12, q13
+ vtrn.16 q14, q15
+ MEND
+
+ AREA Block, CODE, READONLY ; name this block of code
+;void vp9_short_idct8x8_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;
+; r0  int16_t *input
+; r1  uint8_t *dest
+; r2  int dest_stride
+
+|vp9_short_idct8x8_add_neon| PROC
+ push {r4-r9}
+ vpush {d8-d15}
+ vld1.s16 {q8,q9}, [r0]!
+ vld1.s16 {q10,q11}, [r0]!
+ vld1.s16 {q12,q13}, [r0]!
+ vld1.s16 {q14,q15}, [r0]!
+
+ ; transpose the input data
+ TRANSPOSE8X8
+
+ ; generate cospi_28_64 = 3196
+ mov r3, #0x0c00
+ add r3, #0x7c
+
+ ; generate cospi_4_64 = 16069
+ mov r4, #0x3e00
+ add r4, #0xc5
+
+ ; generate cospi_12_64 = 13623
+ mov r5, #0x3500
+ add r5, #0x37
+
+ ; generate cospi_20_64 = 9102
+ mov r6, #0x2300
+ add r6, #0x8e
+
+ ; generate cospi_16_64 = 11585
+ mov r7, #0x2d00
+ add r7, #0x41
+
+ ; generate cospi_24_64 = 6270
+ mov r8, #0x1800
+ add r8, #0x7e
+
+ ; generate cospi_8_64 = 15137
+ mov r9, #0x3b00
+ add r9, #0x21
+
+ ; First transform rows
+ IDCT8x8_1D
+
+ ; Transpose the matrix
+ TRANSPOSE8X8
+
+ ; Then transform columns
+ IDCT8x8_1D
+
+ ; ROUND_POWER_OF_TWO(temp_out[j], 5)
+ vrshr.s16 q8, q8, #5
+ vrshr.s16 q9, q9, #5
+ vrshr.s16 q10, q10, #5
+ vrshr.s16 q11, q11, #5
+ vrshr.s16 q12, q12, #5
+ vrshr.s16 q13, q13, #5
+ vrshr.s16 q14, q14, #5
+ vrshr.s16 q15, q15, #5
+
+ ; save dest pointer
+ mov r0, r1
+
+ ; load destination data
+ vld1.64 {d0}, [r1], r2
+ vld1.64 {d1}, [r1], r2
+ vld1.64 {d2}, [r1], r2
+ vld1.64 {d3}, [r1], r2
+ vld1.64 {d4}, [r1], r2
+ vld1.64 {d5}, [r1], r2
+ vld1.64 {d6}, [r1], r2
+ vld1.64 {d7}, [r1]
+
+ ; ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]
+ vaddw.u8 q8, q8, d0
+ vaddw.u8 q9, q9, d1
+ vaddw.u8 q10, q10, d2
+ vaddw.u8 q11, q11, d3
+ vaddw.u8 q12, q12, d4
+ vaddw.u8 q13, q13, d5
+ vaddw.u8 q14, q14, d6
+ vaddw.u8 q15, q15, d7
+
+ ; clip_pixel
+ vqmovun.s16 d0, q8
+ vqmovun.s16 d1, q9
+ vqmovun.s16 d2, q10
+ vqmovun.s16 d3, q11
+ vqmovun.s16 d4, q12
+ vqmovun.s16 d5, q13
+ vqmovun.s16 d6, q14
+ vqmovun.s16 d7, q15
+
+ ; store the data
+ vst1.64 {d0}, [r0], r2
+ vst1.64 {d1}, [r0], r2
+ vst1.64 {d2}, [r0], r2
+ vst1.64 {d3}, [r0], r2
+ vst1.64 {d4}, [r0], r2
+ vst1.64 {d5}, [r0], r2
+ vst1.64 {d6}, [r0], r2
+ vst1.64 {d7}, [r0], r2
+
+ vpop {d8-d15}
+ pop {r4-r9}
+ bx lr
+ ENDP ; |vp9_short_idct8x8_add_neon|
+
+;void vp9_short_idct10_8x8_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;
+; r0  int16_t *input
+; r1  uint8_t *dest
+; r2  int dest_stride
+
+|vp9_short_idct10_8x8_add_neon| PROC
+ push {r4-r9}
+ vpush {d8-d15}
+ vld1.s16 {q8,q9}, [r0]!
+ vld1.s16 {q10,q11}, [r0]!
+ vld1.s16 {q12,q13}, [r0]!
+ vld1.s16 {q14,q15}, [r0]!
+
+ ; transpose the input data
+ TRANSPOSE8X8
+
+ ; generate cospi_28_64 = 3196
+ mov r3, #0x0c00
+ add r3, #0x7c
+
+ ; generate cospi_4_64 = 16069
+ mov r4, #0x3e00
+ add r4, #0xc5
+
+ ; generate cospi_12_64 = 13623
+ mov r5, #0x3500
+ add r5, #0x37
+
+ ; generate cospi_20_64 = 9102
+ mov r6, #0x2300
+ add r6, #0x8e
+
+ ; generate cospi_16_64 = 11585
+ mov r7, #0x2d00
+ add r7, #0x41
+
+ ; generate cospi_24_64 = 6270
+ mov r8, #0x1800
+ add r8, #0x7e
+
+ ; generate cospi_8_64 = 15137
+ mov r9, #0x3b00
+ add r9, #0x21
+
+ ; First transform rows
+ ; stage 1
+ ; The following instructions use vqrdmulh to do the
+ ; dct_const_round_shift(input[1] * cospi_28_64). vqrdmulh does a doubling
+ ; multiply and shifts the result by 16 bits instead of 14, so the
+ ; constants are doubled before multiplying to compensate for this.
+ mov r12, r3, lsl #1
+ vdup.16 q0, r12 ; duplicate cospi_28_64*2
+ mov r12, r4, lsl #1
+ vdup.16 q1, r12 ; duplicate cospi_4_64*2
+
+ ; dct_const_round_shift(input[1] * cospi_28_64)
+ vqrdmulh.s16 q4, q9, q0
+
+ mov r12, r6, lsl #1
+ rsb r12, #0
+ vdup.16 q0, r12 ; duplicate -cospi_20_64*2
+
+ ; dct_const_round_shift(input[1] * cospi_4_64)
+ vqrdmulh.s16 q7, q9, q1
+
+ mov r12, r5, lsl #1
+ vdup.16 q1, r12 ; duplicate cospi_12_64*2
+
+ ; dct_const_round_shift(- input[3] * cospi_20_64)
+ vqrdmulh.s16 q5, q11, q0
+
+ mov r12, r7, lsl #1
+ vdup.16 q0, r12 ; duplicate cospi_16_64*2
+
+ ; dct_const_round_shift(input[3] * cospi_12_64)
+ vqrdmulh.s16 q6, q11, q1
+
+ ; stage 2 & stage 3 - even half
+ mov r12, r8, lsl #1
+ vdup.16 q1, r12 ; duplicate cospi_24_64*2
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrdmulh.s16 q9, q8, q0
+
+ mov r12, r9, lsl #1
+ vdup.16 q0, r12 ; duplicate cospi_8_64*2
+
+ ; dct_const_round_shift(input[1] * cospi_24_64)
+ vqrdmulh.s16 q13, q10, q1
+
+ ; dct_const_round_shift(input[1] * cospi_8_64)
+ vqrdmulh.s16 q15, q10, q0
+
+ ; stage 3 -odd half
+ vdup.16 d16, r7 ; duplicate cospi_16_64
+
+ vadd.s16 q0, q9, q15 ; output[0] = step[0] + step[3]
+ vadd.s16 q1, q9, q13 ; output[1] = step[1] + step[2]
+ vsub.s16 q2, q9, q13 ; output[2] = step[1] - step[2]
+ vsub.s16 q3, q9, q15 ; output[3] = step[0] - step[3]
+
+ ; stage 2 - odd half
+ vsub.s16 q13, q4, q5 ; step2[5] = step1[4] - step1[5]
+ vadd.s16 q4, q4, q5 ; step2[4] = step1[4] + step1[5]
+ vsub.s16 q14, q7, q6 ; step2[6] = -step1[6] + step1[7]
+ vadd.s16 q7, q7, q6 ; step2[7] = step1[6] + step1[7]
+
+ ; step2[6] * cospi_16_64
+ vmull.s16 q9, d28, d16
+ vmull.s16 q10, d29, d16
+
+ ; step2[6] * cospi_16_64
+ vmull.s16 q11, d28, d16
+ vmull.s16 q12, d29, d16
+
+ ; (step2[6] - step2[5]) * cospi_16_64
+ vmlsl.s16 q9, d26, d16
+ vmlsl.s16 q10, d27, d16
+
+ ; (step2[5] + step2[6]) * cospi_16_64
+ vmlal.s16 q11, d26, d16
+ vmlal.s16 q12, d27, d16
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d10, q9, #14 ; >> 14
+ vqrshrn.s32 d11, q10, #14 ; >> 14
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d12, q11, #14 ; >> 14
+ vqrshrn.s32 d13, q12, #14 ; >> 14
+
+ ; stage 4
+ vadd.s16 q8, q0, q7 ; output[0] = step1[0] + step1[7];
+ vadd.s16 q9, q1, q6 ; output[1] = step1[1] + step1[6];
+ vadd.s16 q10, q2, q5 ; output[2] = step1[2] + step1[5];
+ vadd.s16 q11, q3, q4 ; output[3] = step1[3] + step1[4];
+ vsub.s16 q12, q3, q4 ; output[4] = step1[3] - step1[4];
+ vsub.s16 q13, q2, q5 ; output[5] = step1[2] - step1[5];
+ vsub.s16 q14, q1, q6 ; output[6] = step1[1] - step1[6];
+ vsub.s16 q15, q0, q7 ; output[7] = step1[0] - step1[7];
+
+ ; Transpose the matrix
+ TRANSPOSE8X8
+
+ ; Then transform columns
+ IDCT8x8_1D
+
+ ; ROUND_POWER_OF_TWO(temp_out[j], 5)
+ vrshr.s16 q8, q8, #5
+ vrshr.s16 q9, q9, #5
+ vrshr.s16 q10, q10, #5
+ vrshr.s16 q11, q11, #5
+ vrshr.s16 q12, q12, #5
+ vrshr.s16 q13, q13, #5
+ vrshr.s16 q14, q14, #5
+ vrshr.s16 q15, q15, #5
+
+ ; save dest pointer
+ mov r0, r1
+
+ ; load destination data
+ vld1.64 {d0}, [r1], r2
+ vld1.64 {d1}, [r1], r2
+ vld1.64 {d2}, [r1], r2
+ vld1.64 {d3}, [r1], r2
+ vld1.64 {d4}, [r1], r2
+ vld1.64 {d5}, [r1], r2
+ vld1.64 {d6}, [r1], r2
+ vld1.64 {d7}, [r1]
+
+ ; ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]
+ vaddw.u8 q8, q8, d0
+ vaddw.u8 q9, q9, d1
+ vaddw.u8 q10, q10, d2
+ vaddw.u8 q11, q11, d3
+ vaddw.u8 q12, q12, d4
+ vaddw.u8 q13, q13, d5
+ vaddw.u8 q14, q14, d6
+ vaddw.u8 q15, q15, d7
+
+ ; clip_pixel
+ vqmovun.s16 d0, q8
+ vqmovun.s16 d1, q9
+ vqmovun.s16 d2, q10
+ vqmovun.s16 d3, q11
+ vqmovun.s16 d4, q12
+ vqmovun.s16 d5, q13
+ vqmovun.s16 d6, q14
+ vqmovun.s16 d7, q15
+
+ ; store the data
+ vst1.64 {d0}, [r0], r2
+ vst1.64 {d1}, [r0], r2
+ vst1.64 {d2}, [r0], r2
+ vst1.64 {d3}, [r0], r2
+ vst1.64 {d4}, [r0], r2
+ vst1.64 {d5}, [r0], r2
+ vst1.64 {d6}, [r0], r2
+ vst1.64 {d7}, [r0], r2
+
+ vpop {d8-d15}
+ pop {r4-r9}
+ bx lr
+ ENDP ; |vp9_short_idct10_8x8_add_neon|
+
+ END
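
A note on the constant doubling in the idct10 path above: vqrdmulh.s16
computes saturate((2*a*b + (1 << 15)) >> 16), so feeding it a pre-doubled
constant b = 2*c yields (4*a*c + (1 << 15)) >> 16 == (a*c + (1 << 13)) >> 14,
which is exactly dct_const_round_shift(a*c) whenever nothing saturates. A
scalar check, reusing the dct_const_round_shift sketch from earlier
(vqrdmulh_model is our name):

    #include <assert.h>
    #include <stdint.h>

    /* Scalar model of vqrdmulh.s16: rounding doubling multiply returning
       the high half. Only the upper clamp can trigger for these inputs. */
    static int16_t vqrdmulh_model(int16_t a, int16_t b) {
      int32_t r = (2 * (int32_t)a * b + (1 << 15)) >> 16;
      return (int16_t)(r > INT16_MAX ? INT16_MAX : r);
    }

    int main(void) {
      const int cospi_28_64 = 3196;  /* same constant the asm builds in r3 */
      for (int a = -32768; a < 32768; ++a)
        assert(vqrdmulh_model((int16_t)a, (int16_t)(2 * cospi_28_64)) ==
               dct_const_round_shift(a * cospi_28_64));
      return 0;
    }
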
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_iht4x4_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_short_iht4x4_add_neon.asm
new file mode 100644
index 0000000..963ef35
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_short_iht4x4_add_neon.asm
@@ -0,0 +1,237 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_short_iht4x4_add_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+ ; Parallel 1D IDCT on all the columns of a 4x4 16-bit data matrix, which
+ ; are loaded in d16-d19. d0 must contain cospi_8_64. d1 must contain
+ ; cospi_16_64. d2 must contain cospi_24_64. The output will be stored
+ ; back into the d16-d19 registers. This macro will touch the q10-q15
+ ; registers and use them as a buffer during calculation.
+ MACRO
+ IDCT4x4_1D
+ ; stage 1
+ vadd.s16 d23, d16, d18 ; (input[0] + input[2])
+ vsub.s16 d24, d16, d18 ; (input[0] - input[2])
+
+ vmull.s16 q15, d17, d2 ; input[1] * cospi_24_64
+ vmull.s16 q10, d17, d0 ; input[1] * cospi_8_64
+ vmull.s16 q13, d23, d1 ; (input[0] + input[2]) * cospi_16_64
+ vmull.s16 q14, d24, d1 ; (input[0] - input[2]) * cospi_16_64
+ vmlsl.s16 q15, d19, d0 ; input[1] * cospi_24_64 - input[3] * cospi_8_64
+ vmlal.s16 q10, d19, d2 ; input[1] * cospi_8_64 + input[3] * cospi_24_64
+
+ ; dct_const_round_shift
+ vqrshrn.s32 d26, q13, #14
+ vqrshrn.s32 d27, q14, #14
+ vqrshrn.s32 d29, q15, #14
+ vqrshrn.s32 d28, q10, #14
+
+ ; stage 2
+ ; output[0] = step[0] + step[3];
+ ; output[1] = step[1] + step[2];
+ ; output[3] = step[0] - step[3];
+ ; output[2] = step[1] - step[2];
+ vadd.s16 q8, q13, q14
+ vsub.s16 q9, q13, q14
+ vswp d18, d19
+ MEND
+
+ ; Parallel 1D IADST on all the columns of a 4x4 16-bit data matrix, which
+ ; are loaded in d16-d19. d3 must contain sinpi_1_9. d4 must contain
+ ; sinpi_2_9. d5 must contain sinpi_4_9. d6 must contain sinpi_3_9. The
+ ; output will be stored back into the d16-d19 registers. This macro will
+ ; touch the q11-q15 registers and use them as a buffer during calculation.
+ MACRO
+ IADST4x4_1D
+ vmull.s16 q10, d3, d16 ; s0 = sinpi_1_9 * x0
+ vmull.s16 q11, d4, d16 ; s1 = sinpi_2_9 * x0
+ vmull.s16 q12, d6, d17 ; s2 = sinpi_3_9 * x1
+ vmull.s16 q13, d5, d18 ; s3 = sinpi_4_9 * x2
+ vmull.s16 q14, d3, d18 ; s4 = sinpi_1_9 * x2
+ vmovl.s16 q15, d16 ; expand x0 from 16 bit to 32 bit
+ vaddw.s16 q15, q15, d19 ; x0 + x3
+ vmull.s16 q8, d4, d19 ; s5 = sinpi_2_9 * x3
+ vsubw.s16 q15, q15, d18 ; s7 = x0 + x3 - x2
+ vmull.s16 q9, d5, d19 ; s6 = sinpi_4_9 * x3
+
+ vadd.s32 q10, q10, q13 ; x0 = s0 + s3 + s5
+ vadd.s32 q10, q10, q8
+ vsub.s32 q11, q11, q14 ; x1 = s1 - s4 - s6
+ vdup.32 q8, r0 ; duplicate sinpi_3_9
+ vsub.s32 q11, q11, q9
+ vmul.s32 q15, q15, q8 ; x2 = sinpi_3_9 * s7
+
+ vadd.s32 q13, q10, q12 ; s0 = x0 + x3
+ vadd.s32 q10, q10, q11 ; x0 + x1
+ vadd.s32 q14, q11, q12 ; s1 = x1 + x3
+ vsub.s32 q10, q10, q12 ; s3 = x0 + x1 - x3
+
+ ; dct_const_round_shift
+ vqrshrn.s32 d16, q13, #14
+ vqrshrn.s32 d17, q14, #14
+ vqrshrn.s32 d18, q15, #14
+ vqrshrn.s32 d19, q10, #14
+ MEND
+
+ ; Generate cosine constants in d0 - d2 for the IDCT
+ MACRO
+ GENERATE_COSINE_CONSTANTS
+ ; cospi_8_64 = 15137 = 0x3b21
+ mov r0, #0x3b00
+ add r0, #0x21
+ ; cospi_16_64 = 11585 = 0x2d41
+ mov r3, #0x2d00
+ add r3, #0x41
+ ; cospi_24_64 = 6270 = 0x187e
+ mov r12, #0x1800
+ add r12, #0x7e
+
+ ; generate constant vectors
+ vdup.16 d0, r0 ; duplicate cospi_8_64
+ vdup.16 d1, r3 ; duplicate cospi_16_64
+ vdup.16 d2, r12 ; duplicate cospi_24_64
+ MEND
+
+ ; Generate sine constants in d3 - d7 for the IADST.
+ MACRO
+ GENERATE_SINE_CONSTANTS
+ ; sinpi_1_9 = 5283 = 0x14A3
+ mov r0, #0x1400
+ add r0, #0xa3
+ ; sinpi_2_9 = 9929 = 0x26C9
+ mov r3, #0x2600
+ add r3, #0xc9
+ ; sinpi_4_9 = 15212 = 0x3B6C
+ mov r12, #0x3b00
+ add r12, #0x6c
+
+ ; generate constant vectors
+ vdup.16 d3, r0 ; duplicate sinpi_1_9
+
+ ; sinpi_3_9 = 13377 = 0x3441
+ mov r0, #0x3400
+ add r0, #0x41
+
+ vdup.16 d4, r3 ; duplicate sinpi_2_9
+ vdup.16 d5, r12 ; duplicate sinpi_4_9
+ vdup.16 q3, r0 ; duplicate sinpi_3_9
+ MEND
+
+ ; Transpose a 4x4 16-bit data matrix. Data are loaded in d16-d19.
+ MACRO
+ TRANSPOSE4X4
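+ ; The two vtrn.16 transpose the 2x2 element pairs within rows 0/1 and
+ ; rows 2/3; vtrn.32 then exchanges the off-diagonal 2x2 blocks between
+ ; q8 and q9, completing the 4x4 transpose.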
+ vtrn.16 d16, d17
+ vtrn.16 d18, d19
+ vtrn.32 q8, q9
+ MEND
+
+ AREA Block, CODE, READONLY ; name this block of code
+;void vp9_short_iht4x4_add_neon(int16_t *input, uint8_t *dest,
+; int dest_stride, int tx_type)
+;
+; r0  int16_t *input
+; r1  uint8_t *dest
+; r2  int dest_stride
+; r3  int tx_type
+; This function only handles tx_type 1, 2 and 3.
+|vp9_short_iht4x4_add_neon| PROC
+
+ ; load the inputs into d16-d19
+ vld1.s16 {q8,q9}, [r0]!
+
+ ; transpose the input data
+ TRANSPOSE4X4
+
+ ; decide the type of transform
+ cmp r3, #2
+ beq idct_iadst
+ cmp r3, #3
+ beq iadst_iadst
+
+iadst_idct
+ ; generate constants
+ GENERATE_COSINE_CONSTANTS
+ GENERATE_SINE_CONSTANTS
+
+ ; first transform rows
+ IDCT4x4_1D
+
+ ; transpose the matrix
+ TRANSPOSE4X4
+
+ ; then transform columns
+ IADST4x4_1D
+
+ b end_vp9_short_iht4x4_add_neon
+
+idct_iadst
+ ; generate constants
+ GENERATE_COSINE_CONSTANTS
+ GENERATE_SINE_CONSTANTS
+
+ ; first transform rows
+ IADST4x4_1D
+
+ ; transpose the matrix
+ TRANSPOSE4X4
+
+ ; then transform columns
+ IDCT4x4_1D
+
+ b end_vp9_short_iht4x4_add_neon
+
+iadst_iadst
+ ; generate constants
+ GENERATE_SINE_CONSTANTS
+
+ ; first transform rows
+ IADST4x4_1D
+
+ ; transpose the matrix
+ TRANSPOSE4X4
+
+ ; then transform columns
+ IADST4x4_1D
+
+end_vp9_short_iht4x4_add_neon
+ ; ROUND_POWER_OF_TWO(temp_out[j], 4)
+ vrshr.s16 q8, q8, #4
+ vrshr.s16 q9, q9, #4
+
+ vld1.32 {d26[0]}, [r1], r2
+ vld1.32 {d26[1]}, [r1], r2
+ vld1.32 {d27[0]}, [r1], r2
+ vld1.32 {d27[1]}, [r1]
+
+ ; ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]
+ vaddw.u8 q8, q8, d26
+ vaddw.u8 q9, q9, d27
+
+ ; clip_pixel
+ vqmovun.s16 d26, q8
+ vqmovun.s16 d27, q9
+
+ ; do the stores in reverse order with negative post-increment, by changing
+ ; the sign of the stride
+ rsb r2, r2, #0
+ vst1.32 {d27[1]}, [r1], r2
+ vst1.32 {d27[0]}, [r1], r2
+ vst1.32 {d26[1]}, [r1], r2
+ vst1.32 {d26[0]}, [r1] ; no post-increment
+ bx lr
+ ENDP ; |vp9_short_iht4x4_add_neon|
+
+ END
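
Per column, IADST4x4_1D above computes the following (a scalar sketch
assembled from the s0..s7 comments in the macro; iadst4_1d_model is our
name, and dct_const_round_shift is the helper sketched earlier):

    static void iadst4_1d_model(const int16_t *in, int16_t *out) {
      const int sinpi_1_9 = 5283, sinpi_2_9 = 9929;
      const int sinpi_3_9 = 13377, sinpi_4_9 = 15212;
      const int x0 = in[0], x1 = in[1], x2 = in[2], x3 = in[3];
      /* products stay in 32 bits, as in the q10-q15 accumulators */
      const int s0 = sinpi_1_9 * x0, s1 = sinpi_2_9 * x0;
      const int s2 = sinpi_3_9 * x1, s3 = sinpi_4_9 * x2;
      const int s4 = sinpi_1_9 * x2, s5 = sinpi_2_9 * x3;
      const int s6 = sinpi_4_9 * x3, s7 = x0 + x3 - x2;
      const int t0 = s0 + s3 + s5;    /* "x0" in the asm comments */
      const int t1 = s1 - s4 - s6;    /* "x1" */
      const int t2 = sinpi_3_9 * s7;  /* "x2" */
      const int t3 = s2;              /* "x3" */
      out[0] = dct_const_round_shift(t0 + t3);
      out[1] = dct_const_round_shift(t1 + t3);
      out[2] = dct_const_round_shift(t2);
      out[3] = dct_const_round_shift(t0 + t1 - t3);
    }
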
diff --git a/libvpx/vp9/common/arm/neon/vp9_short_iht8x8_add_neon.asm b/libvpx/vp9/common/arm/neon/vp9_short_iht8x8_add_neon.asm
new file mode 100644
index 0000000..bab9cb4
--- /dev/null
+++ b/libvpx/vp9/common/arm/neon/vp9_short_iht8x8_add_neon.asm
@@ -0,0 +1,696 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_short_iht8x8_add_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+ ; Generate IADST constants in r0 - r12 for the IADST.
+ MACRO
+ GENERATE_IADST_CONSTANTS
+ ; generate cospi_2_64 = 16305
+ mov r0, #0x3f00
+ add r0, #0xb1
+
+ ; generate cospi_30_64 = 1606
+ mov r1, #0x600
+ add r1, #0x46
+
+ ; generate cospi_10_64 = 14449
+ mov r2, #0x3800
+ add r2, #0x71
+
+ ; generate cospi_22_64 = 7723
+ mov r3, #0x1e00
+ add r3, #0x2b
+
+ ; generate cospi_18_64 = 10394
+ mov r4, #0x2800
+ add r4, #0x9a
+
+ ; generate cospi_14_64 = 12665
+ mov r5, #0x3100
+ add r5, #0x79
+
+ ; generate cospi_26_64 = 4756
+ mov r6, #0x1200
+ add r6, #0x94
+
+ ; generate cospi_6_64 = 15679
+ mov r7, #0x3d00
+ add r7, #0x3f
+
+ ; generate cospi_8_64 = 15137
+ mov r8, #0x3b00
+ add r8, #0x21
+
+ ; generate cospi_24_64 = 6270
+ mov r9, #0x1800
+ add r9, #0x7e
+
+ ; generate 0
+ mov r10, #0
+
+ ; generate cospi_16_64 = 11585
+ mov r12, #0x2d00
+ add r12, #0x41
+ MEND
+
+ ; Generate IDCT constants in r3 - r9 for the IDCT.
+ MACRO
+ GENERATE_IDCT_CONSTANTS
+ ; generate cospi_28_64 = 3196
+ mov r3, #0x0c00
+ add r3, #0x7c
+
+ ; generate cospi_4_64 = 16069
+ mov r4, #0x3e00
+ add r4, #0xc5
+
+ ; generate cospi_12_64 = 13623
+ mov r5, #0x3500
+ add r5, #0x37
+
+ ; generate cospi_20_64 = 9102
+ mov r6, #0x2300
+ add r6, #0x8e
+
+ ; generate cospi_16_64 = 11585
+ mov r7, #0x2d00
+ add r7, #0x41
+
+ ; generate cospi_24_64 = 6270
+ mov r8, #0x1800
+ add r8, #0x7e
+
+ ; generate cospi_8_64 = 15137
+ mov r9, #0x3b00
+ add r9, #0x21
+ MEND
+
+ ; Transpose an 8x8 16-bit data matrix. Data are loaded in q8-q15.
+ MACRO
+ TRANSPOSE8X8
+ vswp d17, d24
+ vswp d23, d30
+ vswp d21, d28
+ vswp d19, d26
+ vtrn.32 q8, q10
+ vtrn.32 q9, q11
+ vtrn.32 q12, q14
+ vtrn.32 q13, q15
+ vtrn.16 q8, q9
+ vtrn.16 q10, q11
+ vtrn.16 q12, q13
+ vtrn.16 q14, q15
+ MEND
+
+ ; Parallel 1D IDCT on all the columns of an 8x8 16-bit data matrix, which
+ ; are loaded in q8-q15. The IDCT constants are loaded in r3 - r9. The
+ ; output will be stored back into q8-q15 registers. This macro will touch
+ ; the q0-q7 registers and use them as a buffer during calculation.
+ MACRO
+ IDCT8x8_1D
+ ; stage 1
+ vdup.16 d0, r3 ; duplicate cospi_28_64
+ vdup.16 d1, r4 ; duplicate cospi_4_64
+ vdup.16 d2, r5 ; duplicate cospi_12_64
+ vdup.16 d3, r6 ; duplicate cospi_20_64
+
+ ; input[1] * cospi_28_64
+ vmull.s16 q2, d18, d0
+ vmull.s16 q3, d19, d0
+
+ ; input[5] * cospi_12_64
+ vmull.s16 q5, d26, d2
+ vmull.s16 q6, d27, d2
+
+ ; input[1]*cospi_28_64-input[7]*cospi_4_64
+ vmlsl.s16 q2, d30, d1
+ vmlsl.s16 q3, d31, d1
+
+ ; input[5] * cospi_12_64 - input[3] * cospi_20_64
+ vmlsl.s16 q5, d22, d3
+ vmlsl.s16 q6, d23, d3
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d8, q2, #14 ; >> 14
+ vqrshrn.s32 d9, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d10, q5, #14 ; >> 14
+ vqrshrn.s32 d11, q6, #14 ; >> 14
+
+ ; input[1] * cospi_4_64
+ vmull.s16 q2, d18, d1
+ vmull.s16 q3, d19, d1
+
+ ; input[5] * cospi_20_64
+ vmull.s16 q9, d26, d3
+ vmull.s16 q13, d27, d3
+
+ ; input[1]*cospi_4_64+input[7]*cospi_28_64
+ vmlal.s16 q2, d30, d0
+ vmlal.s16 q3, d31, d0
+
+ ; input[5] * cospi_20_64 + input[3] * cospi_12_64
+ vmlal.s16 q9, d22, d2
+ vmlal.s16 q13, d23, d2
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d14, q2, #14 ; >> 14
+ vqrshrn.s32 d15, q3, #14 ; >> 14
+
+ ; stage 2 & stage 3 - even half
+ vdup.16 d0, r7 ; duplicate cospi_16_64
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d12, q9, #14 ; >> 14
+ vqrshrn.s32 d13, q13, #14 ; >> 14
+
+ ; input[0] * cospi_16_64
+ vmull.s16 q2, d16, d0
+ vmull.s16 q3, d17, d0
+
+ ; input[0] * cospi_16_64
+ vmull.s16 q13, d16, d0
+ vmull.s16 q15, d17, d0
+
+ ; (input[0] + input[2]) * cospi_16_64
+ vmlal.s16 q2, d24, d0
+ vmlal.s16 q3, d25, d0
+
+ ; (input[0] - input[2]) * cospi_16_64
+ vmlsl.s16 q13, d24, d0
+ vmlsl.s16 q15, d25, d0
+
+ vdup.16 d0, r8 ; duplicate cospi_24_64
+ vdup.16 d1, r9 ; duplicate cospi_8_64
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d18, q2, #14 ; >> 14
+ vqrshrn.s32 d19, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d22, q13, #14 ; >> 14
+ vqrshrn.s32 d23, q15, #14 ; >> 14
+
+ ; input[1] * cospi_24_64
+ vmull.s16 q2, d20, d0
+ vmull.s16 q3, d21, d0
+
+ ; input[1] * cospi_8_64
+ vmull.s16 q8, d20, d1
+ vmull.s16 q12, d21, d1
+
+ ; input[1] * cospi_24_64 - input[3] * cospi_8_64
+ vmlsl.s16 q2, d28, d1
+ vmlsl.s16 q3, d29, d1
+
+ ; input[1] * cospi_8_64 + input[3] * cospi_24_64
+ vmlal.s16 q8, d28, d0
+ vmlal.s16 q12, d29, d0
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d26, q2, #14 ; >> 14
+ vqrshrn.s32 d27, q3, #14 ; >> 14
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d30, q8, #14 ; >> 14
+ vqrshrn.s32 d31, q12, #14 ; >> 14
+
+ vadd.s16 q0, q9, q15 ; output[0] = step[0] + step[3]
+ vadd.s16 q1, q11, q13 ; output[1] = step[1] + step[2]
+ vsub.s16 q2, q11, q13 ; output[2] = step[1] - step[2]
+ vsub.s16 q3, q9, q15 ; output[3] = step[0] - step[3]
+
+ ; stage 3 -odd half
+ vdup.16 d16, r7 ; duplicate cospi_16_64
+
+ ; stage 2 - odd half
+ vsub.s16 q13, q4, q5 ; step2[5] = step1[4] - step1[5]
+ vadd.s16 q4, q4, q5 ; step2[4] = step1[4] + step1[5]
+ vsub.s16 q14, q7, q6 ; step2[6] = -step1[6] + step1[7]
+ vadd.s16 q7, q7, q6 ; step2[7] = step1[6] + step1[7]
+
+ ; step2[6] * cospi_16_64
+ vmull.s16 q9, d28, d16
+ vmull.s16 q10, d29, d16
+
+ ; step2[6] * cospi_16_64
+ vmull.s16 q11, d28, d16
+ vmull.s16 q12, d29, d16
+
+ ; (step2[6] - step2[5]) * cospi_16_64
+ vmlsl.s16 q9, d26, d16
+ vmlsl.s16 q10, d27, d16
+
+ ; (step2[5] + step2[6]) * cospi_16_64
+ vmlal.s16 q11, d26, d16
+ vmlal.s16 q12, d27, d16
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d10, q9, #14 ; >> 14
+ vqrshrn.s32 d11, q10, #14 ; >> 14
+
+ ; dct_const_round_shift(input_dc * cospi_16_64)
+ vqrshrn.s32 d12, q11, #14 ; >> 14
+ vqrshrn.s32 d13, q12, #14 ; >> 14
+
+ ; stage 4
+ vadd.s16 q8, q0, q7 ; output[0] = step1[0] + step1[7];
+ vadd.s16 q9, q1, q6 ; output[1] = step1[1] + step1[6];
+ vadd.s16 q10, q2, q5 ; output[2] = step1[2] + step1[5];
+ vadd.s16 q11, q3, q4 ; output[3] = step1[3] + step1[4];
+ vsub.s16 q12, q3, q4 ; output[4] = step1[3] - step1[4];
+ vsub.s16 q13, q2, q5 ; output[5] = step1[2] - step1[5];
+ vsub.s16 q14, q1, q6 ; output[6] = step1[1] - step1[6];
+ vsub.s16 q15, q0, q7 ; output[7] = step1[0] - step1[7];
+ MEND
+
+ ; Parallel 1D IADST on all the columns of an 8x8 16-bit data matrix, which
+ ; are loaded in q8-q15. The IADST constants are loaded in the r0 - r12
+ ; registers. The output will be stored back into q8-q15 registers. This
+ ; macro will touch the q0-q7 registers and use them as a buffer during
+ ; calculation.
+ MACRO
+ IADST8X8_1D
+ vdup.16 d14, r0 ; duplicate cospi_2_64
+ vdup.16 d15, r1 ; duplicate cospi_30_64
+
+ ; cospi_2_64 * x0
+ vmull.s16 q1, d30, d14
+ vmull.s16 q2, d31, d14
+
+ ; cospi_30_64 * x0
+ vmull.s16 q3, d30, d15
+ vmull.s16 q4, d31, d15
+
+ vdup.16 d30, r4 ; duplicate cospi_18_64
+ vdup.16 d31, r5 ; duplicate cospi_14_64
+
+ ; s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ vmlal.s16 q1, d16, d15
+ vmlal.s16 q2, d17, d15
+
+ ; s1 = cospi_30_64 * x0 - cospi_2_64 * x1
+ vmlsl.s16 q3, d16, d14
+ vmlsl.s16 q4, d17, d14
+
+ ; cospi_18_64 * x4
+ vmull.s16 q5, d22, d30
+ vmull.s16 q6, d23, d30
+
+ ; cospi_14_64 * x4
+ vmull.s16 q7, d22, d31
+ vmull.s16 q8, d23, d31
+
+ ; s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+ vmlal.s16 q5, d24, d31
+ vmlal.s16 q6, d25, d31
+
+ ; s5 = cospi_14_64 * x4 - cospi_18_64 * x5
+ vmlsl.s16 q7, d24, d30
+ vmlsl.s16 q8, d25, d30
+
+ ; (s0 + s4)
+ vadd.s32 q11, q1, q5
+ vadd.s32 q12, q2, q6
+
+ vdup.16 d0, r2 ; duplicate cospi_10_64
+ vdup.16 d1, r3 ; duplicate cospi_22_64
+
+ ; (s0 - s4)
+ vsub.s32 q1, q1, q5
+ vsub.s32 q2, q2, q6
+
+ ; x0 = dct_const_round_shift(s0 + s4);
+ vqrshrn.s32 d22, q11, #14 ; >> 14
+ vqrshrn.s32 d23, q12, #14 ; >> 14
+
+ ; (s1 + s5)
+ vadd.s32 q12, q3, q7
+ vadd.s32 q15, q4, q8
+
+ ; (s1 - s5)
+ vsub.s32 q3, q3, q7
+ vsub.s32 q4, q4, q8
+
+ ; x4 = dct_const_round_shift(s0 - s4);
+ vqrshrn.s32 d2, q1, #14 ; >> 14
+ vqrshrn.s32 d3, q2, #14 ; >> 14
+
+ ; x1 = dct_const_round_shift(s1 + s5);
+ vqrshrn.s32 d24, q12, #14 ; >> 14
+ vqrshrn.s32 d25, q15, #14 ; >> 14
+
+ ; x5 = dct_const_round_shift(s1 - s5);
+ vqrshrn.s32 d6, q3, #14 ; >> 14
+ vqrshrn.s32 d7, q4, #14 ; >> 14
+
+ ; cospi_10_64 * x2
+ vmull.s16 q4, d26, d0
+ vmull.s16 q5, d27, d0
+
+ ; cospi_22_64 * x2
+ vmull.s16 q2, d26, d1
+ vmull.s16 q6, d27, d1
+
+ vdup.16 d30, r6 ; duplicate cospi_26_64
+ vdup.16 d31, r7 ; duplicate cospi_6_64
+
+ ; s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+ vmlal.s16 q4, d20, d1
+ vmlal.s16 q5, d21, d1
+
+ ; s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+ vmlsl.s16 q2, d20, d0
+ vmlsl.s16 q6, d21, d0
+
+ ; cospi_26_64 * x6
+ vmull.s16 q0, d18, d30
+ vmull.s16 q13, d19, d30
+
+ ; s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ vmlal.s16 q0, d28, d31
+ vmlal.s16 q13, d29, d31
+
+ ; cospi_6_64 * x6
+ vmull.s16 q10, d18, d31
+ vmull.s16 q9, d19, d31
+
+ ; s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+ vmlsl.s16 q10, d28, d30
+ vmlsl.s16 q9, d29, d30
+
+ ; (s3 + s7)
+ vadd.s32 q14, q2, q10
+ vadd.s32 q15, q6, q9
+
+ ; (s3 - s7)
+ vsub.s32 q2, q2, q10
+ vsub.s32 q6, q6, q9
+
+ ; x3 = dct_const_round_shift(s3 + s7);
+ vqrshrn.s32 d28, q14, #14 ; >> 14
+ vqrshrn.s32 d29, q15, #14 ; >> 14
+
+ ; x7 = dct_const_round_shift(s3 - s7);
+ vqrshrn.s32 d4, q2, #14 ; >> 14
+ vqrshrn.s32 d5, q6, #14 ; >> 14
+
+ ; (s2 + s6)
+ vadd.s32 q9, q4, q0
+ vadd.s32 q10, q5, q13
+
+ ; (s2 - s6)
+ vsub.s32 q4, q4, q0
+ vsub.s32 q5, q5, q13
+
+ vdup.16 d30, r8 ; duplicate cospi_8_64
+ vdup.16 d31, r9 ; duplicate cospi_24_64
+
+ ; x2 = dct_const_round_shift(s2 + s6);
+ vqrshrn.s32 d18, q9, #14 ; >> 14
+ vqrshrn.s32 d19, q10, #14 ; >> 14
+
+ ; x6 = dct_const_round_shift(s2 - s6);
+ vqrshrn.s32 d8, q4, #14 ; >> 14
+ vqrshrn.s32 d9, q5, #14 ; >> 14
+
+ ; cospi_8_64 * x4
+ vmull.s16 q5, d2, d30
+ vmull.s16 q6, d3, d30
+
+ ; cospi_24_64 * x4
+ vmull.s16 q7, d2, d31
+ vmull.s16 q0, d3, d31
+
+ ; s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ vmlal.s16 q5, d6, d31
+ vmlal.s16 q6, d7, d31
+
+ ; s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ vmlsl.s16 q7, d6, d30
+ vmlsl.s16 q0, d7, d30
+
+ ; cospi_8_64 * x7
+ vmull.s16 q1, d4, d30
+ vmull.s16 q3, d5, d30
+
+ ; cospi_24_64 * x7
+ vmull.s16 q10, d4, d31
+ vmull.s16 q2, d5, d31
+
+ ; s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+ vmlsl.s16 q1, d8, d31
+ vmlsl.s16 q3, d9, d31
+
+ ; s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+ vmlal.s16 q10, d8, d30
+ vmlal.s16 q2, d9, d30
+
+ vadd.s16 q8, q11, q9 ; x0 = s0 + s2;
+
+ vsub.s16 q11, q11, q9 ; x2 = s0 - s2;
+
+ vadd.s16 q4, q12, q14 ; x1 = s1 + s3;
+
+ vsub.s16 q12, q12, q14 ; x3 = s1 - s3;
+
+ ; (s4 + s6)
+ vadd.s32 q14, q5, q1
+ vadd.s32 q15, q6, q3
+
+ ; (s4 - s6)
+ vsub.s32 q5, q5, q1
+ vsub.s32 q6, q6, q3
+
+ ; x4 = dct_const_round_shift(s4 + s6);
+ vqrshrn.s32 d18, q14, #14 ; >> 14
+ vqrshrn.s32 d19, q15, #14 ; >> 14
+
+ ; x6 = dct_const_round_shift(s4 - s6);
+ vqrshrn.s32 d10, q5, #14 ; >> 14
+ vqrshrn.s32 d11, q6, #14 ; >> 14
+
+ ; (s5 + s7)
+ vadd.s32 q1, q7, q10
+ vadd.s32 q3, q0, q2
+
+ ; (s5 - s7)
+ vsub.s32 q7, q7, q10
+ vsub.s32 q0, q0, q2
+
+ ; x5 = dct_const_round_shift(s5 + s7);
+ vqrshrn.s32 d28, q1, #14 ; >> 14
+ vqrshrn.s32 d29, q3, #14 ; >> 14
+
+ ; x7 = dct_const_round_shift(s5 - s7);
+ vqrshrn.s32 d14, q7, #14 ; >> 14
+ vqrshrn.s32 d15, q0, #14 ; >> 14
+
+ vdup.16 d30, r12 ; duplicate cospi_16_64
+
+ ; cospi_16_64 * x2
+ vmull.s16 q2, d22, d30
+ vmull.s16 q3, d23, d30
+
+ ; cospi_16_64 * x2
+ vmull.s16 q13, d22, d30
+ vmull.s16 q1, d23, d30
+
+ ; cospi_16_64 * x2 + cospi_16_64 * x3;
+ vmlal.s16 q2, d24, d30
+ vmlal.s16 q3, d25, d30
+
+ ; cospi_16_64 * x2 - cospi_16_64 * x3;
+ vmlsl.s16 q13, d24, d30
+ vmlsl.s16 q1, d25, d30
+
+ ; x2 = dct_const_round_shift(s2);
+ vqrshrn.s32 d4, q2, #14 ; >> 14
+ vqrshrn.s32 d5, q3, #14 ; >> 14
+
+ ; x3 = dct_const_round_shift(s3);
+ vqrshrn.s32 d24, q13, #14 ; >> 14
+ vqrshrn.s32 d25, q1, #14 ; >> 14
+
+ ; cospi_16_64 * x6
+ vmull.s16 q13, d10, d30
+ vmull.s16 q1, d11, d30
+
+ ; cospi_16_64 * x6
+ vmull.s16 q11, d10, d30
+ vmull.s16 q0, d11, d30
+
+ ; cospi_16_64 * x6 + cospi_16_64 * x7;
+ vmlal.s16 q13, d14, d30
+ vmlal.s16 q1, d15, d30
+
+ ; cospi_16_64 * x6 - cospi_16_64 * x7;
+ vmlsl.s16 q11, d14, d30
+ vmlsl.s16 q0, d15, d30
+
+ ; x6 = dct_const_round_shift(s6);
+ vqrshrn.s32 d20, q13, #14 ; >> 14
+ vqrshrn.s32 d21, q1, #14 ; >> 14
+
+ ; x7 = dct_const_round_shift(s7);
+ vqrshrn.s32 d12, q11, #14 ; >> 14
+ vqrshrn.s32 d13, q0, #14 ; >> 14
+
+ vdup.16 q5, r10 ; duplicate 0
+
+ vsub.s16 q9, q5, q9 ; output[1] = -x4;
+ vsub.s16 q11, q5, q2 ; output[3] = -x2;
+ vsub.s16 q13, q5, q6 ; output[5] = -x7;
+ vsub.s16 q15, q5, q4 ; output[7] = -x1;
+ MEND
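
The vmull/vmlal/vmlsl groups above carry the IADST butterflies in 14-bit fixed point, two 32-bit accumulators per output row. A scalar sketch of the first stage, using the s0/s1/s4/s5 names from the comments; round_shift14() is a hypothetical stand-in for dct_const_round_shift(), and unlike vqrshrn it does not saturate:

    #include <stdint.h>

    /* Hypothetical helper: rounding right-shift by DCT_CONST_BITS (14). */
    static int16_t round_shift14(int64_t v) {
      return (int16_t)((v + (1 << 13)) >> 14);
    }

    /* First IADST stage, as in the comments above. */
    static void iadst_stage1(int16_t *x0, int16_t *x1, int16_t *x4,
                             int16_t *x5, int cospi_2_64, int cospi_30_64,
                             int cospi_18_64, int cospi_14_64) {
      const int64_t s0 = cospi_2_64 * *x0 + cospi_30_64 * *x1;
      const int64_t s1 = cospi_30_64 * *x0 - cospi_2_64 * *x1;
      const int64_t s4 = cospi_18_64 * *x4 + cospi_14_64 * *x5;
      const int64_t s5 = cospi_14_64 * *x4 - cospi_18_64 * *x5;
      *x0 = round_shift14(s0 + s4);  /* x0 = dct_const_round_shift(s0 + s4) */
      *x4 = round_shift14(s0 - s4);  /* x4 = dct_const_round_shift(s0 - s4) */
      *x1 = round_shift14(s1 + s5);  /* x1 = dct_const_round_shift(s1 + s5) */
      *x5 = round_shift14(s1 - s5);  /* x5 = dct_const_round_shift(s1 - s5) */
    }
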
+
+
+ AREA Block, CODE, READONLY ; name this block of code
+;void vp9_short_iht8x8_add_neon(int16_t *input, uint8_t *dest,
+; int dest_stride, int tx_type)
+;
+; r0 int16_t *input
+; r1 uint8_t *dest
+; r2 int dest_stride
+; r3 int tx_type
+; This function only handles tx_type values 1, 2 and 3.
+|vp9_short_iht8x8_add_neon| PROC
+
+ ; load the inputs into q8-q15
+ vld1.s16 {q8,q9}, [r0]!
+ vld1.s16 {q10,q11}, [r0]!
+ vld1.s16 {q12,q13}, [r0]!
+ vld1.s16 {q14,q15}, [r0]!
+
+ push {r0-r10}
+
+ ; transpose the input data
+ TRANSPOSE8X8
+
+ ; decide the type of transform
+ cmp r3, #2
+ beq idct_iadst
+ cmp r3, #3
+ beq iadst_iadst
+
+iadst_idct
+ ; generate IDCT constants
+ GENERATE_IDCT_CONSTANTS
+
+ ; first transform rows
+ IDCT8x8_1D
+
+ ; transpose the matrix
+ TRANSPOSE8X8
+
+ ; generate IADST constants
+ GENERATE_IADST_CONSTANTS
+
+ ; then transform columns
+ IADST8X8_1D
+
+ b end_vp9_short_iht8x8_add_neon
+
+idct_iadst
+ ; generate IADST constants
+ GENERATE_IADST_CONSTANTS
+
+ ; first transform rows
+ IADST8X8_1D
+
+ ; transpose the matrix
+ TRANSPOSE8X8
+
+ ; generate IDCT constants
+ GENERATE_IDCT_CONSTANTS
+
+ ; then transform columns
+ IDCT8x8_1D
+
+ b end_vp9_short_iht8x8_add_neon
+
+iadst_iadst
+ ; generate IADST constants
+ GENERATE_IADST_CONSTANTS
+
+ ; first transform rows
+ IADST8X8_1D
+
+ ; transpose the matrix
+ TRANSPOSE8X8
+
+ ; then transform columns
+ IADST8X8_1D
+
+end_vp9_short_iht8x8_add_neon
+ pop {r0-r10}
+
+ ; ROUND_POWER_OF_TWO(temp_out[j], 5)
+ vrshr.s16 q8, q8, #5
+ vrshr.s16 q9, q9, #5
+ vrshr.s16 q10, q10, #5
+ vrshr.s16 q11, q11, #5
+ vrshr.s16 q12, q12, #5
+ vrshr.s16 q13, q13, #5
+ vrshr.s16 q14, q14, #5
+ vrshr.s16 q15, q15, #5
+
+ ; save dest pointer
+ mov r0, r1
+
+ ; load destination data
+ vld1.64 {d0}, [r1], r2
+ vld1.64 {d1}, [r1], r2
+ vld1.64 {d2}, [r1], r2
+ vld1.64 {d3}, [r1], r2
+ vld1.64 {d4}, [r1], r2
+ vld1.64 {d5}, [r1], r2
+ vld1.64 {d6}, [r1], r2
+ vld1.64 {d7}, [r1]
+
+ ; ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]
+ vaddw.u8 q8, q8, d0
+ vaddw.u8 q9, q9, d1
+ vaddw.u8 q10, q10, d2
+ vaddw.u8 q11, q11, d3
+ vaddw.u8 q12, q12, d4
+ vaddw.u8 q13, q13, d5
+ vaddw.u8 q14, q14, d6
+ vaddw.u8 q15, q15, d7
+
+ ; clip_pixel
+ vqmovun.s16 d0, q8
+ vqmovun.s16 d1, q9
+ vqmovun.s16 d2, q10
+ vqmovun.s16 d3, q11
+ vqmovun.s16 d4, q12
+ vqmovun.s16 d5, q13
+ vqmovun.s16 d6, q14
+ vqmovun.s16 d7, q15
+
+ ; store the data
+ vst1.64 {d0}, [r0], r2
+ vst1.64 {d1}, [r0], r2
+ vst1.64 {d2}, [r0], r2
+ vst1.64 {d3}, [r0], r2
+ vst1.64 {d4}, [r0], r2
+ vst1.64 {d5}, [r0], r2
+ vst1.64 {d6}, [r0], r2
+ vst1.64 {d7}, [r0], r2
+ bx lr
+ ENDP ; |vp9_short_iht8x8_add_neon|
+
+ END
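
The epilogue above (vrshr #5, vaddw.u8, vqmovun, vst1) is VP9's reconstruction step: round the inverse-transform residual by 2^5, add the predictor already in dest, clip to 8 bits, and store. A scalar sketch for one 8x8 block; add_residual_8x8() is a hypothetical name, and clip_pixel() mirrors the helper in vp9_common.h:

    #include <stdint.h>

    /* Unsigned saturation to [0, 255], as vqmovun.s16 does per lane. */
    static uint8_t clip_pixel(int v) {
      return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* (x + 16) >> 5 is ROUND_POWER_OF_TWO(temp_out[j], 5); dest holds the
     * predictor on entry and the reconstructed pixels on exit. */
    static void add_residual_8x8(const int16_t *out, uint8_t *dest,
                                 int stride) {
      int r, c;
      for (r = 0; r < 8; ++r)
        for (c = 0; c < 8; ++c)
          dest[r * stride + c] =
              clip_pixel(((out[r * 8 + c] + 16) >> 5) + dest[r * stride + c]);
    }
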
diff --git a/libvpx/vp9/common/generic/vp9_systemdependent.c b/libvpx/vp9/common/generic/vp9_systemdependent.c
new file mode 100644
index 0000000..f144721
--- /dev/null
+++ b/libvpx/vp9/common/generic/vp9_systemdependent.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "./vpx_config.h"
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+void vp9_machine_specific_config(VP9_COMMON *cm) {
+ (void)cm;
+ vp9_rtcd();
+}
diff --git a/libvpx/vp9/common/vp9_alloccommon.c b/libvpx/vp9/common/vp9_alloccommon.c
new file mode 100644
index 0000000..864e27e
--- /dev/null
+++ b/libvpx/vp9/common/vp9_alloccommon.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "./vpx_config.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_systemdependent.h"
+
+void vp9_update_mode_info_border(VP9_COMMON *cm, MODE_INFO *mi) {
+ const int stride = cm->mode_info_stride;
+ int i;
+
+ // Clear the top border row
+ vpx_memset(mi, 0, sizeof(MODE_INFO) * stride);
+
+ // Clear left border column
+ for (i = 1; i < cm->mi_rows + 1; i++)
+ vpx_memset(&mi[i * stride], 0, sizeof(MODE_INFO));
+}
+
+void vp9_free_frame_buffers(VP9_COMMON *cm) {
+ int i;
+
+ for (i = 0; i < NUM_YV12_BUFFERS; i++)
+ vp9_free_frame_buffer(&cm->yv12_fb[i]);
+
+ vp9_free_frame_buffer(&cm->post_proc_buffer);
+
+ vpx_free(cm->mip);
+ vpx_free(cm->prev_mip);
+ vpx_free(cm->above_seg_context);
+ vpx_free(cm->last_frame_seg_map);
+ vpx_free(cm->mi_grid_base);
+ vpx_free(cm->prev_mi_grid_base);
+
+ vpx_free(cm->above_context[0]);
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ cm->above_context[i] = 0;
+ cm->mip = NULL;
+ cm->prev_mip = NULL;
+ cm->above_seg_context = NULL;
+ cm->last_frame_seg_map = NULL;
+ cm->mi_grid_base = NULL;
+ cm->prev_mi_grid_base = NULL;
+}
+
+static void set_mb_mi(VP9_COMMON *cm, int aligned_width, int aligned_height) {
+ cm->mb_cols = (aligned_width + 8) >> 4;
+ cm->mb_rows = (aligned_height + 8) >> 4;
+ cm->MBs = cm->mb_rows * cm->mb_cols;
+
+ cm->mi_cols = aligned_width >> MI_SIZE_LOG2;
+ cm->mi_rows = aligned_height >> MI_SIZE_LOG2;
+ cm->mode_info_stride = cm->mi_cols + MI_BLOCK_SIZE;
+}
+
+static void setup_mi(VP9_COMMON *cm) {
+ cm->mi = cm->mip + cm->mode_info_stride + 1;
+ cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1;
+ cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1;
+
+ vpx_memset(cm->mip, 0,
+ cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO));
+
+ vpx_memset(cm->mi_grid_base, 0,
+ cm->mode_info_stride * (cm->mi_rows + 1) *
+ sizeof(*cm->mi_grid_base));
+
+ vp9_update_mode_info_border(cm, cm->mip);
+ vp9_update_mode_info_border(cm, cm->prev_mip);
+}
+
+int vp9_alloc_frame_buffers(VP9_COMMON *cm, int width, int height) {
+ int i, mi_cols;
+
+ const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
+ const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
+ const int ss_x = cm->subsampling_x;
+ const int ss_y = cm->subsampling_y;
+ int mi_size;
+
+ vp9_free_frame_buffers(cm);
+
+ for (i = 0; i < NUM_YV12_BUFFERS; i++) {
+ cm->fb_idx_ref_cnt[i] = 0;
+ if (vp9_alloc_frame_buffer(&cm->yv12_fb[i], width, height, ss_x, ss_y,
+ VP9BORDERINPIXELS) < 0)
+ goto fail;
+ }
+
+ cm->new_fb_idx = NUM_YV12_BUFFERS - 1;
+ cm->fb_idx_ref_cnt[cm->new_fb_idx] = 1;
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; i++)
+ cm->active_ref_idx[i] = i;
+
+ for (i = 0; i < NUM_REF_FRAMES; i++) {
+ cm->ref_frame_map[i] = i;
+ cm->fb_idx_ref_cnt[i] = 1;
+ }
+
+ if (vp9_alloc_frame_buffer(&cm->post_proc_buffer, width, height, ss_x, ss_y,
+ VP9BORDERINPIXELS) < 0)
+ goto fail;
+
+ set_mb_mi(cm, aligned_width, aligned_height);
+
+ // Allocation
+ mi_size = cm->mode_info_stride * (cm->mi_rows + MI_BLOCK_SIZE);
+
+ cm->mip = vpx_calloc(mi_size, sizeof(MODE_INFO));
+ if (!cm->mip)
+ goto fail;
+
+ cm->prev_mip = vpx_calloc(mi_size, sizeof(MODE_INFO));
+ if (!cm->prev_mip)
+ goto fail;
+
+ cm->mi_grid_base = vpx_calloc(mi_size, sizeof(*cm->mi_grid_base));
+ if (!cm->mi_grid_base)
+ goto fail;
+
+ cm->prev_mi_grid_base = vpx_calloc(mi_size, sizeof(*cm->prev_mi_grid_base));
+ if (!cm->prev_mi_grid_base)
+ goto fail;
+
+ setup_mi(cm);
+
+ // FIXME(jkoleszar): allocate subsampled arrays for U/V once subsampling
+ // information is exposed at this level
+ mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+
+ // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm
+ // block where mi unit size is 8x8.
+ cm->above_context[0] = vpx_calloc(sizeof(ENTROPY_CONTEXT) * MAX_MB_PLANE *
+ (2 * mi_cols), 1);
+ if (!cm->above_context[0])
+ goto fail;
+
+ cm->above_seg_context = vpx_calloc(sizeof(PARTITION_CONTEXT) * mi_cols, 1);
+ if (!cm->above_seg_context)
+ goto fail;
+
+ // Create the segmentation map structure and set to 0.
+ cm->last_frame_seg_map = vpx_calloc(cm->mi_rows * cm->mi_cols, 1);
+ if (!cm->last_frame_seg_map)
+ goto fail;
+
+ return 0;
+
+ fail:
+ vp9_free_frame_buffers(cm);
+ return 1;
+}
+
+void vp9_create_common(VP9_COMMON *cm) {
+ vp9_machine_specific_config(cm);
+
+ vp9_init_mbmode_probs(cm);
+
+ cm->tx_mode = ONLY_4X4;
+ cm->comp_pred_mode = HYBRID_PREDICTION;
+
+ // Initialize reference frame sign bias structure to defaults
+ vpx_memset(cm->ref_frame_sign_bias, 0, sizeof(cm->ref_frame_sign_bias));
+}
+
+void vp9_remove_common(VP9_COMMON *cm) {
+ vp9_free_frame_buffers(cm);
+}
+
+void vp9_initialize_common() {
+ vp9_coef_tree_initialize();
+ vp9_entropy_mode_init();
+ vp9_entropy_mv_init();
+}
+
+void vp9_update_frame_size(VP9_COMMON *cm) {
+ int i, mi_cols;
+ const int aligned_width = ALIGN_POWER_OF_TWO(cm->width, MI_SIZE_LOG2);
+ const int aligned_height = ALIGN_POWER_OF_TWO(cm->height, MI_SIZE_LOG2);
+
+ set_mb_mi(cm, aligned_width, aligned_height);
+ setup_mi(cm);
+
+ mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ for (i = 1; i < MAX_MB_PLANE; i++)
+ cm->above_context[i] =
+ cm->above_context[0] + i * sizeof(ENTROPY_CONTEXT) * 2 * mi_cols;
+
+ // Initialize the previous frame segment map to 0.
+ if (cm->last_frame_seg_map)
+ vpx_memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
+}
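
setup_mi() points cm->mi one full row and one column past cm->mip, leaving a zeroed border (written by vp9_update_mode_info_border()) above and to the left of the visible grid. A minimal sketch of the resulting indexing, assuming the VP9_COMMON and MODE_INFO types from the headers above; get_mi() is a hypothetical helper, not part of the patch:

    /* With mode_info_stride == mi_cols + MI_BLOCK_SIZE, the visible mi unit
     * at (row, col) lives at mip[(row + 1) * stride + (col + 1)];
     * row == -1 or col == -1 lands on the zeroed border. Hypothetical. */
    static MODE_INFO *get_mi(VP9_COMMON *cm, int row, int col) {
      return &cm->mip[(row + 1) * cm->mode_info_stride + (col + 1)];
    }
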
diff --git a/libvpx/vp9/common/vp9_alloccommon.h b/libvpx/vp9/common/vp9_alloccommon.h
new file mode 100644
index 0000000..5d5fae9
--- /dev/null
+++ b/libvpx/vp9/common/vp9_alloccommon.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_ALLOCCOMMON_H_
+#define VP9_COMMON_VP9_ALLOCCOMMON_H_
+
+#include "vp9/common/vp9_onyxc_int.h"
+
+void vp9_initialize_common();
+
+void vp9_update_mode_info_border(VP9_COMMON *cm, MODE_INFO *mi);
+
+void vp9_create_common(VP9_COMMON *cm);
+void vp9_remove_common(VP9_COMMON *cm);
+
+int vp9_alloc_frame_buffers(VP9_COMMON *cm, int width, int height);
+void vp9_free_frame_buffers(VP9_COMMON *cm);
+
+
+void vp9_update_frame_size(VP9_COMMON *cm);
+
+#endif // VP9_COMMON_VP9_ALLOCCOMMON_H_
diff --git a/libvpx/vp9/common/vp9_blockd.h b/libvpx/vp9/common/vp9_blockd.h
new file mode 100644
index 0000000..c8d677f
--- /dev/null
+++ b/libvpx/vp9/common/vp9_blockd.h
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_BLOCKD_H_
+#define VP9_COMMON_VP9_BLOCKD_H_
+
+#include "./vpx_config.h"
+
+#include "vpx_ports/mem.h"
+#include "vpx_scale/yv12config.h"
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_common_data.h"
+#include "vp9/common/vp9_enums.h"
+#include "vp9/common/vp9_mv.h"
+#include "vp9/common/vp9_scale.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_treecoder.h"
+
+#define BLOCK_SIZE_GROUPS 4
+#define MBSKIP_CONTEXTS 3
+
+/* Segment Feature Masks */
+#define MAX_MV_REF_CANDIDATES 2
+
+#define INTRA_INTER_CONTEXTS 4
+#define COMP_INTER_CONTEXTS 5
+#define REF_CONTEXTS 5
+
+typedef enum {
+ PLANE_TYPE_Y_WITH_DC,
+ PLANE_TYPE_UV,
+} PLANE_TYPE;
+
+typedef char ENTROPY_CONTEXT;
+
+typedef char PARTITION_CONTEXT;
+
+static INLINE int combine_entropy_contexts(ENTROPY_CONTEXT a,
+ ENTROPY_CONTEXT b) {
+ return (a != 0) + (b != 0);
+}
+
+typedef enum {
+ KEY_FRAME = 0,
+ INTER_FRAME = 1,
+ NUM_FRAME_TYPES,
+} FRAME_TYPE;
+
+typedef enum {
+ EIGHTTAP = 0,
+ EIGHTTAP_SMOOTH = 1,
+ EIGHTTAP_SHARP = 2,
+ BILINEAR = 3,
+ SWITCHABLE = 4 /* should be the last one */
+} INTERPOLATIONFILTERTYPE;
+
+typedef enum {
+ DC_PRED, // Average of above and left pixels
+ V_PRED, // Vertical
+ H_PRED, // Horizontal
+ D45_PRED, // Directional 45 deg = round(arctan(1/1) * 180/pi)
+ D135_PRED, // Directional 135 deg = 180 - 45
+ D117_PRED, // Directional 117 deg = 180 - 63
+ D153_PRED, // Directional 153 deg = 180 - 27
+ D207_PRED, // Directional 207 deg = 180 + 27
+ D63_PRED, // Directional 63 deg = round(arctan(2/1) * 180/pi)
+ TM_PRED, // True-motion
+ NEARESTMV,
+ NEARMV,
+ ZEROMV,
+ NEWMV,
+ MB_MODE_COUNT
+} MB_PREDICTION_MODE;
+
+static INLINE int is_intra_mode(MB_PREDICTION_MODE mode) {
+ return mode <= TM_PRED;
+}
+
+static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) {
+ return mode >= NEARESTMV && mode <= NEWMV;
+}
+
+#define INTRA_MODES (TM_PRED + 1)
+
+#define INTER_MODES (1 + NEWMV - NEARESTMV)
+
+static INLINE int inter_mode_offset(MB_PREDICTION_MODE mode) {
+ return (mode - NEARESTMV);
+}
+
+/* For keyframes, intra block modes are predicted by the (already decoded)
+ modes for the Y blocks to the left and above us; for interframes, there
+ is a single probability table. */
+
+union b_mode_info {
+ MB_PREDICTION_MODE as_mode;
+ int_mv as_mv[2]; // first, second inter predictor motion vectors
+};
+
+typedef enum {
+ NONE = -1,
+ INTRA_FRAME = 0,
+ LAST_FRAME = 1,
+ GOLDEN_FRAME = 2,
+ ALTREF_FRAME = 3,
+ MAX_REF_FRAMES = 4
+} MV_REFERENCE_FRAME;
+
+static INLINE int b_width_log2(BLOCK_SIZE sb_type) {
+ return b_width_log2_lookup[sb_type];
+}
+static INLINE int b_height_log2(BLOCK_SIZE sb_type) {
+ return b_height_log2_lookup[sb_type];
+}
+
+static INLINE int mi_width_log2(BLOCK_SIZE sb_type) {
+ return mi_width_log2_lookup[sb_type];
+}
+
+static INLINE int mi_height_log2(BLOCK_SIZE sb_type) {
+ return mi_height_log2_lookup[sb_type];
+}
+
+// This structure now relates to 8x8 block regions.
+typedef struct {
+ MB_PREDICTION_MODE mode, uv_mode;
+ MV_REFERENCE_FRAME ref_frame[2];
+ TX_SIZE tx_size;
+ int_mv mv[2]; // for each reference frame used
+ int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
+ int_mv best_mv, best_second_mv;
+
+ uint8_t mode_context[MAX_REF_FRAMES];
+
+ unsigned char skip_coeff; // 0=need to decode coeffs, 1=no coefficients
+ unsigned char segment_id; // Segment id for this block.
+
+ // Flags used for prediction status of various bit-stream signals
+ unsigned char seg_id_predicted;
+
+ INTERPOLATIONFILTERTYPE interp_filter;
+
+ BLOCK_SIZE sb_type;
+} MB_MODE_INFO;
+
+typedef struct {
+ MB_MODE_INFO mbmi;
+ union b_mode_info bmi[4];
+} MODE_INFO;
+
+static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
+ return mbmi->ref_frame[0] > INTRA_FRAME;
+}
+
+static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
+ return mbmi->ref_frame[1] > INTRA_FRAME;
+}
+
+enum mv_precision {
+ MV_PRECISION_Q3,
+ MV_PRECISION_Q4
+};
+
+#if CONFIG_ALPHA
+enum { MAX_MB_PLANE = 4 };
+#else
+enum { MAX_MB_PLANE = 3 };
+#endif
+
+struct buf_2d {
+ uint8_t *buf;
+ int stride;
+};
+
+struct macroblockd_plane {
+ DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]);
+ DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]);
+ DECLARE_ALIGNED(16, uint16_t, eobs[256]);
+ PLANE_TYPE plane_type;
+ int subsampling_x;
+ int subsampling_y;
+ struct buf_2d dst;
+ struct buf_2d pre[2];
+ int16_t *dequant;
+ ENTROPY_CONTEXT *above_context;
+ ENTROPY_CONTEXT *left_context;
+};
+
+#define BLOCK_OFFSET(x, i) ((x) + (i) * 16)
+
+typedef struct macroblockd {
+ struct macroblockd_plane plane[MAX_MB_PLANE];
+
+ struct scale_factors scale_factor[2];
+
+ MODE_INFO *last_mi;
+ MODE_INFO *this_mi;
+ int mode_info_stride;
+
+ MODE_INFO *mic_stream_ptr;
+
+ // A NULL indicates that the 8x8 is not part of the image
+ MODE_INFO **mi_8x8;
+ MODE_INFO **prev_mi_8x8;
+
+ int up_available;
+ int left_available;
+ int right_available;
+
+ // partition contexts
+ PARTITION_CONTEXT *above_seg_context;
+ PARTITION_CONTEXT *left_seg_context;
+
+ /* Distance of MB away from frame edges */
+ int mb_to_left_edge;
+ int mb_to_right_edge;
+ int mb_to_top_edge;
+ int mb_to_bottom_edge;
+
+ int lossless;
+ /* Inverse transform function pointers. */
+ void (*inv_txm4x4_1_add)(int16_t *input, uint8_t *dest, int stride);
+ void (*inv_txm4x4_add)(int16_t *input, uint8_t *dest, int stride);
+ void (*itxm_add)(int16_t *input, uint8_t *dest, int stride, int eob);
+
+ struct subpix_fn_table subpix;
+
+ int allow_high_precision_mv;
+
+ int corrupted;
+
+ unsigned char sb_index; // index of 32x32 block inside the 64x64 block
+ unsigned char mb_index; // index of 16x16 block inside the 32x32 block
+ unsigned char b_index; // index of 8x8 block inside the 16x16 block
+ unsigned char ab_index; // index of 4x4 block inside the 8x8 block
+
+ int q_index;
+
+} MACROBLOCKD;
+
+static INLINE unsigned char *get_sb_index(MACROBLOCKD *xd, BLOCK_SIZE subsize) {
+ switch (subsize) {
+ case BLOCK_64X64:
+ case BLOCK_64X32:
+ case BLOCK_32X64:
+ case BLOCK_32X32:
+ return &xd->sb_index;
+ case BLOCK_32X16:
+ case BLOCK_16X32:
+ case BLOCK_16X16:
+ return &xd->mb_index;
+ case BLOCK_16X8:
+ case BLOCK_8X16:
+ case BLOCK_8X8:
+ return &xd->b_index;
+ case BLOCK_8X4:
+ case BLOCK_4X8:
+ case BLOCK_4X4:
+ return &xd->ab_index;
+ default:
+ assert(0);
+ return NULL;
+ }
+}
+
+static INLINE void update_partition_context(MACROBLOCKD *xd, BLOCK_SIZE sb_type,
+ BLOCK_SIZE sb_size) {
+ const int bsl = b_width_log2(sb_size), bs = (1 << bsl) / 2;
+ const int bwl = b_width_log2(sb_type);
+ const int bhl = b_height_log2(sb_type);
+ const int boffset = b_width_log2(BLOCK_64X64) - bsl;
+ const char pcval0 = ~(0xe << boffset);
+ const char pcval1 = ~(0xf << boffset);
+ const char pcvalue[2] = {pcval0, pcval1};
+
+ assert(MAX(bwl, bhl) <= bsl);
+
+ // Update the partition context at the end node: set the partition bits
+ // of block sizes larger than the current one to 1, and the partition
+ // bits of smaller block sizes to 0.
+ vpx_memset(xd->above_seg_context, pcvalue[bwl == bsl], bs);
+ vpx_memset(xd->left_seg_context, pcvalue[bhl == bsl], bs);
+}
+
+static INLINE int partition_plane_context(MACROBLOCKD *xd, BLOCK_SIZE sb_type) {
+ int bsl = mi_width_log2(sb_type), bs = 1 << bsl;
+ int above = 0, left = 0, i;
+ int boffset = mi_width_log2(BLOCK_64X64) - bsl;
+
+ assert(mi_width_log2(sb_type) == mi_height_log2(sb_type));
+ assert(bsl >= 0);
+ assert(boffset >= 0);
+
+ for (i = 0; i < bs; i++)
+ above |= (xd->above_seg_context[i] & (1 << boffset));
+ for (i = 0; i < bs; i++)
+ left |= (xd->left_seg_context[i] & (1 << boffset));
+
+ above = (above > 0);
+ left = (left > 0);
+
+ return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
+}
+
+static BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, PARTITION_TYPE partition) {
+ const BLOCK_SIZE subsize = subsize_lookup[partition][bsize];
+ assert(subsize < BLOCK_SIZES);
+ return subsize;
+}
+
+extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT];
+
+static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
+ const MACROBLOCKD *xd, int ib) {
+ const MODE_INFO *const mi = xd->this_mi;
+ const MB_MODE_INFO *const mbmi = &mi->mbmi;
+
+ if (plane_type != PLANE_TYPE_Y_WITH_DC ||
+ xd->lossless ||
+ is_inter_block(mbmi))
+ return DCT_DCT;
+
+ return mode2txfm_map[mbmi->sb_type < BLOCK_8X8 ?
+ mi->bmi[ib].as_mode : mbmi->mode];
+}
+
+static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type,
+ const MACROBLOCKD *xd) {
+ return plane_type == PLANE_TYPE_Y_WITH_DC ?
+ mode2txfm_map[xd->this_mi->mbmi.mode] : DCT_DCT;
+}
+
+static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type,
+ const MACROBLOCKD *xd) {
+ return plane_type == PLANE_TYPE_Y_WITH_DC ?
+ mode2txfm_map[xd->this_mi->mbmi.mode] : DCT_DCT;
+}
+
+static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) {
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y_WITH_DC;
+ xd->plane[i].subsampling_x = i ? ss_x : 0;
+ xd->plane[i].subsampling_y = i ? ss_y : 0;
+ }
+#if CONFIG_ALPHA
+ // TODO(jkoleszar): Using the Y w/h for now
+ xd->plane[3].subsampling_x = 0;
+ xd->plane[3].subsampling_y = 0;
+#endif
+}
+
+
+static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) {
+ return MIN(mbmi->tx_size, max_uv_txsize_lookup[mbmi->sb_type]);
+}
+
+static BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
+ const struct macroblockd_plane *pd) {
+ BLOCK_SIZE bs = ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y];
+ assert(bs < BLOCK_SIZES);
+ return bs;
+}
+
+static INLINE int plane_block_width(BLOCK_SIZE bsize,
+ const struct macroblockd_plane* plane) {
+ return 4 << (b_width_log2(bsize) - plane->subsampling_x);
+}
+
+static INLINE int plane_block_height(BLOCK_SIZE bsize,
+ const struct macroblockd_plane* plane) {
+ return 4 << (b_height_log2(bsize) - plane->subsampling_y);
+}
+
+typedef void (*foreach_transformed_block_visitor)(int plane, int block,
+ BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size,
+ void *arg);
+
+static INLINE void foreach_transformed_block_in_plane(
+ const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
+ foreach_transformed_block_visitor visit, void *arg) {
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ const MB_MODE_INFO* mbmi = &xd->this_mi->mbmi;
+ // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
+ // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
+ // transform size varies per plane, look it up in a common way.
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi)
+ : mbmi->tx_size;
+ const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+ const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+ const int step = 1 << (tx_size << 1);
+ int i;
+
+ // If mb_to_right_edge is < 0 we are in a situation in which
+ // the current block size extends into the UMV and we won't
+ // visit the sub blocks that are wholly within the UMV.
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
+ int r, c;
+
+ int max_blocks_wide = num_4x4_w;
+ int max_blocks_high = num_4x4_h;
+
+ // xd->mb_to_right_edge is in units of pixels * 8. This converts
+ // it to 4x4 block sizes.
+ if (xd->mb_to_right_edge < 0)
+ max_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+
+ if (xd->mb_to_bottom_edge < 0)
+ max_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+
+ i = 0;
+ // Unlike the normal case - in here we have to keep track of the
+ // row and column of the blocks we use so that we know if we are in
+ // the unrestricted motion border.
+ for (r = 0; r < num_4x4_h; r += (1 << tx_size)) {
+ for (c = 0; c < num_4x4_w; c += (1 << tx_size)) {
+ if (r < max_blocks_high && c < max_blocks_wide)
+ visit(plane, i, plane_bsize, tx_size, arg);
+ i += step;
+ }
+ }
+ } else {
+ for (i = 0; i < num_4x4_w * num_4x4_h; i += step)
+ visit(plane, i, plane_bsize, tx_size, arg);
+ }
+}
+
+static INLINE void foreach_transformed_block(
+ const MACROBLOCKD* const xd, BLOCK_SIZE bsize,
+ foreach_transformed_block_visitor visit, void *arg) {
+ int plane;
+
+ for (plane = 0; plane < MAX_MB_PLANE; plane++)
+ foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
+}
+
+static INLINE void foreach_transformed_block_uv(
+ const MACROBLOCKD* const xd, BLOCK_SIZE bsize,
+ foreach_transformed_block_visitor visit, void *arg) {
+ int plane;
+
+ for (plane = 1; plane < MAX_MB_PLANE; plane++)
+ foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
+}
+
+static int raster_block_offset(BLOCK_SIZE plane_bsize,
+ int raster_block, int stride) {
+ const int bw = b_width_log2(plane_bsize);
+ const int y = 4 * (raster_block >> bw);
+ const int x = 4 * (raster_block & ((1 << bw) - 1));
+ return y * stride + x;
+}
+static int16_t* raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+ int raster_block, int16_t *base) {
+ const int stride = 4 << b_width_log2(plane_bsize);
+ return base + raster_block_offset(plane_bsize, raster_block, stride);
+}
+static uint8_t* raster_block_offset_uint8(BLOCK_SIZE plane_bsize,
+ int raster_block, uint8_t *base,
+ int stride) {
+ return base + raster_block_offset(plane_bsize, raster_block, stride);
+}
+
+static int txfrm_block_to_raster_block(BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, int block) {
+ const int bwl = b_width_log2(plane_bsize);
+ const int tx_cols_log2 = bwl - tx_size;
+ const int tx_cols = 1 << tx_cols_log2;
+ const int raster_mb = block >> (tx_size << 1);
+ const int x = (raster_mb & (tx_cols - 1)) << tx_size;
+ const int y = (raster_mb >> tx_cols_log2) << tx_size;
+ return x + (y << bwl);
+}
+
+static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, int block,
+ int *x, int *y) {
+ const int bwl = b_width_log2(plane_bsize);
+ const int tx_cols_log2 = bwl - tx_size;
+ const int tx_cols = 1 << tx_cols_log2;
+ const int raster_mb = block >> (tx_size << 1);
+ *x = (raster_mb & (tx_cols - 1)) << tx_size;
+ *y = (raster_mb >> tx_cols_log2) << tx_size;
+}
+
+static void extend_for_intra(MACROBLOCKD* const xd, BLOCK_SIZE plane_bsize,
+ int plane, int block, TX_SIZE tx_size) {
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ uint8_t *const buf = pd->dst.buf;
+ const int stride = pd->dst.stride;
+
+ int x, y;
+ txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
+ x = x * 4 - 1;
+ y = y * 4 - 1;
+ // Copy a pixel into the umv if we are in a situation where the block size
+ // extends into the UMV.
+ // TODO(JBB): Should be able to do the full extend in place so we don't have
+ // to do this multiple times.
+ if (xd->mb_to_right_edge < 0) {
+ const int bw = 4 << b_width_log2(plane_bsize);
+ const int umv_border_start = bw + (xd->mb_to_right_edge >>
+ (3 + pd->subsampling_x));
+
+ if (x + bw > umv_border_start)
+ vpx_memset(&buf[y * stride + umv_border_start],
+ buf[y * stride + umv_border_start - 1], bw);
+ }
+
+ if (xd->mb_to_bottom_edge < 0) {
+ const int bh = 4 << b_height_log2(plane_bsize);
+ const int umv_border_start = bh + (xd->mb_to_bottom_edge >>
+ (3 + pd->subsampling_y));
+ int i;
+ const uint8_t c = buf[(umv_border_start - 1) * stride + x];
+ uint8_t *d = &buf[umv_border_start * stride + x];
+
+ if (y + bh > umv_border_start)
+ for (i = 0; i < bh; ++i, d += stride)
+ *d = c;
+ }
+}
+static void set_contexts_on_border(MACROBLOCKD *xd,
+ struct macroblockd_plane *pd,
+ BLOCK_SIZE plane_bsize,
+ int tx_size_in_blocks, int has_eob,
+ int aoff, int loff,
+ ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L) {
+ int mi_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
+ int mi_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
+ int above_contexts = tx_size_in_blocks;
+ int left_contexts = tx_size_in_blocks;
+ int pt;
+
+ // xd->mb_to_right_edge is in units of pixels * 8. This converts
+ // it to 4x4 block sizes.
+ if (xd->mb_to_right_edge < 0)
+ mi_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+
+ if (xd->mb_to_bottom_edge < 0)
+ mi_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+
+ // this code attempts to avoid copying into contexts that are outside
+ // our border. Any blocks that do are set to 0...
+ if (above_contexts + aoff > mi_blocks_wide)
+ above_contexts = mi_blocks_wide - aoff;
+
+ if (left_contexts + loff > mi_blocks_high)
+ left_contexts = mi_blocks_high - loff;
+
+ for (pt = 0; pt < above_contexts; pt++)
+ A[pt] = has_eob;
+ for (pt = above_contexts; pt < tx_size_in_blocks; pt++)
+ A[pt] = 0;
+ for (pt = 0; pt < left_contexts; pt++)
+ L[pt] = has_eob;
+ for (pt = left_contexts; pt < tx_size_in_blocks; pt++)
+ L[pt] = 0;
+}
+
+static void set_contexts(MACROBLOCKD *xd, struct macroblockd_plane *pd,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ int has_eob, int aoff, int loff) {
+ ENTROPY_CONTEXT *const A = pd->above_context + aoff;
+ ENTROPY_CONTEXT *const L = pd->left_context + loff;
+ const int tx_size_in_blocks = 1 << tx_size;
+
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
+ set_contexts_on_border(xd, pd, plane_bsize, tx_size_in_blocks, has_eob,
+ aoff, loff, A, L);
+ } else {
+ vpx_memset(A, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
+ vpx_memset(L, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
+ }
+}
+
+static int get_tx_eob(struct segmentation *seg, int segment_id,
+ TX_SIZE tx_size) {
+ const int eob_max = 16 << (tx_size << 1);
+ return vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
+}
+
+#endif // VP9_COMMON_VP9_BLOCKD_H_
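
Worth pinning down with numbers: txfrm_block_to_raster_xy() (and txfrm_block_to_raster_block()) turn a transform-block scan index into 4x4-block coordinates. A standalone worked example for a 16x16 plane with TX_8X8 transforms, stepping by 1 << (tx_size << 1) exactly as foreach_transformed_block_in_plane() does:

    #include <stdio.h>

    /* 16x16 plane: b_width_log2 == 2, i.e. four 4x4 columns; TX_8X8 means
     * tx_size == 1. Prints:
     *   block  0 -> (x=0, y=0)    block  4 -> (x=2, y=0)
     *   block  8 -> (x=0, y=2)    block 12 -> (x=2, y=2) */
    int main(void) {
      const int bwl = 2, tx_size = 1;
      const int tx_cols_log2 = bwl - tx_size, tx_cols = 1 << tx_cols_log2;
      for (int block = 0; block < 16; block += 1 << (tx_size << 1)) {
        const int raster_mb = block >> (tx_size << 1);
        const int x = (raster_mb & (tx_cols - 1)) << tx_size;
        const int y = (raster_mb >> tx_cols_log2) << tx_size;
        printf("block %2d -> (x=%d, y=%d)\n", block, x, y);
      }
      return 0;
    }
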
diff --git a/libvpx/vp9/common/vp9_common.h b/libvpx/vp9/common/vp9_common.h
new file mode 100644
index 0000000..1796906
--- /dev/null
+++ b/libvpx/vp9/common/vp9_common.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_COMMON_H_
+#define VP9_COMMON_VP9_COMMON_H_
+
+/* Interface header for common constant data structures and lookup tables */
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx/vpx_integer.h"
+
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+
+#define ROUND_POWER_OF_TWO(value, n) \
+ (((value) + (1 << ((n) - 1))) >> (n))
+
+#define ALIGN_POWER_OF_TWO(value, n) \
+ (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))
+
+// Only need this for fixed-size arrays, for structs just assign.
+#define vp9_copy(dest, src) { \
+ assert(sizeof(dest) == sizeof(src)); \
+ vpx_memcpy(dest, src, sizeof(src)); \
+ }
+
+// Use this for variably-sized arrays.
+#define vp9_copy_array(dest, src, n) { \
+ assert(sizeof(*dest) == sizeof(*src)); \
+ vpx_memcpy(dest, src, n * sizeof(*src)); \
+ }
+
+#define vp9_zero(dest) vpx_memset(&dest, 0, sizeof(dest));
+#define vp9_zero_array(dest, n) vpx_memset(dest, 0, n * sizeof(*dest));
+
+static INLINE uint8_t clip_pixel(int val) {
+ return (val > 255) ? 255u : (val < 0) ? 0u : val;
+}
+
+static INLINE int clamp(int value, int low, int high) {
+ return value < low ? low : (value > high ? high : value);
+}
+
+static INLINE double fclamp(double value, double low, double high) {
+ return value < low ? low : (value > high ? high : value);
+}
+
+static int get_unsigned_bits(unsigned int num_values) {
+ int cat = 0;
+ if (num_values <= 1)
+ return 0;
+ num_values--;
+ while (num_values > 0) {
+ cat++;
+ num_values >>= 1;
+ }
+ return cat;
+}
+
+#if CONFIG_DEBUG
+#define CHECK_MEM_ERROR(cm, lval, expr) do { \
+ lval = (expr); \
+ if (!lval) \
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \
+ "Failed to allocate "#lval" at %s:%d", \
+ __FILE__, __LINE__); \
+ } while (0)
+#else
+#define CHECK_MEM_ERROR(cm, lval, expr) do { \
+ lval = (expr); \
+ if (!lval) \
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \
+ "Failed to allocate "#lval); \
+ } while (0)
+#endif
+
+#define SYNC_CODE_0 0x49
+#define SYNC_CODE_1 0x83
+#define SYNC_CODE_2 0x42
+
+
+#endif // VP9_COMMON_VP9_COMMON_H_
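
ROUND_POWER_OF_TWO() rounds to nearest before shifting, and ALIGN_POWER_OF_TWO() rounds a value up to the next multiple of 2^n. A standalone sanity check, with the macros copied verbatim from the header above:

    #include <assert.h>

    #define ROUND_POWER_OF_TWO(value, n) \
      (((value) + (1 << ((n) - 1))) >> (n))
    #define ALIGN_POWER_OF_TWO(value, n) \
      (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))

    int main(void) {
      assert(ROUND_POWER_OF_TWO(5, 1) == 3);    /* (5 + 1) >> 1 */
      assert(ALIGN_POWER_OF_TWO(67, 3) == 72);  /* up to a multiple of 8 */
      return 0;
    }
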
diff --git a/libvpx/vp9/common/vp9_common_data.c b/libvpx/vp9/common/vp9_common_data.c
new file mode 100644
index 0000000..dc41efd
--- /dev/null
+++ b/libvpx/vp9/common/vp9_common_data.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_common_data.h"
+
+// Log 2 conversion lookup tables for block width and height
+const int b_width_log2_lookup[BLOCK_SIZES] =
+ {0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4};
+const int b_height_log2_lookup[BLOCK_SIZES] =
+ {0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4};
+const int num_4x4_blocks_wide_lookup[BLOCK_SIZES] =
+ {1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 16, 16};
+const int num_4x4_blocks_high_lookup[BLOCK_SIZES] =
+ {1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16};
+// Log 2 conversion lookup tables for modeinfo width and height
+const int mi_width_log2_lookup[BLOCK_SIZES] =
+ {0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3};
+const int num_8x8_blocks_wide_lookup[BLOCK_SIZES] =
+ {1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8};
+const int mi_height_log2_lookup[BLOCK_SIZES] =
+ {0, 0, 0, 0, 1, 0, 1, 2, 1, 2, 3, 2, 3};
+const int num_8x8_blocks_high_lookup[BLOCK_SIZES] =
+ {1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8};
+
+// MIN(3, MIN(b_width_log2(bsize), b_height_log2(bsize)))
+const int size_group_lookup[BLOCK_SIZES] =
+ {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3};
+
+const int num_pels_log2_lookup[BLOCK_SIZES] =
+ {4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12};
+
+
+const PARTITION_TYPE partition_lookup[][BLOCK_SIZES] = {
+ { // 4X4
+ // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
+ PARTITION_NONE, PARTITION_INVALID, PARTITION_INVALID,
+ PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+ PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+ PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+ PARTITION_INVALID
+ }, { // 8X8
+ // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
+ PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE,
+ PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+ PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+ PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID
+ }, { // 16X16
+ // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
+ PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+ PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID,
+ PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+ PARTITION_INVALID, PARTITION_INVALID
+ }, { // 32X32
+ // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
+ PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+ PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT,
+ PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID,
+ PARTITION_INVALID, PARTITION_INVALID
+ }, { // 64X64
+ // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
+ PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+ PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+ PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ,
+ PARTITION_NONE
+ }
+};
+
+const BLOCK_SIZE subsize_lookup[PARTITION_TYPES][BLOCK_SIZES] = {
+ { // PARTITION_NONE
+ BLOCK_4X4, BLOCK_4X8, BLOCK_8X4,
+ BLOCK_8X8, BLOCK_8X16, BLOCK_16X8,
+ BLOCK_16X16, BLOCK_16X32, BLOCK_32X16,
+ BLOCK_32X32, BLOCK_32X64, BLOCK_64X32,
+ BLOCK_64X64,
+ }, { // PARTITION_HORZ
+ BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_8X4, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_16X8, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_32X16, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_64X32,
+ }, { // PARTITION_VERT
+ BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_4X8, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_8X16, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_16X32, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_32X64,
+ }, { // PARTITION_SPLIT
+ BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_4X4, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_8X8, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_16X16, BLOCK_INVALID, BLOCK_INVALID,
+ BLOCK_32X32,
+ }
+};
+
+const TX_SIZE max_txsize_lookup[BLOCK_SIZES] = {
+ TX_4X4, TX_4X4, TX_4X4,
+ TX_8X8, TX_8X8, TX_8X8,
+ TX_16X16, TX_16X16, TX_16X16,
+ TX_32X32, TX_32X32, TX_32X32, TX_32X32
+};
+const TX_SIZE max_uv_txsize_lookup[BLOCK_SIZES] = {
+ TX_4X4, TX_4X4, TX_4X4,
+ TX_4X4, TX_4X4, TX_4X4,
+ TX_8X8, TX_8X8, TX_8X8,
+ TX_16X16, TX_16X16, TX_16X16, TX_32X32
+};
+
+const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2] = {
+// ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1
+// ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1
+ {{BLOCK_4X4, BLOCK_INVALID}, {BLOCK_INVALID, BLOCK_INVALID}},
+ {{BLOCK_4X8, BLOCK_4X4}, {BLOCK_INVALID, BLOCK_INVALID}},
+ {{BLOCK_8X4, BLOCK_INVALID}, {BLOCK_4X4, BLOCK_INVALID}},
+ {{BLOCK_8X8, BLOCK_8X4}, {BLOCK_4X8, BLOCK_4X4}},
+ {{BLOCK_8X16, BLOCK_8X8}, {BLOCK_INVALID, BLOCK_4X8}},
+ {{BLOCK_16X8, BLOCK_INVALID}, {BLOCK_8X8, BLOCK_8X4}},
+ {{BLOCK_16X16, BLOCK_16X8}, {BLOCK_8X16, BLOCK_8X8}},
+ {{BLOCK_16X32, BLOCK_16X16}, {BLOCK_INVALID, BLOCK_8X16}},
+ {{BLOCK_32X16, BLOCK_INVALID}, {BLOCK_16X16, BLOCK_16X8}},
+ {{BLOCK_32X32, BLOCK_32X16}, {BLOCK_16X32, BLOCK_16X16}},
+ {{BLOCK_32X64, BLOCK_32X32}, {BLOCK_INVALID, BLOCK_16X32}},
+ {{BLOCK_64X32, BLOCK_INVALID}, {BLOCK_32X32, BLOCK_32X16}},
+ {{BLOCK_64X64, BLOCK_64X32}, {BLOCK_32X64, BLOCK_32X32}},
+};
+
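
ss_size_lookup above is indexed [bsize][ss_x][ss_y]. A small sketch of the common 4:2:0 case, assuming the enums and table declaration from vp9_common_data.h; uv_block_size_420() is a hypothetical helper:

    /* With 4:2:0 subsampling both chroma planes halve in x and y, so e.g.
     * uv_block_size_420(BLOCK_16X16) == BLOCK_8X8. Hypothetical helper. */
    static BLOCK_SIZE uv_block_size_420(BLOCK_SIZE bsize) {
      return ss_size_lookup[bsize][1][1];  /* [bsize][ss_x == 1][ss_y == 1] */
    }
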
diff --git a/libvpx/vp9/common/vp9_common_data.h b/libvpx/vp9/common/vp9_common_data.h
new file mode 100644
index 0000000..3822bfc
--- /dev/null
+++ b/libvpx/vp9/common/vp9_common_data.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_COMMON_DATA_H_
+#define VP9_COMMON_VP9_COMMON_DATA_H_
+
+#include "vp9/common/vp9_enums.h"
+
+extern const int b_width_log2_lookup[BLOCK_SIZES];
+extern const int b_height_log2_lookup[BLOCK_SIZES];
+extern const int mi_width_log2_lookup[BLOCK_SIZES];
+extern const int mi_height_log2_lookup[BLOCK_SIZES];
+extern const int num_8x8_blocks_wide_lookup[BLOCK_SIZES];
+extern const int num_8x8_blocks_high_lookup[BLOCK_SIZES];
+extern const int num_4x4_blocks_high_lookup[BLOCK_SIZES];
+extern const int num_4x4_blocks_wide_lookup[BLOCK_SIZES];
+extern const int size_group_lookup[BLOCK_SIZES];
+extern const int num_pels_log2_lookup[BLOCK_SIZES];
+extern const PARTITION_TYPE partition_lookup[][BLOCK_SIZES];
+extern const BLOCK_SIZE subsize_lookup[PARTITION_TYPES][BLOCK_SIZES];
+extern const TX_SIZE max_txsize_lookup[BLOCK_SIZES];
+extern const TX_SIZE max_uv_txsize_lookup[BLOCK_SIZES];
+extern const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2];
+
+#endif  // VP9_COMMON_VP9_COMMON_DATA_H_
diff --git a/libvpx/vp9/common/vp9_convolve.c b/libvpx/vp9/common/vp9_convolve.c
new file mode 100644
index 0000000..94231a1
--- /dev/null
+++ b/libvpx/vp9/common/vp9_convolve.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vp9/common/vp9_convolve.h"
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_filter.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+
+static void convolve_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x0, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h, int taps) {
+ int x, y, k;
+
+ /* NOTE: This assumes that the filter table is 256-byte aligned. */
+ /* TODO(agrange) Modify to make independent of table alignment. */
+ const int16_t *const filter_x_base =
+ (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
+
+ /* Adjust base pointer address for this source line */
+ src -= taps / 2 - 1;
+
+ for (y = 0; y < h; ++y) {
+ /* Initial phase offset */
+ int x_q4 = (filter_x0 - filter_x_base) / taps;
+
+ for (x = 0; x < w; ++x) {
+ /* Per-pixel src offset */
+ const int src_x = x_q4 >> SUBPEL_BITS;
+ int sum = 0;
+
+ /* Pointer to filter to use */
+ const int16_t *const filter_x = filter_x_base +
+ (x_q4 & SUBPEL_MASK) * taps;
+
+ for (k = 0; k < taps; ++k)
+ sum += src[src_x + k] * filter_x[k];
+
+ dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+
+ /* Move to the next source pixel */
+ x_q4 += x_step_q4;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+static void convolve_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x0, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h, int taps) {
+ int x, y, k;
+
+ /* NOTE: This assumes that the filter table is 256-byte aligned. */
+ /* TODO(agrange) Modify to make independent of table alignment. */
+ const int16_t *const filter_x_base =
+ (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
+
+ /* Adjust base pointer address for this source line */
+ src -= taps / 2 - 1;
+
+ for (y = 0; y < h; ++y) {
+ /* Initial phase offset */
+ int x_q4 = (filter_x0 - filter_x_base) / taps;
+
+ for (x = 0; x < w; ++x) {
+ /* Per-pixel src offset */
+ const int src_x = x_q4 >> SUBPEL_BITS;
+ int sum = 0;
+
+ /* Pointer to filter to use */
+ const int16_t *const filter_x = filter_x_base +
+ (x_q4 & SUBPEL_MASK) * taps;
+
+ for (k = 0; k < taps; ++k)
+ sum += src[src_x + k] * filter_x[k];
+
+ dst[x] = ROUND_POWER_OF_TWO(dst[x] +
+ clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
+
+ /* Move to the next source pixel */
+ x_q4 += x_step_q4;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+static void convolve_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y0, int y_step_q4,
+ int w, int h, int taps) {
+ int x, y, k;
+
+ /* NOTE: This assumes that the filter table is 256-byte aligned. */
+ /* TODO(agrange) Modify to make independent of table alignment. */
+ const int16_t *const filter_y_base =
+ (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
+
+ /* Adjust base pointer address for this source column */
+ src -= src_stride * (taps / 2 - 1);
+
+ for (x = 0; x < w; ++x) {
+ /* Initial phase offset */
+ int y_q4 = (filter_y0 - filter_y_base) / taps;
+
+ for (y = 0; y < h; ++y) {
+ /* Per-pixel src offset */
+ const int src_y = y_q4 >> SUBPEL_BITS;
+ int sum = 0;
+
+ /* Pointer to filter to use */
+ const int16_t *const filter_y = filter_y_base +
+ (y_q4 & SUBPEL_MASK) * taps;
+
+ for (k = 0; k < taps; ++k)
+ sum += src[(src_y + k) * src_stride] * filter_y[k];
+
+ dst[y * dst_stride] =
+ clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+
+ /* Move to the next source pixel */
+ y_q4 += y_step_q4;
+ }
+ ++src;
+ ++dst;
+ }
+}
+
+static void convolve_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y0, int y_step_q4,
+ int w, int h, int taps) {
+ int x, y, k;
+
+ /* NOTE: This assumes that the filter table is 256-byte aligned. */
+ /* TODO(agrange) Modify to make independent of table alignment. */
+ const int16_t *const filter_y_base =
+ (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
+
+ /* Adjust base pointer address for this source column */
+ src -= src_stride * (taps / 2 - 1);
+
+ for (x = 0; x < w; ++x) {
+ /* Initial phase offset */
+ int y_q4 = (filter_y0 - filter_y_base) / taps;
+
+ for (y = 0; y < h; ++y) {
+ /* Per-pixel src offset */
+ const int src_y = y_q4 >> SUBPEL_BITS;
+ int sum = 0;
+
+ /* Pointer to filter to use */
+ const int16_t *const filter_y = filter_y_base +
+ (y_q4 & SUBPEL_MASK) * taps;
+
+ for (k = 0; k < taps; ++k)
+ sum += src[(src_y + k) * src_stride] * filter_y[k];
+
+ dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] +
+ clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
+
+ /* Move to the next source pixel */
+ y_q4 += y_step_q4;
+ }
+ ++src;
+ ++dst;
+ }
+}
+
+static void convolve_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h, int taps) {
+ /* Fixed size intermediate buffer places limits on parameters.
+ * Maximum intermediate_height is 324, for y_step_q4 == 80,
+ * h == 64, taps == 8.
+ * y_step_q4 of 80 allows for 1/10 scale for 5 layer svc
+ */
+ uint8_t temp[64 * 324];
+ int intermediate_height = (((h - 1) * y_step_q4 + 15) >> 4) + taps;
+
+ assert(w <= 64);
+ assert(h <= 64);
+ assert(taps <= 8);
+ assert(y_step_q4 <= 80);
+ assert(x_step_q4 <= 80);
+
+ if (intermediate_height < h)
+ intermediate_height = h;
+
+ convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride, temp, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4, w,
+ intermediate_height, taps);
+ convolve_vert_c(temp + 64 * (taps / 2 - 1), 64, dst, dst_stride, filter_x,
+ x_step_q4, filter_y, y_step_q4, w, h, taps);
+}
+
+void vp9_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8);
+}
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8);
+}
+
+void vp9_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8);
+}
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8);
+}
+
+void vp9_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h, 8);
+}
+
+void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ /* Fixed size intermediate buffer places limits on parameters. */
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 64 * 64);
+ assert(w <= 64);
+ assert(h <= 64);
+
+ vp9_convolve8(src, src_stride, temp, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+ vp9_convolve_avg(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h);
+}
+
+void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h) {
+ int r;
+
+ for (r = h; r > 0; --r) {
+ memcpy(dst, src, w);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void vp9_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h) {
+ int x, y;
+
+ for (y = 0; y < h; ++y) {
+ for (x = 0; x < w; ++x)
+ dst[x] = ROUND_POWER_OF_TWO(dst[x] + src[x], 1);
+
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
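
The horizontal and vertical loops above keep sub-pixel positions in q4 (1/16-pel) fixed point: the high bits select the source pixel and the low four bits select the filter phase. A standalone illustration of that stepping; the constants mirror SUBPEL_BITS == 4 and SUBPEL_MASK == 15 from vp9_filter.h, and x_step_q4 == 16 is the unscaled case:

    #include <stdio.h>

    int main(void) {
      const int subpel_bits = 4, subpel_mask = 15;
      const int steps[2] = { 16, 24 };  /* unscaled, and 2/3-size scaling */
      for (int i = 0; i < 2; ++i) {
        int x_q4 = 0;
        printf("x_step_q4=%d:", steps[i]);
        for (int x = 0; x < 5; ++x, x_q4 += steps[i])
          printf(" src=%d phase=%d",   /* source pixel, filter phase */
                 x_q4 >> subpel_bits, x_q4 & subpel_mask);
        printf("\n");
      }
      return 0;
    }
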
diff --git a/libvpx/vp9/common/vp9_convolve.h b/libvpx/vp9/common/vp9_convolve.h
new file mode 100644
index 0000000..13220e9
--- /dev/null
+++ b/libvpx/vp9/common/vp9_convolve.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VP9_COMMON_CONVOLVE_H_
+#define VP9_COMMON_CONVOLVE_H_
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+
+#define FILTER_BITS 7
+
+typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h);
+
+struct subpix_fn_table {
+ const int16_t (*filter_x)[8];
+ const int16_t (*filter_y)[8];
+};
+
+#endif // VP9_COMMON_CONVOLVE_H_
diff --git a/libvpx/vp9/common/vp9_debugmodes.c b/libvpx/vp9/common/vp9_debugmodes.c
new file mode 100644
index 0000000..79f769e
--- /dev/null
+++ b/libvpx/vp9/common/vp9_debugmodes.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+static void log_frame_info(VP9_COMMON *cm, const char *str, FILE *f) {
+ fprintf(f, "%s", str);
+ fprintf(f, "(Frame %d, Show:%d, Q:%d): \n", cm->current_video_frame,
+ cm->show_frame, cm->base_qindex);
+}
+/* Prints, for every mi unit in the grid, the integer value of the
+ * MB_MODE_INFO member located at the given byte offset into the mbmi
+ * structure.
+ */
+static void print_mi_data(VP9_COMMON *cm, FILE *file, char *descriptor,
+ size_t member_offset) {
+ int mi_row;
+ int mi_col;
+ int mi_index = 0;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible;
+ int rows = cm->mi_rows;
+ int cols = cm->mi_cols;
+ char prefix = descriptor[0];
+
+ log_frame_info(cm, descriptor, file);
+ mi_index = 0;
+ for (mi_row = 0; mi_row < rows; mi_row++) {
+ fprintf(file, "%c ", prefix);
+ for (mi_col = 0; mi_col < cols; mi_col++) {
+ fprintf(file, "%2d ",
+ *((int*) ((char *) (&mi_8x8[mi_index]->mbmi) +
+ member_offset)));
+ mi_index++;
+ }
+ fprintf(file, "\n");
+ mi_index += 8;
+ }
+ fprintf(file, "\n");
+}
+void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, char *file) {
+ int mi_row;
+ int mi_col;
+ int mi_index = 0;
+ FILE *mvs = fopen(file, "a");
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible;
+ int rows = cm->mi_rows;
+ int cols = cm->mi_cols;
+
+ if (mvs == NULL) return;
+
+ print_mi_data(cm, mvs, "Partitions:", offsetof(MB_MODE_INFO, sb_type));
+ print_mi_data(cm, mvs, "Modes:", offsetof(MB_MODE_INFO, mode));
+ print_mi_data(cm, mvs, "Skips:", offsetof(MB_MODE_INFO, skip_coeff));
+ print_mi_data(cm, mvs, "Ref frame:", offsetof(MB_MODE_INFO, ref_frame[0]));
+ print_mi_data(cm, mvs, "Transform:", offsetof(MB_MODE_INFO, tx_size));
+ print_mi_data(cm, mvs, "UV Modes:", offsetof(MB_MODE_INFO, uv_mode));
+
+ log_frame_info(cm, "Vectors ", mvs);
+ for (mi_row = 0; mi_row < rows; mi_row++) {
+ fprintf(mvs, "V ");
+ for (mi_col = 0; mi_col < cols; mi_col++) {
+ fprintf(mvs, "%4d:%4d ", mi_8x8[mi_index]->mbmi.mv[0].as_mv.row,
+ mi_8x8[mi_index]->mbmi.mv[0].as_mv.col);
+ mi_index++;
+ }
+ fprintf(mvs, "\n");
+ mi_index += 8;
+ }
+ fprintf(mvs, "\n");
+
+ fclose(mvs);
+}
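
print_mi_data() above walks the mode-info grid once per field, using
offsetof() so that one printer covers every integer member of
MB_MODE_INFO. The same pattern in miniature; toy_info and print_member
are hypothetical stand-ins:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical two-field stand-in for MB_MODE_INFO. */
    typedef struct {
      int mode;
      int tx_size;
    } toy_info;

    /* One printer for any int member, selected by its byte offset. */
    static void print_member(const toy_info *info, int n, size_t offset) {
      int i;
      for (i = 0; i < n; ++i)
        printf("%2d ", *(const int *)((const char *)&info[i] + offset));
      printf("\n");
    }

    int main(void) {
      toy_info mi[3] = { { 1, 0 }, { 2, 1 }, { 3, 2 } };
      print_member(mi, 3, offsetof(toy_info, mode));     /*  1  2  3 */
      print_member(mi, 3, offsetof(toy_info, tx_size));  /*  0  1  2 */
      return 0;
    }
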
diff --git a/libvpx/vp9/common/vp9_default_coef_probs.h b/libvpx/vp9/common/vp9_default_coef_probs.h
new file mode 100644
index 0000000..185fced
--- /dev/null
+++ b/libvpx/vp9/common/vp9_default_coef_probs.h
@@ -0,0 +1,696 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+*/
+
+/* Generated file, included by vp9_entropy.c */
+static const vp9_coeff_probs_model default_coef_probs_4x4[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 195, 29, 183 },
+ { 84, 49, 136 },
+ { 8, 42, 71 }
+ }, { /* Coeff Band 1 */
+ { 31, 107, 169 },
+ { 35, 99, 159 },
+ { 17, 82, 140 },
+ { 8, 66, 114 },
+ { 2, 44, 76 },
+ { 1, 19, 32 }
+ }, { /* Coeff Band 2 */
+ { 40, 132, 201 },
+ { 29, 114, 187 },
+ { 13, 91, 157 },
+ { 7, 75, 127 },
+ { 3, 58, 95 },
+ { 1, 28, 47 }
+ }, { /* Coeff Band 3 */
+ { 69, 142, 221 },
+ { 42, 122, 201 },
+ { 15, 91, 159 },
+ { 6, 67, 121 },
+ { 1, 42, 77 },
+ { 1, 17, 31 }
+ }, { /* Coeff Band 4 */
+ { 102, 148, 228 },
+ { 67, 117, 204 },
+ { 17, 82, 154 },
+ { 6, 59, 114 },
+ { 2, 39, 75 },
+ { 1, 15, 29 }
+ }, { /* Coeff Band 5 */
+ { 156, 57, 233 },
+ { 119, 57, 212 },
+ { 58, 48, 163 },
+ { 29, 40, 124 },
+ { 12, 30, 81 },
+ { 3, 12, 31 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 191, 107, 226 },
+ { 124, 117, 204 },
+ { 25, 99, 155 }
+ }, { /* Coeff Band 1 */
+ { 29, 148, 210 },
+ { 37, 126, 194 },
+ { 8, 93, 157 },
+ { 2, 68, 118 },
+ { 1, 39, 69 },
+ { 1, 17, 33 }
+ }, { /* Coeff Band 2 */
+ { 41, 151, 213 },
+ { 27, 123, 193 },
+ { 3, 82, 144 },
+ { 1, 58, 105 },
+ { 1, 32, 60 },
+ { 1, 13, 26 }
+ }, { /* Coeff Band 3 */
+ { 59, 159, 220 },
+ { 23, 126, 198 },
+ { 4, 88, 151 },
+ { 1, 66, 114 },
+ { 1, 38, 71 },
+ { 1, 18, 34 }
+ }, { /* Coeff Band 4 */
+ { 114, 136, 232 },
+ { 51, 114, 207 },
+ { 11, 83, 155 },
+ { 3, 56, 105 },
+ { 1, 33, 65 },
+ { 1, 17, 34 }
+ }, { /* Coeff Band 5 */
+ { 149, 65, 234 },
+ { 121, 57, 215 },
+ { 61, 49, 166 },
+ { 28, 36, 114 },
+ { 12, 25, 76 },
+ { 3, 16, 42 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 214, 49, 220 },
+ { 132, 63, 188 },
+ { 42, 65, 137 }
+ }, { /* Coeff Band 1 */
+ { 85, 137, 221 },
+ { 104, 131, 216 },
+ { 49, 111, 192 },
+ { 21, 87, 155 },
+ { 2, 49, 87 },
+ { 1, 16, 28 }
+ }, { /* Coeff Band 2 */
+ { 89, 163, 230 },
+ { 90, 137, 220 },
+ { 29, 100, 183 },
+ { 10, 70, 135 },
+ { 2, 42, 81 },
+ { 1, 17, 33 }
+ }, { /* Coeff Band 3 */
+ { 108, 167, 237 },
+ { 55, 133, 222 },
+ { 15, 97, 179 },
+ { 4, 72, 135 },
+ { 1, 45, 85 },
+ { 1, 19, 38 }
+ }, { /* Coeff Band 4 */
+ { 124, 146, 240 },
+ { 66, 124, 224 },
+ { 17, 88, 175 },
+ { 4, 58, 122 },
+ { 1, 36, 75 },
+ { 1, 18, 37 }
+ }, { /* Coeff Band 5 */
+ { 141, 79, 241 },
+ { 126, 70, 227 },
+ { 66, 58, 182 },
+ { 30, 44, 136 },
+ { 12, 34, 96 },
+ { 2, 20, 47 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 229, 99, 249 },
+ { 143, 111, 235 },
+ { 46, 109, 192 }
+ }, { /* Coeff Band 1 */
+ { 82, 158, 236 },
+ { 94, 146, 224 },
+ { 25, 117, 191 },
+ { 9, 87, 149 },
+ { 3, 56, 99 },
+ { 1, 33, 57 }
+ }, { /* Coeff Band 2 */
+ { 83, 167, 237 },
+ { 68, 145, 222 },
+ { 10, 103, 177 },
+ { 2, 72, 131 },
+ { 1, 41, 79 },
+ { 1, 20, 39 }
+ }, { /* Coeff Band 3 */
+ { 99, 167, 239 },
+ { 47, 141, 224 },
+ { 10, 104, 178 },
+ { 2, 73, 133 },
+ { 1, 44, 85 },
+ { 1, 22, 47 }
+ }, { /* Coeff Band 4 */
+ { 127, 145, 243 },
+ { 71, 129, 228 },
+ { 17, 93, 177 },
+ { 3, 61, 124 },
+ { 1, 41, 84 },
+ { 1, 21, 52 }
+ }, { /* Coeff Band 5 */
+ { 157, 78, 244 },
+ { 140, 72, 231 },
+ { 69, 58, 184 },
+ { 31, 44, 137 },
+ { 14, 38, 105 },
+ { 8, 23, 61 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_8x8[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 125, 34, 187 },
+ { 52, 41, 133 },
+ { 6, 31, 56 }
+ }, { /* Coeff Band 1 */
+ { 37, 109, 153 },
+ { 51, 102, 147 },
+ { 23, 87, 128 },
+ { 8, 67, 101 },
+ { 1, 41, 63 },
+ { 1, 19, 29 }
+ }, { /* Coeff Band 2 */
+ { 31, 154, 185 },
+ { 17, 127, 175 },
+ { 6, 96, 145 },
+ { 2, 73, 114 },
+ { 1, 51, 82 },
+ { 1, 28, 45 }
+ }, { /* Coeff Band 3 */
+ { 23, 163, 200 },
+ { 10, 131, 185 },
+ { 2, 93, 148 },
+ { 1, 67, 111 },
+ { 1, 41, 69 },
+ { 1, 14, 24 }
+ }, { /* Coeff Band 4 */
+ { 29, 176, 217 },
+ { 12, 145, 201 },
+ { 3, 101, 156 },
+ { 1, 69, 111 },
+ { 1, 39, 63 },
+ { 1, 14, 23 }
+ }, { /* Coeff Band 5 */
+ { 57, 192, 233 },
+ { 25, 154, 215 },
+ { 6, 109, 167 },
+ { 3, 78, 118 },
+ { 1, 48, 69 },
+ { 1, 21, 29 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 202, 105, 245 },
+ { 108, 106, 216 },
+ { 18, 90, 144 }
+ }, { /* Coeff Band 1 */
+ { 33, 172, 219 },
+ { 64, 149, 206 },
+ { 14, 117, 177 },
+ { 5, 90, 141 },
+ { 2, 61, 95 },
+ { 1, 37, 57 }
+ }, { /* Coeff Band 2 */
+ { 33, 179, 220 },
+ { 11, 140, 198 },
+ { 1, 89, 148 },
+ { 1, 60, 104 },
+ { 1, 33, 57 },
+ { 1, 12, 21 }
+ }, { /* Coeff Band 3 */
+ { 30, 181, 221 },
+ { 8, 141, 198 },
+ { 1, 87, 145 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 20 }
+ }, { /* Coeff Band 4 */
+ { 32, 186, 224 },
+ { 7, 142, 198 },
+ { 1, 86, 143 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 22 }
+ }, { /* Coeff Band 5 */
+ { 57, 192, 227 },
+ { 20, 143, 204 },
+ { 3, 96, 154 },
+ { 1, 68, 112 },
+ { 1, 42, 69 },
+ { 1, 19, 32 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 212, 35, 215 },
+ { 113, 47, 169 },
+ { 29, 48, 105 }
+ }, { /* Coeff Band 1 */
+ { 74, 129, 203 },
+ { 106, 120, 203 },
+ { 49, 107, 178 },
+ { 19, 84, 144 },
+ { 4, 50, 84 },
+ { 1, 15, 25 }
+ }, { /* Coeff Band 2 */
+ { 71, 172, 217 },
+ { 44, 141, 209 },
+ { 15, 102, 173 },
+ { 6, 76, 133 },
+ { 2, 51, 89 },
+ { 1, 24, 42 }
+ }, { /* Coeff Band 3 */
+ { 64, 185, 231 },
+ { 31, 148, 216 },
+ { 8, 103, 175 },
+ { 3, 74, 131 },
+ { 1, 46, 81 },
+ { 1, 18, 30 }
+ }, { /* Coeff Band 4 */
+ { 65, 196, 235 },
+ { 25, 157, 221 },
+ { 5, 105, 174 },
+ { 1, 67, 120 },
+ { 1, 38, 69 },
+ { 1, 15, 30 }
+ }, { /* Coeff Band 5 */
+ { 65, 204, 238 },
+ { 30, 156, 224 },
+ { 7, 107, 177 },
+ { 2, 70, 124 },
+ { 1, 42, 73 },
+ { 1, 18, 34 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 225, 86, 251 },
+ { 144, 104, 235 },
+ { 42, 99, 181 }
+ }, { /* Coeff Band 1 */
+ { 85, 175, 239 },
+ { 112, 165, 229 },
+ { 29, 136, 200 },
+ { 12, 103, 162 },
+ { 6, 77, 123 },
+ { 2, 53, 84 }
+ }, { /* Coeff Band 2 */
+ { 75, 183, 239 },
+ { 30, 155, 221 },
+ { 3, 106, 171 },
+ { 1, 74, 128 },
+ { 1, 44, 76 },
+ { 1, 17, 28 }
+ }, { /* Coeff Band 3 */
+ { 73, 185, 240 },
+ { 27, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 75, 127 },
+ { 1, 42, 73 },
+ { 1, 17, 29 }
+ }, { /* Coeff Band 4 */
+ { 62, 190, 238 },
+ { 21, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 72, 122 },
+ { 1, 40, 71 },
+ { 1, 18, 32 }
+ }, { /* Coeff Band 5 */
+ { 61, 199, 240 },
+ { 27, 161, 226 },
+ { 4, 113, 180 },
+ { 1, 76, 129 },
+ { 1, 46, 80 },
+ { 1, 23, 41 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_16x16[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 27, 153 },
+ { 5, 30, 95 },
+ { 1, 16, 30 }
+ }, { /* Coeff Band 1 */
+ { 50, 75, 127 },
+ { 57, 75, 124 },
+ { 27, 67, 108 },
+ { 10, 54, 86 },
+ { 1, 33, 52 },
+ { 1, 12, 18 }
+ }, { /* Coeff Band 2 */
+ { 43, 125, 151 },
+ { 26, 108, 148 },
+ { 7, 83, 122 },
+ { 2, 59, 89 },
+ { 1, 38, 60 },
+ { 1, 17, 27 }
+ }, { /* Coeff Band 3 */
+ { 23, 144, 163 },
+ { 13, 112, 154 },
+ { 2, 75, 117 },
+ { 1, 50, 81 },
+ { 1, 31, 51 },
+ { 1, 14, 23 }
+ }, { /* Coeff Band 4 */
+ { 18, 162, 185 },
+ { 6, 123, 171 },
+ { 1, 78, 125 },
+ { 1, 51, 86 },
+ { 1, 31, 54 },
+ { 1, 14, 23 }
+ }, { /* Coeff Band 5 */
+ { 15, 199, 227 },
+ { 3, 150, 204 },
+ { 1, 91, 146 },
+ { 1, 55, 95 },
+ { 1, 30, 53 },
+ { 1, 11, 20 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 19, 55, 240 },
+ { 19, 59, 196 },
+ { 3, 52, 105 }
+ }, { /* Coeff Band 1 */
+ { 41, 166, 207 },
+ { 104, 153, 199 },
+ { 31, 123, 181 },
+ { 14, 101, 152 },
+ { 5, 72, 106 },
+ { 1, 36, 52 }
+ }, { /* Coeff Band 2 */
+ { 35, 176, 211 },
+ { 12, 131, 190 },
+ { 2, 88, 144 },
+ { 1, 60, 101 },
+ { 1, 36, 60 },
+ { 1, 16, 28 }
+ }, { /* Coeff Band 3 */
+ { 28, 183, 213 },
+ { 8, 134, 191 },
+ { 1, 86, 142 },
+ { 1, 56, 96 },
+ { 1, 30, 53 },
+ { 1, 12, 20 }
+ }, { /* Coeff Band 4 */
+ { 20, 190, 215 },
+ { 4, 135, 192 },
+ { 1, 84, 139 },
+ { 1, 53, 91 },
+ { 1, 28, 49 },
+ { 1, 11, 20 }
+ }, { /* Coeff Band 5 */
+ { 13, 196, 216 },
+ { 2, 137, 192 },
+ { 1, 86, 143 },
+ { 1, 57, 99 },
+ { 1, 32, 56 },
+ { 1, 13, 24 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 211, 29, 217 },
+ { 96, 47, 156 },
+ { 22, 43, 87 }
+ }, { /* Coeff Band 1 */
+ { 78, 120, 193 },
+ { 111, 116, 186 },
+ { 46, 102, 164 },
+ { 15, 80, 128 },
+ { 2, 49, 76 },
+ { 1, 18, 28 }
+ }, { /* Coeff Band 2 */
+ { 71, 161, 203 },
+ { 42, 132, 192 },
+ { 10, 98, 150 },
+ { 3, 69, 109 },
+ { 1, 44, 70 },
+ { 1, 18, 29 }
+ }, { /* Coeff Band 3 */
+ { 57, 186, 211 },
+ { 30, 140, 196 },
+ { 4, 93, 146 },
+ { 1, 62, 102 },
+ { 1, 38, 65 },
+ { 1, 16, 27 }
+ }, { /* Coeff Band 4 */
+ { 47, 199, 217 },
+ { 14, 145, 196 },
+ { 1, 88, 142 },
+ { 1, 57, 98 },
+ { 1, 36, 62 },
+ { 1, 15, 26 }
+ }, { /* Coeff Band 5 */
+ { 26, 219, 229 },
+ { 5, 155, 207 },
+ { 1, 94, 151 },
+ { 1, 60, 104 },
+ { 1, 36, 62 },
+ { 1, 16, 28 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 233, 29, 248 },
+ { 146, 47, 220 },
+ { 43, 52, 140 }
+ }, { /* Coeff Band 1 */
+ { 100, 163, 232 },
+ { 179, 161, 222 },
+ { 63, 142, 204 },
+ { 37, 113, 174 },
+ { 26, 89, 137 },
+ { 18, 68, 97 }
+ }, { /* Coeff Band 2 */
+ { 85, 181, 230 },
+ { 32, 146, 209 },
+ { 7, 100, 164 },
+ { 3, 71, 121 },
+ { 1, 45, 77 },
+ { 1, 18, 30 }
+ }, { /* Coeff Band 3 */
+ { 65, 187, 230 },
+ { 20, 148, 207 },
+ { 2, 97, 159 },
+ { 1, 68, 116 },
+ { 1, 40, 70 },
+ { 1, 14, 29 }
+ }, { /* Coeff Band 4 */
+ { 40, 194, 227 },
+ { 8, 147, 204 },
+ { 1, 94, 155 },
+ { 1, 65, 112 },
+ { 1, 39, 66 },
+ { 1, 14, 26 }
+ }, { /* Coeff Band 5 */
+ { 16, 208, 228 },
+ { 3, 151, 207 },
+ { 1, 98, 160 },
+ { 1, 67, 117 },
+ { 1, 41, 74 },
+ { 1, 17, 31 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_32x32[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 17, 38, 140 },
+ { 7, 34, 80 },
+ { 1, 17, 29 }
+ }, { /* Coeff Band 1 */
+ { 37, 75, 128 },
+ { 41, 76, 128 },
+ { 26, 66, 116 },
+ { 12, 52, 94 },
+ { 2, 32, 55 },
+ { 1, 10, 16 }
+ }, { /* Coeff Band 2 */
+ { 50, 127, 154 },
+ { 37, 109, 152 },
+ { 16, 82, 121 },
+ { 5, 59, 85 },
+ { 1, 35, 54 },
+ { 1, 13, 20 }
+ }, { /* Coeff Band 3 */
+ { 40, 142, 167 },
+ { 17, 110, 157 },
+ { 2, 71, 112 },
+ { 1, 44, 72 },
+ { 1, 27, 45 },
+ { 1, 11, 17 }
+ }, { /* Coeff Band 4 */
+ { 30, 175, 188 },
+ { 9, 124, 169 },
+ { 1, 74, 116 },
+ { 1, 48, 78 },
+ { 1, 30, 49 },
+ { 1, 11, 18 }
+ }, { /* Coeff Band 5 */
+ { 10, 222, 223 },
+ { 2, 150, 194 },
+ { 1, 83, 128 },
+ { 1, 48, 79 },
+ { 1, 27, 45 },
+ { 1, 11, 17 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 36, 41, 235 },
+ { 29, 36, 193 },
+ { 10, 27, 111 }
+ }, { /* Coeff Band 1 */
+ { 85, 165, 222 },
+ { 177, 162, 215 },
+ { 110, 135, 195 },
+ { 57, 113, 168 },
+ { 23, 83, 120 },
+ { 10, 49, 61 }
+ }, { /* Coeff Band 2 */
+ { 85, 190, 223 },
+ { 36, 139, 200 },
+ { 5, 90, 146 },
+ { 1, 60, 103 },
+ { 1, 38, 65 },
+ { 1, 18, 30 }
+ }, { /* Coeff Band 3 */
+ { 72, 202, 223 },
+ { 23, 141, 199 },
+ { 2, 86, 140 },
+ { 1, 56, 97 },
+ { 1, 36, 61 },
+ { 1, 16, 27 }
+ }, { /* Coeff Band 4 */
+ { 55, 218, 225 },
+ { 13, 145, 200 },
+ { 1, 86, 141 },
+ { 1, 57, 99 },
+ { 1, 35, 61 },
+ { 1, 13, 22 }
+ }, { /* Coeff Band 5 */
+ { 15, 235, 212 },
+ { 1, 132, 184 },
+ { 1, 84, 139 },
+ { 1, 57, 97 },
+ { 1, 34, 56 },
+ { 1, 14, 23 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 181, 21, 201 },
+ { 61, 37, 123 },
+ { 10, 38, 71 }
+ }, { /* Coeff Band 1 */
+ { 47, 106, 172 },
+ { 95, 104, 173 },
+ { 42, 93, 159 },
+ { 18, 77, 131 },
+ { 4, 50, 81 },
+ { 1, 17, 23 }
+ }, { /* Coeff Band 2 */
+ { 62, 147, 199 },
+ { 44, 130, 189 },
+ { 28, 102, 154 },
+ { 18, 75, 115 },
+ { 2, 44, 65 },
+ { 1, 12, 19 }
+ }, { /* Coeff Band 3 */
+ { 55, 153, 210 },
+ { 24, 130, 194 },
+ { 3, 93, 146 },
+ { 1, 61, 97 },
+ { 1, 31, 50 },
+ { 1, 10, 16 }
+ }, { /* Coeff Band 4 */
+ { 49, 186, 223 },
+ { 17, 148, 204 },
+ { 1, 96, 142 },
+ { 1, 53, 83 },
+ { 1, 26, 44 },
+ { 1, 11, 17 }
+ }, { /* Coeff Band 5 */
+ { 13, 217, 212 },
+ { 2, 136, 180 },
+ { 1, 78, 124 },
+ { 1, 50, 83 },
+ { 1, 29, 49 },
+ { 1, 14, 23 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 197, 13, 247 },
+ { 82, 17, 222 },
+ { 25, 17, 162 }
+ }, { /* Coeff Band 1 */
+ { 126, 186, 247 },
+ { 234, 191, 243 },
+ { 176, 177, 234 },
+ { 104, 158, 220 },
+ { 66, 128, 186 },
+ { 55, 90, 137 }
+ }, { /* Coeff Band 2 */
+ { 111, 197, 242 },
+ { 46, 158, 219 },
+ { 9, 104, 171 },
+ { 2, 65, 125 },
+ { 1, 44, 80 },
+ { 1, 17, 91 }
+ }, { /* Coeff Band 3 */
+ { 104, 208, 245 },
+ { 39, 168, 224 },
+ { 3, 109, 162 },
+ { 1, 79, 124 },
+ { 1, 50, 102 },
+ { 1, 43, 102 }
+ }, { /* Coeff Band 4 */
+ { 84, 220, 246 },
+ { 31, 177, 231 },
+ { 2, 115, 180 },
+ { 1, 79, 134 },
+ { 1, 55, 77 },
+ { 1, 60, 79 }
+ }, { /* Coeff Band 5 */
+ { 43, 243, 240 },
+ { 8, 180, 217 },
+ { 1, 115, 166 },
+ { 1, 84, 121 },
+ { 1, 51, 67 },
+ { 1, 16, 6 }
+ }
+ }
+ }
+};
+
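
Each table above nests as [block type][intra/inter][coeff band][context]
[node], and every innermost triple holds the three explicitly-modeled
(unconstrained-node) probabilities. A tiny self-contained analog of the
lookup, with values copied from the band-2 rows of the 4x4 table:

    #include <assert.h>
    #include <stdio.h>

    typedef unsigned char vp9_prob;

    /* Values copied from the band-2 rows of default_coef_probs_4x4. */
    static const vp9_prob probs[1][2][1][2][3] = {
      { /* block type 0 */
        { /* intra */
          { { 40, 132, 201 },    /* band 2, context 0 */
            { 29, 114, 187 } },  /* band 2, context 1 */
        },
        { /* inter */
          { { 41, 151, 213 },
            { 27, 123, 193 } },
        },
      },
    };

    int main(void) {
      const vp9_prob *node = probs[0][1][0][0];  /* inter, context 0 */
      assert(node[0] == 41 && node[1] == 151 && node[2] == 213);
      printf("%d %d %d\n", node[0], node[1], node[2]);
      return 0;
    }
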
diff --git a/libvpx/vp9/common/vp9_entropy.c b/libvpx/vp9/common/vp9_entropy.c
new file mode 100644
index 0000000..32d9e0c
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropy.c
@@ -0,0 +1,660 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx/vpx_integer.h"
+
+#define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
+
+DECLARE_ALIGNED(16, const uint8_t, vp9_norm[256]) = {
+ 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+DECLARE_ALIGNED(16, const uint8_t,
+ vp9_coefband_trans_8x8plus[MAXBAND_INDEX + 1]) = {
+ 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 5
+};
+
+DECLARE_ALIGNED(16, const uint8_t,
+ vp9_coefband_trans_4x4[MAXBAND_INDEX + 1]) = {
+ 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5
+};
+
+DECLARE_ALIGNED(16, const uint8_t, vp9_pt_energy_class[MAX_ENTROPY_TOKENS]) = {
+ 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 5
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_4x4[16]) = {
+ 0, 4, 1, 5,
+ 8, 2, 12, 9,
+ 3, 6, 13, 10,
+ 7, 14, 11, 15,
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_4x4[16]) = {
+ 0, 4, 8, 1,
+ 12, 5, 9, 2,
+ 13, 6, 10, 3,
+ 7, 14, 11, 15,
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_4x4[16]) = {
+ 0, 1, 4, 2,
+ 5, 3, 6, 8,
+ 9, 7, 12, 10,
+ 13, 11, 14, 15,
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_8x8[64]) = {
+ 0, 8, 1, 16, 9, 2, 17, 24,
+ 10, 3, 18, 25, 32, 11, 4, 26,
+ 33, 19, 40, 12, 34, 27, 5, 41,
+ 20, 48, 13, 35, 42, 28, 21, 6,
+ 49, 56, 36, 43, 29, 7, 14, 50,
+ 57, 44, 22, 37, 15, 51, 58, 30,
+ 45, 23, 52, 59, 38, 31, 60, 53,
+ 46, 39, 61, 54, 47, 62, 55, 63,
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_8x8[64]) = {
+ 0, 8, 16, 1, 24, 9, 32, 17,
+ 2, 40, 25, 10, 33, 18, 48, 3,
+ 26, 41, 11, 56, 19, 34, 4, 49,
+ 27, 42, 12, 35, 20, 57, 50, 28,
+ 5, 43, 13, 36, 58, 51, 21, 44,
+ 6, 29, 59, 37, 14, 52, 22, 7,
+ 45, 60, 30, 15, 38, 53, 23, 46,
+ 31, 61, 39, 54, 47, 62, 55, 63,
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_8x8[64]) = {
+ 0, 1, 2, 8, 9, 3, 16, 10,
+ 4, 17, 11, 24, 5, 18, 25, 12,
+ 19, 26, 32, 6, 13, 20, 33, 27,
+ 7, 34, 40, 21, 28, 41, 14, 35,
+ 48, 42, 29, 36, 49, 22, 43, 15,
+ 56, 37, 50, 44, 30, 57, 23, 51,
+ 58, 45, 38, 52, 31, 59, 53, 46,
+ 60, 39, 61, 47, 54, 55, 62, 63,
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_16x16[256]) = {
+ 0, 16, 1, 32, 17, 2, 48, 33, 18, 3, 64, 34, 49, 19, 65, 80,
+ 50, 4, 35, 66, 20, 81, 96, 51, 5, 36, 82, 97, 67, 112, 21, 52,
+ 98, 37, 83, 113, 6, 68, 128, 53, 22, 99, 114, 84, 7, 129, 38, 69,
+ 100, 115, 144, 130, 85, 54, 23, 8, 145, 39, 70, 116, 101, 131, 160, 146,
+ 55, 86, 24, 71, 132, 117, 161, 40, 9, 102, 147, 176, 162, 87, 56, 25,
+ 133, 118, 177, 148, 72, 103, 41, 163, 10, 192, 178, 88, 57, 134, 149, 119,
+ 26, 164, 73, 104, 193, 42, 179, 208, 11, 135, 89, 165, 120, 150, 58, 194,
+ 180, 27, 74, 209, 105, 151, 136, 43, 90, 224, 166, 195, 181, 121, 210, 59,
+ 12, 152, 106, 167, 196, 75, 137, 225, 211, 240, 182, 122, 91, 28, 197, 13,
+ 226, 168, 183, 153, 44, 212, 138, 107, 241, 60, 29, 123, 198, 184, 227, 169,
+ 242, 76, 213, 154, 45, 92, 14, 199, 139, 61, 228, 214, 170, 185, 243, 108,
+ 77, 155, 30, 15, 200, 229, 124, 215, 244, 93, 46, 186, 171, 201, 109, 140,
+ 230, 62, 216, 245, 31, 125, 78, 156, 231, 47, 187, 202, 217, 94, 246, 141,
+ 63, 232, 172, 110, 247, 157, 79, 218, 203, 126, 233, 188, 248, 95, 173, 142,
+ 219, 111, 249, 234, 158, 127, 189, 204, 250, 235, 143, 174, 220, 205, 159, 251,
+ 190, 221, 175, 236, 237, 191, 206, 252, 222, 253, 207, 238, 223, 254, 239, 255,
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_16x16[256]) = {
+ 0, 16, 32, 48, 1, 64, 17, 80, 33, 96, 49, 2, 65, 112, 18, 81,
+ 34, 128, 50, 97, 3, 66, 144, 19, 113, 35, 82, 160, 98, 51, 129, 4,
+ 67, 176, 20, 114, 145, 83, 36, 99, 130, 52, 192, 5, 161, 68, 115, 21,
+ 146, 84, 208, 177, 37, 131, 100, 53, 162, 224, 69, 6, 116, 193, 147, 85,
+ 22, 240, 132, 38, 178, 101, 163, 54, 209, 117, 70, 7, 148, 194, 86, 179,
+ 225, 23, 133, 39, 164, 8, 102, 210, 241, 55, 195, 118, 149, 71, 180, 24,
+ 87, 226, 134, 165, 211, 40, 103, 56, 72, 150, 196, 242, 119, 9, 181, 227,
+ 88, 166, 25, 135, 41, 104, 212, 57, 151, 197, 120, 73, 243, 182, 136, 167,
+ 213, 89, 10, 228, 105, 152, 198, 26, 42, 121, 183, 244, 168, 58, 137, 229,
+ 74, 214, 90, 153, 199, 184, 11, 106, 245, 27, 122, 230, 169, 43, 215, 59,
+ 200, 138, 185, 246, 75, 12, 91, 154, 216, 231, 107, 28, 44, 201, 123, 170,
+ 60, 247, 232, 76, 139, 13, 92, 217, 186, 248, 155, 108, 29, 124, 45, 202,
+ 233, 171, 61, 14, 77, 140, 15, 249, 93, 30, 187, 156, 218, 46, 109, 125,
+ 62, 172, 78, 203, 31, 141, 234, 94, 47, 188, 63, 157, 110, 250, 219, 79,
+ 126, 204, 173, 142, 95, 189, 111, 235, 158, 220, 251, 127, 174, 143, 205, 236,
+ 159, 190, 221, 252, 175, 206, 237, 191, 253, 222, 238, 207, 254, 223, 239, 255,
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_16x16[256]) = {
+ 0, 1, 2, 16, 3, 17, 4, 18, 32, 5, 33, 19, 6, 34, 48, 20,
+ 49, 7, 35, 21, 50, 64, 8, 36, 65, 22, 51, 37, 80, 9, 66, 52,
+ 23, 38, 81, 67, 10, 53, 24, 82, 68, 96, 39, 11, 54, 83, 97, 69,
+ 25, 98, 84, 40, 112, 55, 12, 70, 99, 113, 85, 26, 41, 56, 114, 100,
+ 13, 71, 128, 86, 27, 115, 101, 129, 42, 57, 72, 116, 14, 87, 130, 102,
+ 144, 73, 131, 117, 28, 58, 15, 88, 43, 145, 103, 132, 146, 118, 74, 160,
+ 89, 133, 104, 29, 59, 147, 119, 44, 161, 148, 90, 105, 134, 162, 120, 176,
+ 75, 135, 149, 30, 60, 163, 177, 45, 121, 91, 106, 164, 178, 150, 192, 136,
+ 165, 179, 31, 151, 193, 76, 122, 61, 137, 194, 107, 152, 180, 208, 46, 166,
+ 167, 195, 92, 181, 138, 209, 123, 153, 224, 196, 77, 168, 210, 182, 240, 108,
+ 197, 62, 154, 225, 183, 169, 211, 47, 139, 93, 184, 226, 212, 241, 198, 170,
+ 124, 155, 199, 78, 213, 185, 109, 227, 200, 63, 228, 242, 140, 214, 171, 186,
+ 156, 229, 243, 125, 94, 201, 244, 215, 216, 230, 141, 187, 202, 79, 172, 110,
+ 157, 245, 217, 231, 95, 246, 232, 126, 203, 247, 233, 173, 218, 142, 111, 158,
+ 188, 248, 127, 234, 219, 249, 189, 204, 143, 174, 159, 250, 235, 205, 220, 175,
+ 190, 251, 221, 191, 206, 236, 207, 237, 252, 222, 253, 223, 238, 239, 254, 255,
+};
+
+DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_32x32[1024]) = {
+ 0, 32, 1, 64, 33, 2, 96, 65, 34, 128, 3, 97, 66, 160, 129, 35, 98, 4, 67, 130, 161, 192, 36, 99, 224, 5, 162, 193, 68, 131, 37, 100,
+ 225, 194, 256, 163, 69, 132, 6, 226, 257, 288, 195, 101, 164, 38, 258, 7, 227, 289, 133, 320, 70, 196, 165, 290, 259, 228, 39, 321, 102, 352, 8, 197,
+ 71, 134, 322, 291, 260, 353, 384, 229, 166, 103, 40, 354, 323, 292, 135, 385, 198, 261, 72, 9, 416, 167, 386, 355, 230, 324, 104, 293, 41, 417, 199, 136,
+ 262, 387, 448, 325, 356, 10, 73, 418, 231, 168, 449, 294, 388, 105, 419, 263, 42, 200, 357, 450, 137, 480, 74, 326, 232, 11, 389, 169, 295, 420, 106, 451,
+ 481, 358, 264, 327, 201, 43, 138, 512, 482, 390, 296, 233, 170, 421, 75, 452, 359, 12, 513, 265, 483, 328, 107, 202, 514, 544, 422, 391, 453, 139, 44, 234,
+ 484, 297, 360, 171, 76, 515, 545, 266, 329, 454, 13, 423, 203, 108, 546, 485, 576, 298, 235, 140, 361, 330, 172, 547, 45, 455, 267, 577, 486, 77, 204, 362,
+ 608, 14, 299, 578, 109, 236, 487, 609, 331, 141, 579, 46, 15, 173, 610, 363, 78, 205, 16, 110, 237, 611, 142, 47, 174, 79, 206, 17, 111, 238, 48, 143,
+ 80, 175, 112, 207, 49, 18, 239, 81, 113, 19, 50, 82, 114, 51, 83, 115, 640, 516, 392, 268, 144, 20, 672, 641, 548, 517, 424, 393, 300, 269, 176, 145,
+ 52, 21, 704, 673, 642, 580, 549, 518, 456, 425, 394, 332, 301, 270, 208, 177, 146, 84, 53, 22, 736, 705, 674, 643, 612, 581, 550, 519, 488, 457, 426, 395,
+ 364, 333, 302, 271, 240, 209, 178, 147, 116, 85, 54, 23, 737, 706, 675, 613, 582, 551, 489, 458, 427, 365, 334, 303, 241, 210, 179, 117, 86, 55, 738, 707,
+ 614, 583, 490, 459, 366, 335, 242, 211, 118, 87, 739, 615, 491, 367, 243, 119, 768, 644, 520, 396, 272, 148, 24, 800, 769, 676, 645, 552, 521, 428, 397, 304,
+ 273, 180, 149, 56, 25, 832, 801, 770, 708, 677, 646, 584, 553, 522, 460, 429, 398, 336, 305, 274, 212, 181, 150, 88, 57, 26, 864, 833, 802, 771, 740, 709,
+ 678, 647, 616, 585, 554, 523, 492, 461, 430, 399, 368, 337, 306, 275, 244, 213, 182, 151, 120, 89, 58, 27, 865, 834, 803, 741, 710, 679, 617, 586, 555, 493,
+ 462, 431, 369, 338, 307, 245, 214, 183, 121, 90, 59, 866, 835, 742, 711, 618, 587, 494, 463, 370, 339, 246, 215, 122, 91, 867, 743, 619, 495, 371, 247, 123,
+ 896, 772, 648, 524, 400, 276, 152, 28, 928, 897, 804, 773, 680, 649, 556, 525, 432, 401, 308, 277, 184, 153, 60, 29, 960, 929, 898, 836, 805, 774, 712, 681,
+ 650, 588, 557, 526, 464, 433, 402, 340, 309, 278, 216, 185, 154, 92, 61, 30, 992, 961, 930, 899, 868, 837, 806, 775, 744, 713, 682, 651, 620, 589, 558, 527,
+ 496, 465, 434, 403, 372, 341, 310, 279, 248, 217, 186, 155, 124, 93, 62, 31, 993, 962, 931, 869, 838, 807, 745, 714, 683, 621, 590, 559, 497, 466, 435, 373,
+ 342, 311, 249, 218, 187, 125, 94, 63, 994, 963, 870, 839, 746, 715, 622, 591, 498, 467, 374, 343, 250, 219, 126, 95, 995, 871, 747, 623, 499, 375, 251, 127,
+ 900, 776, 652, 528, 404, 280, 156, 932, 901, 808, 777, 684, 653, 560, 529, 436, 405, 312, 281, 188, 157, 964, 933, 902, 840, 809, 778, 716, 685, 654, 592, 561,
+ 530, 468, 437, 406, 344, 313, 282, 220, 189, 158, 996, 965, 934, 903, 872, 841, 810, 779, 748, 717, 686, 655, 624, 593, 562, 531, 500, 469, 438, 407, 376, 345,
+ 314, 283, 252, 221, 190, 159, 997, 966, 935, 873, 842, 811, 749, 718, 687, 625, 594, 563, 501, 470, 439, 377, 346, 315, 253, 222, 191, 998, 967, 874, 843, 750,
+ 719, 626, 595, 502, 471, 378, 347, 254, 223, 999, 875, 751, 627, 503, 379, 255, 904, 780, 656, 532, 408, 284, 936, 905, 812, 781, 688, 657, 564, 533, 440, 409,
+ 316, 285, 968, 937, 906, 844, 813, 782, 720, 689, 658, 596, 565, 534, 472, 441, 410, 348, 317, 286, 1000, 969, 938, 907, 876, 845, 814, 783, 752, 721, 690, 659,
+ 628, 597, 566, 535, 504, 473, 442, 411, 380, 349, 318, 287, 1001, 970, 939, 877, 846, 815, 753, 722, 691, 629, 598, 567, 505, 474, 443, 381, 350, 319, 1002, 971,
+ 878, 847, 754, 723, 630, 599, 506, 475, 382, 351, 1003, 879, 755, 631, 507, 383, 908, 784, 660, 536, 412, 940, 909, 816, 785, 692, 661, 568, 537, 444, 413, 972,
+ 941, 910, 848, 817, 786, 724, 693, 662, 600, 569, 538, 476, 445, 414, 1004, 973, 942, 911, 880, 849, 818, 787, 756, 725, 694, 663, 632, 601, 570, 539, 508, 477,
+ 446, 415, 1005, 974, 943, 881, 850, 819, 757, 726, 695, 633, 602, 571, 509, 478, 447, 1006, 975, 882, 851, 758, 727, 634, 603, 510, 479, 1007, 883, 759, 635, 511,
+ 912, 788, 664, 540, 944, 913, 820, 789, 696, 665, 572, 541, 976, 945, 914, 852, 821, 790, 728, 697, 666, 604, 573, 542, 1008, 977, 946, 915, 884, 853, 822, 791,
+ 760, 729, 698, 667, 636, 605, 574, 543, 1009, 978, 947, 885, 854, 823, 761, 730, 699, 637, 606, 575, 1010, 979, 886, 855, 762, 731, 638, 607, 1011, 887, 763, 639,
+ 916, 792, 668, 948, 917, 824, 793, 700, 669, 980, 949, 918, 856, 825, 794, 732, 701, 670, 1012, 981, 950, 919, 888, 857, 826, 795, 764, 733, 702, 671, 1013, 982,
+ 951, 889, 858, 827, 765, 734, 703, 1014, 983, 890, 859, 766, 735, 1015, 891, 767, 920, 796, 952, 921, 828, 797, 984, 953, 922, 860, 829, 798, 1016, 985, 954, 923,
+ 892, 861, 830, 799, 1017, 986, 955, 893, 862, 831, 1018, 987, 894, 863, 1019, 895, 924, 956, 925, 988, 957, 926, 1020, 989, 958, 927, 1021, 990, 959, 1022, 991, 1023,
+};
+
+/* Array indices are identical to previously-existing CONTEXT_NODE indices */
+
+const vp9_tree_index vp9_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */
+{
+ -DCT_EOB_TOKEN, 2, /* 0 = EOB */
+ -ZERO_TOKEN, 4, /* 1 = ZERO */
+ -ONE_TOKEN, 6, /* 2 = ONE */
+ 8, 12, /* 3 = LOW_VAL */
+ -TWO_TOKEN, 10, /* 4 = TWO */
+ -THREE_TOKEN, -FOUR_TOKEN, /* 5 = THREE */
+ 14, 16, /* 6 = HIGH_LOW */
+ -DCT_VAL_CATEGORY1, -DCT_VAL_CATEGORY2, /* 7 = CAT_ONE */
+ 18, 20, /* 8 = CAT_THREEFOUR */
+ -DCT_VAL_CATEGORY3, -DCT_VAL_CATEGORY4, /* 9 = CAT_THREE */
+ -DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */
+};
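
In the tree convention used here, a non-negative entry is the index of
the next node pair and a leaf stores the negated token (traversal stops
once an entry is no longer positive, which also covers -ZERO_TOKEN == 0).
A toy walker sketching the idea; read_tree and the miniature tree are
illustrative, not the library's decoder:

    #include <stdio.h>

    typedef signed char tree_index;  /* local stand-in */

    /* Follow one bit per node until the entry is no longer positive. */
    static int read_tree(const tree_index *tree, const int *bits) {
      tree_index i = 0;
      while ((i = tree[i + *bits++]) > 0) {
      }
      return -i;  /* negate the leaf back to a token value */
    }

    int main(void) {
      enum { EOB = 1, ZERO = 2, ONE = 3, TWO = 4 };
      const tree_index toy_tree[] = {
        -EOB,  2,    /* node 0: bit 0 -> EOB leaf, bit 1 -> node at 2 */
        -ZERO, 4,    /* node 1 */
        -ONE,  6,    /* node 2 */
        -TWO,  -TWO  /* node 3: both branches are leaves */
      };
      const int bits[] = { 1, 1, 0 };
      printf("token = %d\n", read_tree(toy_tree, bits));  /* 3 == ONE */
      return 0;
    }
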
+
+struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS];
+
+/* Trees for extra bits. Probabilities are constant and
+ do not depend on previously encoded bits */
+
+static const vp9_prob Pcat1[] = { 159};
+static const vp9_prob Pcat2[] = { 165, 145};
+static const vp9_prob Pcat3[] = { 173, 148, 140};
+static const vp9_prob Pcat4[] = { 176, 155, 140, 135};
+static const vp9_prob Pcat5[] = { 180, 157, 141, 134, 130};
+static const vp9_prob Pcat6[] = {
+ 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
+};
+
+const vp9_tree_index vp9_coefmodel_tree[6] = {
+ -DCT_EOB_MODEL_TOKEN, 2, /* 0 = EOB */
+ -ZERO_TOKEN, 4, /* 1 = ZERO */
+ -ONE_TOKEN, -TWO_TOKEN,
+};
+
+// Model obtained from a 2-sided zero-centered distribution derived
+// from a Pareto distribution. The cdf of the distribution is:
+// cdf(x) = 0.5 + 0.5 * sgn(x) * [1 - {alpha/(alpha + |x|)} ^ beta]
+//
+// For a given beta and a given probability of the 1-node, alpha is
+// first solved for, and then the {alpha, beta} pair is used to generate
+// the probabilities for the rest of the nodes.
+
+// beta = 8
+static const vp9_prob modelcoefprobs_pareto8[COEFPROB_MODELS][MODEL_NODES] = {
+ { 3, 86, 128, 6, 86, 23, 88, 29},
+ { 9, 86, 129, 17, 88, 61, 94, 76},
+ { 15, 87, 129, 28, 89, 93, 100, 110},
+ { 20, 88, 130, 38, 91, 118, 106, 136},
+ { 26, 89, 131, 48, 92, 139, 111, 156},
+ { 31, 90, 131, 58, 94, 156, 117, 171},
+ { 37, 90, 132, 66, 95, 171, 122, 184},
+ { 42, 91, 132, 75, 97, 183, 127, 194},
+ { 47, 92, 133, 83, 98, 193, 132, 202},
+ { 52, 93, 133, 90, 100, 201, 137, 208},
+ { 57, 94, 134, 98, 101, 208, 142, 214},
+ { 62, 94, 135, 105, 103, 214, 146, 218},
+ { 66, 95, 135, 111, 104, 219, 151, 222},
+ { 71, 96, 136, 117, 106, 224, 155, 225},
+ { 76, 97, 136, 123, 107, 227, 159, 228},
+ { 80, 98, 137, 129, 109, 231, 162, 231},
+ { 84, 98, 138, 134, 110, 234, 166, 233},
+ { 89, 99, 138, 140, 112, 236, 170, 235},
+ { 93, 100, 139, 145, 113, 238, 173, 236},
+ { 97, 101, 140, 149, 115, 240, 176, 238},
+ {101, 102, 140, 154, 116, 242, 179, 239},
+ {105, 103, 141, 158, 118, 243, 182, 240},
+ {109, 104, 141, 162, 119, 244, 185, 241},
+ {113, 104, 142, 166, 120, 245, 187, 242},
+ {116, 105, 143, 170, 122, 246, 190, 243},
+ {120, 106, 143, 173, 123, 247, 192, 244},
+ {123, 107, 144, 177, 125, 248, 195, 244},
+ {127, 108, 145, 180, 126, 249, 197, 245},
+ {130, 109, 145, 183, 128, 249, 199, 245},
+ {134, 110, 146, 186, 129, 250, 201, 246},
+ {137, 111, 147, 189, 131, 251, 203, 246},
+ {140, 112, 147, 192, 132, 251, 205, 247},
+ {143, 113, 148, 194, 133, 251, 207, 247},
+ {146, 114, 149, 197, 135, 252, 208, 248},
+ {149, 115, 149, 199, 136, 252, 210, 248},
+ {152, 115, 150, 201, 138, 252, 211, 248},
+ {155, 116, 151, 204, 139, 253, 213, 249},
+ {158, 117, 151, 206, 140, 253, 214, 249},
+ {161, 118, 152, 208, 142, 253, 216, 249},
+ {163, 119, 153, 210, 143, 253, 217, 249},
+ {166, 120, 153, 212, 144, 254, 218, 250},
+ {168, 121, 154, 213, 146, 254, 220, 250},
+ {171, 122, 155, 215, 147, 254, 221, 250},
+ {173, 123, 155, 217, 148, 254, 222, 250},
+ {176, 124, 156, 218, 150, 254, 223, 250},
+ {178, 125, 157, 220, 151, 254, 224, 251},
+ {180, 126, 157, 221, 152, 254, 225, 251},
+ {183, 127, 158, 222, 153, 254, 226, 251},
+ {185, 128, 159, 224, 155, 255, 227, 251},
+ {187, 129, 160, 225, 156, 255, 228, 251},
+ {189, 131, 160, 226, 157, 255, 228, 251},
+ {191, 132, 161, 227, 159, 255, 229, 251},
+ {193, 133, 162, 228, 160, 255, 230, 252},
+ {195, 134, 163, 230, 161, 255, 231, 252},
+ {197, 135, 163, 231, 162, 255, 231, 252},
+ {199, 136, 164, 232, 163, 255, 232, 252},
+ {201, 137, 165, 233, 165, 255, 233, 252},
+ {202, 138, 166, 233, 166, 255, 233, 252},
+ {204, 139, 166, 234, 167, 255, 234, 252},
+ {206, 140, 167, 235, 168, 255, 235, 252},
+ {207, 141, 168, 236, 169, 255, 235, 252},
+ {209, 142, 169, 237, 171, 255, 236, 252},
+ {210, 144, 169, 237, 172, 255, 236, 252},
+ {212, 145, 170, 238, 173, 255, 237, 252},
+ {214, 146, 171, 239, 174, 255, 237, 253},
+ {215, 147, 172, 240, 175, 255, 238, 253},
+ {216, 148, 173, 240, 176, 255, 238, 253},
+ {218, 149, 173, 241, 177, 255, 239, 253},
+ {219, 150, 174, 241, 179, 255, 239, 253},
+ {220, 152, 175, 242, 180, 255, 240, 253},
+ {222, 153, 176, 242, 181, 255, 240, 253},
+ {223, 154, 177, 243, 182, 255, 240, 253},
+ {224, 155, 178, 244, 183, 255, 241, 253},
+ {225, 156, 178, 244, 184, 255, 241, 253},
+ {226, 158, 179, 244, 185, 255, 242, 253},
+ {228, 159, 180, 245, 186, 255, 242, 253},
+ {229, 160, 181, 245, 187, 255, 242, 253},
+ {230, 161, 182, 246, 188, 255, 243, 253},
+ {231, 163, 183, 246, 189, 255, 243, 253},
+ {232, 164, 184, 247, 190, 255, 243, 253},
+ {233, 165, 185, 247, 191, 255, 244, 253},
+ {234, 166, 185, 247, 192, 255, 244, 253},
+ {235, 168, 186, 248, 193, 255, 244, 253},
+ {236, 169, 187, 248, 194, 255, 244, 253},
+ {236, 170, 188, 248, 195, 255, 245, 253},
+ {237, 171, 189, 249, 196, 255, 245, 254},
+ {238, 173, 190, 249, 197, 255, 245, 254},
+ {239, 174, 191, 249, 198, 255, 245, 254},
+ {240, 175, 192, 249, 199, 255, 246, 254},
+ {240, 177, 193, 250, 200, 255, 246, 254},
+ {241, 178, 194, 250, 201, 255, 246, 254},
+ {242, 179, 195, 250, 202, 255, 246, 254},
+ {242, 181, 196, 250, 203, 255, 247, 254},
+ {243, 182, 197, 251, 204, 255, 247, 254},
+ {244, 184, 198, 251, 205, 255, 247, 254},
+ {244, 185, 199, 251, 206, 255, 247, 254},
+ {245, 186, 200, 251, 207, 255, 247, 254},
+ {246, 188, 201, 252, 207, 255, 248, 254},
+ {246, 189, 202, 252, 208, 255, 248, 254},
+ {247, 191, 203, 252, 209, 255, 248, 254},
+ {247, 192, 204, 252, 210, 255, 248, 254},
+ {248, 194, 205, 252, 211, 255, 248, 254},
+ {248, 195, 206, 252, 212, 255, 249, 254},
+ {249, 197, 207, 253, 213, 255, 249, 254},
+ {249, 198, 208, 253, 214, 255, 249, 254},
+ {250, 200, 210, 253, 215, 255, 249, 254},
+ {250, 201, 211, 253, 215, 255, 249, 254},
+ {250, 203, 212, 253, 216, 255, 249, 254},
+ {251, 204, 213, 253, 217, 255, 250, 254},
+ {251, 206, 214, 254, 218, 255, 250, 254},
+ {252, 207, 216, 254, 219, 255, 250, 254},
+ {252, 209, 217, 254, 220, 255, 250, 254},
+ {252, 211, 218, 254, 221, 255, 250, 254},
+ {253, 213, 219, 254, 222, 255, 250, 254},
+ {253, 214, 221, 254, 223, 255, 250, 254},
+ {253, 216, 222, 254, 224, 255, 251, 254},
+ {253, 218, 224, 254, 225, 255, 251, 254},
+ {254, 220, 225, 254, 225, 255, 251, 254},
+ {254, 222, 227, 255, 226, 255, 251, 254},
+ {254, 224, 228, 255, 227, 255, 251, 254},
+ {254, 226, 230, 255, 228, 255, 251, 254},
+ {255, 228, 231, 255, 230, 255, 251, 254},
+ {255, 230, 233, 255, 231, 255, 252, 254},
+ {255, 232, 235, 255, 232, 255, 252, 254},
+ {255, 235, 237, 255, 233, 255, 252, 254},
+ {255, 238, 240, 255, 235, 255, 252, 255},
+ {255, 241, 243, 255, 236, 255, 252, 254},
+ {255, 246, 247, 255, 239, 255, 253, 255}
+};
+
+static void extend_model_to_full_distribution(vp9_prob p,
+ vp9_prob *tree_probs) {
+ const int l = (p - 1) / 2;
+ const vp9_prob (*model)[MODEL_NODES] = modelcoefprobs_pareto8;
+ if (p & 1) {
+ vpx_memcpy(tree_probs + UNCONSTRAINED_NODES,
+ model[l], MODEL_NODES * sizeof(vp9_prob));
+ } else {
+ // interpolate
+ int i;
+ for (i = UNCONSTRAINED_NODES; i < ENTROPY_NODES; ++i)
+ tree_probs[i] = (model[l][i - UNCONSTRAINED_NODES] +
+ model[l + 1][i - UNCONSTRAINED_NODES]) >> 1;
+ }
+}
+
+void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full) {
+ if (full != model)
+ vpx_memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+ extend_model_to_full_distribution(model[PIVOT_NODE], full);
+}
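
extend_model_to_full_distribution() keeps Pareto-derived rows only for
odd pivot probabilities (1, 3, ..., 255) and linearly interpolates an
even pivot from its two odd neighbours. A standalone replay of the
even-pivot path, using the first two rows of modelcoefprobs_pareto8:

    #include <assert.h>
    #include <stdio.h>

    #define MODEL_NODES 8

    /* First two rows of modelcoefprobs_pareto8, pivot probs 1 and 3. */
    static const unsigned char model[2][MODEL_NODES] = {
      { 3, 86, 128,  6, 86, 23, 88, 29 },
      { 9, 86, 129, 17, 88, 61, 94, 76 },
    };

    int main(void) {
      unsigned char full[MODEL_NODES];
      const int p = 2;            /* even pivot: interpolate rows 0 and 1 */
      const int l = (p - 1) / 2;  /* == 0, as in the function above */
      int i;
      for (i = 0; i < MODEL_NODES; ++i)
        full[i] = (unsigned char)((model[l][i] + model[l + 1][i]) >> 1);
      assert(full[0] == 6);       /* (3 + 9) >> 1 */
      printf("first interpolated node: %d\n", full[0]);
      return 0;
    }
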
+
+static vp9_tree_index cat1[2], cat2[4], cat3[6], cat4[8], cat5[10], cat6[28];
+
+static void init_bit_tree(vp9_tree_index *p, int n) {
+ int i = 0;
+
+ while (++i < n) {
+ p[0] = p[1] = i << 1;
+ p += 2;
+ }
+
+ p[0] = p[1] = 0;
+}
+
+static void init_bit_trees() {
+ init_bit_tree(cat1, 1);
+ init_bit_tree(cat2, 2);
+ init_bit_tree(cat3, 3);
+ init_bit_tree(cat4, 4);
+ init_bit_tree(cat5, 5);
+ init_bit_tree(cat6, 14);
+}
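
init_bit_tree() lays each extra-bits tree out as a chain: both branches
of level i point at level i+1 and the last level terminates, so reading a
category's extra bits amounts to n independent binary reads against the
Pcat probabilities. Replaying the loop on a three-level tree (tree_index
is a local stand-in type):

    #include <assert.h>
    #include <stdio.h>

    typedef short tree_index;  /* local stand-in for vp9_tree_index */

    /* Same loop as init_bit_tree() above. */
    static void init_bit_tree(tree_index *p, int n) {
      int i = 0;
      while (++i < n) {
        p[0] = p[1] = (tree_index)(i << 1);
        p += 2;
      }
      p[0] = p[1] = 0;
    }

    int main(void) {
      tree_index cat3[6];
      init_bit_tree(cat3, 3);
      /* Level 0 -> index 2, level 1 -> index 4, level 2 terminates. */
      assert(cat3[0] == 2 && cat3[1] == 2);
      assert(cat3[2] == 4 && cat3[3] == 4);
      assert(cat3[4] == 0 && cat3[5] == 0);
      printf("cat3 chain built as expected\n");
      return 0;
    }
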
+
+const vp9_extra_bit vp9_extra_bits[12] = {
+ { 0, 0, 0, 0},
+ { 0, 0, 0, 1},
+ { 0, 0, 0, 2},
+ { 0, 0, 0, 3},
+ { 0, 0, 0, 4},
+ { cat1, Pcat1, 1, 5},
+ { cat2, Pcat2, 2, 7},
+ { cat3, Pcat3, 3, 11},
+ { cat4, Pcat4, 4, 19},
+ { cat5, Pcat5, 5, 35},
+ { cat6, Pcat6, 14, 67},
+ { 0, 0, 0, 0}
+};
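
Each vp9_extra_bit entry pairs a bit count with a base value: the decoded
magnitude is base_val plus the literal extra bits, and the sign costs one
more bit (the "+1" in the header's token comments). A small check that
DCT_VAL_CATEGORY3 (len 3, base 11) spans 11 through 18, matching the
"11-18" comment in vp9_entropy.h:

    #include <assert.h>
    #include <stdio.h>

    struct extra { int len; int base_val; };

    static int magnitude(struct extra e, int extra_bits) {
      return e.base_val + extra_bits;  /* extra_bits in [0, 2^len) */
    }

    int main(void) {
      struct extra cat3 = { 3, 11 };     /* mirrors the CAT3 entry above */
      assert(magnitude(cat3, 0) == 11);  /* all extra bits zero */
      assert(magnitude(cat3, 7) == 18);  /* 11 + (2^3 - 1) */
      printf("CAT3 covers 11..18\n");
      return 0;
    }
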
+
+#include "vp9/common/vp9_default_coef_probs.h"
+
+void vp9_default_coef_probs(VP9_COMMON *cm) {
+ vp9_copy(cm->fc.coef_probs[TX_4X4], default_coef_probs_4x4);
+ vp9_copy(cm->fc.coef_probs[TX_8X8], default_coef_probs_8x8);
+ vp9_copy(cm->fc.coef_probs[TX_16X16], default_coef_probs_16x16);
+ vp9_copy(cm->fc.coef_probs[TX_32X32], default_coef_probs_32x32);
+}
+
+// Neighborhood 2-tuples for various scans and blocksizes,
+// in {top, left} order for each position in scan order.
+// MAX_NEIGHBORS is 2; positions with only one available neighbor
+// store it in both slots, and the DC position stores 0 in both.
+DECLARE_ALIGNED(16, int16_t,
+ vp9_default_scan_4x4_neighbors[17 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int16_t,
+ vp9_col_scan_4x4_neighbors[17 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int16_t,
+ vp9_row_scan_4x4_neighbors[17 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int16_t,
+ vp9_col_scan_8x8_neighbors[65 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int16_t,
+ vp9_row_scan_8x8_neighbors[65 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int16_t,
+ vp9_default_scan_8x8_neighbors[65 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int16_t,
+ vp9_col_scan_16x16_neighbors[257 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int16_t,
+ vp9_row_scan_16x16_neighbors[257 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int16_t,
+ vp9_default_scan_16x16_neighbors[257 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int16_t,
+ vp9_default_scan_32x32_neighbors[1025 * MAX_NEIGHBORS]);
+
+DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_4x4[16]);
+DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_4x4[16]);
+DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_4x4[16]);
+DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_8x8[64]);
+DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_8x8[64]);
+DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_8x8[64]);
+DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_16x16[256]);
+DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_16x16[256]);
+DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_16x16[256]);
+DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_32x32[1024]);
+
+static int find_in_scan(const int16_t *scan, int l, int idx) {
+ int n, l2 = l * l;
+ for (n = 0; n < l2; n++) {
+ int rc = scan[n];
+ if (rc == idx)
+ return n;
+ }
+ assert(0);
+ return -1;
+}
+static void init_scan_neighbors(const int16_t *scan,
+ int16_t *iscan,
+ int l, int16_t *neighbors) {
+ int l2 = l * l;
+ int n, i, j;
+
+ // dc doesn't use this type of prediction
+ neighbors[MAX_NEIGHBORS * 0 + 0] = 0;
+ neighbors[MAX_NEIGHBORS * 0 + 1] = 0;
+ iscan[0] = find_in_scan(scan, l, 0);
+ for (n = 1; n < l2; n++) {
+ int rc = scan[n];
+ iscan[n] = find_in_scan(scan, l, n);
+ i = rc / l;
+ j = rc % l;
+ if (i > 0 && j > 0) {
+ // col/row scan is used for adst/dct, and generally means that
+ // energy decreases to zero much faster in the dimension in
+ // which ADST is used compared to the direction in which DCT
+ // is used. Likewise, we find much higher correlation between
+ // coefficients within the direction in which DCT is used.
+ // Therefore, if we use ADST/DCT, prefer the DCT neighbor coeff
+ // as a context. If ADST or DCT is used in both directions, we
+ // use the combination of the two as a context.
+ int a = (i - 1) * l + j;
+ int b = i * l + j - 1;
+ if (scan == vp9_col_scan_4x4 || scan == vp9_col_scan_8x8 ||
+ scan == vp9_col_scan_16x16) {
+ // in the col/row scan cases (as well as left/top edge cases), we set
+ // both contexts to the same value, so we can branchlessly do
+ // (a + b + 1) >> 1, which automatically becomes a if a == b
+ neighbors[MAX_NEIGHBORS * n + 0] =
+ neighbors[MAX_NEIGHBORS * n + 1] = a;
+ } else if (scan == vp9_row_scan_4x4 || scan == vp9_row_scan_8x8 ||
+ scan == vp9_row_scan_16x16) {
+ neighbors[MAX_NEIGHBORS * n + 0] =
+ neighbors[MAX_NEIGHBORS * n + 1] = b;
+ } else {
+ neighbors[MAX_NEIGHBORS * n + 0] = a;
+ neighbors[MAX_NEIGHBORS * n + 1] = b;
+ }
+ } else if (i > 0) {
+ neighbors[MAX_NEIGHBORS * n + 0] =
+ neighbors[MAX_NEIGHBORS * n + 1] = (i - 1) * l + j;
+ } else {
+ assert(j > 0);
+ neighbors[MAX_NEIGHBORS * n + 0] =
+ neighbors[MAX_NEIGHBORS * n + 1] = i * l + j - 1;
+ }
+ assert(iscan[neighbors[MAX_NEIGHBORS * n + 0]] < n);
+ }
+ // one padding item so we don't have to add branches in code to handle
+ // calls to get_coef_context() for the position after the final token
+ // in the block
+ neighbors[MAX_NEIGHBORS * l2 + 0] = 0;
+ neighbors[MAX_NEIGHBORS * l2 + 1] = 0;
+}
+
+void vp9_init_neighbors() {
+ init_scan_neighbors(vp9_default_scan_4x4, vp9_default_iscan_4x4, 4,
+ vp9_default_scan_4x4_neighbors);
+ init_scan_neighbors(vp9_row_scan_4x4, vp9_row_iscan_4x4, 4,
+ vp9_row_scan_4x4_neighbors);
+ init_scan_neighbors(vp9_col_scan_4x4, vp9_col_iscan_4x4, 4,
+ vp9_col_scan_4x4_neighbors);
+ init_scan_neighbors(vp9_default_scan_8x8, vp9_default_iscan_8x8, 8,
+ vp9_default_scan_8x8_neighbors);
+ init_scan_neighbors(vp9_row_scan_8x8, vp9_row_iscan_8x8, 8,
+ vp9_row_scan_8x8_neighbors);
+ init_scan_neighbors(vp9_col_scan_8x8, vp9_col_iscan_8x8, 8,
+ vp9_col_scan_8x8_neighbors);
+ init_scan_neighbors(vp9_default_scan_16x16, vp9_default_iscan_16x16, 16,
+ vp9_default_scan_16x16_neighbors);
+ init_scan_neighbors(vp9_row_scan_16x16, vp9_row_iscan_16x16, 16,
+ vp9_row_scan_16x16_neighbors);
+ init_scan_neighbors(vp9_col_scan_16x16, vp9_col_iscan_16x16, 16,
+ vp9_col_scan_16x16_neighbors);
+ init_scan_neighbors(vp9_default_scan_32x32, vp9_default_iscan_32x32, 32,
+ vp9_default_scan_32x32_neighbors);
+}
+
+const int16_t *vp9_get_coef_neighbors_handle(const int16_t *scan) {
+ if (scan == vp9_default_scan_4x4) {
+ return vp9_default_scan_4x4_neighbors;
+ } else if (scan == vp9_row_scan_4x4) {
+ return vp9_row_scan_4x4_neighbors;
+ } else if (scan == vp9_col_scan_4x4) {
+ return vp9_col_scan_4x4_neighbors;
+ } else if (scan == vp9_default_scan_8x8) {
+ return vp9_default_scan_8x8_neighbors;
+ } else if (scan == vp9_row_scan_8x8) {
+ return vp9_row_scan_8x8_neighbors;
+ } else if (scan == vp9_col_scan_8x8) {
+ return vp9_col_scan_8x8_neighbors;
+ } else if (scan == vp9_default_scan_16x16) {
+ return vp9_default_scan_16x16_neighbors;
+ } else if (scan == vp9_row_scan_16x16) {
+ return vp9_row_scan_16x16_neighbors;
+ } else if (scan == vp9_col_scan_16x16) {
+ return vp9_col_scan_16x16_neighbors;
+ } else {
+ assert(scan == vp9_default_scan_32x32);
+ return vp9_default_scan_32x32_neighbors;
+ }
+}
+
+void vp9_coef_tree_initialize() {
+ vp9_init_neighbors();
+ init_bit_trees();
+ vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree);
+}
+
+// #define COEF_COUNT_TESTING
+
+#define COEF_COUNT_SAT 24
+#define COEF_MAX_UPDATE_FACTOR 112
+#define COEF_COUNT_SAT_KEY 24
+#define COEF_MAX_UPDATE_FACTOR_KEY 112
+#define COEF_COUNT_SAT_AFTER_KEY 24
+#define COEF_MAX_UPDATE_FACTOR_AFTER_KEY 128
+
+static void adapt_coef_probs(VP9_COMMON *cm, TX_SIZE tx_size,
+ unsigned int count_sat,
+ unsigned int update_factor) {
+ FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
+
+ vp9_coeff_probs_model *dst_coef_probs = cm->fc.coef_probs[tx_size];
+ vp9_coeff_probs_model *pre_coef_probs = pre_fc->coef_probs[tx_size];
+ vp9_coeff_count_model *coef_counts = cm->counts.coef[tx_size];
+ unsigned int (*eob_branch_count)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
+ cm->counts.eob_branch[tx_size];
+ int t, i, j, k, l;
+ unsigned int branch_ct[UNCONSTRAINED_NODES][2];
+ vp9_prob coef_probs[UNCONSTRAINED_NODES];
+
+ for (i = 0; i < BLOCK_TYPES; ++i)
+ for (j = 0; j < REF_TYPES; ++j)
+ for (k = 0; k < COEF_BANDS; ++k)
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ if (l >= 3 && k == 0)
+ continue;
+ vp9_tree_probs_from_distribution(vp9_coefmodel_tree, coef_probs,
+ branch_ct, coef_counts[i][j][k][l],
+ 0);
+ branch_ct[0][1] = eob_branch_count[i][j][k][l] - branch_ct[0][0];
+ coef_probs[0] = get_binary_prob(branch_ct[0][0], branch_ct[0][1]);
+ for (t = 0; t < UNCONSTRAINED_NODES; ++t)
+ dst_coef_probs[i][j][k][l][t] = merge_probs(
+ pre_coef_probs[i][j][k][l][t], coef_probs[t],
+ branch_ct[t], count_sat, update_factor);
+ }
+}
+
+void vp9_adapt_coef_probs(VP9_COMMON *cm) {
+ TX_SIZE t;
+ unsigned int count_sat, update_factor;
+
+ if (cm->frame_type == KEY_FRAME || cm->intra_only) {
+ update_factor = COEF_MAX_UPDATE_FACTOR_KEY;
+ count_sat = COEF_COUNT_SAT_KEY;
+ } else if (cm->last_frame_type == KEY_FRAME) {
+ update_factor = COEF_MAX_UPDATE_FACTOR_AFTER_KEY; /* adapt quickly */
+ count_sat = COEF_COUNT_SAT_AFTER_KEY;
+ } else {
+ update_factor = COEF_MAX_UPDATE_FACTOR;
+ count_sat = COEF_COUNT_SAT;
+ }
+ for (t = TX_4X4; t <= TX_32X32; t++)
+ adapt_coef_probs(cm, t, count_sat, update_factor);
+}
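
vp9_adapt_coef_probs() nudges each stored probability toward the
per-frame measurement, with a step that saturates once count_sat samples
have been seen. A hedged sketch of that blend; the real merge_probs
helper lives in vp9_treecoder.h and may differ in rounding details:

    #include <stdio.h>

    typedef unsigned char vp9_prob;

    /* Count-saturated blend: with count >= count_sat the result moves
     * update_factor/256 of the way from pre toward cur. */
    static vp9_prob blend(vp9_prob pre, vp9_prob cur, unsigned int count,
                          unsigned int count_sat,
                          unsigned int update_factor) {
      unsigned int factor;
      if (count > count_sat) count = count_sat;
      factor = update_factor * count / count_sat;
      return (vp9_prob)((pre * (256 - factor) + cur * factor + 128) >> 8);
    }

    int main(void) {
      /* COEF_COUNT_SAT == 24, COEF_MAX_UPDATE_FACTOR == 112 above. */
      printf("%d\n", blend(128, 255, 24, 24, 112)); /* saturated: 184 */
      printf("%d\n", blend(128, 255, 6, 24, 112));  /* partial:   142 */
      return 0;
    }
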
diff --git a/libvpx/vp9/common/vp9_entropy.h b/libvpx/vp9/common/vp9_entropy.h
new file mode 100644
index 0000000..f138c09
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropy.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ENTROPY_H_
+#define VP9_COMMON_VP9_ENTROPY_H_
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_treecoder.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_common.h"
+
+/* Coefficient token alphabet */
+
+#define ZERO_TOKEN 0 /* 0 Extra Bits 0+0 */
+#define ONE_TOKEN 1 /* 1 Extra Bits 0+1 */
+#define TWO_TOKEN 2 /* 2 Extra Bits 0+1 */
+#define THREE_TOKEN 3 /* 3 Extra Bits 0+1 */
+#define FOUR_TOKEN 4 /* 4 Extra Bits 0+1 */
+#define DCT_VAL_CATEGORY1 5 /* 5-6 Extra Bits 1+1 */
+#define DCT_VAL_CATEGORY2 6 /* 7-10 Extra Bits 2+1 */
+#define DCT_VAL_CATEGORY3 7 /* 11-18 Extra Bits 3+1 */
+#define DCT_VAL_CATEGORY4 8 /* 19-34 Extra Bits 4+1 */
+#define DCT_VAL_CATEGORY5 9 /* 35-66 Extra Bits 5+1 */
+#define DCT_VAL_CATEGORY6 10 /* 67+ Extra Bits 14+1 */
+#define DCT_EOB_TOKEN 11 /* EOB Extra Bits 0+0 */
+#define MAX_ENTROPY_TOKENS 12
+#define ENTROPY_NODES 11
+#define EOSB_TOKEN 127 /* Not signalled, encoder only */
+
+#define INTER_MODE_CONTEXTS 7
+
+extern const vp9_tree_index vp9_coef_tree[];
+
+#define DCT_EOB_MODEL_TOKEN 3 /* EOB Extra Bits 0+0 */
+extern const vp9_tree_index vp9_coefmodel_tree[];
+
+extern struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS];
+
+typedef struct {
+ vp9_tree_p tree;
+ const vp9_prob *prob;
+ int len;
+ int base_val;
+} vp9_extra_bit;
+
+extern const vp9_extra_bit vp9_extra_bits[12]; /* indexed by token value */
+
+#define MAX_PROB 255
+#define DCT_MAX_VALUE 16384
+
+/* Coefficients are predicted via a 3-dimensional probability table. */
+
+/* Outside dimension. 0 = Y with DC, 1 = UV */
+#define BLOCK_TYPES 2
+#define REF_TYPES 2 // intra=0, inter=1
+
+/* Middle dimension reflects the coefficient position within the transform. */
+#define COEF_BANDS 6
+
+/* Inside dimension is a measure of nearby complexity that reflects the
+   energy of nearby nonzero coefficients. For the first coefficient (DC,
+   unless block type is 0), we look at the (already encoded) blocks above
+   and to the left of the current block. The context index is then the
+   number (0, 1, or 2) of these blocks having nonzero coefficients.
+   After decoding a coefficient, the measure is determined by the size of
+   the most recently decoded coefficient.
+   Note that the intuitive meaning of this measure changes as coefficients
+   are decoded: e.g., prior to the first token, a zero means that the
+   neighbors are empty, while after the first token, because of the use of
+   end-of-block, a zero means we just decoded a zero and hence guarantees
+   that a nonzero coefficient will appear later in this block. This shift
+   in meaning is nevertheless OK because our context also depends on the
+   coefficient band (and zigzag positions 0, 1, and 2 are in distinct
+   bands). */
+
+#define PREV_COEF_CONTEXTS 6
+
+// #define ENTROPY_STATS
+
+typedef unsigned int vp9_coeff_count[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [MAX_ENTROPY_TOKENS];
+typedef unsigned int vp9_coeff_stats[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES][2];
+typedef vp9_prob vp9_coeff_probs[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES];
+
+#define SUBEXP_PARAM 4 /* Subexponential code parameter */
+#define MODULUS_PARAM 13 /* Modulus parameter */
+
+struct VP9Common;
+void vp9_default_coef_probs(struct VP9Common *cm);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_4x4[16]);
+
+extern DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_4x4[16]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_4x4[16]);
+
+extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_8x8[64]);
+
+extern DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_8x8[64]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_8x8[64]);
+
+extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_16x16[256]);
+
+extern DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_16x16[256]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_16x16[256]);
+
+extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_32x32[1024]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_4x4[16]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_4x4[16]);
+extern DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_4x4[16]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_8x8[64]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_8x8[64]);
+extern DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_8x8[64]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_16x16[256]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_16x16[256]);
+extern DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_16x16[256]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_32x32[1024]);
+
+#define MAX_NEIGHBORS 2
+
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_default_scan_4x4_neighbors[17 * MAX_NEIGHBORS]);
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_col_scan_4x4_neighbors[17 * MAX_NEIGHBORS]);
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_row_scan_4x4_neighbors[17 * MAX_NEIGHBORS]);
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_col_scan_8x8_neighbors[65 * MAX_NEIGHBORS]);
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_row_scan_8x8_neighbors[65 * MAX_NEIGHBORS]);
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_default_scan_8x8_neighbors[65 * MAX_NEIGHBORS]);
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_col_scan_16x16_neighbors[257 * MAX_NEIGHBORS]);
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_row_scan_16x16_neighbors[257 * MAX_NEIGHBORS]);
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_default_scan_16x16_neighbors[257 * MAX_NEIGHBORS]);
+extern DECLARE_ALIGNED(16, int16_t,
+ vp9_default_scan_32x32_neighbors[1025 * MAX_NEIGHBORS]);
+
+void vp9_coef_tree_initialize(void);
+void vp9_adapt_coef_probs(struct VP9Common *cm);
+
+static INLINE void reset_skip_context(MACROBLOCKD *xd, BLOCK_SIZE bsize) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ struct macroblockd_plane *const pd = &xd->plane[i];
+ const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+ vpx_memset(pd->above_context, 0, sizeof(ENTROPY_CONTEXT) *
+ num_4x4_blocks_wide_lookup[plane_bsize]);
+ vpx_memset(pd->left_context, 0, sizeof(ENTROPY_CONTEXT) *
+ num_4x4_blocks_high_lookup[plane_bsize]);
+ }
+}
+
+// This is the index in the scan order beyond which all coefficients for
+// 8x8 transform and above are in the top band.
+// For 4x4 blocks the index is less but to keep things common the lookup
+// table for 4x4 is padded out to this index.
+#define MAXBAND_INDEX 21
+
+extern const uint8_t vp9_coefband_trans_8x8plus[MAXBAND_INDEX + 1];
+extern const uint8_t vp9_coefband_trans_4x4[MAXBAND_INDEX + 1];
+
+
+static int get_coef_band(const uint8_t *band_translate, int coef_index) {
+ return (coef_index > MAXBAND_INDEX)
+ ? (COEF_BANDS - 1) : band_translate[coef_index];
+}
+
+static INLINE int get_coef_context(const int16_t *neighbors,
+ uint8_t *token_cache,
+ int c) {
+ return (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] +
+ token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> 1;
+}
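
get_coef_context() averages, rounding up, the token_cache entries of a
position's two neighbours; for col/row scans both slots hold the same
neighbour, so the average collapses to that single value without a
branch. A toy replay of the arithmetic with made-up cache values:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_NEIGHBORS 2

    /* Same arithmetic as get_coef_context() above, on toy data. */
    static int ctx(const int16_t *nb, const uint8_t *cache, int c) {
      return (1 + cache[nb[MAX_NEIGHBORS * c + 0]] +
              cache[nb[MAX_NEIGHBORS * c + 1]]) >> 1;
    }

    int main(void) {
      /* Position 2's neighbours are scan positions 0 and 1. */
      const int16_t nb[] = { 0, 0, 0, 0, 0, 1 };
      const uint8_t cache[] = { 2, 1 };  /* energy classes already seen */
      assert(ctx(nb, cache, 2) == 2);    /* (1 + 2 + 1) >> 1 */
      printf("context = %d\n", ctx(nb, cache, 2));
      return 0;
    }
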
+
+const int16_t *vp9_get_coef_neighbors_handle(const int16_t *scan);
+
+
+// 128 lists of probabilities are stored for the following ONE-node probs:
+// 1, 3, 5, 7, ..., 253, 255
+// In-between probabilities are interpolated linearly.
+
+#define COEFPROB_MODELS 128
+
+#define UNCONSTRAINED_NODES 3
+
+#define PIVOT_NODE 2 // which node is pivot
+
+typedef vp9_prob vp9_coeff_probs_model[REF_TYPES][COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [UNCONSTRAINED_NODES];
+
+typedef unsigned int vp9_coeff_count_model[REF_TYPES][COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [UNCONSTRAINED_NODES + 1];
+typedef unsigned int vp9_coeff_stats_model[REF_TYPES][COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [UNCONSTRAINED_NODES][2];
+
+void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full);
+
+static INLINE const int16_t* get_scan_4x4(TX_TYPE tx_type) {
+ switch (tx_type) {
+ case ADST_DCT:
+ return vp9_row_scan_4x4;
+ case DCT_ADST:
+ return vp9_col_scan_4x4;
+ default:
+ return vp9_default_scan_4x4;
+ }
+}
+
+static INLINE void get_scan_nb_4x4(TX_TYPE tx_type,
+ const int16_t **scan, const int16_t **nb) {
+ switch (tx_type) {
+ case ADST_DCT:
+ *scan = vp9_row_scan_4x4;
+ *nb = vp9_row_scan_4x4_neighbors;
+ break;
+ case DCT_ADST:
+ *scan = vp9_col_scan_4x4;
+ *nb = vp9_col_scan_4x4_neighbors;
+ break;
+ default:
+ *scan = vp9_default_scan_4x4;
+ *nb = vp9_default_scan_4x4_neighbors;
+ break;
+ }
+}
+
+static INLINE const int16_t* get_iscan_4x4(TX_TYPE tx_type) {
+ switch (tx_type) {
+ case ADST_DCT:
+ return vp9_row_iscan_4x4;
+ case DCT_ADST:
+ return vp9_col_iscan_4x4;
+ default:
+ return vp9_default_iscan_4x4;
+ }
+}
+
+static INLINE const int16_t* get_scan_8x8(TX_TYPE tx_type) {
+ switch (tx_type) {
+ case ADST_DCT:
+ return vp9_row_scan_8x8;
+ case DCT_ADST:
+ return vp9_col_scan_8x8;
+ default:
+ return vp9_default_scan_8x8;
+ }
+}
+
+static INLINE void get_scan_nb_8x8(TX_TYPE tx_type,
+ const int16_t **scan, const int16_t **nb) {
+ switch (tx_type) {
+ case ADST_DCT:
+ *scan = vp9_row_scan_8x8;
+ *nb = vp9_row_scan_8x8_neighbors;
+ break;
+ case DCT_ADST:
+ *scan = vp9_col_scan_8x8;
+ *nb = vp9_col_scan_8x8_neighbors;
+ break;
+ default:
+ *scan = vp9_default_scan_8x8;
+ *nb = vp9_default_scan_8x8_neighbors;
+ break;
+ }
+}
+
+static INLINE const int16_t* get_iscan_8x8(TX_TYPE tx_type) {
+ switch (tx_type) {
+ case ADST_DCT:
+ return vp9_row_iscan_8x8;
+ case DCT_ADST:
+ return vp9_col_iscan_8x8;
+ default:
+ return vp9_default_iscan_8x8;
+ }
+}
+
+static INLINE const int16_t* get_scan_16x16(TX_TYPE tx_type) {
+ switch (tx_type) {
+ case ADST_DCT:
+ return vp9_row_scan_16x16;
+ case DCT_ADST:
+ return vp9_col_scan_16x16;
+ default:
+ return vp9_default_scan_16x16;
+ }
+}
+
+static INLINE void get_scan_nb_16x16(TX_TYPE tx_type,
+ const int16_t **scan, const int16_t **nb) {
+ switch (tx_type) {
+ case ADST_DCT:
+ *scan = vp9_row_scan_16x16;
+ *nb = vp9_row_scan_16x16_neighbors;
+ break;
+ case DCT_ADST:
+ *scan = vp9_col_scan_16x16;
+ *nb = vp9_col_scan_16x16_neighbors;
+ break;
+ default:
+ *scan = vp9_default_scan_16x16;
+ *nb = vp9_default_scan_16x16_neighbors;
+ break;
+ }
+}
+
+static INLINE const int16_t* get_iscan_16x16(TX_TYPE tx_type) {
+ switch (tx_type) {
+ case ADST_DCT:
+ return vp9_row_iscan_16x16;
+ case DCT_ADST:
+ return vp9_col_iscan_16x16;
+ default:
+ return vp9_default_iscan_16x16;
+ }
+}
+
+static int get_entropy_context(const MACROBLOCKD *xd, TX_SIZE tx_size,
+ PLANE_TYPE type, int block_idx,
+ ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
+ const int16_t **scan,
+ const uint8_t **band_translate) {
+ ENTROPY_CONTEXT above_ec = 0, left_ec = 0;
+
+ switch (tx_size) {
+ case TX_4X4:
+ *scan = get_scan_4x4(get_tx_type_4x4(type, xd, block_idx));
+ *band_translate = vp9_coefband_trans_4x4;
+ above_ec = A[0] != 0;
+ left_ec = L[0] != 0;
+ break;
+ case TX_8X8:
+ *scan = get_scan_8x8(get_tx_type_8x8(type, xd));
+ *band_translate = vp9_coefband_trans_8x8plus;
+ above_ec = !!*(uint16_t *)A;
+ left_ec = !!*(uint16_t *)L;
+ break;
+ case TX_16X16:
+ *scan = get_scan_16x16(get_tx_type_16x16(type, xd));
+ *band_translate = vp9_coefband_trans_8x8plus;
+ above_ec = !!*(uint32_t *)A;
+ left_ec = !!*(uint32_t *)L;
+ break;
+ case TX_32X32:
+ *scan = vp9_default_scan_32x32;
+ *band_translate = vp9_coefband_trans_8x8plus;
+ above_ec = !!*(uint64_t *)A;
+ left_ec = !!*(uint64_t *)L;
+ break;
+ default:
+ assert(!"Invalid transform size.");
+ }
+
+ return combine_entropy_contexts(above_ec, left_ec);
+}
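+
+/* Note on the pointer casts above: ENTROPY_CONTEXT entries are one byte per
+ * 4x4 unit, so reinterpreting the span covered by the transform as a
+ * uint16_t/uint32_t/uint64_t tests 2, 4 or 8 entries for "any nonzero"
+ * with a single load. */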
+
+enum { VP9_COEF_UPDATE_PROB = 252 };
+
+#endif // VP9_COMMON_VP9_ENTROPY_H_
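+
+/* Usage sketch (illustrative, not part of the header): a coefficient
+ * decoder would fetch a scan order together with its neighbor table, e.g.
+ *
+ *   const int16_t *scan, *nb;
+ *   get_scan_nb_4x4(ADST_DCT, &scan, &nb);
+ *
+ * scan[i] is the i-th coefficient position in coding order; the neighbor
+ * table lists the previously coded positions used to pick the entropy
+ * context for each coefficient. */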
diff --git a/libvpx/vp9/common/vp9_entropymode.c b/libvpx/vp9/common/vp9_entropymode.c
new file mode 100644
index 0000000..93c89b0
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropymode.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_seg_common.h"
+
+const vp9_prob vp9_kf_uv_mode_prob[INTRA_MODES]
+ [INTRA_MODES - 1] = {
+ { 144, 11, 54, 157, 195, 130, 46, 58, 108 } /* y = dc */,
+ { 118, 15, 123, 148, 131, 101, 44, 93, 131 } /* y = v */,
+ { 113, 12, 23, 188, 226, 142, 26, 32, 125 } /* y = h */,
+ { 120, 11, 50, 123, 163, 135, 64, 77, 103 } /* y = d45 */,
+ { 113, 9, 36, 155, 111, 157, 32, 44, 161 } /* y = d135 */,
+ { 116, 9, 55, 176, 76, 96, 37, 61, 149 } /* y = d117 */,
+ { 115, 9, 28, 141, 161, 167, 21, 25, 193 } /* y = d153 */,
+ { 120, 12, 32, 145, 195, 142, 32, 38, 86 } /* y = d207 */,
+ { 116, 12, 64, 120, 140, 125, 49, 115, 121 } /* y = d63 */,
+ { 102, 19, 66, 162, 182, 122, 35, 59, 128 } /* y = tm */
+};
+
+static const vp9_prob default_if_y_probs[BLOCK_SIZE_GROUPS]
+ [INTRA_MODES - 1] = {
+ { 65, 32, 18, 144, 162, 194, 41, 51, 98 } /* block_size < 8x8 */,
+ { 132, 68, 18, 165, 217, 196, 45, 40, 78 } /* block_size < 16x16 */,
+ { 173, 80, 19, 176, 240, 193, 64, 35, 46 } /* block_size < 32x32 */,
+ { 221, 135, 38, 194, 248, 121, 96, 85, 29 } /* block_size >= 32x32 */
+};
+
+static const vp9_prob default_if_uv_probs[INTRA_MODES]
+ [INTRA_MODES - 1] = {
+ { 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */,
+ { 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */,
+ { 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */,
+ { 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */,
+ { 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */,
+ { 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */,
+ { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */,
+ { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d207 */,
+ { 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */,
+ { 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */
+};
+
+static const vp9_prob default_partition_probs[NUM_FRAME_TYPES]
+ [NUM_PARTITION_CONTEXTS]
+ [PARTITION_TYPES - 1] = {
+ { /* frame_type = keyframe */
+ /* 8x8 -> 4x4 */
+ { 158, 97, 94 } /* a/l both not split */,
+ { 93, 24, 99 } /* a split, l not split */,
+ { 85, 119, 44 } /* l split, a not split */,
+ { 62, 59, 67 } /* a/l both split */,
+ /* 16x16 -> 8x8 */
+ { 149, 53, 53 } /* a/l both not split */,
+ { 94, 20, 48 } /* a split, l not split */,
+ { 83, 53, 24 } /* l split, a not split */,
+ { 52, 18, 18 } /* a/l both split */,
+ /* 32x32 -> 16x16 */
+ { 150, 40, 39 } /* a/l both not split */,
+ { 78, 12, 26 } /* a split, l not split */,
+ { 67, 33, 11 } /* l split, a not split */,
+ { 24, 7, 5 } /* a/l both split */,
+ /* 64x64 -> 32x32 */
+ { 174, 35, 49 } /* a/l both not split */,
+ { 68, 11, 27 } /* a split, l not split */,
+ { 57, 15, 9 } /* l split, a not split */,
+ { 12, 3, 3 } /* a/l both split */
+ }, { /* frame_type = interframe */
+ /* 8x8 -> 4x4 */
+ { 199, 122, 141 } /* a/l both not split */,
+ { 147, 63, 159 } /* a split, l not split */,
+ { 148, 133, 118 } /* l split, a not split */,
+ { 121, 104, 114 } /* a/l both split */,
+ /* 16x16 -> 8x8 */
+ { 174, 73, 87 } /* a/l both not split */,
+ { 92, 41, 83 } /* a split, l not split */,
+ { 82, 99, 50 } /* l split, a not split */,
+ { 53, 39, 39 } /* a/l both split */,
+ /* 32x32 -> 16x16 */
+ { 177, 58, 59 } /* a/l both not split */,
+ { 68, 26, 63 } /* a split, l not split */,
+ { 52, 79, 25 } /* l split, a not split */,
+ { 17, 14, 12 } /* a/l both split */,
+ /* 64x64 -> 32x32 */
+ { 222, 34, 30 } /* a/l both not split */,
+ { 72, 16, 44 } /* a split, l not split */,
+ { 58, 32, 12 } /* l split, a not split */,
+ { 10, 7, 6 } /* a/l both split */
+ }
+};
+
+const vp9_prob vp9_kf_y_mode_prob[INTRA_MODES]
+ [INTRA_MODES]
+ [INTRA_MODES - 1] = {
+ { /* above = dc */
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91 } /* left = dc */,
+ { 92, 45, 102, 136, 116, 180, 74, 90, 100 } /* left = v */,
+ { 73, 32, 19, 187, 222, 215, 46, 34, 100 } /* left = h */,
+ { 91, 30, 32, 116, 121, 186, 93, 86, 94 } /* left = d45 */,
+ { 72, 35, 36, 149, 68, 206, 68, 63, 105 } /* left = d135 */,
+ { 73, 31, 28, 138, 57, 124, 55, 122, 151 } /* left = d117 */,
+ { 67, 23, 21, 140, 126, 197, 40, 37, 171 } /* left = d153 */,
+ { 86, 27, 28, 128, 154, 212, 45, 43, 53 } /* left = d207 */,
+ { 74, 32, 27, 107, 86, 160, 63, 134, 102 } /* left = d63 */,
+ { 59, 67, 44, 140, 161, 202, 78, 67, 119 } /* left = tm */
+ }, { /* above = v */
+ { 63, 36, 126, 146, 123, 158, 60, 90, 96 } /* left = dc */,
+ { 43, 46, 168, 134, 107, 128, 69, 142, 92 } /* left = v */,
+ { 44, 29, 68, 159, 201, 177, 50, 57, 77 } /* left = h */,
+ { 58, 38, 76, 114, 97, 172, 78, 133, 92 } /* left = d45 */,
+ { 46, 41, 76, 140, 63, 184, 69, 112, 57 } /* left = d135 */,
+ { 38, 32, 85, 140, 46, 112, 54, 151, 133 } /* left = d117 */,
+ { 39, 27, 61, 131, 110, 175, 44, 75, 136 } /* left = d153 */,
+ { 52, 30, 74, 113, 130, 175, 51, 64, 58 } /* left = d207 */,
+ { 47, 35, 80, 100, 74, 143, 64, 163, 74 } /* left = d63 */,
+ { 36, 61, 116, 114, 128, 162, 80, 125, 82 } /* left = tm */
+ }, { /* above = h */
+ { 82, 26, 26, 171, 208, 204, 44, 32, 105 } /* left = dc */,
+ { 55, 44, 68, 166, 179, 192, 57, 57, 108 } /* left = v */,
+ { 42, 26, 11, 199, 241, 228, 23, 15, 85 } /* left = h */,
+ { 68, 42, 19, 131, 160, 199, 55, 52, 83 } /* left = d45 */,
+ { 58, 50, 25, 139, 115, 232, 39, 52, 118 } /* left = d135 */,
+ { 50, 35, 33, 153, 104, 162, 64, 59, 131 } /* left = d117 */,
+ { 44, 24, 16, 150, 177, 202, 33, 19, 156 } /* left = d153 */,
+ { 55, 27, 12, 153, 203, 218, 26, 27, 49 } /* left = d207 */,
+ { 53, 49, 21, 110, 116, 168, 59, 80, 76 } /* left = d63 */,
+ { 38, 72, 19, 168, 203, 212, 50, 50, 107 } /* left = tm */
+ }, { /* above = d45 */
+ { 103, 26, 36, 129, 132, 201, 83, 80, 93 } /* left = dc */,
+ { 59, 38, 83, 112, 103, 162, 98, 136, 90 } /* left = v */,
+ { 62, 30, 23, 158, 200, 207, 59, 57, 50 } /* left = h */,
+ { 67, 30, 29, 84, 86, 191, 102, 91, 59 } /* left = d45 */,
+ { 60, 32, 33, 112, 71, 220, 64, 89, 104 } /* left = d135 */,
+ { 53, 26, 34, 130, 56, 149, 84, 120, 103 } /* left = d117 */,
+ { 53, 21, 23, 133, 109, 210, 56, 77, 172 } /* left = d153 */,
+ { 77, 19, 29, 112, 142, 228, 55, 66, 36 } /* left = d207 */,
+ { 61, 29, 29, 93, 97, 165, 83, 175, 162 } /* left = d63 */,
+ { 47, 47, 43, 114, 137, 181, 100, 99, 95 } /* left = tm */
+ }, { /* above = d135 */
+ { 69, 23, 29, 128, 83, 199, 46, 44, 101 } /* left = dc */,
+ { 53, 40, 55, 139, 69, 183, 61, 80, 110 } /* left = v */,
+ { 40, 29, 19, 161, 180, 207, 43, 24, 91 } /* left = h */,
+ { 60, 34, 19, 105, 61, 198, 53, 64, 89 } /* left = d45 */,
+ { 52, 31, 22, 158, 40, 209, 58, 62, 89 } /* left = d135 */,
+ { 44, 31, 29, 147, 46, 158, 56, 102, 198 } /* left = d117 */,
+ { 35, 19, 12, 135, 87, 209, 41, 45, 167 } /* left = d153 */,
+ { 55, 25, 21, 118, 95, 215, 38, 39, 66 } /* left = d207 */,
+ { 51, 38, 25, 113, 58, 164, 70, 93, 97 } /* left = d63 */,
+ { 47, 54, 34, 146, 108, 203, 72, 103, 151 } /* left = tm */
+ }, { /* above = d117 */
+ { 64, 19, 37, 156, 66, 138, 49, 95, 133 } /* left = dc */,
+ { 46, 27, 80, 150, 55, 124, 55, 121, 135 } /* left = v */,
+ { 36, 23, 27, 165, 149, 166, 54, 64, 118 } /* left = h */,
+ { 53, 21, 36, 131, 63, 163, 60, 109, 81 } /* left = d45 */,
+ { 40, 26, 35, 154, 40, 185, 51, 97, 123 } /* left = d135 */,
+ { 35, 19, 34, 179, 19, 97, 48, 129, 124 } /* left = d117 */,
+ { 36, 20, 26, 136, 62, 164, 33, 77, 154 } /* left = d153 */,
+ { 45, 18, 32, 130, 90, 157, 40, 79, 91 } /* left = d207 */,
+ { 45, 26, 28, 129, 45, 129, 49, 147, 123 } /* left = d63 */,
+ { 38, 44, 51, 136, 74, 162, 57, 97, 121 } /* left = tm */
+ }, { /* above = d153 */
+ { 75, 17, 22, 136, 138, 185, 32, 34, 166 } /* left = dc */,
+ { 56, 39, 58, 133, 117, 173, 48, 53, 187 } /* left = v */,
+ { 35, 21, 12, 161, 212, 207, 20, 23, 145 } /* left = h */,
+ { 56, 29, 19, 117, 109, 181, 55, 68, 112 } /* left = d45 */,
+ { 47, 29, 17, 153, 64, 220, 59, 51, 114 } /* left = d135 */,
+ { 46, 16, 24, 136, 76, 147, 41, 64, 172 } /* left = d117 */,
+ { 34, 17, 11, 108, 152, 187, 13, 15, 209 } /* left = d153 */,
+ { 51, 24, 14, 115, 133, 209, 32, 26, 104 } /* left = d207 */,
+ { 55, 30, 18, 122, 79, 179, 44, 88, 116 } /* left = d63 */,
+ { 37, 49, 25, 129, 168, 164, 41, 54, 148 } /* left = tm */
+ }, { /* above = d207 */
+ { 82, 22, 32, 127, 143, 213, 39, 41, 70 } /* left = dc */,
+ { 62, 44, 61, 123, 105, 189, 48, 57, 64 } /* left = v */,
+ { 47, 25, 17, 175, 222, 220, 24, 30, 86 } /* left = h */,
+ { 68, 36, 17, 106, 102, 206, 59, 74, 74 } /* left = d45 */,
+ { 57, 39, 23, 151, 68, 216, 55, 63, 58 } /* left = d135 */,
+ { 49, 30, 35, 141, 70, 168, 82, 40, 115 } /* left = d117 */,
+ { 51, 25, 15, 136, 129, 202, 38, 35, 139 } /* left = d153 */,
+ { 68, 26, 16, 111, 141, 215, 29, 28, 28 } /* left = d207 */,
+ { 59, 39, 19, 114, 75, 180, 77, 104, 42 } /* left = d63 */,
+ { 40, 61, 26, 126, 152, 206, 61, 59, 93 } /* left = tm */
+ }, { /* above = d63 */
+ { 78, 23, 39, 111, 117, 170, 74, 124, 94 } /* left = dc */,
+ { 48, 34, 86, 101, 92, 146, 78, 179, 134 } /* left = v */,
+ { 47, 22, 24, 138, 187, 178, 68, 69, 59 } /* left = h */,
+ { 56, 25, 33, 105, 112, 187, 95, 177, 129 } /* left = d45 */,
+ { 48, 31, 27, 114, 63, 183, 82, 116, 56 } /* left = d135 */,
+ { 43, 28, 37, 121, 63, 123, 61, 192, 169 } /* left = d117 */,
+ { 42, 17, 24, 109, 97, 177, 56, 76, 122 } /* left = d153 */,
+ { 58, 18, 28, 105, 139, 182, 70, 92, 63 } /* left = d207 */,
+ { 46, 23, 32, 74, 86, 150, 67, 183, 88 } /* left = d63 */,
+ { 36, 38, 48, 92, 122, 165, 88, 137, 91 } /* left = tm */
+ }, { /* above = tm */
+ { 65, 70, 60, 155, 159, 199, 61, 60, 81 } /* left = dc */,
+ { 44, 78, 115, 132, 119, 173, 71, 112, 93 } /* left = v */,
+ { 39, 38, 21, 184, 227, 206, 42, 32, 64 } /* left = h */,
+ { 58, 47, 36, 124, 137, 193, 80, 82, 78 } /* left = d45 */,
+ { 49, 50, 35, 144, 95, 205, 63, 78, 59 } /* left = d135 */,
+ { 41, 53, 52, 148, 71, 142, 65, 128, 51 } /* left = d117 */,
+ { 40, 36, 28, 143, 143, 202, 40, 55, 137 } /* left = d153 */,
+ { 52, 34, 29, 129, 183, 227, 42, 35, 43 } /* left = d207 */,
+ { 42, 44, 44, 104, 105, 164, 64, 130, 80 } /* left = d63 */,
+ { 43, 81, 53, 140, 169, 204, 68, 84, 72 } /* left = tm */
+ }
+};
+
+static const vp9_prob default_inter_mode_probs[INTER_MODE_CONTEXTS]
+ [INTER_MODES - 1] = {
+ {2, 173, 34}, // 0 = both zero mv
+ {7, 145, 85}, // 1 = one zero mv + one a predicted mv
+ {7, 166, 63}, // 2 = two predicted mvs
+ {7, 94, 66}, // 3 = one predicted/zero and one new mv
+ {8, 64, 46}, // 4 = two new mvs
+ {17, 81, 31}, // 5 = one intra neighbour + x
+ {25, 29, 30}, // 6 = two intra neighbours
+};
+
+/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
+const vp9_tree_index vp9_intra_mode_tree[INTRA_MODES * 2 - 2] = {
+ -DC_PRED, 2, /* 0 = DC_NODE */
+ -TM_PRED, 4, /* 1 = TM_NODE */
+ -V_PRED, 6, /* 2 = V_NODE */
+ 8, 12, /* 3 = COM_NODE */
+ -H_PRED, 10, /* 4 = H_NODE */
+ -D135_PRED, -D117_PRED, /* 5 = D135_NODE */
+ -D45_PRED, 14, /* 6 = D45_NODE */
+ -D63_PRED, 16, /* 7 = D63_NODE */
+ -D153_PRED, -D207_PRED /* 8 = D153_NODE */
+};
+
+const vp9_tree_index vp9_inter_mode_tree[6] = {
+ -ZEROMV, 2,
+ -NEARESTMV, 4,
+ -NEARMV, -NEWMV
+};
+
+const vp9_tree_index vp9_partition_tree[6] = {
+ -PARTITION_NONE, 2,
+ -PARTITION_HORZ, 4,
+ -PARTITION_VERT, -PARTITION_SPLIT
+};
+
+struct vp9_token vp9_intra_mode_encodings[INTRA_MODES];
+struct vp9_token vp9_inter_mode_encodings[INTER_MODES];
+
+struct vp9_token vp9_partition_encodings[PARTITION_TYPES];
+
+static const vp9_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
+ 9, 102, 187, 225
+};
+
+static const vp9_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = {
+ 239, 183, 119, 96, 41
+};
+
+static const vp9_prob default_comp_ref_p[REF_CONTEXTS] = {
+ 50, 126, 123, 221, 226
+};
+
+static const vp9_prob default_single_ref_p[REF_CONTEXTS][2] = {
+ { 33, 16 },
+ { 77, 74 },
+ { 142, 142 },
+ { 172, 170 },
+ { 238, 247 }
+};
+
+static const struct tx_probs default_tx_probs = {
+ { { 3, 136, 37 },
+ { 5, 52, 13 } },
+
+ { { 20, 152 },
+ { 15, 101 } },
+
+ { { 100 },
+ { 66 } }
+};
+
+void tx_counts_to_branch_counts_32x32(unsigned int *tx_count_32x32p,
+ unsigned int (*ct_32x32p)[2]) {
+ ct_32x32p[0][0] = tx_count_32x32p[TX_4X4];
+ ct_32x32p[0][1] = tx_count_32x32p[TX_8X8] +
+ tx_count_32x32p[TX_16X16] +
+ tx_count_32x32p[TX_32X32];
+ ct_32x32p[1][0] = tx_count_32x32p[TX_8X8];
+ ct_32x32p[1][1] = tx_count_32x32p[TX_16X16] +
+ tx_count_32x32p[TX_32X32];
+ ct_32x32p[2][0] = tx_count_32x32p[TX_16X16];
+ ct_32x32p[2][1] = tx_count_32x32p[TX_32X32];
+}
+
+void tx_counts_to_branch_counts_16x16(unsigned int *tx_count_16x16p,
+ unsigned int (*ct_16x16p)[2]) {
+ ct_16x16p[0][0] = tx_count_16x16p[TX_4X4];
+ ct_16x16p[0][1] = tx_count_16x16p[TX_8X8] + tx_count_16x16p[TX_16X16];
+ ct_16x16p[1][0] = tx_count_16x16p[TX_8X8];
+ ct_16x16p[1][1] = tx_count_16x16p[TX_16X16];
+}
+
+void tx_counts_to_branch_counts_8x8(unsigned int *tx_count_8x8p,
+ unsigned int (*ct_8x8p)[2]) {
+ ct_8x8p[0][0] = tx_count_8x8p[TX_4X4];
+ ct_8x8p[0][1] = tx_count_8x8p[TX_8X8];
+}
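+
+/* Illustrative sketch (hypothetical counts, not library code): how the
+ * per-size counts fold into binary branch counts, shown for the 16x16
+ * helper above. */
+static void tx_branch_counts_example(void) {
+ unsigned int tx_counts[TX_SIZES - 1] = { 10, 20, 30 }; /* 4x4/8x8/16x16 */
+ unsigned int ct[2][2];
+ tx_counts_to_branch_counts_16x16(tx_counts, ct);
+ /* ct[0] = {10, 50}: first branch separates 4x4 from everything larger;
+ * ct[1] = {20, 30}: second branch separates 8x8 from 16x16. */
+}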
+
+static const vp9_prob default_mbskip_probs[MBSKIP_CONTEXTS] = {
+ 192, 128, 64
+};
+
+static const vp9_prob default_switchable_interp_prob[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS - 1] = {
+ { 235, 162, },
+ { 36, 255, },
+ { 34, 3, },
+ { 149, 144, },
+};
+
+void vp9_init_mbmode_probs(VP9_COMMON *cm) {
+ vp9_copy(cm->fc.uv_mode_prob, default_if_uv_probs);
+ vp9_copy(cm->fc.y_mode_prob, default_if_y_probs);
+ vp9_copy(cm->fc.switchable_interp_prob, default_switchable_interp_prob);
+ vp9_copy(cm->fc.partition_prob, default_partition_probs);
+ vp9_copy(cm->fc.intra_inter_prob, default_intra_inter_p);
+ vp9_copy(cm->fc.comp_inter_prob, default_comp_inter_p);
+ vp9_copy(cm->fc.comp_ref_prob, default_comp_ref_p);
+ vp9_copy(cm->fc.single_ref_prob, default_single_ref_p);
+ cm->fc.tx_probs = default_tx_probs;
+ vp9_copy(cm->fc.mbskip_probs, default_mbskip_probs);
+}
+
+const vp9_tree_index vp9_switchable_interp_tree[SWITCHABLE_FILTERS * 2 - 2] = {
+ -EIGHTTAP, 2,
+ -EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP
+};
+struct vp9_token vp9_switchable_interp_encodings[SWITCHABLE_FILTERS];
+
+void vp9_entropy_mode_init(void) {
+ vp9_tokens_from_tree(vp9_intra_mode_encodings, vp9_intra_mode_tree);
+ vp9_tokens_from_tree(vp9_switchable_interp_encodings,
+ vp9_switchable_interp_tree);
+ vp9_tokens_from_tree(vp9_partition_encodings, vp9_partition_tree);
+ vp9_tokens_from_tree_offset(vp9_inter_mode_encodings,
+ vp9_inter_mode_tree, NEARESTMV);
+}
+
+#define COUNT_SAT 20
+#define MAX_UPDATE_FACTOR 128
+
+static int update_ct(vp9_prob pre_prob, vp9_prob prob, unsigned int ct[2]) {
+ return merge_probs(pre_prob, prob, ct, COUNT_SAT, MAX_UPDATE_FACTOR);
+}
+
+static int update_ct2(vp9_prob pre_prob, unsigned int ct[2]) {
+ return merge_probs2(pre_prob, ct, COUNT_SAT, MAX_UPDATE_FACTOR);
+}
+
+static void update_mode_probs(int n_modes,
+ const vp9_tree_index *tree, unsigned int *cnt,
+ vp9_prob *pre_probs, vp9_prob *dst_probs,
+ unsigned int tok0_offset) {
+#define MAX_PROBS 32
+ vp9_prob probs[MAX_PROBS];
+ unsigned int branch_ct[MAX_PROBS][2];
+ int t;
+
+ assert(n_modes - 1 < MAX_PROBS);
+ vp9_tree_probs_from_distribution(tree, probs, branch_ct, cnt, tok0_offset);
+ for (t = 0; t < n_modes - 1; ++t)
+ dst_probs[t] = update_ct(pre_probs[t], probs[t], branch_ct[t]);
+}
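+
+/* Illustrative sketch of the count-saturated blend that merge_probs()
+ * (defined elsewhere, in vp9_treecoder.h) performs; this standalone
+ * version is a paraphrase under that assumption, not the library source. */
+static vp9_prob blend_prob_sketch(vp9_prob pre, vp9_prob cur,
+ const unsigned int ct[2]) {
+ const unsigned int n = ct[0] + ct[1];
+ const unsigned int count = n < COUNT_SAT ? n : COUNT_SAT;
+ const unsigned int factor = MAX_UPDATE_FACTOR * count / COUNT_SAT;
+ /* 8-bit fixed-point weighted average: with few observed events the old
+ * probability dominates; at saturation the new estimate gets weight
+ * MAX_UPDATE_FACTOR/256. */
+ return (vp9_prob)((pre * (256 - factor) + cur * factor + 128) >> 8);
+}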
+
+void vp9_adapt_mode_probs(VP9_COMMON *cm) {
+ int i, j;
+ FRAME_CONTEXT *fc = &cm->fc;
+ FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
+ FRAME_COUNTS *counts = &cm->counts;
+
+ for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
+ fc->intra_inter_prob[i] = update_ct2(pre_fc->intra_inter_prob[i],
+ counts->intra_inter[i]);
+ for (i = 0; i < COMP_INTER_CONTEXTS; i++)
+ fc->comp_inter_prob[i] = update_ct2(pre_fc->comp_inter_prob[i],
+ counts->comp_inter[i]);
+ for (i = 0; i < REF_CONTEXTS; i++)
+ fc->comp_ref_prob[i] = update_ct2(pre_fc->comp_ref_prob[i],
+ counts->comp_ref[i]);
+ for (i = 0; i < REF_CONTEXTS; i++)
+ for (j = 0; j < 2; j++)
+ fc->single_ref_prob[i][j] = update_ct2(pre_fc->single_ref_prob[i][j],
+ counts->single_ref[i][j]);
+
+ for (i = 0; i < INTER_MODE_CONTEXTS; i++)
+ update_mode_probs(INTER_MODES, vp9_inter_mode_tree,
+ counts->inter_mode[i], pre_fc->inter_mode_probs[i],
+ fc->inter_mode_probs[i], NEARESTMV);
+
+ for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
+ update_mode_probs(INTRA_MODES, vp9_intra_mode_tree,
+ counts->y_mode[i], pre_fc->y_mode_prob[i],
+ fc->y_mode_prob[i], 0);
+
+ for (i = 0; i < INTRA_MODES; ++i)
+ update_mode_probs(INTRA_MODES, vp9_intra_mode_tree,
+ counts->uv_mode[i], pre_fc->uv_mode_prob[i],
+ fc->uv_mode_prob[i], 0);
+
+ for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
+ update_mode_probs(PARTITION_TYPES, vp9_partition_tree,
+ counts->partition[i],
+ pre_fc->partition_prob[INTER_FRAME][i],
+ fc->partition_prob[INTER_FRAME][i], 0);
+
+ if (cm->mcomp_filter_type == SWITCHABLE) {
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++)
+ update_mode_probs(SWITCHABLE_FILTERS, vp9_switchable_interp_tree,
+ counts->switchable_interp[i],
+ pre_fc->switchable_interp_prob[i],
+ fc->switchable_interp_prob[i], 0);
+ }
+
+ if (cm->tx_mode == TX_MODE_SELECT) {
+ unsigned int branch_ct_8x8p[TX_SIZES - 3][2];
+ unsigned int branch_ct_16x16p[TX_SIZES - 2][2];
+ unsigned int branch_ct_32x32p[TX_SIZES - 1][2];
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
+ tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], branch_ct_8x8p);
+ for (j = 0; j < TX_SIZES - 3; ++j)
+ fc->tx_probs.p8x8[i][j] = update_ct2(pre_fc->tx_probs.p8x8[i][j],
+ branch_ct_8x8p[j]);
+
+ tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], branch_ct_16x16p);
+ for (j = 0; j < TX_SIZES - 2; ++j)
+ fc->tx_probs.p16x16[i][j] = update_ct2(pre_fc->tx_probs.p16x16[i][j],
+ branch_ct_16x16p[j]);
+
+ tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], branch_ct_32x32p);
+ for (j = 0; j < TX_SIZES - 1; ++j)
+ fc->tx_probs.p32x32[i][j] = update_ct2(pre_fc->tx_probs.p32x32[i][j],
+ branch_ct_32x32p[j]);
+ }
+ }
+
+ for (i = 0; i < MBSKIP_CONTEXTS; ++i)
+ fc->mbskip_probs[i] = update_ct2(pre_fc->mbskip_probs[i],
+ counts->mbskip[i]);
+}
+
+static void set_default_lf_deltas(struct loopfilter *lf) {
+ lf->mode_ref_delta_enabled = 1;
+ lf->mode_ref_delta_update = 1;
+
+ lf->ref_deltas[INTRA_FRAME] = 1;
+ lf->ref_deltas[LAST_FRAME] = 0;
+ lf->ref_deltas[GOLDEN_FRAME] = -1;
+ lf->ref_deltas[ALTREF_FRAME] = -1;
+
+ lf->mode_deltas[0] = 0;
+ lf->mode_deltas[1] = 0;
+}
+
+void vp9_setup_past_independence(VP9_COMMON *cm) {
+ // Reset the segment feature data to the default values:
+ // features disabled, 0, with delta coding (default state).
+ struct loopfilter *const lf = &cm->lf;
+
+ int i;
+ vp9_clearall_segfeatures(&cm->seg);
+ cm->seg.abs_delta = SEGMENT_DELTADATA;
+ if (cm->last_frame_seg_map)
+ vpx_memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
+
+ // Reset the mode ref deltas for loop filter
+ vp9_zero(lf->last_ref_deltas);
+ vp9_zero(lf->last_mode_deltas);
+ set_default_lf_deltas(lf);
+
+ // To force update of the sharpness
+ lf->last_sharpness_level = -1;
+
+ vp9_default_coef_probs(cm);
+ vp9_init_mbmode_probs(cm);
+ vp9_init_mv_probs(cm);
+ vp9_copy(cm->fc.inter_mode_probs, default_inter_mode_probs);
+
+ if (cm->frame_type == KEY_FRAME ||
+ cm->error_resilient_mode || cm->reset_frame_context == 3) {
+ // Reset all frame contexts.
+ for (i = 0; i < NUM_FRAME_CONTEXTS; ++i)
+ cm->frame_contexts[i] = cm->fc;
+ } else if (cm->reset_frame_context == 2) {
+ // Reset only the frame context specified in the frame header.
+ cm->frame_contexts[cm->frame_context_idx] = cm->fc;
+ }
+
+ vpx_memset(cm->prev_mip, 0,
+ cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO));
+ vpx_memset(cm->mip, 0,
+ cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO));
+
+ vp9_update_mode_info_border(cm, cm->mip);
+ vp9_update_mode_info_border(cm, cm->prev_mip);
+
+ vp9_zero(cm->ref_frame_sign_bias);
+
+ cm->frame_context_idx = 0;
+}
diff --git a/libvpx/vp9/common/vp9_entropymode.h b/libvpx/vp9/common/vp9_entropymode.h
new file mode 100644
index 0000000..4cf4c03
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropymode.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ENTROPYMODE_H_
+#define VP9_COMMON_VP9_ENTROPYMODE_H_
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_treecoder.h"
+
+#define SUBMVREF_COUNT 5
+#define TX_SIZE_CONTEXTS 2
+#define MODE_UPDATE_PROB 252
+#define SWITCHABLE_FILTERS 3 // number of switchable filters
+
+// #define MODE_STATS
+
+struct VP9Common;
+
+struct tx_probs {
+ vp9_prob p32x32[TX_SIZE_CONTEXTS][TX_SIZES - 1];
+ vp9_prob p16x16[TX_SIZE_CONTEXTS][TX_SIZES - 2];
+ vp9_prob p8x8[TX_SIZE_CONTEXTS][TX_SIZES - 3];
+};
+
+struct tx_counts {
+ unsigned int p32x32[TX_SIZE_CONTEXTS][TX_SIZES];
+ unsigned int p16x16[TX_SIZE_CONTEXTS][TX_SIZES - 1];
+ unsigned int p8x8[TX_SIZE_CONTEXTS][TX_SIZES - 2];
+};
+
+extern const vp9_prob vp9_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
+extern const vp9_prob vp9_kf_y_mode_prob[INTRA_MODES][INTRA_MODES]
+ [INTRA_MODES - 1];
+
+extern const vp9_tree_index vp9_intra_mode_tree[];
+extern const vp9_tree_index vp9_inter_mode_tree[];
+
+extern struct vp9_token vp9_intra_mode_encodings[INTRA_MODES];
+extern struct vp9_token vp9_inter_mode_encodings[INTER_MODES];
+
+// probability models for partition information
+extern const vp9_tree_index vp9_partition_tree[];
+extern struct vp9_token vp9_partition_encodings[PARTITION_TYPES];
+
+extern const vp9_tree_index vp9_switchable_interp_tree
+ [2 * (SWITCHABLE_FILTERS - 1)];
+
+extern struct vp9_token vp9_switchable_interp_encodings[SWITCHABLE_FILTERS];
+
+void vp9_entropy_mode_init(void);
+
+void vp9_setup_past_independence(struct VP9Common *cm);
+
+void vp9_init_mbmode_probs(struct VP9Common *cm);
+
+void vp9_adapt_mode_probs(struct VP9Common *cm);
+
+void tx_counts_to_branch_counts_32x32(unsigned int *tx_count_32x32p,
+ unsigned int (*ct_32x32p)[2]);
+void tx_counts_to_branch_counts_16x16(unsigned int *tx_count_16x16p,
+ unsigned int (*ct_16x16p)[2]);
+void tx_counts_to_branch_counts_8x8(unsigned int *tx_count_8x8p,
+ unsigned int (*ct_8x8p)[2]);
+
+#endif // VP9_COMMON_VP9_ENTROPYMODE_H_
diff --git a/libvpx/vp9/common/vp9_entropymv.c b/libvpx/vp9/common/vp9_entropymv.c
new file mode 100644
index 0000000..2e973e5
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropymv.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_entropymv.h"
+
+#define MV_COUNT_SAT 20
+#define MV_MAX_UPDATE_FACTOR 128
+
+/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
+#define COMPANDED_MVREF_THRESH 8
+
+const vp9_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2] = {
+ -MV_JOINT_ZERO, 2,
+ -MV_JOINT_HNZVZ, 4,
+ -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
+};
+struct vp9_token vp9_mv_joint_encodings[MV_JOINTS];
+
+const vp9_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2] = {
+ -MV_CLASS_0, 2,
+ -MV_CLASS_1, 4,
+ 6, 8,
+ -MV_CLASS_2, -MV_CLASS_3,
+ 10, 12,
+ -MV_CLASS_4, -MV_CLASS_5,
+ -MV_CLASS_6, 14,
+ 16, 18,
+ -MV_CLASS_7, -MV_CLASS_8,
+ -MV_CLASS_9, -MV_CLASS_10,
+};
+struct vp9_token vp9_mv_class_encodings[MV_CLASSES];
+
+const vp9_tree_index vp9_mv_class0_tree[2 * CLASS0_SIZE - 2] = {
+ -0, -1,
+};
+struct vp9_token vp9_mv_class0_encodings[CLASS0_SIZE];
+
+const vp9_tree_index vp9_mv_fp_tree[2 * 4 - 2] = {
+ -0, 2,
+ -1, 4,
+ -2, -3
+};
+struct vp9_token vp9_mv_fp_encodings[4];
+
+static const nmv_context default_nmv_context = {
+ {32, 64, 96},
+ {
+ { /* vert component */
+ 128, /* sign */
+ {224, 144, 192, 168, 192, 176, 192, 198, 198, 245}, /* class */
+ {216}, /* class0 */
+ {136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, /* bits */
+ {{128, 128, 64}, {96, 112, 64}}, /* class0_fp */
+ {64, 96, 64}, /* fp */
+ 160, /* class0_hp bit */
+ 128, /* hp */
+ },
+ { /* hor component */
+ 128, /* sign */
+ {216, 128, 176, 160, 176, 176, 192, 198, 198, 208}, /* class */
+ {208}, /* class0 */
+ {136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, /* bits */
+ {{128, 128, 64}, {96, 112, 64}}, /* class0_fp */
+ {64, 96, 64}, /* fp */
+ 160, /* class0_hp bit */
+ 128, /* hp */
+ }
+ },
+};
+
+#define mv_class_base(c) ((c) ? (CLASS0_SIZE << ((c) + 2)) : 0)
+
+static const uint8_t log_in_base_2[] = {
+ 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10
+};
+
+MV_CLASS_TYPE vp9_get_mv_class(int z, int *offset) {
+ MV_CLASS_TYPE c = MV_CLASS_0;
+ if (z >= CLASS0_SIZE * 4096)
+ c = MV_CLASS_10;
+ else
+ c = log_in_base_2[z >> 3];
+
+ if (offset)
+ *offset = z - mv_class_base(c);
+ return c;
+}
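+
+/* Worked example (illustrative, not library code): z = 100 gives
+ * 100 >> 3 = 12, log_in_base_2[12] = 3, so the class is MV_CLASS_3
+ * ((8, 16] integer pel) and the offset is 100 - mv_class_base(3)
+ * = 100 - 64 = 36. */
+static void mv_class_example(void) {
+ int offset;
+ assert(vp9_get_mv_class(100, &offset) == MV_CLASS_3);
+ assert(offset == 36);
+}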
+
+int vp9_use_mv_hp(const MV *ref) {
+ return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
+ (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
+}
+
+int vp9_get_mv_mag(MV_CLASS_TYPE c, int offset) {
+ return mv_class_base(c) + offset;
+}
+
+static void inc_mv_component(int v, nmv_component_counts *comp_counts,
+ int incr, int usehp) {
+ int s, z, c, o, d, e, f;
+ assert(v != 0); /* should not be zero */
+ s = v < 0;
+ comp_counts->sign[s] += incr;
+ z = (s ? -v : v) - 1; /* magnitude - 1 */
+
+ c = vp9_get_mv_class(z, &o);
+ comp_counts->classes[c] += incr;
+
+ d = (o >> 3); /* int mv data */
+ f = (o >> 1) & 3; /* fractional pel mv data */
+ e = (o & 1); /* high precision mv data */
+
+ if (c == MV_CLASS_0) {
+ comp_counts->class0[d] += incr;
+ comp_counts->class0_fp[d][f] += incr;
+ comp_counts->class0_hp[e] += usehp * incr;
+ } else {
+ int i;
+ int b = c + CLASS0_BITS - 1; // number of bits
+ for (i = 0; i < b; ++i)
+ comp_counts->bits[i][((d >> i) & 1)] += incr;
+ comp_counts->fp[f] += incr;
+ comp_counts->hp[e] += usehp * incr;
+ }
+}
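+
+/* Continuing the worked example above: an offset of o = 36 decomposes as
+ * d = 36 >> 3 = 4 (integer-pel bits), f = (36 >> 1) & 3 = 2 (quarter-pel
+ * fraction) and e = 36 & 1 = 0 (eighth-pel bit), i.e.
+ * o == (d << 3) | (f << 1) | e. */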
+
+void vp9_inc_mv(const MV *mv, nmv_context_counts *counts) {
+ const MV_JOINT_TYPE j = vp9_get_mv_joint(mv);
+ ++counts->joints[j];
+
+ if (mv_joint_vertical(j)) {
+ inc_mv_component(mv->row, &counts->comps[0], 1, 1);
+ }
+
+ if (mv_joint_horizontal(j)) {
+ inc_mv_component(mv->col, &counts->comps[1], 1, 1);
+ }
+}
+
+static vp9_prob adapt_prob(vp9_prob prep, const unsigned int ct[2]) {
+ return merge_probs2(prep, ct, MV_COUNT_SAT, MV_MAX_UPDATE_FACTOR);
+}
+
+static unsigned int adapt_probs(unsigned int i,
+ vp9_tree tree,
+ vp9_prob this_probs[],
+ const vp9_prob last_probs[],
+ const unsigned int num_events[]) {
+ const unsigned int left = tree[i] <= 0
+ ? num_events[-tree[i]]
+ : adapt_probs(tree[i], tree, this_probs, last_probs, num_events);
+
+ const unsigned int right = tree[i + 1] <= 0
+ ? num_events[-tree[i + 1]]
+ : adapt_probs(tree[i + 1], tree, this_probs, last_probs, num_events);
+ const unsigned int ct[2] = { left, right };
+ this_probs[i >> 1] = adapt_prob(last_probs[i >> 1], ct);
+ return left + right;
+}
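+
+/* Tree convention that adapt_probs() relies on: entries <= 0 are leaves
+ * holding negated token values, positive entries index the next node pair,
+ * and the probability for the split at node i lives at this_probs[i >> 1].
+ * In vp9_mv_joint_tree above, tree[0] = -MV_JOINT_ZERO is a leaf, while
+ * tree[1] = 2 descends to the pair at indices 2 and 3. */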
+
+void vp9_adapt_mv_probs(VP9_COMMON *cm, int allow_hp) {
+ int i, j;
+
+ FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
+
+ nmv_context *ctx = &cm->fc.nmvc;
+ nmv_context *pre_ctx = &pre_fc->nmvc;
+ nmv_context_counts *cts = &cm->counts.mv;
+
+ adapt_probs(0, vp9_mv_joint_tree, ctx->joints, pre_ctx->joints, cts->joints);
+
+ for (i = 0; i < 2; ++i) {
+ ctx->comps[i].sign = adapt_prob(pre_ctx->comps[i].sign, cts->comps[i].sign);
+ adapt_probs(0, vp9_mv_class_tree, ctx->comps[i].classes,
+ pre_ctx->comps[i].classes, cts->comps[i].classes);
+ adapt_probs(0, vp9_mv_class0_tree, ctx->comps[i].class0,
+ pre_ctx->comps[i].class0, cts->comps[i].class0);
+
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ ctx->comps[i].bits[j] = adapt_prob(pre_ctx->comps[i].bits[j],
+ cts->comps[i].bits[j]);
+
+ for (j = 0; j < CLASS0_SIZE; ++j)
+ adapt_probs(0, vp9_mv_fp_tree, ctx->comps[i].class0_fp[j],
+ pre_ctx->comps[i].class0_fp[j], cts->comps[i].class0_fp[j]);
+
+ adapt_probs(0, vp9_mv_fp_tree, ctx->comps[i].fp, pre_ctx->comps[i].fp,
+ cts->comps[i].fp);
+
+ if (allow_hp) {
+ ctx->comps[i].class0_hp = adapt_prob(pre_ctx->comps[i].class0_hp,
+ cts->comps[i].class0_hp);
+ ctx->comps[i].hp = adapt_prob(pre_ctx->comps[i].hp, cts->comps[i].hp);
+ }
+ }
+}
+
+void vp9_entropy_mv_init(void) {
+ vp9_tokens_from_tree(vp9_mv_joint_encodings, vp9_mv_joint_tree);
+ vp9_tokens_from_tree(vp9_mv_class_encodings, vp9_mv_class_tree);
+ vp9_tokens_from_tree(vp9_mv_class0_encodings, vp9_mv_class0_tree);
+ vp9_tokens_from_tree(vp9_mv_fp_encodings, vp9_mv_fp_tree);
+}
+
+void vp9_init_mv_probs(VP9_COMMON *cm) {
+ cm->fc.nmvc = default_nmv_context;
+}
diff --git a/libvpx/vp9/common/vp9_entropymv.h b/libvpx/vp9/common/vp9_entropymv.h
new file mode 100644
index 0000000..a10c933
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropymv.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_ENTROPYMV_H_
+#define VP9_COMMON_VP9_ENTROPYMV_H_
+
+#include "vp9/common/vp9_treecoder.h"
+#include "vpx_config.h"
+#include "vp9/common/vp9_blockd.h"
+
+struct VP9Common;
+
+void vp9_entropy_mv_init(void);
+void vp9_init_mv_probs(struct VP9Common *cm);
+
+void vp9_adapt_mv_probs(struct VP9Common *cm, int usehp);
+int vp9_use_mv_hp(const MV *ref);
+
+#define NMV_UPDATE_PROB 252
+
+/* Symbols for coding which components are zero jointly */
+#define MV_JOINTS 4
+typedef enum {
+ MV_JOINT_ZERO = 0, /* Zero vector */
+ MV_JOINT_HNZVZ = 1, /* Vert zero, hor nonzero */
+ MV_JOINT_HZVNZ = 2, /* Hor zero, vert nonzero */
+ MV_JOINT_HNZVNZ = 3, /* Both components nonzero */
+} MV_JOINT_TYPE;
+
+static INLINE int mv_joint_vertical(MV_JOINT_TYPE type) {
+ return type == MV_JOINT_HZVNZ || type == MV_JOINT_HNZVNZ;
+}
+
+static INLINE int mv_joint_horizontal(MV_JOINT_TYPE type) {
+ return type == MV_JOINT_HNZVZ || type == MV_JOINT_HNZVNZ;
+}
+
+extern const vp9_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2];
+extern struct vp9_token vp9_mv_joint_encodings[MV_JOINTS];
+
+/* Symbols for coding magnitude class of nonzero components */
+#define MV_CLASSES 11
+typedef enum {
+ MV_CLASS_0 = 0, /* (0, 2] integer pel */
+ MV_CLASS_1 = 1, /* (2, 4] integer pel */
+ MV_CLASS_2 = 2, /* (4, 8] integer pel */
+ MV_CLASS_3 = 3, /* (8, 16] integer pel */
+ MV_CLASS_4 = 4, /* (16, 32] integer pel */
+ MV_CLASS_5 = 5, /* (32, 64] integer pel */
+ MV_CLASS_6 = 6, /* (64, 128] integer pel */
+ MV_CLASS_7 = 7, /* (128, 256] integer pel */
+ MV_CLASS_8 = 8, /* (256, 512] integer pel */
+ MV_CLASS_9 = 9, /* (512, 1024] integer pel */
+ MV_CLASS_10 = 10, /* (1024,2048] integer pel */
+} MV_CLASS_TYPE;
+
+extern const vp9_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2];
+extern struct vp9_token vp9_mv_class_encodings[MV_CLASSES];
+
+#define CLASS0_BITS 1 /* bits at integer precision for class 0 */
+#define CLASS0_SIZE (1 << CLASS0_BITS)
+#define MV_OFFSET_BITS (MV_CLASSES + CLASS0_BITS - 2)
+
+#define MV_MAX_BITS (MV_CLASSES + CLASS0_BITS + 2)
+#define MV_MAX ((1 << MV_MAX_BITS) - 1)
+#define MV_VALS ((MV_MAX << 1) + 1)
+
+extern const vp9_tree_index vp9_mv_class0_tree[2 * CLASS0_SIZE - 2];
+extern struct vp9_token vp9_mv_class0_encodings[CLASS0_SIZE];
+
+extern const vp9_tree_index vp9_mv_fp_tree[2 * 4 - 2];
+extern struct vp9_token vp9_mv_fp_encodings[4];
+
+typedef struct {
+ vp9_prob sign;
+ vp9_prob classes[MV_CLASSES - 1];
+ vp9_prob class0[CLASS0_SIZE - 1];
+ vp9_prob bits[MV_OFFSET_BITS];
+ vp9_prob class0_fp[CLASS0_SIZE][4 - 1];
+ vp9_prob fp[4 - 1];
+ vp9_prob class0_hp;
+ vp9_prob hp;
+} nmv_component;
+
+typedef struct {
+ vp9_prob joints[MV_JOINTS - 1];
+ nmv_component comps[2];
+} nmv_context;
+
+static INLINE MV_JOINT_TYPE vp9_get_mv_joint(const MV *mv) {
+ if (mv->row == 0) {
+ return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
+ } else {
+ return mv->col == 0 ? MV_JOINT_HZVNZ : MV_JOINT_HNZVNZ;
+ }
+}
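+
+/* Illustrative examples (row is vertical, col is horizontal):
+ * {0, 0} -> MV_JOINT_ZERO, {0, 5} -> MV_JOINT_HNZVZ,
+ * {5, 0} -> MV_JOINT_HZVNZ, {5, 5} -> MV_JOINT_HNZVNZ. */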
+
+MV_CLASS_TYPE vp9_get_mv_class(int z, int *offset);
+int vp9_get_mv_mag(MV_CLASS_TYPE c, int offset);
+
+
+typedef struct {
+ unsigned int mvcount[MV_VALS];
+ unsigned int sign[2];
+ unsigned int classes[MV_CLASSES];
+ unsigned int class0[CLASS0_SIZE];
+ unsigned int bits[MV_OFFSET_BITS][2];
+ unsigned int class0_fp[CLASS0_SIZE][4];
+ unsigned int fp[4];
+ unsigned int class0_hp[2];
+ unsigned int hp[2];
+} nmv_component_counts;
+
+typedef struct {
+ unsigned int joints[MV_JOINTS];
+ nmv_component_counts comps[2];
+} nmv_context_counts;
+
+void vp9_inc_mv(const MV *mv, nmv_context_counts *mvctx);
+
+#endif // VP9_COMMON_VP9_ENTROPYMV_H_
diff --git a/libvpx/vp9/common/vp9_enums.h b/libvpx/vp9/common/vp9_enums.h
new file mode 100644
index 0000000..1bf0742
--- /dev/null
+++ b/libvpx/vp9/common/vp9_enums.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ENUMS_H_
+#define VP9_COMMON_VP9_ENUMS_H_
+
+#include "./vpx_config.h"
+
+#define MI_SIZE_LOG2 3
+#define MI_BLOCK_SIZE_LOG2 (6 - MI_SIZE_LOG2) // 64 = 2^6
+
+#define MI_SIZE (1 << MI_SIZE_LOG2) // pixels per mi-unit
+#define MI_BLOCK_SIZE (1 << MI_BLOCK_SIZE_LOG2) // mi-units per max block
+
+#define MI_MASK (MI_BLOCK_SIZE - 1)
+
+
+typedef enum BLOCK_SIZE {
+ BLOCK_4X4,
+ BLOCK_4X8,
+ BLOCK_8X4,
+ BLOCK_8X8,
+ BLOCK_8X16,
+ BLOCK_16X8,
+ BLOCK_16X16,
+ BLOCK_16X32,
+ BLOCK_32X16,
+ BLOCK_32X32,
+ BLOCK_32X64,
+ BLOCK_64X32,
+ BLOCK_64X64,
+ BLOCK_SIZES,
+ BLOCK_INVALID = BLOCK_SIZES
+} BLOCK_SIZE;
+
+typedef enum PARTITION_TYPE {
+ PARTITION_NONE,
+ PARTITION_HORZ,
+ PARTITION_VERT,
+ PARTITION_SPLIT,
+ PARTITION_TYPES,
+ PARTITION_INVALID = PARTITION_TYPES
+} PARTITION_TYPE;
+
+#define PARTITION_PLOFFSET 4 // number of probability models per block size
+#define NUM_PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET)
+
+typedef enum {
+ TX_4X4 = 0, // 4x4 dct transform
+ TX_8X8 = 1, // 8x8 dct transform
+ TX_16X16 = 2, // 16x16 dct transform
+ TX_32X32 = 3, // 32x32 dct transform
+ TX_SIZES
+} TX_SIZE;
+
+typedef enum {
+ ONLY_4X4 = 0,
+ ALLOW_8X8 = 1,
+ ALLOW_16X16 = 2,
+ ALLOW_32X32 = 3,
+ TX_MODE_SELECT = 4,
+ TX_MODES = 5,
+} TX_MODE;
+
+typedef enum {
+ DCT_DCT = 0, // DCT in both horizontal and vertical
+ ADST_DCT = 1, // ADST in vertical, DCT in horizontal
+ DCT_ADST = 2, // DCT in vertical, ADST in horizontal
+ ADST_ADST = 3 // ADST in both directions
+} TX_TYPE;
+
+#endif // VP9_COMMON_VP9_ENUMS_H_
diff --git a/libvpx/vp9/common/vp9_extend.c b/libvpx/vp9/common/vp9_extend.c
new file mode 100644
index 0000000..07c68c8
--- /dev/null
+++ b/libvpx/vp9/common/vp9_extend.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_extend.h"
+
+static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
+ uint8_t *dst, int dst_pitch,
+ int w, int h,
+ int extend_top, int extend_left,
+ int extend_bottom, int extend_right) {
+ int i, linesize;
+
+ // copy the left and right most columns out
+ const uint8_t *src_ptr1 = src;
+ const uint8_t *src_ptr2 = src + w - 1;
+ uint8_t *dst_ptr1 = dst - extend_left;
+ uint8_t *dst_ptr2 = dst + w;
+
+ for (i = 0; i < h; i++) {
+ vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
+ vpx_memcpy(dst_ptr1 + extend_left, src_ptr1, w);
+ vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
+ src_ptr1 += src_pitch;
+ src_ptr2 += src_pitch;
+ dst_ptr1 += dst_pitch;
+ dst_ptr2 += dst_pitch;
+ }
+
+ // Now copy the top and bottom lines into each line of the respective
+ // borders
+ src_ptr1 = dst - extend_left;
+ src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
+ dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
+ dst_ptr2 = dst + dst_pitch * (h) - extend_left;
+ linesize = extend_left + extend_right + w;
+
+ for (i = 0; i < extend_top; i++) {
+ vpx_memcpy(dst_ptr1, src_ptr1, linesize);
+ dst_ptr1 += dst_pitch;
+ }
+
+ for (i = 0; i < extend_bottom; i++) {
+ vpx_memcpy(dst_ptr2, src_ptr2, linesize);
+ dst_ptr2 += dst_pitch;
+ }
+}
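+
+/* Layout sketch (hypothetical sizes, not library code): dst must point at
+ * the top-left visible pixel of an allocation that already reserves the
+ * requested borders on every side, e.g.
+ *
+ *   enum { W = 8, H = 8, B = 4 };
+ *   uint8_t buf[(H + 2 * B) * (W + 2 * B)];
+ *   uint8_t *dst = buf + B * (W + 2 * B) + B;
+ *   copy_and_extend_plane(src, src_pitch, dst, W + 2 * B,
+ *                         W, H, B, B, B, B);
+ */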
+
+void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst) {
+ // Extend the src frame in the buffer.
+ // Altref filtering assumes a 16-pixel extension.
+ const int et_y = 16;
+ const int el_y = 16;
+ // Motion estimation may use src block variance with block sizes up
+ // to 64x64, so the right and bottom need to be extended to a multiple
+ // of 64, or by at least 16, whichever is greater. Note the bottom
+ // extension depends on the height and the right one on the width.
+ const int eb_y = MAX(ALIGN_POWER_OF_TWO(src->y_height, 6) - src->y_height,
+ 16);
+ const int er_y = MAX(ALIGN_POWER_OF_TWO(src->y_width, 6) - src->y_width,
+ 16);
+ const int uv_width_subsampling = (src->uv_width != src->y_width);
+ const int uv_height_subsampling = (src->uv_height != src->y_height);
+ const int et_uv = et_y >> uv_height_subsampling;
+ const int el_uv = el_y >> uv_width_subsampling;
+ const int eb_uv = eb_y >> uv_height_subsampling;
+ const int er_uv = er_y >> uv_width_subsampling;
+
+#if CONFIG_ALPHA
+ const int et_a = dst->border >> (dst->alpha_height != dst->y_height);
+ const int el_a = dst->border >> (dst->alpha_width != dst->y_width);
+ const int eb_a = et_a + dst->alpha_height - src->alpha_height;
+ const int er_a = el_a + dst->alpha_width - src->alpha_width;
+
+ copy_and_extend_plane(src->alpha_buffer, src->alpha_stride,
+ dst->alpha_buffer, dst->alpha_stride,
+ src->alpha_width, src->alpha_height,
+ et_a, el_a, eb_a, er_a);
+#endif
+
+ copy_and_extend_plane(src->y_buffer, src->y_stride,
+ dst->y_buffer, dst->y_stride,
+ src->y_width, src->y_height,
+ et_y, el_y, eb_y, er_y);
+
+ copy_and_extend_plane(src->u_buffer, src->uv_stride,
+ dst->u_buffer, dst->uv_stride,
+ src->uv_width, src->uv_height,
+ et_uv, el_uv, eb_uv, er_uv);
+
+ copy_and_extend_plane(src->v_buffer, src->uv_stride,
+ dst->v_buffer, dst->uv_stride,
+ src->uv_width, src->uv_height,
+ et_uv, el_uv, eb_uv, er_uv);
+}
+
+void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst,
+ int srcy, int srcx,
+ int srch, int srcw) {
+ // If the side is not touching the border, don't extend.
+ const int et_y = srcy ? 0 : dst->border;
+ const int el_y = srcx ? 0 : dst->border;
+ const int eb_y = srcy + srch != src->y_height ? 0 :
+ dst->border + dst->y_height - src->y_height;
+ const int er_y = srcx + srcw != src->y_width ? 0 :
+ dst->border + dst->y_width - src->y_width;
+ const int src_y_offset = srcy * src->y_stride + srcx;
+ const int dst_y_offset = srcy * dst->y_stride + srcx;
+
+ const int et_uv = ROUND_POWER_OF_TWO(et_y, 1);
+ const int el_uv = ROUND_POWER_OF_TWO(el_y, 1);
+ const int eb_uv = ROUND_POWER_OF_TWO(eb_y, 1);
+ const int er_uv = ROUND_POWER_OF_TWO(er_y, 1);
+ const int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1);
+ const int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1);
+ const int srch_uv = ROUND_POWER_OF_TWO(srch, 1);
+ const int srcw_uv = ROUND_POWER_OF_TWO(srcw, 1);
+
+ copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride,
+ dst->y_buffer + dst_y_offset, dst->y_stride,
+ srcw, srch,
+ et_y, el_y, eb_y, er_y);
+
+ copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride,
+ dst->u_buffer + dst_uv_offset, dst->uv_stride,
+ srcw_uv, srch_uv,
+ et_uv, el_uv, eb_uv, er_uv);
+
+ copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride,
+ dst->v_buffer + dst_uv_offset, dst->uv_stride,
+ srcw_uv, srch_uv,
+ et_uv, el_uv, eb_uv, er_uv);
+}
diff --git a/libvpx/vp9/common/vp9_extend.h b/libvpx/vp9/common/vp9_extend.h
new file mode 100644
index 0000000..7ff79b7
--- /dev/null
+++ b/libvpx/vp9/common/vp9_extend.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_EXTEND_H_
+#define VP9_COMMON_VP9_EXTEND_H_
+
+#include "vpx_scale/yv12config.h"
+#include "vpx/vpx_integer.h"
+
+
+void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst);
+
+void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst,
+ int srcy, int srcx,
+ int srch, int srcw);
+#endif // VP9_COMMON_VP9_EXTEND_H_
diff --git a/libvpx/vp9/common/vp9_filter.c b/libvpx/vp9/common/vp9_filter.c
new file mode 100644
index 0000000..4ac2bc9
--- /dev/null
+++ b/libvpx/vp9/common/vp9_filter.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_ports/mem.h"
+
+#include "vp9/common/vp9_filter.h"
+
+DECLARE_ALIGNED(256, const int16_t,
+ vp9_bilinear_filters[SUBPEL_SHIFTS][SUBPEL_TAPS]) = {
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { 0, 0, 0, 120, 8, 0, 0, 0 },
+ { 0, 0, 0, 112, 16, 0, 0, 0 },
+ { 0, 0, 0, 104, 24, 0, 0, 0 },
+ { 0, 0, 0, 96, 32, 0, 0, 0 },
+ { 0, 0, 0, 88, 40, 0, 0, 0 },
+ { 0, 0, 0, 80, 48, 0, 0, 0 },
+ { 0, 0, 0, 72, 56, 0, 0, 0 },
+ { 0, 0, 0, 64, 64, 0, 0, 0 },
+ { 0, 0, 0, 56, 72, 0, 0, 0 },
+ { 0, 0, 0, 48, 80, 0, 0, 0 },
+ { 0, 0, 0, 40, 88, 0, 0, 0 },
+ { 0, 0, 0, 32, 96, 0, 0, 0 },
+ { 0, 0, 0, 24, 104, 0, 0, 0 },
+ { 0, 0, 0, 16, 112, 0, 0, 0 },
+ { 0, 0, 0, 8, 120, 0, 0, 0 }
+};
+
+// Lagrangian interpolation filter
+DECLARE_ALIGNED(256, const int16_t,
+ vp9_sub_pel_filters_8[SUBPEL_SHIFTS][SUBPEL_TAPS]) = {
+ { 0, 0, 0, 128, 0, 0, 0, 0},
+ { 0, 1, -5, 126, 8, -3, 1, 0},
+ { -1, 3, -10, 122, 18, -6, 2, 0},
+ { -1, 4, -13, 118, 27, -9, 3, -1},
+ { -1, 4, -16, 112, 37, -11, 4, -1},
+ { -1, 5, -18, 105, 48, -14, 4, -1},
+ { -1, 5, -19, 97, 58, -16, 5, -1},
+ { -1, 6, -19, 88, 68, -18, 5, -1},
+ { -1, 6, -19, 78, 78, -19, 6, -1},
+ { -1, 5, -18, 68, 88, -19, 6, -1},
+ { -1, 5, -16, 58, 97, -19, 5, -1},
+ { -1, 4, -14, 48, 105, -18, 5, -1},
+ { -1, 4, -11, 37, 112, -16, 4, -1},
+ { -1, 3, -9, 27, 118, -13, 4, -1},
+ { 0, 2, -6, 18, 122, -10, 3, -1},
+ { 0, 1, -3, 8, 126, -5, 1, 0}
+};
+
+// DCT based filter
+DECLARE_ALIGNED(256, const int16_t,
+ vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][SUBPEL_TAPS]) = {
+ {0, 0, 0, 128, 0, 0, 0, 0},
+ {-1, 3, -7, 127, 8, -3, 1, 0},
+ {-2, 5, -13, 125, 17, -6, 3, -1},
+ {-3, 7, -17, 121, 27, -10, 5, -2},
+ {-4, 9, -20, 115, 37, -13, 6, -2},
+ {-4, 10, -23, 108, 48, -16, 8, -3},
+ {-4, 10, -24, 100, 59, -19, 9, -3},
+ {-4, 11, -24, 90, 70, -21, 10, -4},
+ {-4, 11, -23, 80, 80, -23, 11, -4},
+ {-4, 10, -21, 70, 90, -24, 11, -4},
+ {-3, 9, -19, 59, 100, -24, 10, -4},
+ {-3, 8, -16, 48, 108, -23, 10, -4},
+ {-2, 6, -13, 37, 115, -20, 9, -4},
+ {-2, 5, -10, 27, 121, -17, 7, -3},
+ {-1, 3, -6, 17, 125, -13, 5, -2},
+ {0, 1, -3, 8, 127, -7, 3, -1}
+};
+
+// freqmultiplier = 0.5
+DECLARE_ALIGNED(256, const int16_t,
+ vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS][SUBPEL_TAPS]) = {
+ { 0, 0, 0, 128, 0, 0, 0, 0},
+ {-3, -1, 32, 64, 38, 1, -3, 0},
+ {-2, -2, 29, 63, 41, 2, -3, 0},
+ {-2, -2, 26, 63, 43, 4, -4, 0},
+ {-2, -3, 24, 62, 46, 5, -4, 0},
+ {-2, -3, 21, 60, 49, 7, -4, 0},
+ {-1, -4, 18, 59, 51, 9, -4, 0},
+ {-1, -4, 16, 57, 53, 12, -4, -1},
+ {-1, -4, 14, 55, 55, 14, -4, -1},
+ {-1, -4, 12, 53, 57, 16, -4, -1},
+ { 0, -4, 9, 51, 59, 18, -4, -1},
+ { 0, -4, 7, 49, 60, 21, -3, -2},
+ { 0, -4, 5, 46, 62, 24, -3, -2},
+ { 0, -4, 4, 43, 63, 26, -2, -2},
+ { 0, -3, 2, 41, 63, 29, -2, -2},
+ { 0, -3, 1, 38, 64, 32, -1, -3}
+};
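+
+/* Invariant check (illustrative sketch, not library code): every kernel row
+ * above sums to 128, i.e. the filters carry 7 fractional bits, matching the
+ * convolution's final round-and-shift by 7. */
+#include <assert.h>
+static void filter_normalization_check(void) {
+ int i, j;
+ for (i = 0; i < SUBPEL_SHIFTS; ++i) {
+ int sum = 0;
+ for (j = 0; j < SUBPEL_TAPS; ++j)
+ sum += vp9_sub_pel_filters_8[i][j];
+ assert(sum == 128);
+ }
+}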
diff --git a/libvpx/vp9/common/vp9_filter.h b/libvpx/vp9/common/vp9_filter.h
new file mode 100644
index 0000000..7b1ffae
--- /dev/null
+++ b/libvpx/vp9/common/vp9_filter.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_FILTER_H_
+#define VP9_COMMON_VP9_FILTER_H_
+
+#include "vpx_config.h"
+#include "vpx/vpx_integer.h"
+
+#define SUBPEL_BITS 4
+#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
+#define SUBPEL_SHIFTS (1 << SUBPEL_BITS)
+#define SUBPEL_TAPS 8
+
+extern const int16_t vp9_bilinear_filters[SUBPEL_SHIFTS][SUBPEL_TAPS];
+extern const int16_t vp9_sub_pel_filters_6[SUBPEL_SHIFTS][SUBPEL_TAPS];
+extern const int16_t vp9_sub_pel_filters_8[SUBPEL_SHIFTS][SUBPEL_TAPS];
+extern const int16_t vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][SUBPEL_TAPS];
+extern const int16_t vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS][SUBPEL_TAPS];
+
+// The BILINEAR_FILTERS_2TAP macro returns a pointer to the two middle taps
+// of the bilinear filter kernel, so it can be applied as a 2-tap filter.
+#define BILINEAR_FILTERS_2TAP(x) \
+ (vp9_bilinear_filters[(x)] + SUBPEL_TAPS/2 - 1)
+
+#endif // VP9_COMMON_VP9_FILTER_H_
diff --git a/libvpx/vp9/common/vp9_findnearmv.c b/libvpx/vp9/common/vp9_findnearmv.c
new file mode 100644
index 0000000..49a731f
--- /dev/null
+++ b/libvpx/vp9/common/vp9_findnearmv.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_mvref_common.h"
+
+static void lower_mv_precision(MV *mv, int allow_hp) {
+ const int use_hp = allow_hp && vp9_use_mv_hp(mv);
+ if (!use_hp) {
+ if (mv->row & 1)
+ mv->row += (mv->row > 0 ? -1 : 1);
+ if (mv->col & 1)
+ mv->col += (mv->col > 0 ? -1 : 1);
+ }
+}
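+
+/* Worked example: with high precision disallowed, an odd (eighth-pel)
+ * component is pulled one unit toward zero onto the quarter-pel grid:
+ * row = 5 -> 4, row = -3 -> -2; even components pass through unchanged. */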
+
+
+void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
+ int_mv *mvlist,
+ int_mv *nearest,
+ int_mv *near) {
+ int i;
+ // Make sure all the candidates are properly clamped, etc.
+ for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
+ lower_mv_precision(&mvlist[i].as_mv, xd->allow_high_precision_mv);
+ clamp_mv2(&mvlist[i].as_mv, xd);
+ }
+ *nearest = mvlist[0];
+ *near = mvlist[1];
+}
+
+void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int_mv *dst_nearest,
+ int_mv *dst_near,
+ int block_idx, int ref_idx,
+ int mi_row, int mi_col) {
+ int_mv dst_list[MAX_MV_REF_CANDIDATES];
+ int_mv mv_list[MAX_MV_REF_CANDIDATES];
+ MODE_INFO *const mi = xd->this_mi;
+
+ assert(ref_idx == 0 || ref_idx == 1);
+ assert(MAX_MV_REF_CANDIDATES == 2); // makes code here slightly easier
+
+ vp9_find_mv_refs_idx(cm, xd, mi, xd->last_mi,
+ mi->mbmi.ref_frame[ref_idx],
+ mv_list, block_idx, mi_row, mi_col);
+
+ dst_list[1].as_int = 0;
+ if (block_idx == 0) {
+ memcpy(dst_list, mv_list, MAX_MV_REF_CANDIDATES * sizeof(int_mv));
+ } else if (block_idx == 1 || block_idx == 2) {
+ int dst = 0, n;
+ union b_mode_info *bmi = mi->bmi;
+
+ dst_list[dst++].as_int = bmi[0].as_mv[ref_idx].as_int;
+ for (n = 0; dst < MAX_MV_REF_CANDIDATES &&
+ n < MAX_MV_REF_CANDIDATES; n++)
+ if (mv_list[n].as_int != dst_list[0].as_int)
+ dst_list[dst++].as_int = mv_list[n].as_int;
+ } else {
+ int dst = 0, n;
+ union b_mode_info *bmi = mi->bmi;
+
+ assert(block_idx == 3);
+ dst_list[dst++].as_int = bmi[2].as_mv[ref_idx].as_int;
+ if (dst_list[0].as_int != bmi[1].as_mv[ref_idx].as_int)
+ dst_list[dst++].as_int = bmi[1].as_mv[ref_idx].as_int;
+ if (dst < MAX_MV_REF_CANDIDATES &&
+ dst_list[0].as_int != bmi[0].as_mv[ref_idx].as_int)
+ dst_list[dst++].as_int = bmi[0].as_mv[ref_idx].as_int;
+ for (n = 0; dst < MAX_MV_REF_CANDIDATES &&
+ n < MAX_MV_REF_CANDIDATES; n++)
+ if (mv_list[n].as_int != dst_list[0].as_int)
+ dst_list[dst++].as_int = mv_list[n].as_int;
+ }
+
+ dst_nearest->as_int = dst_list[0].as_int;
+ dst_near->as_int = dst_list[1].as_int;
+}
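+
+/* Orientation note for the indexing above: the four sub-8x8 blocks sit in
+ * raster order within the 8x8 unit,
+ *   +---+---+
+ *   | 0 | 1 |
+ *   +---+---+
+ *   | 2 | 3 |
+ *   +---+---+
+ * so blocks 1 and 2 seed their candidate list with block 0's MV, and
+ * block 3 with the MVs of blocks 2, 1 and 0, exactly as the branches
+ * above do. */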
diff --git a/libvpx/vp9/common/vp9_findnearmv.h b/libvpx/vp9/common/vp9_findnearmv.h
new file mode 100644
index 0000000..ad0d882
--- /dev/null
+++ b/libvpx/vp9/common/vp9_findnearmv.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_FINDNEARMV_H_
+#define VP9_COMMON_VP9_FINDNEARMV_H_
+
+#include "vp9/common/vp9_mv.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_treecoder.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+#define LEFT_TOP_MARGIN ((VP9BORDERINPIXELS - VP9_INTERP_EXTEND) << 3)
+#define RIGHT_BOTTOM_MARGIN ((VP9BORDERINPIXELS - VP9_INTERP_EXTEND) << 3)
+
+// Check a list of motion vectors by SAD score, using a number of rows of
+// pixels above and a number of columns of pixels to the left, to select
+// the one with the best score to use as the reference motion vector.
+void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
+ int_mv *mvlist,
+ int_mv *nearest,
+ int_mv *near);
+
+// TODO(jingning): this mv clamping function should be block size dependent.
+static void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
+ clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN,
+ xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
+ xd->mb_to_top_edge - LEFT_TOP_MARGIN,
+ xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
+}
+
+void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm,
+ MACROBLOCKD *xd,
+ int_mv *dst_nearest,
+ int_mv *dst_near,
+ int block_idx, int ref_idx,
+ int mi_row, int mi_col);
+
+static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb,
+ const MODE_INFO *left_mb, int b) {
+ // FIXME(rbultje, jingning): temporary hack because jenkins doesn't
+ // understand this condition. This will go away soon.
+ const MODE_INFO *mi = cur_mb;
+
+ if (b == 0 || b == 2) {
+ /* On L edge, get from MB to left of us */
+ mi = left_mb;
+ if (!mi)
+ return DC_PRED;
+
+ if (mi->mbmi.ref_frame[0] != INTRA_FRAME) {
+ return DC_PRED;
+ } else if (mi->mbmi.sb_type < BLOCK_8X8) {
+ return ((mi->bmi + 1 + b)->as_mode);
+ } else {
+ return mi->mbmi.mode;
+ }
+ }
+ assert(b == 1 || b == 3);
+ return (mi->bmi + b - 1)->as_mode;
+}
+
+static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb,
+ const MODE_INFO *above_mb, int b) {
+ const MODE_INFO *mi = cur_mb;
+
+ if (!(b >> 1)) {
+ /* On top edge, get from MB above us */
+ mi = above_mb;
+ if (!mi)
+ return DC_PRED;
+
+ if (mi->mbmi.ref_frame[0] != INTRA_FRAME) {
+ return DC_PRED;
+ } else if (mi->mbmi.sb_type < BLOCK_8X8) {
+ return ((mi->bmi + 2 + b)->as_mode);
+ } else {
+ return mi->mbmi.mode;
+ }
+ }
+
+ return (mi->bmi + b - 2)->as_mode;
+}
+
+#endif // VP9_COMMON_VP9_FINDNEARMV_H_
diff --git a/libvpx/vp9/common/vp9_idct.c b/libvpx/vp9/common/vp9_idct.c
new file mode 100644
index 0000000..a224525
--- /dev/null
+++ b/libvpx/vp9/common/vp9_idct.c
@@ -0,0 +1,1276 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <math.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_idct.h"
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+  /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
+     0.5 shifts per pixel. */
+ int i;
+ int16_t output[16];
+ int a1, b1, c1, d1, e1;
+ int16_t *ip = input;
+ int16_t *op = output;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0] >> WHT_UPSCALE_FACTOR;
+ c1 = ip[1] >> WHT_UPSCALE_FACTOR;
+ d1 = ip[2] >> WHT_UPSCALE_FACTOR;
+ b1 = ip[3] >> WHT_UPSCALE_FACTOR;
+ a1 += c1;
+ d1 -= b1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= b1;
+ d1 += c1;
+ op[0] = a1;
+ op[1] = b1;
+ op[2] = c1;
+ op[3] = d1;
+ ip += 4;
+ op += 4;
+ }
+
+ ip = output;
+ for (i = 0; i < 4; i++) {
+ a1 = ip[4 * 0];
+ c1 = ip[4 * 1];
+ d1 = ip[4 * 2];
+ b1 = ip[4 * 3];
+ a1 += c1;
+ d1 -= b1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= b1;
+ d1 += c1;
+ dest[dest_stride * 0] = clip_pixel(dest[dest_stride * 0] + a1);
+ dest[dest_stride * 1] = clip_pixel(dest[dest_stride * 1] + b1);
+ dest[dest_stride * 2] = clip_pixel(dest[dest_stride * 2] + c1);
+ dest[dest_stride * 3] = clip_pixel(dest[dest_stride * 3] + d1);
+
+ ip++;
+ dest++;
+ }
+}
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *in, uint8_t *dest, int dest_stride) {
+ int i;
+ int a1, e1;
+ int16_t tmp[4];
+ int16_t *ip = in;
+ int16_t *op = tmp;
+
+ a1 = ip[0] >> WHT_UPSCALE_FACTOR;
+ e1 = a1 >> 1;
+ a1 -= e1;
+ op[0] = a1;
+ op[1] = op[2] = op[3] = e1;
+
+ ip = tmp;
+ for (i = 0; i < 4; i++) {
+ e1 = ip[0] >> 1;
+ a1 = ip[0] - e1;
+ dest[dest_stride * 0] = clip_pixel(dest[dest_stride * 0] + a1);
+ dest[dest_stride * 1] = clip_pixel(dest[dest_stride * 1] + e1);
+ dest[dest_stride * 2] = clip_pixel(dest[dest_stride * 2] + e1);
+ dest[dest_stride * 3] = clip_pixel(dest[dest_stride * 3] + e1);
+ ip++;
+ dest++;
+ }
+}
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output) {
+ int16_t step[4];
+ int temp1, temp2;
+ // stage 1
+ temp1 = (input[0] + input[2]) * cospi_16_64;
+ temp2 = (input[0] - input[2]) * cospi_16_64;
+ step[0] = dct_const_round_shift(temp1);
+ step[1] = dct_const_round_shift(temp2);
+ temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
+ temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
+ step[2] = dct_const_round_shift(temp1);
+ step[3] = dct_const_round_shift(temp2);
+
+ // stage 2
+ output[0] = step[0] + step[3];
+ output[1] = step[1] + step[2];
+ output[2] = step[1] - step[2];
+ output[3] = step[0] - step[3];
+}
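+
+// For illustration: with a DC-only input {d, 0, 0, 0}, stage 1 gives
+// step[0] = step[1] = dct_const_round_shift(d * cospi_16_64), roughly
+// 0.7071 * d, and step[2] = step[3] = 0, so all four outputs carry the
+// same scaled DC value -- the shortcut vp9_short_idct4x4_1_add_c() uses.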
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int16_t out[4 * 4];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[4], temp_out[4];
+
+ // Rows
+ for (i = 0; i < 4; ++i) {
+ vp9_idct4_1d(input, outptr);
+ input += 4;
+ outptr += 4;
+ }
+
+ // Columns
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ temp_in[j] = out[j * 4 + i];
+ vp9_idct4_1d(temp_in, temp_out);
+ for (j = 0; j < 4; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4)
+ + dest[j * dest_stride + i]);
+ }
+}
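+
+// A minimal usage sketch (buffer names here are hypothetical): given 16
+// dequantized coefficients in raster order and the 4x4 predictor block
+// they belong to,
+//   vp9_short_idct4x4_add_c(coeffs, pred, pred_stride);
+// inverse transforms the residual and adds it into pred in place.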
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int i;
+ int a1;
+ int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ a1 = ROUND_POWER_OF_TWO(out, 4);
+
+ for (i = 0; i < 4; i++) {
+ dest[0] = clip_pixel(dest[0] + a1);
+ dest[1] = clip_pixel(dest[1] + a1);
+ dest[2] = clip_pixel(dest[2] + a1);
+ dest[3] = clip_pixel(dest[3] + a1);
+ dest += dest_stride;
+ }
+}
+
+static void idct8_1d(int16_t *input, int16_t *output) {
+ int16_t step1[8], step2[8];
+ int temp1, temp2;
+ // stage 1
+ step1[0] = input[0];
+ step1[2] = input[4];
+ step1[1] = input[2];
+ step1[3] = input[6];
+ temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
+ temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
+ step1[4] = dct_const_round_shift(temp1);
+ step1[7] = dct_const_round_shift(temp2);
+ temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
+ temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+
+ // stage 2 & stage 3 - even half
+ vp9_idct4_1d(step1, step1);
+
+ // stage 2 - odd half
+ step2[4] = step1[4] + step1[5];
+ step2[5] = step1[4] - step1[5];
+ step2[6] = -step1[6] + step1[7];
+ step2[7] = step1[6] + step1[7];
+
+  // stage 3 - odd half
+ step1[4] = step2[4];
+ temp1 = (step2[6] - step2[5]) * cospi_16_64;
+ temp2 = (step2[5] + step2[6]) * cospi_16_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+ step1[7] = step2[7];
+
+ // stage 4
+ output[0] = step1[0] + step1[7];
+ output[1] = step1[1] + step1[6];
+ output[2] = step1[2] + step1[5];
+ output[3] = step1[3] + step1[4];
+ output[4] = step1[3] - step1[4];
+ output[5] = step1[2] - step1[5];
+ output[6] = step1[1] - step1[6];
+ output[7] = step1[0] - step1[7];
+}
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int16_t out[8 * 8];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[8], temp_out[8];
+
+ // First transform rows
+ for (i = 0; i < 8; ++i) {
+ idct8_1d(input, outptr);
+ input += 8;
+ outptr += 8;
+ }
+
+ // Then transform columns
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = out[j * 8 + i];
+ idct8_1d(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+void vp9_short_idct8x8_1_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int i, j;
+ int a1;
+ int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ a1 = ROUND_POWER_OF_TWO(out, 5);
+ for (j = 0; j < 8; ++j) {
+ for (i = 0; i < 8; ++i)
+ dest[i] = clip_pixel(dest[i] + a1);
+ dest += dest_stride;
+ }
+}
+
+static void iadst4_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7;
+
+ int x0 = input[0];
+ int x1 = input[1];
+ int x2 = input[2];
+ int x3 = input[3];
+
+ if (!(x0 | x1 | x2 | x3)) {
+ output[0] = output[1] = output[2] = output[3] = 0;
+ return;
+ }
+
+ s0 = sinpi_1_9 * x0;
+ s1 = sinpi_2_9 * x0;
+ s2 = sinpi_3_9 * x1;
+ s3 = sinpi_4_9 * x2;
+ s4 = sinpi_1_9 * x2;
+ s5 = sinpi_2_9 * x3;
+ s6 = sinpi_4_9 * x3;
+ s7 = x0 - x2 + x3;
+
+ x0 = s0 + s3 + s5;
+ x1 = s1 - s4 - s6;
+ x2 = sinpi_3_9 * s7;
+ x3 = s2;
+
+ s0 = x0 + x3;
+ s1 = x1 + x3;
+ s2 = x2;
+ s3 = x0 + x1 - x3;
+
+ // 1-D transform scaling factor is sqrt(2).
+ // The overall dynamic range is 14b (input) + 14b (multiplication scaling)
+ // + 1b (addition) = 29b.
+ // Hence the output bit depth is 15b.
+ output[0] = dct_const_round_shift(s0);
+ output[1] = dct_const_round_shift(s1);
+ output[2] = dct_const_round_shift(s2);
+ output[3] = dct_const_round_shift(s3);
+}
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride,
+ int tx_type) {
+ const transform_2d IHT_4[] = {
+ { vp9_idct4_1d, vp9_idct4_1d }, // DCT_DCT = 0
+ { iadst4_1d, vp9_idct4_1d }, // ADST_DCT = 1
+ { vp9_idct4_1d, iadst4_1d }, // DCT_ADST = 2
+ { iadst4_1d, iadst4_1d } // ADST_ADST = 3
+ };
+
+ int i, j;
+ int16_t out[4 * 4];
+ int16_t *outptr = out;
+ int16_t temp_in[4], temp_out[4];
+
+ // inverse transform row vectors
+ for (i = 0; i < 4; ++i) {
+ IHT_4[tx_type].rows(input, outptr);
+ input += 4;
+ outptr += 4;
+ }
+
+ // inverse transform column vectors
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ temp_in[j] = out[j * 4 + i];
+ IHT_4[tx_type].cols(temp_in, temp_out);
+ for (j = 0; j < 4; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+static void iadst8_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7;
+
+ int x0 = input[7];
+ int x1 = input[0];
+ int x2 = input[5];
+ int x3 = input[2];
+ int x4 = input[3];
+ int x5 = input[4];
+ int x6 = input[1];
+ int x7 = input[6];
+
+ if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
+ output[0] = output[1] = output[2] = output[3] = output[4]
+ = output[5] = output[6] = output[7] = 0;
+ return;
+ }
+
+ // stage 1
+ s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+ s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+ s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+ s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+ s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+
+ x0 = dct_const_round_shift(s0 + s4);
+ x1 = dct_const_round_shift(s1 + s5);
+ x2 = dct_const_round_shift(s2 + s6);
+ x3 = dct_const_round_shift(s3 + s7);
+ x4 = dct_const_round_shift(s0 - s4);
+ x5 = dct_const_round_shift(s1 - s5);
+ x6 = dct_const_round_shift(s2 - s6);
+ x7 = dct_const_round_shift(s3 - s7);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+ s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+
+ x0 = s0 + s2;
+ x1 = s1 + s3;
+ x2 = s0 - s2;
+ x3 = s1 - s3;
+ x4 = dct_const_round_shift(s4 + s6);
+ x5 = dct_const_round_shift(s5 + s7);
+ x6 = dct_const_round_shift(s4 - s6);
+ x7 = dct_const_round_shift(s5 - s7);
+
+ // stage 3
+ s2 = cospi_16_64 * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (x6 - x7);
+
+ x2 = dct_const_round_shift(s2);
+ x3 = dct_const_round_shift(s3);
+ x6 = dct_const_round_shift(s6);
+ x7 = dct_const_round_shift(s7);
+
+ output[0] = x0;
+ output[1] = -x4;
+ output[2] = x6;
+ output[3] = -x2;
+ output[4] = x3;
+ output[5] = -x7;
+ output[6] = x5;
+ output[7] = -x1;
+}
+
+static const transform_2d IHT_8[] = {
+ { idct8_1d, idct8_1d }, // DCT_DCT = 0
+ { iadst8_1d, idct8_1d }, // ADST_DCT = 1
+ { idct8_1d, iadst8_1d }, // DCT_ADST = 2
+ { iadst8_1d, iadst8_1d } // ADST_ADST = 3
+};
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride,
+ int tx_type) {
+ int i, j;
+ int16_t out[8 * 8];
+ int16_t *outptr = out;
+ int16_t temp_in[8], temp_out[8];
+ const transform_2d ht = IHT_8[tx_type];
+
+ // inverse transform row vectors
+ for (i = 0; i < 8; ++i) {
+ ht.rows(input, outptr);
+ input += 8;
+ outptr += 8;
+ }
+
+ // inverse transform column vectors
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = out[j * 8 + i];
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+      dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5)
+                                             + dest[j * dest_stride + i]);
+  }
+}
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest,
+ int dest_stride) {
+ int16_t out[8 * 8] = { 0 };
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[8], temp_out[8];
+
+ // First transform rows
+  // Only the first 4 rows have non-zero coefficients.
+ for (i = 0; i < 4; ++i) {
+ idct8_1d(input, outptr);
+ input += 8;
+ outptr += 8;
+ }
+
+ // Then transform columns
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = out[j * 8 + i];
+ idct8_1d(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+static void idct16_1d(int16_t *input, int16_t *output) {
+ int16_t step1[16], step2[16];
+ int temp1, temp2;
+
+ // stage 1
+ step1[0] = input[0/2];
+ step1[1] = input[16/2];
+ step1[2] = input[8/2];
+ step1[3] = input[24/2];
+ step1[4] = input[4/2];
+ step1[5] = input[20/2];
+ step1[6] = input[12/2];
+ step1[7] = input[28/2];
+ step1[8] = input[2/2];
+ step1[9] = input[18/2];
+ step1[10] = input[10/2];
+ step1[11] = input[26/2];
+ step1[12] = input[6/2];
+ step1[13] = input[22/2];
+ step1[14] = input[14/2];
+ step1[15] = input[30/2];
+
+ // stage 2
+ step2[0] = step1[0];
+ step2[1] = step1[1];
+ step2[2] = step1[2];
+ step2[3] = step1[3];
+ step2[4] = step1[4];
+ step2[5] = step1[5];
+ step2[6] = step1[6];
+ step2[7] = step1[7];
+
+ temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
+ temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+ step2[8] = dct_const_round_shift(temp1);
+ step2[15] = dct_const_round_shift(temp2);
+
+ temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
+ temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+ step2[9] = dct_const_round_shift(temp1);
+ step2[14] = dct_const_round_shift(temp2);
+
+ temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
+ temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+
+ temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
+ temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+ step2[11] = dct_const_round_shift(temp1);
+ step2[12] = dct_const_round_shift(temp2);
+
+ // stage 3
+ step1[0] = step2[0];
+ step1[1] = step2[1];
+ step1[2] = step2[2];
+ step1[3] = step2[3];
+
+ temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
+ temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+ step1[4] = dct_const_round_shift(temp1);
+ step1[7] = dct_const_round_shift(temp2);
+ temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
+ temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+
+ step1[8] = step2[8] + step2[9];
+ step1[9] = step2[8] - step2[9];
+ step1[10] = -step2[10] + step2[11];
+ step1[11] = step2[10] + step2[11];
+ step1[12] = step2[12] + step2[13];
+ step1[13] = step2[12] - step2[13];
+ step1[14] = -step2[14] + step2[15];
+ step1[15] = step2[14] + step2[15];
+
+ // stage 4
+ temp1 = (step1[0] + step1[1]) * cospi_16_64;
+ temp2 = (step1[0] - step1[1]) * cospi_16_64;
+ step2[0] = dct_const_round_shift(temp1);
+ step2[1] = dct_const_round_shift(temp2);
+ temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
+ temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+ step2[2] = dct_const_round_shift(temp1);
+ step2[3] = dct_const_round_shift(temp2);
+ step2[4] = step1[4] + step1[5];
+ step2[5] = step1[4] - step1[5];
+ step2[6] = -step1[6] + step1[7];
+ step2[7] = step1[6] + step1[7];
+
+ step2[8] = step1[8];
+ step2[15] = step1[15];
+ temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
+ temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+ step2[9] = dct_const_round_shift(temp1);
+ step2[14] = dct_const_round_shift(temp2);
+ temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
+ temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+ step2[11] = step1[11];
+ step2[12] = step1[12];
+
+ // stage 5
+ step1[0] = step2[0] + step2[3];
+ step1[1] = step2[1] + step2[2];
+ step1[2] = step2[1] - step2[2];
+ step1[3] = step2[0] - step2[3];
+ step1[4] = step2[4];
+ temp1 = (step2[6] - step2[5]) * cospi_16_64;
+ temp2 = (step2[5] + step2[6]) * cospi_16_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+ step1[7] = step2[7];
+
+ step1[8] = step2[8] + step2[11];
+ step1[9] = step2[9] + step2[10];
+ step1[10] = step2[9] - step2[10];
+ step1[11] = step2[8] - step2[11];
+ step1[12] = -step2[12] + step2[15];
+ step1[13] = -step2[13] + step2[14];
+ step1[14] = step2[13] + step2[14];
+ step1[15] = step2[12] + step2[15];
+
+ // stage 6
+ step2[0] = step1[0] + step1[7];
+ step2[1] = step1[1] + step1[6];
+ step2[2] = step1[2] + step1[5];
+ step2[3] = step1[3] + step1[4];
+ step2[4] = step1[3] - step1[4];
+ step2[5] = step1[2] - step1[5];
+ step2[6] = step1[1] - step1[6];
+ step2[7] = step1[0] - step1[7];
+ step2[8] = step1[8];
+ step2[9] = step1[9];
+ temp1 = (-step1[10] + step1[13]) * cospi_16_64;
+ temp2 = (step1[10] + step1[13]) * cospi_16_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+ temp1 = (-step1[11] + step1[12]) * cospi_16_64;
+ temp2 = (step1[11] + step1[12]) * cospi_16_64;
+ step2[11] = dct_const_round_shift(temp1);
+ step2[12] = dct_const_round_shift(temp2);
+ step2[14] = step1[14];
+ step2[15] = step1[15];
+
+ // stage 7
+ output[0] = step2[0] + step2[15];
+ output[1] = step2[1] + step2[14];
+ output[2] = step2[2] + step2[13];
+ output[3] = step2[3] + step2[12];
+ output[4] = step2[4] + step2[11];
+ output[5] = step2[5] + step2[10];
+ output[6] = step2[6] + step2[9];
+ output[7] = step2[7] + step2[8];
+ output[8] = step2[7] - step2[8];
+ output[9] = step2[6] - step2[9];
+ output[10] = step2[5] - step2[10];
+ output[11] = step2[4] - step2[11];
+ output[12] = step2[3] - step2[12];
+ output[13] = step2[2] - step2[13];
+ output[14] = step2[1] - step2[14];
+ output[15] = step2[0] - step2[15];
+}
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int16_t out[16 * 16];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[16], temp_out[16];
+
+ // First transform rows
+ for (i = 0; i < 16; ++i) {
+ idct16_1d(input, outptr);
+ input += 16;
+ outptr += 16;
+ }
+
+ // Then transform columns
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = out[j * 16 + i];
+ idct16_1d(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+void iadst16_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
+
+ int x0 = input[15];
+ int x1 = input[0];
+ int x2 = input[13];
+ int x3 = input[2];
+ int x4 = input[11];
+ int x5 = input[4];
+ int x6 = input[9];
+ int x7 = input[6];
+ int x8 = input[7];
+ int x9 = input[8];
+ int x10 = input[5];
+ int x11 = input[10];
+ int x12 = input[3];
+ int x13 = input[12];
+ int x14 = input[1];
+ int x15 = input[14];
+
+ if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
+ | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
+ output[0] = output[1] = output[2] = output[3] = output[4]
+ = output[5] = output[6] = output[7] = output[8]
+ = output[9] = output[10] = output[11] = output[12]
+ = output[13] = output[14] = output[15] = 0;
+ return;
+ }
+
+ // stage 1
+ s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+ s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
+ s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+ s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
+ s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+ s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
+ s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
+ s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
+ s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
+ s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
+ s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
+ s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
+ s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
+ s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+ s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
+ s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+
+ x0 = dct_const_round_shift(s0 + s8);
+ x1 = dct_const_round_shift(s1 + s9);
+ x2 = dct_const_round_shift(s2 + s10);
+ x3 = dct_const_round_shift(s3 + s11);
+ x4 = dct_const_round_shift(s4 + s12);
+ x5 = dct_const_round_shift(s5 + s13);
+ x6 = dct_const_round_shift(s6 + s14);
+ x7 = dct_const_round_shift(s7 + s15);
+ x8 = dct_const_round_shift(s0 - s8);
+ x9 = dct_const_round_shift(s1 - s9);
+ x10 = dct_const_round_shift(s2 - s10);
+ x11 = dct_const_round_shift(s3 - s11);
+ x12 = dct_const_round_shift(s4 - s12);
+ x13 = dct_const_round_shift(s5 - s13);
+ x14 = dct_const_round_shift(s6 - s14);
+ x15 = dct_const_round_shift(s7 - s15);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4;
+ s5 = x5;
+ s6 = x6;
+ s7 = x7;
+ s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
+ s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
+ s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+
+ x0 = s0 + s4;
+ x1 = s1 + s5;
+ x2 = s2 + s6;
+ x3 = s3 + s7;
+ x4 = s0 - s4;
+ x5 = s1 - s5;
+ x6 = s2 - s6;
+ x7 = s3 - s7;
+ x8 = dct_const_round_shift(s8 + s12);
+ x9 = dct_const_round_shift(s9 + s13);
+ x10 = dct_const_round_shift(s10 + s14);
+ x11 = dct_const_round_shift(s11 + s15);
+ x12 = dct_const_round_shift(s8 - s12);
+ x13 = dct_const_round_shift(s9 - s13);
+ x14 = dct_const_round_shift(s10 - s14);
+ x15 = dct_const_round_shift(s11 - s15);
+
+ // stage 3
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+ s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
+ s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ s8 = x8;
+ s9 = x9;
+ s10 = x10;
+ s11 = x11;
+ s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+ s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
+ s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+
+ x0 = s0 + s2;
+ x1 = s1 + s3;
+ x2 = s0 - s2;
+ x3 = s1 - s3;
+ x4 = dct_const_round_shift(s4 + s6);
+ x5 = dct_const_round_shift(s5 + s7);
+ x6 = dct_const_round_shift(s4 - s6);
+ x7 = dct_const_round_shift(s5 - s7);
+ x8 = s8 + s10;
+ x9 = s9 + s11;
+ x10 = s8 - s10;
+ x11 = s9 - s11;
+ x12 = dct_const_round_shift(s12 + s14);
+ x13 = dct_const_round_shift(s13 + s15);
+ x14 = dct_const_round_shift(s12 - s14);
+ x15 = dct_const_round_shift(s13 - s15);
+
+ // stage 4
+ s2 = (- cospi_16_64) * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (- x6 + x7);
+ s10 = cospi_16_64 * (x10 + x11);
+ s11 = cospi_16_64 * (- x10 + x11);
+ s14 = (- cospi_16_64) * (x14 + x15);
+ s15 = cospi_16_64 * (x14 - x15);
+
+ x2 = dct_const_round_shift(s2);
+ x3 = dct_const_round_shift(s3);
+ x6 = dct_const_round_shift(s6);
+ x7 = dct_const_round_shift(s7);
+ x10 = dct_const_round_shift(s10);
+ x11 = dct_const_round_shift(s11);
+ x14 = dct_const_round_shift(s14);
+ x15 = dct_const_round_shift(s15);
+
+ output[0] = x0;
+ output[1] = -x8;
+ output[2] = x12;
+ output[3] = -x4;
+ output[4] = x6;
+ output[5] = x14;
+ output[6] = x10;
+ output[7] = x2;
+ output[8] = x3;
+ output[9] = x11;
+ output[10] = x15;
+ output[11] = x7;
+ output[12] = x5;
+ output[13] = -x13;
+ output[14] = x9;
+ output[15] = -x1;
+}
+
+static const transform_2d IHT_16[] = {
+ { idct16_1d, idct16_1d }, // DCT_DCT = 0
+ { iadst16_1d, idct16_1d }, // ADST_DCT = 1
+ { idct16_1d, iadst16_1d }, // DCT_ADST = 2
+ { iadst16_1d, iadst16_1d } // ADST_ADST = 3
+};
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride,
+ int tx_type) {
+ int i, j;
+ int16_t out[16 * 16];
+ int16_t *outptr = out;
+ int16_t temp_in[16], temp_out[16];
+ const transform_2d ht = IHT_16[tx_type];
+
+ // Rows
+ for (i = 0; i < 16; ++i) {
+ ht.rows(input, outptr);
+ input += 16;
+ outptr += 16;
+ }
+
+ // Columns
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = out[j * 16 + i];
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+      dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
+                                             + dest[j * dest_stride + i]);
+  }
+}
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest,
+ int dest_stride) {
+ int16_t out[16 * 16] = { 0 };
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[16], temp_out[16];
+
+ // First transform rows. Since all non-zero dct coefficients are in
+ // upper-left 4x4 area, we only need to calculate first 4 rows here.
+ for (i = 0; i < 4; ++i) {
+ idct16_1d(input, outptr);
+ input += 16;
+ outptr += 16;
+ }
+
+ // Then transform columns
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = out[j*16 + i];
+ idct16_1d(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+void vp9_short_idct16x16_1_add_c(int16_t *input, uint8_t *dest,
+ int dest_stride) {
+ int i, j;
+ int a1;
+ int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ a1 = ROUND_POWER_OF_TWO(out, 6);
+ for (j = 0; j < 16; ++j) {
+ for (i = 0; i < 16; ++i)
+ dest[i] = clip_pixel(dest[i] + a1);
+ dest += dest_stride;
+ }
+}
+
+static void idct32_1d(int16_t *input, int16_t *output) {
+ int16_t step1[32], step2[32];
+ int temp1, temp2;
+
+ // stage 1
+ step1[0] = input[0];
+ step1[1] = input[16];
+ step1[2] = input[8];
+ step1[3] = input[24];
+ step1[4] = input[4];
+ step1[5] = input[20];
+ step1[6] = input[12];
+ step1[7] = input[28];
+ step1[8] = input[2];
+ step1[9] = input[18];
+ step1[10] = input[10];
+ step1[11] = input[26];
+ step1[12] = input[6];
+ step1[13] = input[22];
+ step1[14] = input[14];
+ step1[15] = input[30];
+
+ temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64;
+ temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64;
+ step1[16] = dct_const_round_shift(temp1);
+ step1[31] = dct_const_round_shift(temp2);
+
+ temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64;
+ temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64;
+ step1[17] = dct_const_round_shift(temp1);
+ step1[30] = dct_const_round_shift(temp2);
+
+ temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64;
+ temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64;
+ step1[18] = dct_const_round_shift(temp1);
+ step1[29] = dct_const_round_shift(temp2);
+
+ temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64;
+ temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64;
+ step1[19] = dct_const_round_shift(temp1);
+ step1[28] = dct_const_round_shift(temp2);
+
+ temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64;
+ temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64;
+ step1[20] = dct_const_round_shift(temp1);
+ step1[27] = dct_const_round_shift(temp2);
+
+ temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64;
+ temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64;
+ step1[21] = dct_const_round_shift(temp1);
+ step1[26] = dct_const_round_shift(temp2);
+
+ temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64;
+ temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64;
+ step1[22] = dct_const_round_shift(temp1);
+ step1[25] = dct_const_round_shift(temp2);
+
+ temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64;
+ temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64;
+ step1[23] = dct_const_round_shift(temp1);
+ step1[24] = dct_const_round_shift(temp2);
+
+ // stage 2
+ step2[0] = step1[0];
+ step2[1] = step1[1];
+ step2[2] = step1[2];
+ step2[3] = step1[3];
+ step2[4] = step1[4];
+ step2[5] = step1[5];
+ step2[6] = step1[6];
+ step2[7] = step1[7];
+
+ temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
+ temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+ step2[8] = dct_const_round_shift(temp1);
+ step2[15] = dct_const_round_shift(temp2);
+
+ temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
+ temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+ step2[9] = dct_const_round_shift(temp1);
+ step2[14] = dct_const_round_shift(temp2);
+
+ temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
+ temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+
+ temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
+ temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+ step2[11] = dct_const_round_shift(temp1);
+ step2[12] = dct_const_round_shift(temp2);
+
+ step2[16] = step1[16] + step1[17];
+ step2[17] = step1[16] - step1[17];
+ step2[18] = -step1[18] + step1[19];
+ step2[19] = step1[18] + step1[19];
+ step2[20] = step1[20] + step1[21];
+ step2[21] = step1[20] - step1[21];
+ step2[22] = -step1[22] + step1[23];
+ step2[23] = step1[22] + step1[23];
+ step2[24] = step1[24] + step1[25];
+ step2[25] = step1[24] - step1[25];
+ step2[26] = -step1[26] + step1[27];
+ step2[27] = step1[26] + step1[27];
+ step2[28] = step1[28] + step1[29];
+ step2[29] = step1[28] - step1[29];
+ step2[30] = -step1[30] + step1[31];
+ step2[31] = step1[30] + step1[31];
+
+ // stage 3
+ step1[0] = step2[0];
+ step1[1] = step2[1];
+ step1[2] = step2[2];
+ step1[3] = step2[3];
+
+ temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
+ temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+ step1[4] = dct_const_round_shift(temp1);
+ step1[7] = dct_const_round_shift(temp2);
+ temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
+ temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+
+ step1[8] = step2[8] + step2[9];
+ step1[9] = step2[8] - step2[9];
+ step1[10] = -step2[10] + step2[11];
+ step1[11] = step2[10] + step2[11];
+ step1[12] = step2[12] + step2[13];
+ step1[13] = step2[12] - step2[13];
+ step1[14] = -step2[14] + step2[15];
+ step1[15] = step2[14] + step2[15];
+
+ step1[16] = step2[16];
+ step1[31] = step2[31];
+ temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64;
+ temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64;
+ step1[17] = dct_const_round_shift(temp1);
+ step1[30] = dct_const_round_shift(temp2);
+ temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64;
+ temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64;
+ step1[18] = dct_const_round_shift(temp1);
+ step1[29] = dct_const_round_shift(temp2);
+ step1[19] = step2[19];
+ step1[20] = step2[20];
+ temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64;
+ temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64;
+ step1[21] = dct_const_round_shift(temp1);
+ step1[26] = dct_const_round_shift(temp2);
+ temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64;
+ temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64;
+ step1[22] = dct_const_round_shift(temp1);
+ step1[25] = dct_const_round_shift(temp2);
+ step1[23] = step2[23];
+ step1[24] = step2[24];
+ step1[27] = step2[27];
+ step1[28] = step2[28];
+
+ // stage 4
+ temp1 = (step1[0] + step1[1]) * cospi_16_64;
+ temp2 = (step1[0] - step1[1]) * cospi_16_64;
+ step2[0] = dct_const_round_shift(temp1);
+ step2[1] = dct_const_round_shift(temp2);
+ temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
+ temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+ step2[2] = dct_const_round_shift(temp1);
+ step2[3] = dct_const_round_shift(temp2);
+ step2[4] = step1[4] + step1[5];
+ step2[5] = step1[4] - step1[5];
+ step2[6] = -step1[6] + step1[7];
+ step2[7] = step1[6] + step1[7];
+
+ step2[8] = step1[8];
+ step2[15] = step1[15];
+ temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
+ temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+ step2[9] = dct_const_round_shift(temp1);
+ step2[14] = dct_const_round_shift(temp2);
+ temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
+ temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+ step2[11] = step1[11];
+ step2[12] = step1[12];
+
+ step2[16] = step1[16] + step1[19];
+ step2[17] = step1[17] + step1[18];
+ step2[18] = step1[17] - step1[18];
+ step2[19] = step1[16] - step1[19];
+ step2[20] = -step1[20] + step1[23];
+ step2[21] = -step1[21] + step1[22];
+ step2[22] = step1[21] + step1[22];
+ step2[23] = step1[20] + step1[23];
+
+ step2[24] = step1[24] + step1[27];
+ step2[25] = step1[25] + step1[26];
+ step2[26] = step1[25] - step1[26];
+ step2[27] = step1[24] - step1[27];
+ step2[28] = -step1[28] + step1[31];
+ step2[29] = -step1[29] + step1[30];
+ step2[30] = step1[29] + step1[30];
+ step2[31] = step1[28] + step1[31];
+
+ // stage 5
+ step1[0] = step2[0] + step2[3];
+ step1[1] = step2[1] + step2[2];
+ step1[2] = step2[1] - step2[2];
+ step1[3] = step2[0] - step2[3];
+ step1[4] = step2[4];
+ temp1 = (step2[6] - step2[5]) * cospi_16_64;
+ temp2 = (step2[5] + step2[6]) * cospi_16_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+ step1[7] = step2[7];
+
+ step1[8] = step2[8] + step2[11];
+ step1[9] = step2[9] + step2[10];
+ step1[10] = step2[9] - step2[10];
+ step1[11] = step2[8] - step2[11];
+ step1[12] = -step2[12] + step2[15];
+ step1[13] = -step2[13] + step2[14];
+ step1[14] = step2[13] + step2[14];
+ step1[15] = step2[12] + step2[15];
+
+ step1[16] = step2[16];
+ step1[17] = step2[17];
+ temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64;
+ temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64;
+ step1[18] = dct_const_round_shift(temp1);
+ step1[29] = dct_const_round_shift(temp2);
+ temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64;
+ temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64;
+ step1[19] = dct_const_round_shift(temp1);
+ step1[28] = dct_const_round_shift(temp2);
+ temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64;
+ temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64;
+ step1[20] = dct_const_round_shift(temp1);
+ step1[27] = dct_const_round_shift(temp2);
+ temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64;
+ temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64;
+ step1[21] = dct_const_round_shift(temp1);
+ step1[26] = dct_const_round_shift(temp2);
+ step1[22] = step2[22];
+ step1[23] = step2[23];
+ step1[24] = step2[24];
+ step1[25] = step2[25];
+ step1[30] = step2[30];
+ step1[31] = step2[31];
+
+ // stage 6
+ step2[0] = step1[0] + step1[7];
+ step2[1] = step1[1] + step1[6];
+ step2[2] = step1[2] + step1[5];
+ step2[3] = step1[3] + step1[4];
+ step2[4] = step1[3] - step1[4];
+ step2[5] = step1[2] - step1[5];
+ step2[6] = step1[1] - step1[6];
+ step2[7] = step1[0] - step1[7];
+ step2[8] = step1[8];
+ step2[9] = step1[9];
+ temp1 = (-step1[10] + step1[13]) * cospi_16_64;
+ temp2 = (step1[10] + step1[13]) * cospi_16_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+ temp1 = (-step1[11] + step1[12]) * cospi_16_64;
+ temp2 = (step1[11] + step1[12]) * cospi_16_64;
+ step2[11] = dct_const_round_shift(temp1);
+ step2[12] = dct_const_round_shift(temp2);
+ step2[14] = step1[14];
+ step2[15] = step1[15];
+
+ step2[16] = step1[16] + step1[23];
+ step2[17] = step1[17] + step1[22];
+ step2[18] = step1[18] + step1[21];
+ step2[19] = step1[19] + step1[20];
+ step2[20] = step1[19] - step1[20];
+ step2[21] = step1[18] - step1[21];
+ step2[22] = step1[17] - step1[22];
+ step2[23] = step1[16] - step1[23];
+
+ step2[24] = -step1[24] + step1[31];
+ step2[25] = -step1[25] + step1[30];
+ step2[26] = -step1[26] + step1[29];
+ step2[27] = -step1[27] + step1[28];
+ step2[28] = step1[27] + step1[28];
+ step2[29] = step1[26] + step1[29];
+ step2[30] = step1[25] + step1[30];
+ step2[31] = step1[24] + step1[31];
+
+ // stage 7
+ step1[0] = step2[0] + step2[15];
+ step1[1] = step2[1] + step2[14];
+ step1[2] = step2[2] + step2[13];
+ step1[3] = step2[3] + step2[12];
+ step1[4] = step2[4] + step2[11];
+ step1[5] = step2[5] + step2[10];
+ step1[6] = step2[6] + step2[9];
+ step1[7] = step2[7] + step2[8];
+ step1[8] = step2[7] - step2[8];
+ step1[9] = step2[6] - step2[9];
+ step1[10] = step2[5] - step2[10];
+ step1[11] = step2[4] - step2[11];
+ step1[12] = step2[3] - step2[12];
+ step1[13] = step2[2] - step2[13];
+ step1[14] = step2[1] - step2[14];
+ step1[15] = step2[0] - step2[15];
+
+ step1[16] = step2[16];
+ step1[17] = step2[17];
+ step1[18] = step2[18];
+ step1[19] = step2[19];
+ temp1 = (-step2[20] + step2[27]) * cospi_16_64;
+ temp2 = (step2[20] + step2[27]) * cospi_16_64;
+ step1[20] = dct_const_round_shift(temp1);
+ step1[27] = dct_const_round_shift(temp2);
+ temp1 = (-step2[21] + step2[26]) * cospi_16_64;
+ temp2 = (step2[21] + step2[26]) * cospi_16_64;
+ step1[21] = dct_const_round_shift(temp1);
+ step1[26] = dct_const_round_shift(temp2);
+ temp1 = (-step2[22] + step2[25]) * cospi_16_64;
+ temp2 = (step2[22] + step2[25]) * cospi_16_64;
+ step1[22] = dct_const_round_shift(temp1);
+ step1[25] = dct_const_round_shift(temp2);
+ temp1 = (-step2[23] + step2[24]) * cospi_16_64;
+ temp2 = (step2[23] + step2[24]) * cospi_16_64;
+ step1[23] = dct_const_round_shift(temp1);
+ step1[24] = dct_const_round_shift(temp2);
+ step1[28] = step2[28];
+ step1[29] = step2[29];
+ step1[30] = step2[30];
+ step1[31] = step2[31];
+
+ // final stage
+ output[0] = step1[0] + step1[31];
+ output[1] = step1[1] + step1[30];
+ output[2] = step1[2] + step1[29];
+ output[3] = step1[3] + step1[28];
+ output[4] = step1[4] + step1[27];
+ output[5] = step1[5] + step1[26];
+ output[6] = step1[6] + step1[25];
+ output[7] = step1[7] + step1[24];
+ output[8] = step1[8] + step1[23];
+ output[9] = step1[9] + step1[22];
+ output[10] = step1[10] + step1[21];
+ output[11] = step1[11] + step1[20];
+ output[12] = step1[12] + step1[19];
+ output[13] = step1[13] + step1[18];
+ output[14] = step1[14] + step1[17];
+ output[15] = step1[15] + step1[16];
+ output[16] = step1[15] - step1[16];
+ output[17] = step1[14] - step1[17];
+ output[18] = step1[13] - step1[18];
+ output[19] = step1[12] - step1[19];
+ output[20] = step1[11] - step1[20];
+ output[21] = step1[10] - step1[21];
+ output[22] = step1[9] - step1[22];
+ output[23] = step1[8] - step1[23];
+ output[24] = step1[7] - step1[24];
+ output[25] = step1[6] - step1[25];
+ output[26] = step1[5] - step1[26];
+ output[27] = step1[4] - step1[27];
+ output[28] = step1[3] - step1[28];
+ output[29] = step1[2] - step1[29];
+ output[30] = step1[1] - step1[30];
+ output[31] = step1[0] - step1[31];
+}
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int16_t out[32 * 32];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[32], temp_out[32];
+
+ // Rows
+ for (i = 0; i < 32; ++i) {
+ idct32_1d(input, outptr);
+ input += 32;
+ outptr += 32;
+ }
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = out[j * 32 + i];
+ idct32_1d(temp_in, temp_out);
+ for (j = 0; j < 32; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output) {
+ int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ output[0] = ROUND_POWER_OF_TWO(out, 6);
+}
diff --git a/libvpx/vp9/common/vp9_idct.h b/libvpx/vp9/common/vp9_idct.h
new file mode 100644
index 0000000..0c47da6
--- /dev/null
+++ b/libvpx/vp9/common/vp9_idct.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_IDCT_H_
+#define VP9_COMMON_VP9_IDCT_H_
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
+
+
+// Constants and Macros used by all idct/dct functions
+#define DCT_CONST_BITS 14
+#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
+
+#define WHT_UPSCALE_FACTOR 2
+
+#define pair_set_epi16(a, b) \
+ _mm_set1_epi32(((uint16_t)(a)) + (((uint16_t)(b)) << 16))
+
+#define pair_set_epi32(a, b) \
+ _mm_set_epi32(b, a, b, a)
+
+// Constants:
+// for (int i = 1; i < 32; ++i)
+//   printf("static const int cospi_%d_64 = %.0f;\n", i,
+//          round(16384 * cos(i * M_PI / 64)));
+// Note: sin(k*Pi/64) = cos((32-k)*Pi/64)
+static const int cospi_1_64 = 16364;
+static const int cospi_2_64 = 16305;
+static const int cospi_3_64 = 16207;
+static const int cospi_4_64 = 16069;
+static const int cospi_5_64 = 15893;
+static const int cospi_6_64 = 15679;
+static const int cospi_7_64 = 15426;
+static const int cospi_8_64 = 15137;
+static const int cospi_9_64 = 14811;
+static const int cospi_10_64 = 14449;
+static const int cospi_11_64 = 14053;
+static const int cospi_12_64 = 13623;
+static const int cospi_13_64 = 13160;
+static const int cospi_14_64 = 12665;
+static const int cospi_15_64 = 12140;
+static const int cospi_16_64 = 11585;
+static const int cospi_17_64 = 11003;
+static const int cospi_18_64 = 10394;
+static const int cospi_19_64 = 9760;
+static const int cospi_20_64 = 9102;
+static const int cospi_21_64 = 8423;
+static const int cospi_22_64 = 7723;
+static const int cospi_23_64 = 7005;
+static const int cospi_24_64 = 6270;
+static const int cospi_25_64 = 5520;
+static const int cospi_26_64 = 4756;
+static const int cospi_27_64 = 3981;
+static const int cospi_28_64 = 3196;
+static const int cospi_29_64 = 2404;
+static const int cospi_30_64 = 1606;
+static const int cospi_31_64 = 804;
+
+// 16384 * sqrt(2) * sin(kPi/9) * 2 / 3
+static const int sinpi_1_9 = 5283;
+static const int sinpi_2_9 = 9929;
+static const int sinpi_3_9 = 13377;
+static const int sinpi_4_9 = 15212;
+
+static INLINE int dct_const_round_shift(int input) {
+ int rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
+ assert(INT16_MIN <= rv && rv <= INT16_MAX);
+ return rv;
+}
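+
+// For example, with DCT_CONST_BITS == 14, dct_const_round_shift(x) is
+// (x + 8192) >> 14, so dct_const_round_shift(x * cospi_16_64) approximates
+// x * cos(pi/4) ~= 0.7071 * x in 14-bit fixed point.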
+
+typedef void (*transform_1d)(int16_t*, int16_t*);
+
+typedef struct {
+ transform_1d cols, rows; // vertical and horizontal
+} transform_2d;
+
+#endif // VP9_COMMON_VP9_IDCT_H_
diff --git a/libvpx/vp9/common/vp9_loopfilter.c b/libvpx/vp9/common/vp9_loopfilter.c
new file mode 100644
index 0000000..cfb5cd4
--- /dev/null
+++ b/libvpx/vp9/common/vp9_loopfilter.c
@@ -0,0 +1,1061 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_seg_common.h"
+
+struct loop_filter_info {
+ const uint8_t *mblim;
+ const uint8_t *lim;
+ const uint8_t *hev_thr;
+};
+
+// This structure holds bit masks for all 8x8 blocks in a 64x64 region.
+// Each 1 bit represents a position in which we want to apply the loop filter.
+// Left_ entries refer to whether we apply a filter on the border to the
+// left of the block. Above_ entries refer to whether or not to apply a
+// filter on the above border. Int_ entries refer to whether or not to
+// apply filters on the internal 4x4 edges within the 8x8 block that each
+// bit represents.
+// Since each transform is accompanied by a potentially different type of
+// loop filter there is a different entry in the array for each transform size.
+typedef struct {
+ uint64_t left_y[TX_SIZES];
+ uint64_t above_y[TX_SIZES];
+ uint64_t int_4x4_y;
+ uint16_t left_uv[TX_SIZES];
+ uint16_t above_uv[TX_SIZES];
+ uint16_t int_4x4_uv;
+} LOOP_FILTER_MASK;
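+
+// For illustration (names here are hypothetical): bit (row * 8 + col) of a
+// left_y/above_y mask corresponds to the 8x8 block at (row, col) within the
+// 64x64 region, so a lookup might read
+//   const int bit = row * 8 + col;
+//   const int filter_left = (lfm->left_y[tx_size] >> bit) & 1;
+// The 16 bit uv masks follow the same scheme on a 4x4 grid of chroma blocks.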
+
+// 64 bit masks for left transform size. Each 1 represents a position where
+// we should apply a loop filter across the left border of an 8x8 block
+// boundary.
+//
+// In the case of TX_16X16 (in low order byte first) we end up with
+// a mask that looks like this:
+//
+// 10101010
+// 10101010
+// 10101010
+// 10101010
+// 10101010
+// 10101010
+// 10101010
+// 10101010
+//
+// A loop filter should be applied to every other 8x8 block horizontally.
+static const uint64_t left_64x64_txform_mask[TX_SIZES] = {
+ 0xffffffffffffffff, // TX_4X4
+ 0xffffffffffffffff, // TX_8x8
+ 0x5555555555555555, // TX_16x16
+ 0x1111111111111111, // TX_32x32
+};
+
+// 64 bit masks for above transform size. Each 1 represents a position where
+// we should apply a loop filter across the top border of an 8x8 block
+// boundary.
+//
+// In the case of TX_32x32 (in low order byte first) we end up with
+// a mask that looks like this:
+//
+// 11111111
+// 00000000
+// 00000000
+// 00000000
+// 11111111
+// 00000000
+// 00000000
+// 00000000
+//
+// A loop filter should be applied to every fourth row vertically.
+static const uint64_t above_64x64_txform_mask[TX_SIZES] = {
+ 0xffffffffffffffff, // TX_4X4
+ 0xffffffffffffffff, // TX_8x8
+ 0x00ff00ff00ff00ff, // TX_16x16
+ 0x000000ff000000ff, // TX_32x32
+};
+
+// 64 bit masks for prediction sizes (left). Each 1 represents a position
+// at the left border of the prediction block. These are aligned to the
+// right-most appropriate bit, and then shifted into place.
+//
+// In the case of BLOCK_16X32 (low order byte first) we end up with
+// a mask that looks like this:
+//
+// 10000000
+// 10000000
+// 10000000
+// 10000000
+// 00000000
+// 00000000
+// 00000000
+// 00000000
+static const uint64_t left_prediction_mask[BLOCK_SIZES] = {
+ 0x0000000000000001, // BLOCK_4X4,
+ 0x0000000000000001, // BLOCK_4X8,
+ 0x0000000000000001, // BLOCK_8X4,
+ 0x0000000000000001, // BLOCK_8X8,
+ 0x0000000000000101, // BLOCK_8X16,
+ 0x0000000000000001, // BLOCK_16X8,
+ 0x0000000000000101, // BLOCK_16X16,
+ 0x0000000001010101, // BLOCK_16X32,
+ 0x0000000000000101, // BLOCK_32X16,
+ 0x0000000001010101, // BLOCK_32X32,
+ 0x0101010101010101, // BLOCK_32X64,
+ 0x0000000001010101, // BLOCK_64X32,
+ 0x0101010101010101, // BLOCK_64X64
+};
+
+// 64 bit mask to shift and set for each prediction size.
+static const uint64_t above_prediction_mask[BLOCK_SIZES] = {
+ 0x0000000000000001, // BLOCK_4X4
+ 0x0000000000000001, // BLOCK_4X8
+ 0x0000000000000001, // BLOCK_8X4
+ 0x0000000000000001, // BLOCK_8X8
+ 0x0000000000000001, // BLOCK_8X16,
+ 0x0000000000000003, // BLOCK_16X8
+ 0x0000000000000003, // BLOCK_16X16
+ 0x0000000000000003, // BLOCK_16X32,
+ 0x000000000000000f, // BLOCK_32X16,
+ 0x000000000000000f, // BLOCK_32X32,
+ 0x000000000000000f, // BLOCK_32X64,
+ 0x00000000000000ff, // BLOCK_64X32,
+ 0x00000000000000ff, // BLOCK_64X64
+};
+// 64 bit mask to shift and set for each prediction size. A bit is set for
+// each 8x8 block that would be covered by the given block size, anchored
+// at the top left of the 64x64 block.
+static const uint64_t size_mask[BLOCK_SIZES] = {
+ 0x0000000000000001, // BLOCK_4X4
+ 0x0000000000000001, // BLOCK_4X8
+ 0x0000000000000001, // BLOCK_8X4
+ 0x0000000000000001, // BLOCK_8X8
+ 0x0000000000000101, // BLOCK_8X16,
+ 0x0000000000000003, // BLOCK_16X8
+ 0x0000000000000303, // BLOCK_16X16
+ 0x0000000003030303, // BLOCK_16X32,
+ 0x0000000000000f0f, // BLOCK_32X16,
+ 0x000000000f0f0f0f, // BLOCK_32X32,
+ 0x0f0f0f0f0f0f0f0f, // BLOCK_32X64,
+ 0x00000000ffffffff, // BLOCK_64X32,
+ 0xffffffffffffffff, // BLOCK_64X64
+};
+
+// These are used for masking the left and above borders.
+static const uint64_t left_border = 0x1111111111111111;
+static const uint64_t above_border = 0x000000ff000000ff;
+
+// 16 bit masks for uv transform sizes.
+static const uint16_t left_64x64_txform_mask_uv[TX_SIZES] = {
+ 0xffff, // TX_4X4
+ 0xffff, // TX_8x8
+ 0x5555, // TX_16x16
+ 0x1111, // TX_32x32
+};
+
+static const uint16_t above_64x64_txform_mask_uv[TX_SIZES] = {
+ 0xffff, // TX_4X4
+ 0xffff, // TX_8x8
+ 0x0f0f, // TX_16x16
+ 0x000f, // TX_32x32
+};
+
+// 16 bit left mask to shift and set for each uv prediction size.
+static const uint16_t left_prediction_mask_uv[BLOCK_SIZES] = {
+ 0x0001, // BLOCK_4X4,
+ 0x0001, // BLOCK_4X8,
+ 0x0001, // BLOCK_8X4,
+ 0x0001, // BLOCK_8X8,
+ 0x0001, // BLOCK_8X16,
+ 0x0001, // BLOCK_16X8,
+ 0x0001, // BLOCK_16X16,
+ 0x0011, // BLOCK_16X32,
+ 0x0001, // BLOCK_32X16,
+ 0x0011, // BLOCK_32X32,
+ 0x1111, // BLOCK_32X64
+ 0x0011, // BLOCK_64X32,
+ 0x1111, // BLOCK_64X64
+};
+// 16 bit above mask to shift and set for each uv prediction size.
+static const uint16_t above_prediction_mask_uv[BLOCK_SIZES] = {
+ 0x0001, // BLOCK_4X4
+ 0x0001, // BLOCK_4X8
+ 0x0001, // BLOCK_8X4
+ 0x0001, // BLOCK_8X8
+ 0x0001, // BLOCK_8X16,
+ 0x0001, // BLOCK_16X8
+ 0x0001, // BLOCK_16X16
+ 0x0001, // BLOCK_16X32,
+ 0x0003, // BLOCK_32X16,
+ 0x0003, // BLOCK_32X32,
+ 0x0003, // BLOCK_32X64,
+ 0x000f, // BLOCK_64X32,
+ 0x000f, // BLOCK_64X64
+};
+
+// 16 bit mask to shift and set for each uv prediction size.
+static const uint16_t size_mask_uv[BLOCK_SIZES] = {
+ 0x0001, // BLOCK_4X4
+ 0x0001, // BLOCK_4X8
+ 0x0001, // BLOCK_8X4
+ 0x0001, // BLOCK_8X8
+ 0x0001, // BLOCK_8X16,
+ 0x0001, // BLOCK_16X8
+ 0x0001, // BLOCK_16X16
+ 0x0011, // BLOCK_16X32,
+ 0x0003, // BLOCK_32X16,
+ 0x0033, // BLOCK_32X32,
+ 0x3333, // BLOCK_32X64,
+ 0x00ff, // BLOCK_64X32,
+ 0xffff, // BLOCK_64X64
+};
+static const uint16_t left_border_uv = 0x1111;
+static const uint16_t above_border_uv = 0x000f;
+
+
+static void lf_init_lut(loop_filter_info_n *lfi) {
+ lfi->mode_lf_lut[DC_PRED] = 0;
+ lfi->mode_lf_lut[D45_PRED] = 0;
+ lfi->mode_lf_lut[D135_PRED] = 0;
+ lfi->mode_lf_lut[D117_PRED] = 0;
+ lfi->mode_lf_lut[D153_PRED] = 0;
+ lfi->mode_lf_lut[D207_PRED] = 0;
+ lfi->mode_lf_lut[D63_PRED] = 0;
+ lfi->mode_lf_lut[V_PRED] = 0;
+ lfi->mode_lf_lut[H_PRED] = 0;
+ lfi->mode_lf_lut[TM_PRED] = 0;
+ lfi->mode_lf_lut[ZEROMV] = 0;
+ lfi->mode_lf_lut[NEARESTMV] = 1;
+ lfi->mode_lf_lut[NEARMV] = 1;
+ lfi->mode_lf_lut[NEWMV] = 1;
+}
+
+static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
+ int lvl;
+
+ // For each possible value for the loop filter fill out limits
+ for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) {
+    // Set loop filter parameters that control sharpness.
+ int block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4));
+
+ if (sharpness_lvl > 0) {
+ if (block_inside_limit > (9 - sharpness_lvl))
+ block_inside_limit = (9 - sharpness_lvl);
+ }
+
+ if (block_inside_limit < 1)
+ block_inside_limit = 1;
+
+ vpx_memset(lfi->lim[lvl], block_inside_limit, SIMD_WIDTH);
+ vpx_memset(lfi->mblim[lvl], (2 * (lvl + 2) + block_inside_limit),
+ SIMD_WIDTH);
+ }
+}
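+
+// For example, with sharpness_lvl == 0 and lvl == 10 the loop above yields
+// block_inside_limit == 10 and mblim == 2 * (10 + 2) + 10 == 34; higher
+// sharpness levels shift and cap block_inside_limit, weakening the filter.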
+
+void vp9_loop_filter_init(VP9_COMMON *cm) {
+ loop_filter_info_n *lfi = &cm->lf_info;
+ struct loopfilter *lf = &cm->lf;
+ int i;
+
+ // init limits for given sharpness
+ update_sharpness(lfi, lf->sharpness_level);
+ lf->last_sharpness_level = lf->sharpness_level;
+
+ // init LUT for lvl and hev thr picking
+ lf_init_lut(lfi);
+
+ // init hev threshold const vectors
+ for (i = 0; i < 4; i++)
+ vpx_memset(lfi->hev_thr[i], i, SIMD_WIDTH);
+}
+
+void vp9_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) {
+ int seg_id;
+  // n_shift is the multiplier for lf_deltas:
+  // the multiplier is 1 when filter_lvl is between 0 and 31,
+  // and 2 when filter_lvl is between 32 and 63.
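+  // e.g. default_filt_lvl == 40 gives n_shift == 1, so a ref_delta of 2
+  // moves the effective filter level by 4.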
+ const int n_shift = default_filt_lvl >> 5;
+ loop_filter_info_n *const lfi = &cm->lf_info;
+ struct loopfilter *const lf = &cm->lf;
+ struct segmentation *const seg = &cm->seg;
+
+ // update limits if sharpness has changed
+ if (lf->last_sharpness_level != lf->sharpness_level) {
+ update_sharpness(lfi, lf->sharpness_level);
+ lf->last_sharpness_level = lf->sharpness_level;
+ }
+
+ for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {
+ int lvl_seg = default_filt_lvl, ref, mode, intra_lvl;
+
+ // Set the baseline filter values for each segment
+ if (vp9_segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) {
+ const int data = vp9_get_segdata(seg, seg_id, SEG_LVL_ALT_LF);
+ lvl_seg = seg->abs_delta == SEGMENT_ABSDATA
+ ? data
+ : clamp(default_filt_lvl + data, 0, MAX_LOOP_FILTER);
+ }
+
+ if (!lf->mode_ref_delta_enabled) {
+      // We could get rid of this if we assume that deltas are set to
+      // zero when not in use; the encoder always uses deltas.
+ vpx_memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id]));
+ continue;
+ }
+
+ intra_lvl = lvl_seg + (lf->ref_deltas[INTRA_FRAME] << n_shift);
+ lfi->lvl[seg_id][INTRA_FRAME][0] = clamp(intra_lvl, 0, MAX_LOOP_FILTER);
+
+ for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref)
+ for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) {
+ const int inter_lvl = lvl_seg + (lf->ref_deltas[ref] << n_shift)
+ + (lf->mode_deltas[mode] << n_shift);
+ lfi->lvl[seg_id][ref][mode] = clamp(inter_lvl, 0, MAX_LOOP_FILTER);
+ }
+ }
+}
+
+static int build_lfi(const loop_filter_info_n *lfi_n,
+ const MB_MODE_INFO *mbmi,
+ struct loop_filter_info *lfi) {
+ const int seg = mbmi->segment_id;
+ const int ref = mbmi->ref_frame[0];
+ const int mode = lfi_n->mode_lf_lut[mbmi->mode];
+ const int filter_level = lfi_n->lvl[seg][ref][mode];
+
+ if (filter_level > 0) {
+ lfi->mblim = lfi_n->mblim[filter_level];
+ lfi->lim = lfi_n->lim[filter_level];
+ lfi->hev_thr = lfi_n->hev_thr[filter_level >> 4];
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static void filter_selectively_vert(uint8_t *s, int pitch,
+ unsigned int mask_16x16,
+ unsigned int mask_8x8,
+ unsigned int mask_4x4,
+ unsigned int mask_4x4_int,
+ const struct loop_filter_info *lfi) {
+ unsigned int mask;
+
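+  // Each iteration consumes one mask bit and advances s by 8 pixels, so bit
+  // n of each mask selects the n-th 8-pixel-wide column along this edge.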
+ for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
+ mask; mask >>= 1) {
+ if (mask & 1) {
+ if (mask_16x16 & 1) {
+ vp9_mb_lpf_vertical_edge_w(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr);
+ assert(!(mask_8x8 & 1));
+ assert(!(mask_4x4 & 1));
+ assert(!(mask_4x4_int & 1));
+ } else if (mask_8x8 & 1) {
+ vp9_mbloop_filter_vertical_edge(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ assert(!(mask_16x16 & 1));
+ assert(!(mask_4x4 & 1));
+ } else if (mask_4x4 & 1) {
+ vp9_loop_filter_vertical_edge(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ assert(!(mask_16x16 & 1));
+ assert(!(mask_8x8 & 1));
+ }
+ }
+ if (mask_4x4_int & 1)
+ vp9_loop_filter_vertical_edge(s + 4, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ s += 8;
+ lfi++;
+ mask_16x16 >>= 1;
+ mask_8x8 >>= 1;
+ mask_4x4 >>= 1;
+ mask_4x4_int >>= 1;
+ }
+}
+
+static void filter_selectively_horiz(uint8_t *s, int pitch,
+ unsigned int mask_16x16,
+ unsigned int mask_8x8,
+ unsigned int mask_4x4,
+ unsigned int mask_4x4_int,
+ int only_4x4_1,
+ const struct loop_filter_info *lfi) {
+ unsigned int mask;
+ int count;
+
+ for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
+ mask; mask >>= count) {
+ count = 1;
+ if (mask & 1) {
+ if (!only_4x4_1) {
+ if (mask_16x16 & 1) {
+ if ((mask_16x16 & 3) == 3) {
+ vp9_mb_lpf_horizontal_edge_w(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 2);
+ count = 2;
+ } else {
+ vp9_mb_lpf_horizontal_edge_w(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+ assert(!(mask_8x8 & 1));
+ assert(!(mask_4x4 & 1));
+ assert(!(mask_4x4_int & 1));
+ } else if (mask_8x8 & 1) {
+ vp9_mbloop_filter_horizontal_edge(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ assert(!(mask_16x16 & 1));
+ assert(!(mask_4x4 & 1));
+ } else if (mask_4x4 & 1) {
+ vp9_loop_filter_horizontal_edge(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ assert(!(mask_16x16 & 1));
+ assert(!(mask_8x8 & 1));
+ }
+ }
+
+ if (mask_4x4_int & 1)
+ vp9_loop_filter_horizontal_edge(s + 4 * pitch, pitch, lfi->mblim,
+ lfi->lim, lfi->hev_thr, 1);
+ }
+ s += 8 * count;
+ lfi += count;
+ mask_16x16 >>= count;
+ mask_8x8 >>= count;
+ mask_4x4 >>= count;
+ mask_4x4_int >>= count;
+ }
+}
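+
+// Note that when two adjacent 16x16 edges are both flagged ((mask_16x16 & 3)
+// == 3 above), they are filtered in a single call with count == 2 and the
+// loop then skips both mask bits at once.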
+
+// This function ORs into the current lfm structure the positions at which
+// to apply the loop filter for the specific mi we are looking at. It uses
+// information including the block size type (32x16, 32x32, etc.), the
+// transform size, whether there were any coefficients encoded, and the
+// loop filter strength of the block we are currently looking at. Shift is
+// used to position the 1s we produce.
+// TODO(JBB): Need another function for different resolution color formats.
+static void build_masks(const loop_filter_info_n *const lfi_n,
+ const MODE_INFO *mi, const int shift_y,
+ const int shift_uv,
+ LOOP_FILTER_MASK *lfm) {
+ const BLOCK_SIZE block_size = mi->mbmi.sb_type;
+ const TX_SIZE tx_size_y = mi->mbmi.tx_size;
+ const TX_SIZE tx_size_uv = get_uv_tx_size(&mi->mbmi);
+ const int skip = mi->mbmi.skip_coeff;
+ const int seg = mi->mbmi.segment_id;
+ const int ref = mi->mbmi.ref_frame[0];
+ const int mode = lfi_n->mode_lf_lut[mi->mbmi.mode];
+ const int filter_level = lfi_n->lvl[seg][ref][mode];
+ uint64_t *left_y = &lfm->left_y[tx_size_y];
+ uint64_t *above_y = &lfm->above_y[tx_size_y];
+ uint64_t *int_4x4_y = &lfm->int_4x4_y;
+ uint16_t *left_uv = &lfm->left_uv[tx_size_uv];
+ uint16_t *above_uv = &lfm->above_uv[tx_size_uv];
+ uint16_t *int_4x4_uv = &lfm->int_4x4_uv;
+
+ // If filter level is 0 we don't loop filter.
+ if (!filter_level)
+ return;
+
+  // These set a 1 on the block size edges of the current block.
+  // For instance, if the block size is 32x16, we'll set:
+  //    above =   1111
+  //              0000
+  //    and
+  //    left  =   1000
+  //              1000
+  // NOTE: In this example the low bit is left-most, so (1000) is stored
+  // as 1, not 8.
+  //
+  // U and V set the same information on a 16 bit scale.
+  //
+ *above_y |= above_prediction_mask[block_size] << shift_y;
+ *above_uv |= above_prediction_mask_uv[block_size] << shift_uv;
+ *left_y |= left_prediction_mask[block_size] << shift_y;
+ *left_uv |= left_prediction_mask_uv[block_size] << shift_uv;
+
+ // If the block has no coefficients and is not intra we skip applying
+ // the loop filter on block edges.
+ if (skip && ref > INTRA_FRAME)
+ return;
+
+  // Here we are adding a mask for the transform size. The transform size
+  // mask is set to be correct for a 64x64 prediction block size. We mask it
+  // to match the size of the block we are working on and then shift it
+  // into place.
+ *above_y |= (size_mask[block_size] &
+ above_64x64_txform_mask[tx_size_y]) << shift_y;
+ *above_uv |= (size_mask_uv[block_size] &
+ above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;
+
+ *left_y |= (size_mask[block_size] &
+ left_64x64_txform_mask[tx_size_y]) << shift_y;
+ *left_uv |= (size_mask_uv[block_size] &
+ left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;
+
+ // Here we are trying to determine what to do with the internal 4x4 block
+ // boundaries. These differ from the 4x4 boundaries on the outside edge of
+ // an 8x8 in that the internal ones can be skipped and don't depend on
+ // the prediction block size.
+ if (tx_size_y == TX_4X4) {
+ *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffff) << shift_y;
+ }
+ if (tx_size_uv == TX_4X4) {
+ *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv;
+ }
+}
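+
+// Editor's sketch (hypothetical helper, compiled out) of the bit layout the
+// code above relies on: the Y masks carry one bit per 8x8 block of the
+// 64x64 superblock, row major with the low bit top-left, so the shift for
+// the block at (row, col) is row * 8 + col; the UV masks use a 4x4 grid of
+// 16 bits, so the shift is row * 4 + col.
+#if 0
+static INLINE int mask_shift_sketch(int row, int col, int is_uv) {
+  return is_uv ? row * 4 + col : row * 8 + col;
+}
+#endif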
+
+// This function does the same thing as the one above with the exception that
+// it only affects the Y masks. It exists because for blocks < 16x16 in size,
+// we only update the U and V masks on the first block.
+static void build_y_mask(const loop_filter_info_n *const lfi_n,
+ const MODE_INFO *mi, const int shift_y,
+ LOOP_FILTER_MASK *lfm) {
+ const BLOCK_SIZE block_size = mi->mbmi.sb_type;
+ const TX_SIZE tx_size_y = mi->mbmi.tx_size;
+ const int skip = mi->mbmi.skip_coeff;
+ const int seg = mi->mbmi.segment_id;
+ const int ref = mi->mbmi.ref_frame[0];
+ const int mode = lfi_n->mode_lf_lut[mi->mbmi.mode];
+ const int filter_level = lfi_n->lvl[seg][ref][mode];
+ uint64_t *left_y = &lfm->left_y[tx_size_y];
+ uint64_t *above_y = &lfm->above_y[tx_size_y];
+ uint64_t *int_4x4_y = &lfm->int_4x4_y;
+
+ if (!filter_level)
+ return;
+
+ *above_y |= above_prediction_mask[block_size] << shift_y;
+ *left_y |= left_prediction_mask[block_size] << shift_y;
+
+ if (skip && ref > INTRA_FRAME)
+ return;
+
+ *above_y |= (size_mask[block_size] &
+ above_64x64_txform_mask[tx_size_y]) << shift_y;
+
+ *left_y |= (size_mask[block_size] &
+ left_64x64_txform_mask[tx_size_y]) << shift_y;
+
+ if (tx_size_y == TX_4X4) {
+ *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffff) << shift_y;
+ }
+}
+
+// This function sets up the bit masks for the entire 64x64 region represented
+// by mi_row, mi_col.
+// TODO(JBB): This function only works for yv12.
+static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
+ MODE_INFO **mi_8x8, const int mode_info_stride,
+ LOOP_FILTER_MASK *lfm) {
+ int idx_32, idx_16, idx_8;
+ const loop_filter_info_n *const lfi_n = &cm->lf_info;
+ MODE_INFO **mip = mi_8x8;
+ MODE_INFO **mip2 = mi_8x8;
+
+  // These are offsets to the next mi in the 64x64 block. They are what gets
+  // added to the mi ptr as we go through each loop, and they help us avoid
+  // setting up special row and column counters for each index. The last step
+  // brings us back to the starting position.
+ const int offset_32[] = {4, (mode_info_stride << 2) - 4, 4,
+ -(mode_info_stride << 2) - 4};
+ const int offset_16[] = {2, (mode_info_stride << 1) - 2, 2,
+ -(mode_info_stride << 1) - 2};
+ const int offset[] = {1, mode_info_stride - 1, 1, -mode_info_stride - 1};
+
+  // The following variables represent shifts that position the current
+  // block's mask over the appropriate block. A shift of 36 to the left
+  // moves the bits for the final 32x32 block in the 64x64 down 4 rows and
+  // right 4 columns to the appropriate spot.
+ const int shift_32_y[] = {0, 4, 32, 36};
+ const int shift_16_y[] = {0, 2, 16, 18};
+ const int shift_8_y[] = {0, 1, 8, 9};
+ const int shift_32_uv[] = {0, 2, 8, 10};
+ const int shift_16_uv[] = {0, 1, 4, 5};
+ int i;
+ const int max_rows = (mi_row + MI_BLOCK_SIZE > cm->mi_rows ?
+ cm->mi_rows - mi_row : MI_BLOCK_SIZE);
+ const int max_cols = (mi_col + MI_BLOCK_SIZE > cm->mi_cols ?
+ cm->mi_cols - mi_col : MI_BLOCK_SIZE);
+
+ vp9_zero(*lfm);
+
+ // TODO(jimbankoski): Try moving most of the following code into decode
+ // loop and storing lfm in the mbmi structure so that we don't have to go
+ // through the recursive loop structure multiple times.
+ switch (mip[0]->mbmi.sb_type) {
+ case BLOCK_64X64:
+      build_masks(lfi_n, mip[0], 0, 0, lfm);
+ break;
+ case BLOCK_64X32:
+ build_masks(lfi_n, mip[0], 0, 0, lfm);
+ mip2 = mip + mode_info_stride * 4;
+ if (4 >= max_rows)
+ break;
+ build_masks(lfi_n, mip2[0], 32, 8, lfm);
+ break;
+ case BLOCK_32X64:
+ build_masks(lfi_n, mip[0], 0, 0, lfm);
+ mip2 = mip + 4;
+ if (4 >= max_cols)
+ break;
+ build_masks(lfi_n, mip2[0], 4, 2, lfm);
+ break;
+ default:
+ for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
+ const int shift_y = shift_32_y[idx_32];
+ const int shift_uv = shift_32_uv[idx_32];
+ const int mi_32_col_offset = ((idx_32 & 1) << 2);
+ const int mi_32_row_offset = ((idx_32 >> 1) << 2);
+ if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
+ continue;
+ switch (mip[0]->mbmi.sb_type) {
+ case BLOCK_32X32:
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+ break;
+ case BLOCK_32X16:
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+ if (mi_32_row_offset + 2 >= max_rows)
+ continue;
+ mip2 = mip + mode_info_stride * 2;
+ build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm);
+ break;
+ case BLOCK_16X32:
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+ if (mi_32_col_offset + 2 >= max_cols)
+ continue;
+ mip2 = mip + 2;
+ build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm);
+ break;
+ default:
+ for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
+ const int shift_y = shift_32_y[idx_32] + shift_16_y[idx_16];
+ const int shift_uv = shift_32_uv[idx_32] + shift_16_uv[idx_16];
+ const int mi_16_col_offset = mi_32_col_offset +
+ ((idx_16 & 1) << 1);
+ const int mi_16_row_offset = mi_32_row_offset +
+ ((idx_16 >> 1) << 1);
+
+ if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
+ continue;
+
+ switch (mip[0]->mbmi.sb_type) {
+ case BLOCK_16X16:
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+ break;
+ case BLOCK_16X8:
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+ if (mi_16_row_offset + 1 >= max_rows)
+ continue;
+ mip2 = mip + mode_info_stride;
+                  build_y_mask(lfi_n, mip2[0], shift_y + 8, lfm);
+ break;
+ case BLOCK_8X16:
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+                  if (mi_16_col_offset + 1 >= max_cols)
+ continue;
+ mip2 = mip + 1;
+                  build_y_mask(lfi_n, mip2[0], shift_y + 1, lfm);
+ break;
+ default: {
+ const int shift_y = shift_32_y[idx_32] +
+ shift_16_y[idx_16] +
+ shift_8_y[0];
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+ mip += offset[0];
+ for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
+ const int shift_y = shift_32_y[idx_32] +
+ shift_16_y[idx_16] +
+ shift_8_y[idx_8];
+ const int mi_8_col_offset = mi_16_col_offset +
+ ((idx_8 & 1));
+ const int mi_8_row_offset = mi_16_row_offset +
+ ((idx_8 >> 1));
+
+ if (mi_8_col_offset >= max_cols ||
+ mi_8_row_offset >= max_rows)
+ continue;
+ build_y_mask(lfi_n, mip[0], shift_y, lfm);
+ }
+ break;
+ }
+ }
+ }
+ break;
+ }
+ }
+ break;
+ }
+  // The largest loopfilter we have is 16x16 so we use the 16x16 mask
+  // for 32x32 transforms as well.
+ lfm->left_y[TX_16X16] |= lfm->left_y[TX_32X32];
+ lfm->above_y[TX_16X16] |= lfm->above_y[TX_32X32];
+ lfm->left_uv[TX_16X16] |= lfm->left_uv[TX_32X32];
+ lfm->above_uv[TX_16X16] |= lfm->above_uv[TX_32X32];
+
+  // We apply at least an 8-tap filter on every 32x32 boundary even if the
+  // transform size is 4x4. So if the 4x4 bit is set on a border pixel, add
+  // it to the 8x8 mask and remove it from the 4x4 mask.
+ lfm->left_y[TX_8X8] |= lfm->left_y[TX_4X4] & left_border;
+ lfm->left_y[TX_4X4] &= ~left_border;
+ lfm->above_y[TX_8X8] |= lfm->above_y[TX_4X4] & above_border;
+ lfm->above_y[TX_4X4] &= ~above_border;
+ lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_4X4] & left_border_uv;
+ lfm->left_uv[TX_4X4] &= ~left_border_uv;
+ lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_4X4] & above_border_uv;
+ lfm->above_uv[TX_4X4] &= ~above_border_uv;
+
+ // We do some special edge handling.
+ if (mi_row + MI_BLOCK_SIZE > cm->mi_rows) {
+ const uint64_t rows = cm->mi_rows - mi_row;
+
+    // Each pixel inside the border gets a 1.
+ const uint64_t mask_y = (((uint64_t) 1 << (rows << 3)) - 1);
+ const uint16_t mask_uv = (((uint16_t) 1 << (((rows + 1) >> 1) << 2)) - 1);
+
+ // Remove values completely outside our border.
+ for (i = 0; i < TX_32X32; i++) {
+ lfm->left_y[i] &= mask_y;
+ lfm->above_y[i] &= mask_y;
+ lfm->left_uv[i] &= mask_uv;
+ lfm->above_uv[i] &= mask_uv;
+ }
+ lfm->int_4x4_y &= mask_y;
+ lfm->int_4x4_uv &= mask_uv;
+
+    // We don't apply a wide loop filter on the last uv block row. If it is
+    // set, apply the shorter one instead.
+ if (rows == 1) {
+ lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16];
+ lfm->above_uv[TX_16X16] = 0;
+ }
+ if (rows == 5) {
+ lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16] & 0xff00;
+ lfm->above_uv[TX_16X16] &= ~(lfm->above_uv[TX_16X16] & 0xff00);
+ }
+ }
+
+ if (mi_col + MI_BLOCK_SIZE > cm->mi_cols) {
+ const uint64_t columns = cm->mi_cols - mi_col;
+
+    // Each pixel inside the border gets a 1; the multiply copies the border
+    // to where we need it.
+ const uint64_t mask_y = (((1 << columns) - 1)) * 0x0101010101010101;
+ const uint16_t mask_uv = ((1 << ((columns + 1) >> 1)) - 1) * 0x1111;
+
+    // Internal edges are not applied on the last column of the image, so
+    // we mask one extra column for the internal edges.
+ const uint16_t mask_uv_int = ((1 << (columns >> 1)) - 1) * 0x1111;
+
+ // Remove the bits outside the image edge.
+ for (i = 0; i < TX_32X32; i++) {
+ lfm->left_y[i] &= mask_y;
+ lfm->above_y[i] &= mask_y;
+ lfm->left_uv[i] &= mask_uv;
+ lfm->above_uv[i] &= mask_uv;
+ }
+ lfm->int_4x4_y &= mask_y;
+ lfm->int_4x4_uv &= mask_uv_int;
+
+    // We don't apply a wide loop filter on the last uv column. If it is
+    // set, apply the shorter one instead.
+ if (columns == 1) {
+ lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_16X16];
+ lfm->left_uv[TX_16X16] = 0;
+ }
+ if (columns == 5) {
+ lfm->left_uv[TX_8X8] |= (lfm->left_uv[TX_16X16] & 0xcccc);
+ lfm->left_uv[TX_16X16] &= ~(lfm->left_uv[TX_16X16] & 0xcccc);
+ }
+ }
+  // We don't apply a loop filter on the first column in the image.
+  // Mask that out.
+ if (mi_col == 0) {
+ for (i = 0; i < TX_32X32; i++) {
+ lfm->left_y[i] &= 0xfefefefefefefefe;
+ lfm->left_uv[i] &= 0xeeee;
+ }
+ }
+}
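+
+// Editor's worked example of the offset walk above: starting from the
+// top-left 32x32 block, the four offset_32 steps visit top-right,
+// bottom-left and bottom-right, and then land back on the start, since
+//   4 + (4 * stride - 4) + 4 + (-(4 * stride) - 4) == 0.
+// offset_16 and offset perform the same Z-order walk at 16x16 and 8x8
+// granularity.
+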
+#if CONFIG_NON420
+static void filter_block_plane_non420(VP9_COMMON *cm,
+ struct macroblockd_plane *plane,
+ MODE_INFO **mi_8x8,
+ int mi_row, int mi_col) {
+ const int ss_x = plane->subsampling_x;
+ const int ss_y = plane->subsampling_y;
+ const int row_step = 1 << ss_x;
+ const int col_step = 1 << ss_y;
+ const int row_step_stride = cm->mode_info_stride * row_step;
+ struct buf_2d *const dst = &plane->dst;
+ uint8_t* const dst0 = dst->buf;
+ unsigned int mask_16x16[MI_BLOCK_SIZE] = {0};
+ unsigned int mask_8x8[MI_BLOCK_SIZE] = {0};
+ unsigned int mask_4x4[MI_BLOCK_SIZE] = {0};
+ unsigned int mask_4x4_int[MI_BLOCK_SIZE] = {0};
+ struct loop_filter_info lfi[MI_BLOCK_SIZE][MI_BLOCK_SIZE];
+ int r, c;
+
+ for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
+ unsigned int mask_16x16_c = 0;
+ unsigned int mask_8x8_c = 0;
+ unsigned int mask_4x4_c = 0;
+ unsigned int border_mask;
+
+ // Determine the vertical edges that need filtering
+ for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
+ const MODE_INFO *mi = mi_8x8[c];
+ const int skip_this = mi[0].mbmi.skip_coeff
+ && is_inter_block(&mi[0].mbmi);
+ // left edge of current unit is block/partition edge -> no skip
+ const int block_edge_left = b_width_log2(mi[0].mbmi.sb_type) ?
+ !(c & ((1 << (b_width_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1;
+ const int skip_this_c = skip_this && !block_edge_left;
+ // top edge of current unit is block/partition edge -> no skip
+ const int block_edge_above = b_height_log2(mi[0].mbmi.sb_type) ?
+ !(r & ((1 << (b_height_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1;
+ const int skip_this_r = skip_this && !block_edge_above;
+ const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV)
+ ? get_uv_tx_size(&mi[0].mbmi)
+ : mi[0].mbmi.tx_size;
+ const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
+ const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
+
+ // Filter level can vary per MI
+ if (!build_lfi(&cm->lf_info, &mi[0].mbmi, lfi[r] + (c >> ss_x)))
+ continue;
+
+ // Build masks based on the transform size of each block
+ if (tx_size == TX_32X32) {
+ if (!skip_this_c && ((c >> ss_x) & 3) == 0) {
+ if (!skip_border_4x4_c)
+ mask_16x16_c |= 1 << (c >> ss_x);
+ else
+ mask_8x8_c |= 1 << (c >> ss_x);
+ }
+ if (!skip_this_r && ((r >> ss_y) & 3) == 0) {
+ if (!skip_border_4x4_r)
+ mask_16x16[r] |= 1 << (c >> ss_x);
+ else
+ mask_8x8[r] |= 1 << (c >> ss_x);
+ }
+ } else if (tx_size == TX_16X16) {
+ if (!skip_this_c && ((c >> ss_x) & 1) == 0) {
+ if (!skip_border_4x4_c)
+ mask_16x16_c |= 1 << (c >> ss_x);
+ else
+ mask_8x8_c |= 1 << (c >> ss_x);
+ }
+ if (!skip_this_r && ((r >> ss_y) & 1) == 0) {
+ if (!skip_border_4x4_r)
+ mask_16x16[r] |= 1 << (c >> ss_x);
+ else
+ mask_8x8[r] |= 1 << (c >> ss_x);
+ }
+ } else {
+ // force 8x8 filtering on 32x32 boundaries
+ if (!skip_this_c) {
+ if (tx_size == TX_8X8 || ((c >> ss_x) & 3) == 0)
+ mask_8x8_c |= 1 << (c >> ss_x);
+ else
+ mask_4x4_c |= 1 << (c >> ss_x);
+ }
+
+ if (!skip_this_r) {
+ if (tx_size == TX_8X8 || ((r >> ss_y) & 3) == 0)
+ mask_8x8[r] |= 1 << (c >> ss_x);
+ else
+ mask_4x4[r] |= 1 << (c >> ss_x);
+ }
+
+ if (!skip_this && tx_size < TX_8X8 && !skip_border_4x4_c)
+ mask_4x4_int[r] |= 1 << (c >> ss_x);
+ }
+ }
+
+ // Disable filtering on the leftmost column
+ border_mask = ~(mi_col == 0);
+ filter_selectively_vert(dst->buf, dst->stride,
+ mask_16x16_c & border_mask,
+ mask_8x8_c & border_mask,
+ mask_4x4_c & border_mask,
+ mask_4x4_int[r], lfi[r]);
+ dst->buf += 8 * dst->stride;
+ mi_8x8 += row_step_stride;
+ }
+
+ // Now do horizontal pass
+ dst->buf = dst0;
+ for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
+ const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
+ const unsigned int mask_4x4_int_r = skip_border_4x4_r ? 0 : mask_4x4_int[r];
+
+ filter_selectively_horiz(dst->buf, dst->stride,
+ mask_16x16[r],
+ mask_8x8[r],
+ mask_4x4[r],
+ mask_4x4_int_r, mi_row + r == 0, lfi[r]);
+ dst->buf += 8 * dst->stride;
+ }
+}
+#endif
+
+static void filter_block_plane(VP9_COMMON *const cm,
+ struct macroblockd_plane *const plane,
+ MODE_INFO **mi_8x8,
+ int mi_row, int mi_col,
+ LOOP_FILTER_MASK *lfm) {
+ const int ss_x = plane->subsampling_x;
+ const int ss_y = plane->subsampling_y;
+ const int row_step = 1 << ss_x;
+ const int col_step = 1 << ss_y;
+ const int row_step_stride = cm->mode_info_stride * row_step;
+ struct buf_2d *const dst = &plane->dst;
+ uint8_t* const dst0 = dst->buf;
+ unsigned int mask_4x4_int[MI_BLOCK_SIZE] = {0};
+ struct loop_filter_info lfi[MI_BLOCK_SIZE][MI_BLOCK_SIZE];
+ int r, c;
+ int row_shift = 3 - ss_x;
+ int row_mask = 0xff >> (ss_x << 2);
+
+#define MASK_ROW(value) ((value >> (r_sampled << row_shift)) & row_mask)
+
+ for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
+ int r_sampled = r >> ss_x;
+
+ // Determine the vertical edges that need filtering
+ for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
+ const MODE_INFO *mi = mi_8x8[c];
+ if (!build_lfi(&cm->lf_info, &mi[0].mbmi, lfi[r] + (c >> ss_x)))
+ continue;
+ }
+ if (!plane->plane_type) {
+ mask_4x4_int[r] = MASK_ROW(lfm->int_4x4_y);
+      // The leftmost column has already been masked out in setup_mask().
+ filter_selectively_vert(dst->buf, dst->stride,
+ MASK_ROW(lfm->left_y[TX_16X16]),
+ MASK_ROW(lfm->left_y[TX_8X8]),
+ MASK_ROW(lfm->left_y[TX_4X4]),
+ MASK_ROW(lfm->int_4x4_y),
+ lfi[r]);
+ } else {
+ mask_4x4_int[r] = MASK_ROW(lfm->int_4x4_uv);
+      // The leftmost column has already been masked out in setup_mask().
+ filter_selectively_vert(dst->buf, dst->stride,
+ MASK_ROW(lfm->left_uv[TX_16X16]),
+ MASK_ROW(lfm->left_uv[TX_8X8]),
+ MASK_ROW(lfm->left_uv[TX_4X4]),
+ MASK_ROW(lfm->int_4x4_uv),
+ lfi[r]);
+ }
+ dst->buf += 8 * dst->stride;
+ mi_8x8 += row_step_stride;
+ }
+
+ // Now do horizontal pass
+ dst->buf = dst0;
+ for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
+ const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
+ const unsigned int mask_4x4_int_r = skip_border_4x4_r ? 0 : mask_4x4_int[r];
+ int r_sampled = r >> ss_x;
+
+ if (!plane->plane_type) {
+ filter_selectively_horiz(dst->buf, dst->stride,
+ MASK_ROW(lfm->above_y[TX_16X16]),
+ MASK_ROW(lfm->above_y[TX_8X8]),
+ MASK_ROW(lfm->above_y[TX_4X4]),
+ MASK_ROW(lfm->int_4x4_y),
+ mi_row + r == 0, lfi[r]);
+ } else {
+ filter_selectively_horiz(dst->buf, dst->stride,
+ MASK_ROW(lfm->above_uv[TX_16X16]),
+ MASK_ROW(lfm->above_uv[TX_8X8]),
+ MASK_ROW(lfm->above_uv[TX_4X4]),
+ mask_4x4_int_r,
+ mi_row + r == 0, lfi[r]);
+ }
+ dst->buf += 8 * dst->stride;
+ }
+#undef MASK_ROW
+}
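+
+// A small sketch (editor's illustration; helper name hypothetical, compiled
+// out) of what MASK_ROW extracts: for luma (ss_x == 0) it is byte r of the
+// 64-bit mask, i.e. the 8 edges of mi row r; for 4:2:0 chroma (ss_x == 1)
+// it is nibble r of the 16-bit mask, i.e. the 4 edges of the subsampled row.
+#if 0
+static INLINE unsigned int mask_row_sketch(uint64_t value, int r_sampled,
+                                           int ss_x) {
+  const int row_shift = 3 - ss_x;                     // 8 or 4 bits per row
+  const unsigned int row_mask = 0xff >> (ss_x << 2);  // 0xff or 0x0f
+  return (unsigned int)(value >> (r_sampled << row_shift)) & row_mask;
+}
+#endif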
+
+void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer,
+ VP9_COMMON *cm, MACROBLOCKD *xd,
+ int start, int stop, int y_only) {
+ const int num_planes = y_only ? 1 : MAX_MB_PLANE;
+ int mi_row, mi_col;
+ LOOP_FILTER_MASK lfm;
+#if CONFIG_NON420
+ int use_420 = y_only || (xd->plane[1].subsampling_y == 1 &&
+ xd->plane[1].subsampling_x == 1);
+#endif
+
+ for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) {
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible + mi_row * cm->mode_info_stride;
+
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
+ int plane;
+
+ setup_dst_planes(xd, frame_buffer, mi_row, mi_col);
+
+ // TODO(JBB): Make setup_mask work for non 420.
+#if CONFIG_NON420
+ if (use_420)
+#endif
+ setup_mask(cm, mi_row, mi_col, mi_8x8 + mi_col, cm->mode_info_stride,
+ &lfm);
+
+ for (plane = 0; plane < num_planes; ++plane) {
+#if CONFIG_NON420
+ if (use_420)
+#endif
+ filter_block_plane(cm, &xd->plane[plane], mi_8x8 + mi_col, mi_row,
+ mi_col, &lfm);
+#if CONFIG_NON420
+ else
+ filter_block_plane_non420(cm, &xd->plane[plane], mi_8x8 + mi_col,
+ mi_row, mi_col);
+#endif
+ }
+ }
+ }
+}
+
+void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int frame_filter_level,
+ int y_only, int partial) {
+ int start_mi_row, end_mi_row, mi_rows_to_filter;
+ if (!frame_filter_level) return;
+ start_mi_row = 0;
+ mi_rows_to_filter = cm->mi_rows;
+ if (partial && cm->mi_rows > 8) {
+ start_mi_row = cm->mi_rows >> 1;
+ start_mi_row &= 0xfffffff8;
+ mi_rows_to_filter = MAX(cm->mi_rows / 8, 8);
+ }
+ end_mi_row = start_mi_row + mi_rows_to_filter;
+ vp9_loop_filter_frame_init(cm, frame_filter_level);
+ vp9_loop_filter_rows(cm->frame_to_show, cm, xd,
+ start_mi_row, end_mi_row,
+ y_only);
+}
+
+int vp9_loop_filter_worker(void *arg1, void *arg2) {
+ LFWorkerData *const lf_data = (LFWorkerData*)arg1;
+ (void)arg2;
+ vp9_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, &lf_data->xd,
+ lf_data->start, lf_data->stop, lf_data->y_only);
+ return 1;
+}
diff --git a/libvpx/vp9/common/vp9_loopfilter.h b/libvpx/vp9/common/vp9_loopfilter.h
new file mode 100644
index 0000000..91d40ac
--- /dev/null
+++ b/libvpx/vp9/common/vp9_loopfilter.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_LOOPFILTER_H_
+#define VP9_COMMON_VP9_LOOPFILTER_H_
+
+#include "vpx_ports/mem.h"
+#include "vpx_config.h"
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_seg_common.h"
+
+#define MAX_LOOP_FILTER 63
+#define MAX_SHARPNESS 7
+
+#define SIMD_WIDTH 16
+
+#define MAX_REF_LF_DELTAS 4
+#define MAX_MODE_LF_DELTAS 2
+
+struct loopfilter {
+ int filter_level;
+
+ int sharpness_level;
+ int last_sharpness_level;
+
+ uint8_t mode_ref_delta_enabled;
+ uint8_t mode_ref_delta_update;
+
+ // 0 = Intra, Last, GF, ARF
+ signed char ref_deltas[MAX_REF_LF_DELTAS];
+ signed char last_ref_deltas[MAX_REF_LF_DELTAS];
+
+ // 0 = ZERO_MV, MV
+ signed char mode_deltas[MAX_MODE_LF_DELTAS];
+ signed char last_mode_deltas[MAX_MODE_LF_DELTAS];
+};
+
+// Need to align this structure so when it is declared and
+// passed it can be loaded into vector registers.
+typedef struct {
+ DECLARE_ALIGNED(SIMD_WIDTH, uint8_t,
+ mblim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, uint8_t,
+ lim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, uint8_t,
+ hev_thr[4][SIMD_WIDTH]);
+ uint8_t lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS];
+ uint8_t mode_lf_lut[MB_MODE_COUNT];
+} loop_filter_info_n;
+
+/* assorted loopfilter functions which get used elsewhere */
+struct VP9Common;
+struct macroblockd;
+
+void vp9_loop_filter_init(struct VP9Common *cm);
+
+// Update the loop filter for the current frame.
+// This should be called before vp9_loop_filter_rows();
+// vp9_loop_filter_frame() calls this function directly.
+void vp9_loop_filter_frame_init(struct VP9Common *cm, int default_filt_lvl);
+
+void vp9_loop_filter_frame(struct VP9Common *cm,
+ struct macroblockd *mbd,
+ int filter_level,
+ int y_only, int partial);
+
+// Apply the loop filter to [start, stop) macro block rows in frame_buffer.
+void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer,
+ struct VP9Common *cm, struct macroblockd *xd,
+ int start, int stop, int y_only);
+
+typedef struct LoopFilterWorkerData {
+ const YV12_BUFFER_CONFIG *frame_buffer;
+ struct VP9Common *cm;
+ struct macroblockd xd; // TODO(jzern): most of this is unnecessary to the
+ // loopfilter. the planes are necessary as their state
+ // is changed during decode.
+ int start;
+ int stop;
+ int y_only;
+} LFWorkerData;
+
+// Operates on the rows described by LFWorkerData passed as 'arg1'.
+int vp9_loop_filter_worker(void *arg1, void *arg2);
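+
+// Editor's sketch of how a caller might drive the worker hook above,
+// assuming cm, xd and frame are already initialized (the threading layer
+// that would normally invoke it is outside this header):
+//
+//   LFWorkerData lf_data = { frame, cm, *xd, 0, cm->mi_rows, 1 /*y_only*/ };
+//   vp9_loop_filter_worker(&lf_data, NULL);
+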
+#endif // VP9_COMMON_VP9_LOOPFILTER_H_
diff --git a/libvpx/vp9/common/vp9_loopfilter_filters.c b/libvpx/vp9/common/vp9_loopfilter_filters.c
new file mode 100644
index 0000000..88130d8
--- /dev/null
+++ b/libvpx/vp9/common/vp9_loopfilter_filters.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+static INLINE int8_t signed_char_clamp(int t) {
+ return (int8_t)clamp(t, -128, 127);
+}
+
+// should we apply any filter at all: 11111111 yes, 00000000 no
+static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit,
+ uint8_t p3, uint8_t p2,
+ uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1,
+ uint8_t q2, uint8_t q3) {
+ int8_t mask = 0;
+ mask |= (abs(p3 - p2) > limit) * -1;
+ mask |= (abs(p2 - p1) > limit) * -1;
+ mask |= (abs(p1 - p0) > limit) * -1;
+ mask |= (abs(q1 - q0) > limit) * -1;
+ mask |= (abs(q2 - q1) > limit) * -1;
+ mask |= (abs(q3 - q2) > limit) * -1;
+ mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+ return ~mask;
+}
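+
+// Editor's note on the (cond) * -1 idiom above: multiplying a 0/1
+// comparison result by -1 yields 0x00 or 0xff in an int8_t, so after the
+// final complement `mask` is all ones ("apply the filter") only when every
+// tap difference is within its limit. A hypothetical one-liner of the
+// idiom, compiled out:
+#if 0
+static INLINE int8_t all_set_if_sketch(int cond) {
+  return (int8_t)(cond * -1);  // 0 -> 0x00, 1 -> 0xff
+}
+#endif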
+
+static INLINE int8_t flat_mask4(uint8_t thresh,
+ uint8_t p3, uint8_t p2,
+ uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1,
+ uint8_t q2, uint8_t q3) {
+ int8_t mask = 0;
+ mask |= (abs(p1 - p0) > thresh) * -1;
+ mask |= (abs(q1 - q0) > thresh) * -1;
+ mask |= (abs(p2 - p0) > thresh) * -1;
+ mask |= (abs(q2 - q0) > thresh) * -1;
+ mask |= (abs(p3 - p0) > thresh) * -1;
+ mask |= (abs(q3 - q0) > thresh) * -1;
+ return ~mask;
+}
+
+static INLINE int8_t flat_mask5(uint8_t thresh,
+ uint8_t p4, uint8_t p3,
+ uint8_t p2, uint8_t p1,
+ uint8_t p0, uint8_t q0,
+ uint8_t q1, uint8_t q2,
+ uint8_t q3, uint8_t q4) {
+ int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3);
+ mask |= (abs(p4 - p0) > thresh) * -1;
+ mask |= (abs(q4 - q0) > thresh) * -1;
+ return ~mask;
+}
+
+// is there high edge variance at the internal edge: 11111111 yes, 00000000 no
+static INLINE int8_t hev_mask(uint8_t thresh, uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1) {
+ int8_t hev = 0;
+ hev |= (abs(p1 - p0) > thresh) * -1;
+ hev |= (abs(q1 - q0) > thresh) * -1;
+ return hev;
+}
+
+static INLINE void filter4(int8_t mask, uint8_t hev, uint8_t *op1,
+ uint8_t *op0, uint8_t *oq0, uint8_t *oq1) {
+ int8_t filter1, filter2;
+
+ const int8_t ps1 = (int8_t) *op1 ^ 0x80;
+ const int8_t ps0 = (int8_t) *op0 ^ 0x80;
+ const int8_t qs0 = (int8_t) *oq0 ^ 0x80;
+ const int8_t qs1 = (int8_t) *oq1 ^ 0x80;
+
+ // add outer taps if we have high edge variance
+ int8_t filter = signed_char_clamp(ps1 - qs1) & hev;
+
+ // inner taps
+ filter = signed_char_clamp(filter + 3 * (qs0 - ps0)) & mask;
+
+  // save bottom 3 bits so that we round one side +4 and the other +3;
+  // if it equals 4 we'll set it to adjust by -1 to account for the fact
+  // we'd round it by 3 the other way
+ filter1 = signed_char_clamp(filter + 4) >> 3;
+ filter2 = signed_char_clamp(filter + 3) >> 3;
+
+ *oq0 = signed_char_clamp(qs0 - filter1) ^ 0x80;
+ *op0 = signed_char_clamp(ps0 + filter2) ^ 0x80;
+
+ // outer tap adjustments
+ filter = ROUND_POWER_OF_TWO(filter1, 1) & ~hev;
+
+ *oq1 = signed_char_clamp(qs1 - filter) ^ 0x80;
+ *op1 = signed_char_clamp(ps1 + filter) ^ 0x80;
+}
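+
+// Worked example (editor's note) of the +4/+3 rounding in filter4(): for
+// filter == 4, filter1 == (4 + 4) >> 3 == 1 while filter2 == (4 + 3) >> 3
+// == 0, so q0 moves by one more than p0; the ROUND_POWER_OF_TWO(filter1, 1)
+// outer-tap adjustment then applies that remaining step to p1/q1 when hev
+// is 0.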
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int p /* pitch */,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
+ const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1);
+ filter4(mask, hev, s - 2 * p, s - 1 * p, s, s + 1 * p);
+ ++s;
+ }
+}
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
+ const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1);
+ filter4(mask, hev, s - 2, s - 1, s, s + 1);
+ s += pitch;
+ }
+}
+
+static INLINE void filter8(int8_t mask, uint8_t hev, uint8_t flat,
+ uint8_t *op3, uint8_t *op2,
+ uint8_t *op1, uint8_t *op0,
+ uint8_t *oq0, uint8_t *oq1,
+ uint8_t *oq2, uint8_t *oq3) {
+ if (flat && mask) {
+ const uint8_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
+ const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3;
+
+ // 7-tap filter [1, 1, 1, 2, 1, 1, 1]
+ *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0, 3);
+ *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + 2 * p1 + p0 + q0 + q1, 3);
+ *op0 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2, 3);
+ *oq0 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3, 3);
+ *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3, 3);
+ *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3, 3);
+ } else {
+ filter4(mask, hev, op1, op0, oq0, oq1);
+ }
+}
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int p,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
+ const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1);
+ const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ filter8(mask, hev, flat, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
+ s, s + 1 * p, s + 2 * p, s + 3 * p);
+ ++s;
+ }
+}
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
+ const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hev_mask(thresh[0], p1, p0, q0, q1);
+ const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ filter8(mask, hev, flat, s - 4, s - 3, s - 2, s - 1,
+ s, s + 1, s + 2, s + 3);
+ s += pitch;
+ }
+}
+
+static INLINE void filter16(int8_t mask, uint8_t hev,
+ uint8_t flat, uint8_t flat2,
+ uint8_t *op7, uint8_t *op6,
+ uint8_t *op5, uint8_t *op4,
+ uint8_t *op3, uint8_t *op2,
+ uint8_t *op1, uint8_t *op0,
+ uint8_t *oq0, uint8_t *oq1,
+ uint8_t *oq2, uint8_t *oq3,
+ uint8_t *oq4, uint8_t *oq5,
+ uint8_t *oq6, uint8_t *oq7) {
+ if (flat2 && flat && mask) {
+ const uint8_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4,
+ p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
+
+ const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3,
+ q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7;
+
+ // 15-tap filter [1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1]
+ *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 +
+ q0, 4);
+ *op5 = ROUND_POWER_OF_TWO(p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 +
+ q0 + q1, 4);
+ *op4 = ROUND_POWER_OF_TWO(p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 +
+ q0 + q1 + q2, 4);
+ *op3 = ROUND_POWER_OF_TWO(p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 +
+ q0 + q1 + q2 + q3, 4);
+ *op2 = ROUND_POWER_OF_TWO(p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 +
+ q0 + q1 + q2 + q3 + q4, 4);
+ *op1 = ROUND_POWER_OF_TWO(p7 * 2 + p6 + p5 + p4 + p3 + p2 + p1 * 2 + p0 +
+ q0 + q1 + q2 + q3 + q4 + q5, 4);
+ *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
+ q0 + q1 + q2 + q3 + q4 + q5 + q6, 4);
+ *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 +
+ q0 * 2 + q1 + q2 + q3 + q4 + q5 + q6 + q7, 4);
+ *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 +
+ q0 + q1 * 2 + q2 + q3 + q4 + q5 + q6 + q7 * 2, 4);
+ *oq2 = ROUND_POWER_OF_TWO(p4 + p3 + p2 + p1 + p0 +
+ q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3, 4);
+ *oq3 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 +
+ q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4);
+ *oq4 = ROUND_POWER_OF_TWO(p2 + p1 + p0 +
+ q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4);
+ *oq5 = ROUND_POWER_OF_TWO(p1 + p0 +
+ q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4);
+ *oq6 = ROUND_POWER_OF_TWO(p0 +
+ q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4);
+ } else {
+ filter8(mask, hev, flat, op3, op2, op1, op0, oq0, oq1, oq2, oq3);
+ }
+}
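+
+// Editor's note: in both filter8() and filter16() the tap weights sum to a
+// power of two (8 and 16 respectively), so ROUND_POWER_OF_TWO(sum, 3) and
+// ROUND_POWER_OF_TWO(sum, 4) compute correctly rounded weighted averages.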
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int p,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
+ const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1);
+ const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat2 = flat_mask5(1,
+ s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0,
+ q0, s[4 * p], s[5 * p], s[6 * p], s[7 * p]);
+
+ filter16(mask, hev, flat, flat2,
+ s - 8 * p, s - 7 * p, s - 6 * p, s - 5 * p,
+ s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
+ s, s + 1 * p, s + 2 * p, s + 3 * p,
+ s + 4 * p, s + 5 * p, s + 6 * p, s + 7 * p);
+ ++s;
+ }
+}
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int p,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh) {
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
+ const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hev_mask(*thresh, p1, p0, q0, q1);
+ const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat2 = flat_mask5(1, s[-8], s[-7], s[-6], s[-5], p0,
+ q0, s[4], s[5], s[6], s[7]);
+
+ filter16(mask, hev, flat, flat2,
+ s - 8, s - 7, s - 6, s - 5, s - 4, s - 3, s - 2, s - 1,
+ s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6, s + 7);
+ s += p;
+ }
+}
diff --git a/libvpx/vp9/common/vp9_mv.h b/libvpx/vp9/common/vp9_mv.h
new file mode 100644
index 0000000..31a79b9
--- /dev/null
+++ b/libvpx/vp9/common/vp9_mv.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_MV_H_
+#define VP9_COMMON_VP9_MV_H_
+
+#include "vpx/vpx_integer.h"
+
+#include "vp9/common/vp9_common.h"
+
+typedef struct {
+ int16_t row;
+ int16_t col;
+} MV;
+
+typedef union int_mv {
+ uint32_t as_int;
+ MV as_mv;
+} int_mv; /* facilitates faster equality tests and copies */
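+
+// Editor's note: the as_int alias above is what makes the common
+// "same mv?" test a single integer compare, e.g.
+//   if (a.as_int == b.as_int) { /* identical row and col */ }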
+
+typedef struct {
+ int32_t row;
+ int32_t col;
+} MV32;
+
+static void clamp_mv(MV *mv, int min_col, int max_col,
+ int min_row, int max_row) {
+ mv->col = clamp(mv->col, min_col, max_col);
+ mv->row = clamp(mv->row, min_row, max_row);
+}
+
+#endif // VP9_COMMON_VP9_MV_H_
diff --git a/libvpx/vp9/common/vp9_mvref_common.c b/libvpx/vp9/common/vp9_mvref_common.c
new file mode 100644
index 0000000..bfeeb57
--- /dev/null
+++ b/libvpx/vp9/common/vp9_mvref_common.c
@@ -0,0 +1,286 @@
+
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_mvref_common.h"
+
+#define MVREF_NEIGHBOURS 8
+
+typedef enum {
+ BOTH_ZERO = 0,
+ ZERO_PLUS_PREDICTED = 1,
+ BOTH_PREDICTED = 2,
+ NEW_PLUS_NON_INTRA = 3,
+ BOTH_NEW = 4,
+ INTRA_PLUS_NON_INTRA = 5,
+ BOTH_INTRA = 6,
+ INVALID_CASE = 9
+} motion_vector_context;
+
+// This is used to figure out a context for the ref blocks. The code flattens
+// an array that would have 3 possible counts (0, 1 and 2) for 3 choices by
+// adding 9 for each intra block, 3 for each zero mv and 1 for each new
+// motion vector. This single number is then converted into a context
+// with a single lookup (counter_to_context).
+static const int mode_2_counter[MB_MODE_COUNT] = {
+ 9, // DC_PRED
+ 9, // V_PRED
+ 9, // H_PRED
+ 9, // D45_PRED
+ 9, // D135_PRED
+ 9, // D117_PRED
+ 9, // D153_PRED
+ 9, // D207_PRED
+ 9, // D63_PRED
+ 9, // TM_PRED
+ 0, // NEARESTMV
+ 0, // NEARMV
+ 3, // ZEROMV
+ 1, // NEWMV
+};
+
+// There are 3^3 different combinations of 3 counts that can be either 0, 1
+// or 2. However, the actual count can never be greater than 2, so the
+// highest counter we need is 18; 9 is an invalid counter that's never used.
+static const int counter_to_context[19] = {
+ BOTH_PREDICTED, // 0
+ NEW_PLUS_NON_INTRA, // 1
+ BOTH_NEW, // 2
+ ZERO_PLUS_PREDICTED, // 3
+ NEW_PLUS_NON_INTRA, // 4
+ INVALID_CASE, // 5
+ BOTH_ZERO, // 6
+ INVALID_CASE, // 7
+ INVALID_CASE, // 8
+ INTRA_PLUS_NON_INTRA, // 9
+ INTRA_PLUS_NON_INTRA, // 10
+ INVALID_CASE, // 11
+ INTRA_PLUS_NON_INTRA, // 12
+ INVALID_CASE, // 13
+ INVALID_CASE, // 14
+ INVALID_CASE, // 15
+ INVALID_CASE, // 16
+ INVALID_CASE, // 17
+ BOTH_INTRA // 18
+};
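+
+// Worked example (editor's note): two NEWMV neighbours contribute
+// 1 + 1 == 2 -> BOTH_NEW, while one intra neighbour plus one ZEROMV
+// neighbour contributes 9 + 3 == 12 -> INTRA_PLUS_NON_INTRA, matching the
+// two tables above.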
+
+static const MV mv_ref_blocks[BLOCK_SIZES][MVREF_NEIGHBOURS] = {
+ // 4X4
+ {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}},
+ // 4X8
+ {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}},
+ // 8X4
+ {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}},
+ // 8X8
+ {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}},
+ // 8X16
+ {{0, -1}, {-1, 0}, {1, -1}, {-1, -1}, {0, -2}, {-2, 0}, {-2, -1}, {-1, -2}},
+ // 16X8
+ {{-1, 0}, {0, -1}, {-1, 1}, {-1, -1}, {-2, 0}, {0, -2}, {-1, -2}, {-2, -1}},
+ // 16X16
+ {{-1, 0}, {0, -1}, {-1, 1}, {1, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-3, -3}},
+ // 16X32
+ {{0, -1}, {-1, 0}, {2, -1}, {-1, -1}, {-1, 1}, {0, -3}, {-3, 0}, {-3, -3}},
+ // 32X16
+ {{-1, 0}, {0, -1}, {-1, 2}, {-1, -1}, {1, -1}, {-3, 0}, {0, -3}, {-3, -3}},
+ // 32X32
+ {{-1, 1}, {1, -1}, {-1, 2}, {2, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-3, -3}},
+ // 32X64
+ {{0, -1}, {-1, 0}, {4, -1}, {-1, 2}, {-1, -1}, {0, -3}, {-3, 0}, {2, -1}},
+ // 64X32
+ {{-1, 0}, {0, -1}, {-1, 4}, {2, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-1, 2}},
+ // 64X64
+ {{-1, 3}, {3, -1}, {-1, 4}, {4, -1}, {-1, -1}, {-1, 0}, {0, -1}, {-1, 6}}
+};
+
+static const int idx_n_column_to_subblock[4][2] = {
+ {1, 2},
+ {1, 3},
+ {3, 2},
+ {3, 3}
+};
+
+// clamp_mv_ref
+#define MV_BORDER (16 << 3) // Allow 16 pels in 1/8th pel units
+
+static void clamp_mv_ref(MV *mv, const MACROBLOCKD *xd) {
+ clamp_mv(mv, xd->mb_to_left_edge - MV_BORDER,
+ xd->mb_to_right_edge + MV_BORDER,
+ xd->mb_to_top_edge - MV_BORDER,
+ xd->mb_to_bottom_edge + MV_BORDER);
+}
+
+// This function returns either the appropriate sub-block mv or the block's
+// mv, depending on whether block_size < 8x8 and check_sub_blocks is set.
+static INLINE int_mv get_sub_block_mv(const MODE_INFO *candidate,
+ int check_sub_blocks, int which_mv,
+ int search_col, int block_idx) {
+ return check_sub_blocks && candidate->mbmi.sb_type < BLOCK_8X8
+ ? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]]
+ .as_mv[which_mv]
+ : candidate->mbmi.mv[which_mv];
+}
+
+
+// Performs mv sign inversion if indicated by the reference frame combination.
+static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref,
+ const MV_REFERENCE_FRAME this_ref_frame,
+ const int *ref_sign_bias) {
+ int_mv mv = mbmi->mv[ref];
+ if (ref_sign_bias[mbmi->ref_frame[ref]] != ref_sign_bias[this_ref_frame]) {
+ mv.as_mv.row *= -1;
+ mv.as_mv.col *= -1;
+ }
+ return mv;
+}
+
+// This macro is used to add a motion vector to the mv_ref list if it isn't
+// already in the list. If it's the second motion vector it will also
+// skip all additional processing and jump to Done!
+#define ADD_MV_REF_LIST(MV) \
+ do { \
+ if (refmv_count) { \
+ if ((MV).as_int != mv_ref_list[0].as_int) { \
+ mv_ref_list[refmv_count] = (MV); \
+ goto Done; \
+ } \
+ } else { \
+ mv_ref_list[refmv_count++] = (MV); \
+ } \
+ } while (0)
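+
+// Editor's note on the macro above: with refmv_count == 0 the candidate is
+// always stored and counted; with refmv_count == 1 it is stored only if it
+// differs from mv_ref_list[0], and the search then ends via goto Done,
+// since MAX_MV_REF_CANDIDATES is 2.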
+
+// If either of the candidate's reference frames differs from ours (and is
+// not INTRA), scale the corresponding mv and add it to our list; the second
+// mv is added only if it also differs from the first.
+#define IF_DIFF_REF_FRAME_ADD_MV(CANDIDATE) \
+ do { \
+ if ((CANDIDATE)->ref_frame[0] != ref_frame) \
+ ADD_MV_REF_LIST(scale_mv((CANDIDATE), 0, ref_frame, ref_sign_bias)); \
+ if ((CANDIDATE)->ref_frame[1] != ref_frame && \
+ has_second_ref(CANDIDATE) && \
+ (CANDIDATE)->mv[1].as_int != (CANDIDATE)->mv[0].as_int) \
+ ADD_MV_REF_LIST(scale_mv((CANDIDATE), 1, ref_frame, ref_sign_bias)); \
+ } while (0)
+
+
+// Checks that the given mi_row, mi_col and search point
+// are inside the borders of the tile.
+static INLINE int is_inside(const VP9_COMMON *cm, int mi_col, int mi_row,
+ const MV *mv) {
+ return !(mi_row + mv->row < 0 ||
+ mi_col + mv->col < cm->cur_tile_mi_col_start ||
+ mi_row + mv->row >= cm->mi_rows ||
+ mi_col + mv->col >= cm->cur_tile_mi_col_end);
+}
+
+// This function searches the neighbourhood of a given MB/SB
+// to try and find candidate reference vectors.
+void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
+ MODE_INFO *mi, const MODE_INFO *prev_mi,
+ MV_REFERENCE_FRAME ref_frame,
+ int_mv *mv_ref_list,
+ int block_idx,
+ int mi_row, int mi_col) {
+ const int *ref_sign_bias = cm->ref_frame_sign_bias;
+ int i, refmv_count = 0;
+ const MV *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
+ const MB_MODE_INFO *const prev_mbmi = prev_mi ? &prev_mi->mbmi : NULL;
+ int different_ref_found = 0;
+ int context_counter = 0;
+
+ // Blank the reference vector list
+ vpx_memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
+
+  // The nearest 2 blocks are treated differently:
+  // if the size < 8x8 we get the mv from the bmi substructure,
+  // and we also need to keep a mode count.
+ for (i = 0; i < 2; ++i) {
+ const MV *const mv_ref = &mv_ref_search[i];
+ if (is_inside(cm, mi_col, mi_row, mv_ref)) {
+ const int check_sub_blocks = block_idx >= 0;
+ const MODE_INFO *const candidate_mi = xd->mi_8x8[mv_ref->col + mv_ref->row
+ * xd->mode_info_stride];
+ const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
+ // Keep counts for entropy encoding.
+ context_counter += mode_2_counter[candidate->mode];
+
+ // Check if the candidate comes from the same reference frame.
+ if (candidate->ref_frame[0] == ref_frame) {
+ ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, check_sub_blocks, 0,
+ mv_ref->col, block_idx));
+ different_ref_found = candidate->ref_frame[1] != ref_frame;
+ } else {
+ if (candidate->ref_frame[1] == ref_frame)
+ // Add second motion vector if it has the same ref_frame.
+ ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, check_sub_blocks, 1,
+ mv_ref->col, block_idx));
+ different_ref_found = 1;
+ }
+ }
+ }
+
+ // Check the rest of the neighbors in much the same way
+ // as before except we don't need to keep track of sub blocks or
+ // mode counts.
+ for (; i < MVREF_NEIGHBOURS; ++i) {
+ const MV *const mv_ref = &mv_ref_search[i];
+ if (is_inside(cm, mi_col, mi_row, mv_ref)) {
+ const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col +
+ mv_ref->row
+ * xd->mode_info_stride]->mbmi;
+
+ if (candidate->ref_frame[0] == ref_frame) {
+ ADD_MV_REF_LIST(candidate->mv[0]);
+ different_ref_found = candidate->ref_frame[1] != ref_frame;
+ } else {
+ if (candidate->ref_frame[1] == ref_frame)
+ ADD_MV_REF_LIST(candidate->mv[1]);
+ different_ref_found = 1;
+ }
+ }
+ }
+
+ // Check the last frame's mode and mv info.
+ if (prev_mbmi) {
+ if (prev_mbmi->ref_frame[0] == ref_frame)
+ ADD_MV_REF_LIST(prev_mbmi->mv[0]);
+ else if (prev_mbmi->ref_frame[1] == ref_frame)
+ ADD_MV_REF_LIST(prev_mbmi->mv[1]);
+ }
+
+  // If we couldn't find 2 mvs from the same reference frame, go back
+  // through the neighbors and find motion vectors from different
+  // reference frames.
+ if (different_ref_found) {
+ for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
+ const MV *mv_ref = &mv_ref_search[i];
+ if (is_inside(cm, mi_col, mi_row, mv_ref)) {
+ const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col +
+ mv_ref->row
+ * xd->mode_info_stride]->mbmi;
+
+ // If the candidate is INTRA we don't want to consider its mv.
+ if (is_inter_block(candidate))
+ IF_DIFF_REF_FRAME_ADD_MV(candidate);
+ }
+ }
+ }
+
+  // If we still don't have enough candidates, try the last frame.
+ if (prev_mbmi && is_inter_block(prev_mbmi))
+ IF_DIFF_REF_FRAME_ADD_MV(prev_mbmi);
+
+ Done:
+
+ mi->mbmi.mode_context[ref_frame] = counter_to_context[context_counter];
+
+ // Clamp vectors
+ for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
+ clamp_mv_ref(&mv_ref_list[i].as_mv, xd);
+}
diff --git a/libvpx/vp9/common/vp9_mvref_common.h b/libvpx/vp9/common/vp9_mvref_common.h
new file mode 100644
index 0000000..39ebdb0
--- /dev/null
+++ b/libvpx/vp9/common/vp9_mvref_common.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_blockd.h"
+
+#ifndef VP9_COMMON_VP9_MVREF_COMMON_H_
+#define VP9_COMMON_VP9_MVREF_COMMON_H_
+
+void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
+ MODE_INFO *mi, const MODE_INFO *prev_mi,
+ MV_REFERENCE_FRAME ref_frame,
+ int_mv *mv_ref_list,
+ int block_idx,
+ int mi_row, int mi_col);
+
+static INLINE void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
+ MODE_INFO *mi, const MODE_INFO *prev_mi,
+ MV_REFERENCE_FRAME ref_frame,
+ int_mv *mv_ref_list,
+ int mi_row, int mi_col) {
+ vp9_find_mv_refs_idx(cm, xd, mi, prev_mi, ref_frame,
+ mv_ref_list, -1, mi_row, mi_col);
+}
+
+#endif // VP9_COMMON_VP9_MVREF_COMMON_H_
diff --git a/libvpx/vp9/common/vp9_onyx.h b/libvpx/vp9/common/vp9_onyx.h
new file mode 100644
index 0000000..f424e6a
--- /dev/null
+++ b/libvpx/vp9/common/vp9_onyx.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ONYX_H_
+#define VP9_COMMON_VP9_ONYX_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "./vpx_config.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx/vp8cx.h"
+#include "vpx_scale/yv12config.h"
+#include "vp9/common/vp9_ppflags.h"
+
+#define MAX_SEGMENTS 8
+
+ typedef int *VP9_PTR;
+
+ /* Create/destroy static data structures. */
+
+ typedef enum {
+ NORMAL = 0,
+ FOURFIVE = 1,
+ THREEFIVE = 2,
+ ONETWO = 3
+
+ } VPX_SCALING;
+
+ typedef enum {
+ VP9_LAST_FLAG = 1,
+ VP9_GOLD_FLAG = 2,
+ VP9_ALT_FLAG = 4
+ } VP9_REFFRAME;
+
+
+ typedef enum {
+ USAGE_STREAM_FROM_SERVER = 0x0,
+ USAGE_LOCAL_FILE_PLAYBACK = 0x1,
+ USAGE_CONSTRAINED_QUALITY = 0x2,
+ USAGE_CONSTANT_QUALITY = 0x3,
+ } END_USAGE;
+
+
+ typedef enum {
+ MODE_GOODQUALITY = 0x1,
+ MODE_BESTQUALITY = 0x2,
+ MODE_FIRSTPASS = 0x3,
+ MODE_SECONDPASS = 0x4,
+ MODE_SECONDPASS_BEST = 0x5,
+ } MODE;
+
+ typedef enum {
+ FRAMEFLAGS_KEY = 1,
+ FRAMEFLAGS_GOLDEN = 2,
+ FRAMEFLAGS_ALTREF = 4,
+ } FRAMETYPE_FLAGS;
+
+ typedef struct {
+ int version; // 4 versions of bitstream defined:
+ // 0 - best quality/slowest decode,
+ // 3 - lowest quality/fastest decode
+ int width; // width of data passed to the compressor
+ int height; // height of data passed to the compressor
+ double framerate; // set to passed in framerate
+ int64_t target_bandwidth; // bandwidth to be used in kilobits per second
+
+    int noise_sensitivity;  // parameter used for applying pre-processing blur: recommendation 0
+    int Sharpness;          // parameter used for sharpening output: recommendation 0
+ int cpu_used;
+ unsigned int rc_max_intra_bitrate_pct;
+
+ // mode ->
+    // (0)=Realtime/Live Encoding. This mode is optimized for realtime encoding (for example, capturing
+ // a television signal or feed from a live camera). ( speed setting controls how fast )
+ // (1)=Good Quality Fast Encoding. The encoder balances quality with the amount of time it takes to
+ // encode the output. ( speed setting controls how fast )
+ // (2)=One Pass - Best Quality. The encoder places priority on the quality of the output over encoding
+ // speed. The output is compressed at the highest possible quality. This option takes the longest
+ // amount of time to encode. ( speed setting ignored )
+ // (3)=Two Pass - First Pass. The encoder generates a file of statistics for use in the second encoding
+ // pass. ( speed setting controls how fast )
+ // (4)=Two Pass - Second Pass. The encoder uses the statistics that were generated in the first encoding
+ // pass to create the compressed output. ( speed setting controls how fast )
+ // (5)=Two Pass - Second Pass Best. The encoder uses the statistics that were generated in the first
+ // encoding pass to create the compressed output using the highest possible quality, and taking a
+    //     longer amount of time to encode. ( speed setting ignored )
+ int Mode; //
+
+ // Key Framing Operations
+ int auto_key; // automatically detect cut scenes and set the keyframes
+ int key_freq; // maximum distance to key frame.
+
+    int allow_lag;          // allow lagged compression (if 0, lag_in_frames is ignored)
+ int lag_in_frames; // how many frames lag before we start encoding
+
+ // ----------------------------------------------------------------
+ // DATARATE CONTROL OPTIONS
+
+ int end_usage; // vbr or cbr
+
+ // buffer targeting aggressiveness
+ int under_shoot_pct;
+ int over_shoot_pct;
+
+ // buffering parameters
+ int64_t starting_buffer_level; // in seconds
+ int64_t optimal_buffer_level;
+ int64_t maximum_buffer_size;
+
+ // controlling quality
+ int fixed_q;
+ int worst_allowed_q;
+ int best_allowed_q;
+ int cq_level;
+ int lossless;
+
+ // two pass datarate control
+ int two_pass_vbrbias; // two pass datarate control tweaks
+ int two_pass_vbrmin_section;
+ int two_pass_vbrmax_section;
+ // END DATARATE CONTROL OPTIONS
+ // ----------------------------------------------------------------
+
+ // Spatial scalability
+ int ss_number_layers;
+
+    // These parameters aren't to be used in the final build; don't use!
+ int play_alternate;
+ int alt_freq;
+
+    int encode_breakout;  // early breakout encode threshold: for video conferencing recommend 800
+
+ /* Bitfield defining the error resiliency features to enable.
+ * Can provide decodable frames after losses in previous
+ * frames and decodable partitions after losses in the same frame.
+ */
+ unsigned int error_resilient_mode;
+
+ /* Bitfield defining the parallel decoding mode where the
+ * decoding in successive frames may be conducted in parallel
+ * just by decoding the frame headers.
+ */
+ unsigned int frame_parallel_decoding_mode;
+
+ int arnr_max_frames;
+ int arnr_strength;
+ int arnr_type;
+
+ int tile_columns;
+ int tile_rows;
+
+ struct vpx_fixed_buf two_pass_stats_in;
+ struct vpx_codec_pkt_list *output_pkt_list;
+
+ vp8e_tuning tuning;
+ } VP9_CONFIG;
+
+
+ void vp9_initialize_enc();
+
+ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf);
+ void vp9_remove_compressor(VP9_PTR *comp);
+
+ void vp9_change_config(VP9_PTR onyx, VP9_CONFIG *oxcf);
+
+// Receive a frame's worth of data. The caller can assume that a copy of this
+// frame is made and not just a copy of the pointer.
+ int vp9_receive_raw_frame(VP9_PTR comp, unsigned int frame_flags,
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time_stamp);
+
+ int vp9_get_compressed_data(VP9_PTR comp, unsigned int *frame_flags,
+ unsigned long *size, unsigned char *dest,
+ int64_t *time_stamp, int64_t *time_end,
+ int flush);
+
+ int vp9_get_preview_raw_frame(VP9_PTR comp, YV12_BUFFER_CONFIG *dest,
+ vp9_ppflags_t *flags);
+
+ int vp9_use_as_reference(VP9_PTR comp, int ref_frame_flags);
+
+ int vp9_update_reference(VP9_PTR comp, int ref_frame_flags);
+
+ int vp9_copy_reference_enc(VP9_PTR comp, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+ int vp9_get_reference_enc(VP9_PTR ptr, int index, YV12_BUFFER_CONFIG **fb);
+
+ int vp9_set_reference_enc(VP9_PTR comp, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+ int vp9_update_entropy(VP9_PTR comp, int update);
+
+ int vp9_set_roimap(VP9_PTR comp, unsigned char *map,
+ unsigned int rows, unsigned int cols,
+ int delta_q[MAX_SEGMENTS],
+ int delta_lf[MAX_SEGMENTS],
+ unsigned int threshold[MAX_SEGMENTS]);
+
+ int vp9_set_active_map(VP9_PTR comp, unsigned char *map,
+ unsigned int rows, unsigned int cols);
+
+ int vp9_set_internal_size(VP9_PTR comp,
+ VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
+
+ int vp9_set_size_literal(VP9_PTR comp, unsigned int width,
+ unsigned int height);
+
+ int vp9_switch_layer(VP9_PTR comp, int layer);
+
+ void vp9_set_svc(VP9_PTR comp, int use_svc);
+
+ int vp9_get_quantizer(VP9_PTR c);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VP9_COMMON_VP9_ONYX_H_
diff --git a/libvpx/vp9/common/vp9_onyxc_int.h b/libvpx/vp9/common/vp9_onyxc_int.h
new file mode 100644
index 0000000..0431e14
--- /dev/null
+++ b/libvpx/vp9/common/vp9_onyxc_int.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ONYXC_INT_H_
+#define VP9_COMMON_VP9_ONYXC_INT_H_
+
+#include "vpx_config.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_quant_common.h"
+
+#if CONFIG_VP9_POSTPROC
+#include "vp9/common/vp9_postproc.h"
+#endif
+
+#define ALLOWED_REFS_PER_FRAME 3
+
+#define NUM_REF_FRAMES_LOG2 3
+#define NUM_REF_FRAMES (1 << NUM_REF_FRAMES_LOG2)
+
+// 1 scratch frame for the new frame, 3 for scaled references on the encoder
+// TODO(jkoleszar): These 3 extra references could probably come from the
+// normal reference pool.
+#define NUM_YV12_BUFFERS (NUM_REF_FRAMES + 4)
+
+#define NUM_FRAME_CONTEXTS_LOG2 2
+#define NUM_FRAME_CONTEXTS (1 << NUM_FRAME_CONTEXTS_LOG2)
+
+typedef struct frame_contexts {
+ vp9_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
+ vp9_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
+ vp9_prob partition_prob[NUM_FRAME_TYPES][NUM_PARTITION_CONTEXTS]
+ [PARTITION_TYPES - 1];
+ vp9_coeff_probs_model coef_probs[TX_SIZES][BLOCK_TYPES];
+ vp9_prob switchable_interp_prob[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS - 1];
+ vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
+ vp9_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
+ vp9_prob comp_inter_prob[COMP_INTER_CONTEXTS];
+ vp9_prob single_ref_prob[REF_CONTEXTS][2];
+ vp9_prob comp_ref_prob[REF_CONTEXTS];
+ struct tx_probs tx_probs;
+ vp9_prob mbskip_probs[MBSKIP_CONTEXTS];
+ nmv_context nmvc;
+} FRAME_CONTEXT;
+
+typedef struct {
+ unsigned int y_mode[BLOCK_SIZE_GROUPS][INTRA_MODES];
+ unsigned int uv_mode[INTRA_MODES][INTRA_MODES];
+ unsigned int partition[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
+ vp9_coeff_count_model coef[TX_SIZES][BLOCK_TYPES];
+ unsigned int eob_branch[TX_SIZES][BLOCK_TYPES][REF_TYPES]
+ [COEF_BANDS][PREV_COEF_CONTEXTS];
+ unsigned int switchable_interp[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS];
+ unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES];
+ unsigned int intra_inter[INTRA_INTER_CONTEXTS][2];
+ unsigned int comp_inter[COMP_INTER_CONTEXTS][2];
+ unsigned int single_ref[REF_CONTEXTS][2][2];
+ unsigned int comp_ref[REF_CONTEXTS][2];
+ struct tx_counts tx;
+ unsigned int mbskip[MBSKIP_CONTEXTS][2];
+ nmv_context_counts mv;
+} FRAME_COUNTS;
+
+
+typedef enum {
+ SINGLE_PREDICTION_ONLY = 0,
+ COMP_PREDICTION_ONLY = 1,
+ HYBRID_PREDICTION = 2,
+ NB_PREDICTION_TYPES = 3,
+} COMPPREDMODE_TYPE;
+
+typedef struct VP9Common {
+ struct vpx_internal_error_info error;
+
+ DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
+#if CONFIG_ALPHA
+ DECLARE_ALIGNED(16, int16_t, a_dequant[QINDEX_RANGE][8]);
+#endif
+
+ int width;
+ int height;
+ int display_width;
+ int display_height;
+ int last_width;
+ int last_height;
+
+ // TODO(jkoleszar): this implies chroma ss right now, but could vary per
+ // plane. Revisit as part of the future change to YV12_BUFFER_CONFIG to
+ // support additional planes.
+ int subsampling_x;
+ int subsampling_y;
+
+ YV12_BUFFER_CONFIG *frame_to_show;
+
+ YV12_BUFFER_CONFIG yv12_fb[NUM_YV12_BUFFERS];
+ int fb_idx_ref_cnt[NUM_YV12_BUFFERS]; /* reference counts */
+ int ref_frame_map[NUM_REF_FRAMES]; /* maps fb_idx to reference slot */
+
+ // TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and
+ // roll new_fb_idx into it.
+
+ // Each frame can reference ALLOWED_REFS_PER_FRAME buffers
+ int active_ref_idx[ALLOWED_REFS_PER_FRAME];
+ struct scale_factors active_ref_scale[ALLOWED_REFS_PER_FRAME];
+ int new_fb_idx;
+
+ YV12_BUFFER_CONFIG post_proc_buffer;
+
+ FRAME_TYPE last_frame_type; /* Save last frame's frame type for motion search. */
+ FRAME_TYPE frame_type;
+
+ int show_frame;
+ int last_show_frame;
+
+ // Flag signaling that the frame is encoded using only INTRA modes.
+ int intra_only;
+
+ // Flag signaling that the frame context should be reset to default values.
+ // 0 or 1 implies don't reset, 2 reset just the context specified in the
+ // frame header, 3 reset all contexts.
+ int reset_frame_context;
+
+ int frame_flags;
+ // MBs, mb_rows/cols is in 16-pixel units; mi_rows/cols is in
+ // MODE_INFO (8-pixel) units.
+ int MBs;
+ int mb_rows, mi_rows;
+ int mb_cols, mi_cols;
+ int mode_info_stride;
+
+ /* profile settings */
+ TX_MODE tx_mode;
+
+ int base_qindex;
+ int last_kf_gf_q; /* Q used on the last GF or KF */
+
+ int y_dc_delta_q;
+ int uv_dc_delta_q;
+ int uv_ac_delta_q;
+#if CONFIG_ALPHA
+ int a_dc_delta_q;
+ int a_ac_delta_q;
+#endif
+
+ /* We allocate a MODE_INFO struct for each macroblock, together with
+ an extra row on top and column on the left to simplify prediction. */
+
+ MODE_INFO *mip; /* Base of allocated array */
+ MODE_INFO *mi; /* Corresponds to upper left visible macroblock */
+ MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
+ MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
+
+ MODE_INFO **mi_grid_base;
+ MODE_INFO **mi_grid_visible;
+ MODE_INFO **prev_mi_grid_base;
+ MODE_INFO **prev_mi_grid_visible;
+
+ // Persistent mb segment id map used in prediction.
+ unsigned char *last_frame_seg_map;
+
+ INTERPOLATIONFILTERTYPE mcomp_filter_type;
+
+ loop_filter_info_n lf_info;
+
+ int refresh_frame_context; /* Two state 0 = NO, 1 = YES */
+
+ int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */
+
+ struct loopfilter lf;
+ struct segmentation seg;
+
+ /* Y,U,V */
+ ENTROPY_CONTEXT *above_context[MAX_MB_PLANE];
+ ENTROPY_CONTEXT left_context[MAX_MB_PLANE][16];
+
+ // partition contexts
+ PARTITION_CONTEXT *above_seg_context;
+ PARTITION_CONTEXT left_seg_context[8];
+
+ // Context probabilities for reference frame prediction
+ int allow_comp_inter_inter;
+ MV_REFERENCE_FRAME comp_fixed_ref;
+ MV_REFERENCE_FRAME comp_var_ref[2];
+ COMPPREDMODE_TYPE comp_pred_mode;
+
+ FRAME_CONTEXT fc; /* this frame entropy */
+ FRAME_CONTEXT frame_contexts[NUM_FRAME_CONTEXTS];
+ unsigned int frame_context_idx; /* Context to use/update */
+ FRAME_COUNTS counts;
+
+ unsigned int current_video_frame;
+ int version;
+
+#if CONFIG_VP9_POSTPROC
+ struct postproc_state postproc_state;
+#endif
+
+ int error_resilient_mode;
+ int frame_parallel_decoding_mode;
+
+ int log2_tile_cols, log2_tile_rows;
+ int cur_tile_mi_col_start, cur_tile_mi_col_end;
+ int cur_tile_mi_row_start, cur_tile_mi_row_end;
+} VP9_COMMON;
+
+static int get_free_fb(VP9_COMMON *cm) {
+ int i;
+ for (i = 0; i < NUM_YV12_BUFFERS; i++)
+ if (cm->fb_idx_ref_cnt[i] == 0)
+ break;
+
+ assert(i < NUM_YV12_BUFFERS);
+ cm->fb_idx_ref_cnt[i] = 1;
+ return i;
+}
+
+static void ref_cnt_fb(int *buf, int *idx, int new_idx) {
+ if (buf[*idx] > 0)
+ buf[*idx]--;
+
+ *idx = new_idx;
+
+ buf[new_idx]++;
+}
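+
+/* Usage sketch (illustrative only; the field names are the ones declared
+ * in VP9Common above): a caller grabs a free buffer for the new frame and
+ * then retargets a reference slot, releasing whatever it pinned before:
+ *
+ *   cm->new_fb_idx = get_free_fb(cm);            // ref count becomes 1
+ *   ref_cnt_fb(cm->fb_idx_ref_cnt,
+ *              &cm->ref_frame_map[0], cm->new_fb_idx);  // old one released
+ *
+ * Note get_free_fb() leans on the assert above: with NDEBUG defined, an
+ * exhausted pool would index one past the array. */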
+
+static int mi_cols_aligned_to_sb(int n_mis) {
+ return ALIGN_POWER_OF_TWO(n_mis, MI_BLOCK_SIZE_LOG2);
+}
+
+static INLINE void set_skip_context(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col) {
+ const int above_idx = mi_col * 2;
+ const int left_idx = (mi_row * 2) & 15;
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ struct macroblockd_plane *const pd = &xd->plane[i];
+ pd->above_context = cm->above_context[i] + (above_idx >> pd->subsampling_x);
+ pd->left_context = cm->left_context[i] + (left_idx >> pd->subsampling_y);
+ }
+}
+
+static INLINE void set_partition_seg_context(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col) {
+ xd->above_seg_context = cm->above_seg_context + mi_col;
+ xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
+}
+
+// return the node index in the prob tree for binary coding
+static int check_bsize_coverage(int bs, int mi_rows, int mi_cols,
+ int mi_row, int mi_col) {
+ const int r = (mi_row + bs < mi_rows);
+ const int c = (mi_col + bs < mi_cols);
+
+ if (r && c)
+ return 0;
+
+ if (c && !r)
+ return 1; // only allow horizontal/split partition types
+
+ if (r && !c)
+ return 2; // only allow vertical/split partition types
+
+ return -1;
+}
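+
+/* Reading the return values (illustrative): 0 means the block lies fully
+ * inside the frame and every partition type is codable; 1 means it crosses
+ * the bottom edge only (horizontal/split), 2 the right edge only
+ * (vertical/split); -1 means it crosses both, leaving only split. */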
+
+static void set_mi_row_col(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int bh,
+ int mi_col, int bw) {
+ xd->mb_to_top_edge = -((mi_row * MI_SIZE) << 3);
+ xd->mb_to_bottom_edge = ((cm->mi_rows - bh - mi_row) * MI_SIZE) << 3;
+ xd->mb_to_left_edge = -((mi_col * MI_SIZE) << 3);
+ xd->mb_to_right_edge = ((cm->mi_cols - bw - mi_col) * MI_SIZE) << 3;
+
+ // Are edges available for intra prediction?
+ xd->up_available = (mi_row != 0);
+ xd->left_available = (mi_col > cm->cur_tile_mi_col_start);
+ xd->right_available = (mi_col + bw < cm->cur_tile_mi_col_end);
+}
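+
+/* Worked example (illustrative, taking MI_SIZE as the 8-pixel mode-info
+ * unit): mi_row == 2 gives mb_to_top_edge = -((2 * 8) << 3) = -128,
+ * i.e. 16 pixels in the 1/8-pel units that motion vectors are clamped in. */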
+
+static int get_token_alloc(int mb_rows, int mb_cols) {
+ return mb_rows * mb_cols * (48 * 16 + 4);
+}
+
+static void set_prev_mi(VP9_COMMON *cm) {
+ const int use_prev_in_find_mv_refs = cm->width == cm->last_width &&
+ cm->height == cm->last_height &&
+ !cm->error_resilient_mode &&
+ !cm->intra_only &&
+ cm->last_show_frame;
+ // Special case: set prev_mi to NULL when the previous mode info
+ // context cannot be used.
+ cm->prev_mi = use_prev_in_find_mv_refs ?
+ cm->prev_mip + cm->mode_info_stride + 1 : NULL;
+}
+#endif // VP9_COMMON_VP9_ONYXC_INT_H_
diff --git a/libvpx/vp9/common/vp9_postproc.c b/libvpx/vp9/common/vp9_postproc.c
new file mode 100644
index 0000000..955e676
--- /dev/null
+++ b/libvpx/vp9/common/vp9_postproc.c
@@ -0,0 +1,1018 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "./vpx_config.h"
+#include "vpx_scale/yv12config.h"
+#include "vp9/common/vp9_postproc.h"
+#include "vp9/common/vp9_textblit.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "./vp9_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+
+
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#define RGB_TO_YUV(t) \
+ ( (0.257*(float)(t >> 16)) + (0.504*(float)(t >> 8 & 0xff)) + \
+ (0.098*(float)(t & 0xff)) + 16), \
+ (-(0.148*(float)(t >> 16)) - (0.291*(float)(t >> 8 & 0xff)) + \
+ (0.439*(float)(t & 0xff)) + 128), \
+ ( (0.439*(float)(t >> 16)) - (0.368*(float)(t >> 8 & 0xff)) - \
+ (0.071*(float)(t & 0xff)) + 128)
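+
+/* Sanity check of the macro (illustrative): pure green, t = 0x00FF00,
+ * gives Y = 0.504 * 255 + 16 ~= 144.5, U = -0.291 * 255 + 128 ~= 53.8,
+ * V = 128 - 0.368 * 255 ~= 34.2 -- the familiar BT.601 studio-swing
+ * RGB-to-YUV transform. */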
+
+/* global constants */
+#if 0 && CONFIG_POSTPROC_VISUALIZER
+static const unsigned char MB_PREDICTION_MODE_colors[MB_MODE_COUNT][3] = {
+ { RGB_TO_YUV(0x98FB98) }, /* PaleGreen */
+ { RGB_TO_YUV(0x00FF00) }, /* Green */
+ { RGB_TO_YUV(0xADFF2F) }, /* GreenYellow */
+ { RGB_TO_YUV(0x8F0000) }, /* Dark Red */
+ { RGB_TO_YUV(0x008F8F) }, /* Dark Cyan */
+ { RGB_TO_YUV(0x008F8F) }, /* Dark Cyan */
+ { RGB_TO_YUV(0x008F8F) }, /* Dark Cyan */
+ { RGB_TO_YUV(0x8F0000) }, /* Dark Red */
+ { RGB_TO_YUV(0x8F0000) }, /* Dark Red */
+ { RGB_TO_YUV(0x228B22) }, /* ForestGreen */
+ { RGB_TO_YUV(0x006400) }, /* DarkGreen */
+ { RGB_TO_YUV(0x98F5FF) }, /* Cadet Blue */
+ { RGB_TO_YUV(0x6CA6CD) }, /* Sky Blue */
+ { RGB_TO_YUV(0x00008B) }, /* Dark blue */
+ { RGB_TO_YUV(0x551A8B) }, /* Purple */
+ { RGB_TO_YUV(0xFF0000) }, /* Red */
+ { RGB_TO_YUV(0xCC33FF) }, /* Magenta */
+};
+
+static const unsigned char B_PREDICTION_MODE_colors[INTRA_MODES][3] = {
+ { RGB_TO_YUV(0x6633ff) }, /* Purple */
+ { RGB_TO_YUV(0xcc33ff) }, /* Magenta */
+ { RGB_TO_YUV(0xff33cc) }, /* Pink */
+ { RGB_TO_YUV(0xff3366) }, /* Coral */
+ { RGB_TO_YUV(0x3366ff) }, /* Blue */
+ { RGB_TO_YUV(0xed00f5) }, /* Dark Blue */
+ { RGB_TO_YUV(0x2e00b8) }, /* Dark Purple */
+ { RGB_TO_YUV(0xff6633) }, /* Orange */
+ { RGB_TO_YUV(0x33ccff) }, /* Light Blue */
+ { RGB_TO_YUV(0x8ab800) }, /* Green */
+ { RGB_TO_YUV(0xffcc33) }, /* Light Orange */
+ { RGB_TO_YUV(0x33ffcc) }, /* Aqua */
+ { RGB_TO_YUV(0x66ff33) }, /* Light Green */
+ { RGB_TO_YUV(0xccff33) }, /* Yellow */
+};
+
+static const unsigned char MV_REFERENCE_FRAME_colors[MAX_REF_FRAMES][3] = {
+ { RGB_TO_YUV(0x00ff00) }, /* Green */
+ { RGB_TO_YUV(0x0000ff) }, /* Blue */
+ { RGB_TO_YUV(0xffff00) }, /* Yellow */
+ { RGB_TO_YUV(0xff0000) }, /* Red */
+};
+#endif
+
+static const short kernel5[] = {
+ 1, 1, 4, 1, 1
+};
+
+const short vp9_rv[] = {
+ 8, 5, 2, 2, 8, 12, 4, 9, 8, 3,
+ 0, 3, 9, 0, 0, 0, 8, 3, 14, 4,
+ 10, 1, 11, 14, 1, 14, 9, 6, 12, 11,
+ 8, 6, 10, 0, 0, 8, 9, 0, 3, 14,
+ 8, 11, 13, 4, 2, 9, 0, 3, 9, 6,
+ 1, 2, 3, 14, 13, 1, 8, 2, 9, 7,
+ 3, 3, 1, 13, 13, 6, 6, 5, 2, 7,
+ 11, 9, 11, 8, 7, 3, 2, 0, 13, 13,
+ 14, 4, 12, 5, 12, 10, 8, 10, 13, 10,
+ 4, 14, 4, 10, 0, 8, 11, 1, 13, 7,
+ 7, 14, 6, 14, 13, 2, 13, 5, 4, 4,
+ 0, 10, 0, 5, 13, 2, 12, 7, 11, 13,
+ 8, 0, 4, 10, 7, 2, 7, 2, 2, 5,
+ 3, 4, 7, 3, 3, 14, 14, 5, 9, 13,
+ 3, 14, 3, 6, 3, 0, 11, 8, 13, 1,
+ 13, 1, 12, 0, 10, 9, 7, 6, 2, 8,
+ 5, 2, 13, 7, 1, 13, 14, 7, 6, 7,
+ 9, 6, 10, 11, 7, 8, 7, 5, 14, 8,
+ 4, 4, 0, 8, 7, 10, 0, 8, 14, 11,
+ 3, 12, 5, 7, 14, 3, 14, 5, 2, 6,
+ 11, 12, 12, 8, 0, 11, 13, 1, 2, 0,
+ 5, 10, 14, 7, 8, 0, 4, 11, 0, 8,
+ 0, 3, 10, 5, 8, 0, 11, 6, 7, 8,
+ 10, 7, 13, 9, 2, 5, 1, 5, 10, 2,
+ 4, 3, 5, 6, 10, 8, 9, 4, 11, 14,
+ 0, 10, 0, 5, 13, 2, 12, 7, 11, 13,
+ 8, 0, 4, 10, 7, 2, 7, 2, 2, 5,
+ 3, 4, 7, 3, 3, 14, 14, 5, 9, 13,
+ 3, 14, 3, 6, 3, 0, 11, 8, 13, 1,
+ 13, 1, 12, 0, 10, 9, 7, 6, 2, 8,
+ 5, 2, 13, 7, 1, 13, 14, 7, 6, 7,
+ 9, 6, 10, 11, 7, 8, 7, 5, 14, 8,
+ 4, 4, 0, 8, 7, 10, 0, 8, 14, 11,
+ 3, 12, 5, 7, 14, 3, 14, 5, 2, 6,
+ 11, 12, 12, 8, 0, 11, 13, 1, 2, 0,
+ 5, 10, 14, 7, 8, 0, 4, 11, 0, 8,
+ 0, 3, 10, 5, 8, 0, 11, 6, 7, 8,
+ 10, 7, 13, 9, 2, 5, 1, 5, 10, 2,
+ 4, 3, 5, 6, 10, 8, 9, 4, 11, 14,
+ 3, 8, 3, 7, 8, 5, 11, 4, 12, 3,
+ 11, 9, 14, 8, 14, 13, 4, 3, 1, 2,
+ 14, 6, 5, 4, 4, 11, 4, 6, 2, 1,
+ 5, 8, 8, 12, 13, 5, 14, 10, 12, 13,
+ 0, 9, 5, 5, 11, 10, 13, 9, 10, 13,
+};
+
+
+void vp9_post_proc_down_and_across_c(const uint8_t *src_ptr,
+ uint8_t *dst_ptr,
+ int src_pixels_per_line,
+ int dst_pixels_per_line,
+ int rows,
+ int cols,
+ int flimit) {
+ uint8_t const *p_src;
+ uint8_t *p_dst;
+ int row;
+ int col;
+ int i;
+ int v;
+ int pitch = src_pixels_per_line;
+ uint8_t d[8];
+ (void)dst_pixels_per_line;
+
+ for (row = 0; row < rows; row++) {
+ /* post_proc_down for one row */
+ p_src = src_ptr;
+ p_dst = dst_ptr;
+
+ for (col = 0; col < cols; col++) {
+
+ int kernel = 4;
+ int v = p_src[col];
+
+ for (i = -2; i <= 2; i++) {
+ if (abs(v - p_src[col + i * pitch]) > flimit)
+ goto down_skip_convolve;
+
+ kernel += kernel5[2 + i] * p_src[col + i * pitch];
+ }
+
+ v = (kernel >> 3);
+ down_skip_convolve:
+ p_dst[col] = v;
+ }
+
+ /* now post_proc_across */
+ p_src = dst_ptr;
+ p_dst = dst_ptr;
+
+ for (i = 0; i < 8; i++)
+ d[i] = p_src[i];
+
+ for (col = 0; col < cols; col++) {
+ int kernel = 4;
+ v = p_src[col];
+
+ d[col & 7] = v;
+
+ for (i = -2; i <= 2; i++) {
+ if (abs(v - p_src[col + i]) > flimit)
+ goto across_skip_convolve;
+
+ kernel += kernel5[2 + i] * p_src[col + i];
+ }
+
+ d[col & 7] = (kernel >> 3);
+ across_skip_convolve:
+
+ if (col >= 2)
+ p_dst[col - 2] = d[(col - 2) & 7];
+ }
+
+ /* handle the last two pixels */
+ p_dst[col - 2] = d[(col - 2) & 7];
+ p_dst[col - 1] = d[(col - 1) & 7];
+
+
+ /* next row */
+ src_ptr += pitch;
+ dst_ptr += pitch;
+ }
+}
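+
+/* Note (illustrative): kernel5 = {1, 1, 4, 1, 1} sums to 8, so with the
+ * +4 rounding bias the filtered value is
+ *   (4 + p[-2] + p[-1] + 4 * p[0] + p[1] + p[2]) >> 3,
+ * and a flat run of value v maps to (4 + 8 * v) >> 3 == v: the filter
+ * leaves flat areas untouched, while the flimit test skips strong edges. */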
+
+static int q2mbl(int x) {
+ if (x < 20) x = 20;
+
+ x = 50 + (x - 50) * 10 / 8;
+ return x * x / 3;
+}
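+
+/* Worked example (illustrative): q2mbl(32) leaves the clamp alone, then
+ * x = 50 + (32 - 50) * 10 / 8 = 50 - 22 = 28 (C division truncates toward
+ * zero), returning 28 * 28 / 3 = 261 as the variance threshold handed to
+ * the mbpost filters below. */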
+
+void vp9_mbpost_proc_across_ip_c(uint8_t *src, int pitch,
+ int rows, int cols, int flimit) {
+ int r, c, i;
+
+ uint8_t *s = src;
+ uint8_t d[16];
+
+
+ for (r = 0; r < rows; r++) {
+ int sumsq = 0;
+ int sum = 0;
+
+ for (i = -8; i <= 6; i++) {
+ sumsq += s[i] * s[i];
+ sum += s[i];
+ d[i + 8] = 0;
+ }
+
+ for (c = 0; c < cols + 8; c++) {
+ int x = s[c + 7] - s[c - 8];
+ int y = s[c + 7] + s[c - 8];
+
+ sum += x;
+ sumsq += x * y;
+
+ d[c & 15] = s[c];
+
+ if (sumsq * 15 - sum * sum < flimit) {
+ d[c & 15] = (8 + sum + s[c]) >> 4;
+ }
+
+ s[c - 8] = d[(c - 8) & 15];
+ }
+
+ s += pitch;
+ }
+}
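+
+/* How the window update works (illustrative): with
+ * x = s[c + 7] - s[c - 8] and y = s[c + 7] + s[c - 8],
+ * x * y = s[c + 7]^2 - s[c - 8]^2 by difference of squares, so a single
+ * multiply slides the 15-tap sum of squares. The guard
+ * sumsq * 15 - sum * sum < flimit is 15^2 * variance < flimit, so only
+ * low-variance (smooth) stretches get filtered. */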
+
+void vp9_mbpost_proc_down_c(uint8_t *dst, int pitch,
+ int rows, int cols, int flimit) {
+ int r, c, i;
+ const short *rv3 = &vp9_rv[63 & rand()];
+
+ for (c = 0; c < cols; c++) {
+ uint8_t *s = &dst[c];
+ int sumsq = 0;
+ int sum = 0;
+ uint8_t d[16];
+ const short *rv2 = rv3 + ((c * 17) & 127);
+
+ for (i = -8; i <= 6; i++) {
+ sumsq += s[i * pitch] * s[i * pitch];
+ sum += s[i * pitch];
+ }
+
+ for (r = 0; r < rows + 8; r++) {
+ sumsq += s[7 * pitch] * s[ 7 * pitch] - s[-8 * pitch] * s[-8 * pitch];
+ sum += s[7 * pitch] - s[-8 * pitch];
+ d[r & 15] = s[0];
+
+ if (sumsq * 15 - sum * sum < flimit) {
+ d[r & 15] = (rv2[r & 127] + sum + s[0]) >> 4;
+ }
+
+ s[-8 * pitch] = d[(r - 8) & 15];
+ s += pitch;
+ }
+ }
+}
+
+static void deblock_and_de_macro_block(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *post,
+ int q,
+ int low_var_thresh,
+ int flag) {
+ double level = 6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q + 0.0065;
+ int ppl = (int)(level + .5);
+ (void) low_var_thresh;
+ (void) flag;
+
+ vp9_post_proc_down_and_across(source->y_buffer, post->y_buffer,
+ source->y_stride, post->y_stride,
+ source->y_height, source->y_width, ppl);
+
+ vp9_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height,
+ post->y_width, q2mbl(q));
+
+ vp9_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height,
+ post->y_width, q2mbl(q));
+
+ vp9_post_proc_down_and_across(source->u_buffer, post->u_buffer,
+ source->uv_stride, post->uv_stride,
+ source->uv_height, source->uv_width, ppl);
+ vp9_post_proc_down_and_across(source->v_buffer, post->v_buffer,
+ source->uv_stride, post->uv_stride,
+ source->uv_height, source->uv_width, ppl);
+}
+
+void vp9_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
+ int q) {
+ const int ppl = (int)(6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q
+ + 0.0065 + 0.5);
+ int i;
+
+ const uint8_t *const srcs[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ const int src_strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ const int src_widths[4] = {src->y_width, src->uv_width, src->uv_width,
+ src->alpha_width};
+ const int src_heights[4] = {src->y_height, src->uv_height, src->uv_height,
+ src->alpha_height};
+
+ uint8_t *const dsts[4] = {dst->y_buffer, dst->u_buffer, dst->v_buffer,
+ dst->alpha_buffer};
+ const int dst_strides[4] = {dst->y_stride, dst->uv_stride, dst->uv_stride,
+ dst->alpha_stride};
+
+ for (i = 0; i < MAX_MB_PLANE; ++i)
+ vp9_post_proc_down_and_across(srcs[i], dsts[i],
+ src_strides[i], dst_strides[i],
+ src_heights[i], src_widths[i], ppl);
+}
+
+void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
+ int q) {
+ const int ppl = (int)(6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q
+ + 0.0065 + 0.5);
+ int i;
+
+ const uint8_t *const srcs[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ const int src_strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ const int src_widths[4] = {src->y_width, src->uv_width, src->uv_width,
+ src->alpha_width};
+ const int src_heights[4] = {src->y_height, src->uv_height, src->uv_height,
+ src->alpha_height};
+
+ uint8_t *const dsts[4] = {dst->y_buffer, dst->u_buffer, dst->v_buffer,
+ dst->alpha_buffer};
+ const int dst_strides[4] = {dst->y_stride, dst->uv_stride, dst->uv_stride,
+ dst->alpha_stride};
+
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ const int src_stride = src_strides[i];
+ const uint8_t *const src = srcs[i] + 2 * src_stride + 2;
+ const int src_width = src_widths[i] - 4;
+ const int src_height = src_heights[i] - 4;
+
+ const int dst_stride = dst_strides[i];
+ uint8_t *const dst = dsts[i] + 2 * dst_stride + 2;
+
+ vp9_post_proc_down_and_across(src, dst, src_stride, dst_stride,
+ src_height, src_width, ppl);
+ }
+}
+
+double vp9_gaussian(double sigma, double mu, double x) {
+ return 1 / (sigma * sqrt(2.0 * 3.14159265)) *
+ (exp(-(x - mu) * (x - mu) / (2 * sigma * sigma)));
+}
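+
+/* This is the normal probability density: for sigma = 1, mu = 0 the peak
+ * vp9_gaussian(1, 0, 0) is 1 / sqrt(2 * pi) ~= 0.3989. fillrd() below
+ * quantizes it into the 256-entry noise distribution table. */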
+
+static void fillrd(struct postproc_state *state, int q, int a) {
+ char char_dist[300];
+
+ double sigma;
+ int ai = a, qi = q, i;
+
+ vp9_clear_system_state();
+
+ sigma = ai + .5 + .6 * (63 - qi) / 63.0;
+
+ /* set up a lookup table of 256 entries that matches
+ * a gaussian distribution with sigma determined by q.
+ */
+ {
+ double i;
+ int next, j;
+
+ next = 0;
+
+ for (i = -32; i < 32; i++) {
+ int a = (int)(.5 + 256 * vp9_gaussian(sigma, 0, i));
+
+ if (a) {
+ for (j = 0; j < a; j++) {
+ char_dist[next + j] = (char) i;
+ }
+
+ next = next + j;
+ }
+
+ }
+
+ for (; next < 256; next++)
+ char_dist[next] = 0;
+ }
+
+ for (i = 0; i < 3072; i++) {
+ state->noise[i] = char_dist[rand() & 0xff];
+ }
+
+ for (i = 0; i < 16; i++) {
+ state->blackclamp[i] = -char_dist[0];
+ state->whiteclamp[i] = -char_dist[0];
+ state->bothclamp[i] = -2 * char_dist[0];
+ }
+
+ state->last_q = q;
+ state->last_noise = a;
+}
+
+/****************************************************************************
+ *
+ * ROUTINE : plane_add_noise_c
+ *
+ * INPUTS : unsigned char *Start starting address of buffer to
+ * add gaussian noise to
+ * unsigned int width width of plane
+ * unsigned int height height of plane
+ * int pitch distance between subsequent lines of frame
+ * int q quantizer used to determine amount of noise
+ * to add
+ *
+ * OUTPUTS : None.
+ *
+ * RETURNS : void.
+ *
+ * FUNCTION : adds gaussian noise to a plane of pixels
+ *
+ * SPECIAL NOTES : None.
+ *
+ ****************************************************************************/
+void vp9_plane_add_noise_c(uint8_t *start, char *noise,
+ char blackclamp[16],
+ char whiteclamp[16],
+ char bothclamp[16],
+ unsigned int width, unsigned int height, int pitch) {
+ unsigned int i, j;
+
+ for (i = 0; i < height; i++) {
+ uint8_t *pos = start + i * pitch;
+ char *ref = (char *)(noise + (rand() & 0xff)); // NOLINT
+
+ for (j = 0; j < width; j++) {
+ if (pos[j] < blackclamp[0])
+ pos[j] = blackclamp[0];
+
+ if (pos[j] > 255 + whiteclamp[0])
+ pos[j] = 255 + whiteclamp[0];
+
+ pos[j] += ref[j];
+ }
+ }
+}
+
+/* Blend the macro block with a solid colored square. Leave the
+ * edges unblended to give distinction to macro blocks in areas
+ * filled with the same color block.
+ */
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v,
+ int y1, int u1, int v1, int alpha, int stride) {
+ int i, j;
+ int y1_const = y1 * ((1 << 16) - alpha);
+ int u1_const = u1 * ((1 << 16) - alpha);
+ int v1_const = v1 * ((1 << 16) - alpha);
+
+ y += 2 * stride + 2;
+ for (i = 0; i < 12; i++) {
+ for (j = 0; j < 12; j++) {
+ y[j] = (y[j] * alpha + y1_const) >> 16;
+ }
+ y += stride;
+ }
+
+ stride >>= 1;
+
+ u += stride + 1;
+ v += stride + 1;
+
+ for (i = 0; i < 6; i++) {
+ for (j = 0; j < 6; j++) {
+ u[j] = (u[j] * alpha + u1_const) >> 16;
+ v[j] = (v[j] * alpha + v1_const) >> 16;
+ }
+ u += stride;
+ v += stride;
+ }
+}
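+
+/* The blends here are Q16 fixed-point lerps:
+ *   out = (pix * alpha + val * (65536 - alpha)) >> 16.
+ * Worked check (illustrative) with the alpha used by the callers,
+ * 0xc000 (0.75): pix = 200, val = 40 gives
+ * (200 * 49152 + 40 * 16384) >> 16 = 160 = 0.75 * 200 + 0.25 * 40. */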
+
+/* Blend only the edge of the macro block. Leave center
+ * unblended to allow for other visualizations to be layered.
+ */
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v,
+ int y1, int u1, int v1, int alpha, int stride) {
+ int i, j;
+ int y1_const = y1 * ((1 << 16) - alpha);
+ int u1_const = u1 * ((1 << 16) - alpha);
+ int v1_const = v1 * ((1 << 16) - alpha);
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 16; j++) {
+ y[j] = (y[j] * alpha + y1_const) >> 16;
+ }
+ y += stride;
+ }
+
+ for (i = 0; i < 12; i++) {
+ y[0] = (y[0] * alpha + y1_const) >> 16;
+ y[1] = (y[1] * alpha + y1_const) >> 16;
+ y[14] = (y[14] * alpha + y1_const) >> 16;
+ y[15] = (y[15] * alpha + y1_const) >> 16;
+ y += stride;
+ }
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 16; j++) {
+ y[j] = (y[j] * alpha + y1_const) >> 16;
+ }
+ y += stride;
+ }
+
+ stride >>= 1;
+
+ for (j = 0; j < 8; j++) {
+ u[j] = (u[j] * alpha + u1_const) >> 16;
+ v[j] = (v[j] * alpha + v1_const) >> 16;
+ }
+ u += stride;
+ v += stride;
+
+ for (i = 0; i < 6; i++) {
+ u[0] = (u[0] * alpha + u1_const) >> 16;
+ v[0] = (v[0] * alpha + v1_const) >> 16;
+
+ u[7] = (u[7] * alpha + u1_const) >> 16;
+ v[7] = (v[7] * alpha + v1_const) >> 16;
+
+ u += stride;
+ v += stride;
+ }
+
+ for (j = 0; j < 8; j++) {
+ u[j] = (u[j] * alpha + u1_const) >> 16;
+ v[j] = (v[j] * alpha + v1_const) >> 16;
+ }
+}
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v,
+ int y1, int u1, int v1, int alpha, int stride) {
+ int i, j;
+ int y1_const = y1 * ((1 << 16) - alpha);
+ int u1_const = u1 * ((1 << 16) - alpha);
+ int v1_const = v1 * ((1 << 16) - alpha);
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ y[j] = (y[j] * alpha + y1_const) >> 16;
+ }
+ y += stride;
+ }
+
+ stride >>= 1;
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ u[j] = (u[j] * alpha + u1_const) >> 16;
+ v[j] = (v[j] * alpha + v1_const) >> 16;
+ }
+ u += stride;
+ v += stride;
+ }
+}
+
+static void constrain_line(int x0, int *x1, int y0, int *y1,
+ int width, int height) {
+ int dx;
+ int dy;
+
+ if (*x1 > width) {
+ dx = *x1 - x0;
+ dy = *y1 - y0;
+
+ *x1 = width;
+ if (dx)
+ *y1 = ((width - x0) * dy) / dx + y0;
+ }
+ if (*x1 < 0) {
+ dx = *x1 - x0;
+ dy = *y1 - y0;
+
+ *x1 = 0;
+ if (dx)
+ *y1 = ((0 - x0) * dy) / dx + y0;
+ }
+ if (*y1 > height) {
+ dx = *x1 - x0;
+ dy = *y1 - y0;
+
+ *y1 = height;
+ if (dy)
+ *x1 = ((height - y0) * dx) / dy + x0;
+ }
+ if (*y1 < 0) {
+ dx = *x1 - x0;
+ dy = *y1 - y0;
+
+ *y1 = 0;
+ if (dy)
+ *x1 = ((0 - y0) * dx) / dy + x0;
+ }
+}
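+
+/* Worked example (illustrative): clipping (10,10) -> (30,20) against
+ * width = 24 sets x1 = 24 and, by similar triangles,
+ * y1 = (24 - 10) * (20 - 10) / (30 - 10) + 10 = 17. */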
+
+int vp9_post_proc_frame(struct VP9Common *cm,
+ YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *ppflags) {
+ int q = cm->lf.filter_level * 10 / 6;
+ int flags = ppflags->post_proc_flag;
+ int deblock_level = ppflags->deblocking_level;
+ int noise_level = ppflags->noise_level;
+
+ if (!cm->frame_to_show)
+ return -1;
+
+ if (q > 63)
+ q = 63;
+
+ if (!flags) {
+ *dest = *cm->frame_to_show;
+ return 0;
+ }
+
+#if ARCH_X86 || ARCH_X86_64
+ vpx_reset_mmx_state();
+#endif
+
+ if (flags & VP9D_DEMACROBLOCK) {
+ deblock_and_de_macro_block(cm->frame_to_show, &cm->post_proc_buffer,
+ q + (deblock_level - 5) * 10, 1, 0);
+ } else if (flags & VP9D_DEBLOCK) {
+ vp9_deblock(cm->frame_to_show, &cm->post_proc_buffer, q);
+ } else {
+ vp8_yv12_copy_frame(cm->frame_to_show, &cm->post_proc_buffer);
+ }
+
+ if (flags & VP9D_ADDNOISE) {
+ if (cm->postproc_state.last_q != q
+ || cm->postproc_state.last_noise != noise_level) {
+ fillrd(&cm->postproc_state, 63 - q, noise_level);
+ }
+
+ vp9_plane_add_noise(cm->post_proc_buffer.y_buffer,
+ cm->postproc_state.noise,
+ cm->postproc_state.blackclamp,
+ cm->postproc_state.whiteclamp,
+ cm->postproc_state.bothclamp,
+ cm->post_proc_buffer.y_width,
+ cm->post_proc_buffer.y_height,
+ cm->post_proc_buffer.y_stride);
+ }
+
+#if 0 && CONFIG_POSTPROC_VISUALIZER
+ if (flags & VP9D_DEBUG_TXT_FRAME_INFO) {
+ char message[512];
+ sprintf(message, "F%1dG%1dQ%3dF%3dP%d_s%dx%d",
+ (cm->frame_type == KEY_FRAME),
+ cm->refresh_golden_frame,
+ cm->base_qindex,
+ cm->filter_level,
+ flags,
+ cm->mb_cols, cm->mb_rows);
+ vp9_blit_text(message, cm->post_proc_buffer.y_buffer,
+ cm->post_proc_buffer.y_stride);
+ }
+
+ if (flags & VP9D_DEBUG_TXT_MBLK_MODES) {
+ int i, j;
+ uint8_t *y_ptr;
+ YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer;
+ int mb_rows = post->y_height >> 4;
+ int mb_cols = post->y_width >> 4;
+ int mb_index = 0;
+ MODE_INFO *mi = cm->mi;
+
+ y_ptr = post->y_buffer + 4 * post->y_stride + 4;
+
+ /* vp9_filter each macro block */
+ for (i = 0; i < mb_rows; i++) {
+ for (j = 0; j < mb_cols; j++) {
+ char zz[4];
+
+ sprintf(zz, "%c", mi[mb_index].mbmi.mode + 'a');
+
+ vp9_blit_text(zz, y_ptr, post->y_stride);
+ mb_index++;
+ y_ptr += 16;
+ }
+
+ mb_index++; /* border */
+ y_ptr += post->y_stride * 16 - post->y_width;
+
+ }
+ }
+
+ if (flags & VP9D_DEBUG_TXT_DC_DIFF) {
+ int i, j;
+ uint8_t *y_ptr;
+ YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer;
+ int mb_rows = post->y_height >> 4;
+ int mb_cols = post->y_width >> 4;
+ int mb_index = 0;
+ MODE_INFO *mi = cm->mi;
+
+ y_ptr = post->y_buffer + 4 * post->y_stride + 4;
+
+ /* vp9_filter each macro block */
+ for (i = 0; i < mb_rows; i++) {
+ for (j = 0; j < mb_cols; j++) {
+ char zz[4];
+ int dc_diff = !(mi[mb_index].mbmi.mode != I4X4_PRED &&
+ mi[mb_index].mbmi.mode != SPLITMV &&
+ mi[mb_index].mbmi.skip_coeff);
+
+ if (cm->frame_type == KEY_FRAME)
+ sprintf(zz, "a");
+ else
+ sprintf(zz, "%c", dc_diff + '0');
+
+ vp9_blit_text(zz, y_ptr, post->y_stride);
+ mb_index++;
+ y_ptr += 16;
+ }
+
+ mb_index++; /* border */
+ y_ptr += post->y_stride * 16 - post->y_width;
+
+ }
+ }
+
+ if (flags & VP9D_DEBUG_TXT_RATE_INFO) {
+ char message[512];
+ snprintf(message, sizeof(message),
+ "Bitrate: %10.2f framerate: %10.2f ",
+ cm->bitrate, cm->framerate);
+ vp9_blit_text(message, cm->post_proc_buffer.y_buffer,
+ cm->post_proc_buffer.y_stride);
+ }
+
+ /* Draw motion vectors */
+ if ((flags & VP9D_DEBUG_DRAW_MV) && ppflags->display_mv_flag) {
+ YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer;
+ int width = post->y_width;
+ int height = post->y_height;
+ uint8_t *y_buffer = cm->post_proc_buffer.y_buffer;
+ int y_stride = cm->post_proc_buffer.y_stride;
+ MODE_INFO *mi = cm->mi;
+ int x0, y0;
+
+ for (y0 = 0; y0 < height; y0 += 16) {
+ for (x0 = 0; x0 < width; x0 += 16) {
+ int x1, y1;
+
+ if (!(ppflags->display_mv_flag & (1 << mi->mbmi.mode))) {
+ mi++;
+ continue;
+ }
+
+ if (mi->mbmi.mode == SPLITMV) {
+ switch (mi->mbmi.partitioning) {
+ case PARTITIONING_16X8 : { /* mv_top_bottom */
+ union b_mode_info *bmi = &mi->bmi[0];
+ MV *mv = &bmi->mv.as_mv;
+
+ x1 = x0 + 8 + (mv->col >> 3);
+ y1 = y0 + 4 + (mv->row >> 3);
+
+ constrain_line(x0 + 8, &x1, y0 + 4, &y1, width, height);
+ vp9_blit_line(x0 + 8, x1, y0 + 4, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[8];
+
+ x1 = x0 + 8 + (mv->col >> 3);
+ y1 = y0 + 12 + (mv->row >> 3);
+
+ constrain_line(x0 + 8, &x1, y0 + 12, &y1, width, height);
+ vp9_blit_line(x0 + 8, x1, y0 + 12, y1, y_buffer, y_stride);
+
+ break;
+ }
+ case PARTITIONING_8X16 : { /* mv_left_right */
+ union b_mode_info *bmi = &mi->bmi[0];
+ MV *mv = &bmi->mv.as_mv;
+
+ x1 = x0 + 4 + (mv->col >> 3);
+ y1 = y0 + 8 + (mv->row >> 3);
+
+ constrain_line(x0 + 4, &x1, y0 + 8, &y1, width, height);
+ vp9_blit_line(x0 + 4, x1, y0 + 8, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[2];
+
+ x1 = x0 + 12 + (mv->col >> 3);
+ y1 = y0 + 8 + (mv->row >> 3);
+
+ constrain_line(x0 + 12, &x1, y0 + 8, &y1, width, height);
+ vp9_blit_line(x0 + 12, x1, y0 + 8, y1, y_buffer, y_stride);
+
+ break;
+ }
+ case PARTITIONING_8X8 : { /* mv_quarters */
+ union b_mode_info *bmi = &mi->bmi[0];
+ MV *mv = &bmi->mv.as_mv;
+
+ x1 = x0 + 4 + (mv->col >> 3);
+ y1 = y0 + 4 + (mv->row >> 3);
+
+ constrain_line(x0 + 4, &x1, y0 + 4, &y1, width, height);
+ vp9_blit_line(x0 + 4, x1, y0 + 4, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[2];
+
+ x1 = x0 + 12 + (mv->col >> 3);
+ y1 = y0 + 4 + (mv->row >> 3);
+
+ constrain_line(x0 + 12, &x1, y0 + 4, &y1, width, height);
+ vp9_blit_line(x0 + 12, x1, y0 + 4, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[8];
+
+ x1 = x0 + 4 + (mv->col >> 3);
+ y1 = y0 + 12 + (mv->row >> 3);
+
+ constrain_line(x0 + 4, &x1, y0 + 12, &y1, width, height);
+ vp9_blit_line(x0 + 4, x1, y0 + 12, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[10];
+
+ x1 = x0 + 12 + (mv->col >> 3);
+ y1 = y0 + 12 + (mv->row >> 3);
+
+ constrain_line(x0 + 12, &x1, y0 + 12, &y1, width, height);
+ vp9_blit_line(x0 + 12, x1, y0 + 12, y1, y_buffer, y_stride);
+ break;
+ }
+ case PARTITIONING_4X4:
+ default : {
+ union b_mode_info *bmi = mi->bmi;
+ int bx0, by0;
+
+ for (by0 = y0; by0 < (y0 + 16); by0 += 4) {
+ for (bx0 = x0; bx0 < (x0 + 16); bx0 += 4) {
+ MV *mv = &bmi->mv.as_mv;
+
+ x1 = bx0 + 2 + (mv->col >> 3);
+ y1 = by0 + 2 + (mv->row >> 3);
+
+ constrain_line(bx0 + 2, &x1, by0 + 2, &y1, width, height);
+ vp9_blit_line(bx0 + 2, x1, by0 + 2, y1, y_buffer, y_stride);
+
+ bmi++;
+ }
+ }
+ }
+ }
+ } else if (is_inter_mode(mi->mbmi.mode)) {
+ MV *mv = &mi->mbmi.mv.as_mv;
+ const int lx0 = x0 + 8;
+ const int ly0 = y0 + 8;
+
+ x1 = lx0 + (mv->col >> 3);
+ y1 = ly0 + (mv->row >> 3);
+
+ if (x1 != lx0 && y1 != ly0) {
+ constrain_line(lx0, &x1, ly0 - 1, &y1, width, height);
+ vp9_blit_line(lx0, x1, ly0 - 1, y1, y_buffer, y_stride);
+
+ constrain_line(lx0, &x1, ly0 + 1, &y1, width, height);
+ vp9_blit_line(lx0, x1, ly0 + 1, y1, y_buffer, y_stride);
+ } else
+ vp9_blit_line(lx0, x1, ly0, y1, y_buffer, y_stride);
+ }
+
+ mi++;
+ }
+ mi++;
+ }
+ }
+
+ /* Color in block modes */
+ if ((flags & VP9D_DEBUG_CLR_BLK_MODES)
+ && (ppflags->display_mb_modes_flag || ppflags->display_b_modes_flag)) {
+ int y, x;
+ YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer;
+ int width = post->y_width;
+ int height = post->y_height;
+ uint8_t *y_ptr = cm->post_proc_buffer.y_buffer;
+ uint8_t *u_ptr = cm->post_proc_buffer.u_buffer;
+ uint8_t *v_ptr = cm->post_proc_buffer.v_buffer;
+ int y_stride = cm->post_proc_buffer.y_stride;
+ MODE_INFO *mi = cm->mi;
+
+ for (y = 0; y < height; y += 16) {
+ for (x = 0; x < width; x += 16) {
+ int Y = 0, U = 0, V = 0;
+
+ if (mi->mbmi.mode == I4X4_PRED &&
+ ((ppflags->display_mb_modes_flag & I4X4_PRED) ||
+ ppflags->display_b_modes_flag)) {
+ int by, bx;
+ uint8_t *yl, *ul, *vl;
+ union b_mode_info *bmi = mi->bmi;
+
+ yl = y_ptr + x;
+ ul = u_ptr + (x >> 1);
+ vl = v_ptr + (x >> 1);
+
+ for (by = 0; by < 16; by += 4) {
+ for (bx = 0; bx < 16; bx += 4) {
+ if ((ppflags->display_b_modes_flag & (1 << mi->mbmi.mode))
+ || (ppflags->display_mb_modes_flag & I4X4_PRED)) {
+ Y = B_PREDICTION_MODE_colors[bmi->as_mode][0];
+ U = B_PREDICTION_MODE_colors[bmi->as_mode][1];
+ V = B_PREDICTION_MODE_colors[bmi->as_mode][2];
+
+ vp9_blend_b(yl + bx, ul + (bx >> 1), vl + (bx >> 1), Y, U, V,
+ 0xc000, y_stride);
+ }
+ bmi++;
+ }
+
+ yl += y_stride * 4;
+ ul += y_stride * 1;
+ vl += y_stride * 1;
+ }
+ } else if (ppflags->display_mb_modes_flag & (1 << mi->mbmi.mode)) {
+ Y = MB_PREDICTION_MODE_colors[mi->mbmi.mode][0];
+ U = MB_PREDICTION_MODE_colors[mi->mbmi.mode][1];
+ V = MB_PREDICTION_MODE_colors[mi->mbmi.mode][2];
+
+ vp9_blend_mb_inner(y_ptr + x, u_ptr + (x >> 1), v_ptr + (x >> 1),
+ Y, U, V, 0xc000, y_stride);
+ }
+
+ mi++;
+ }
+ y_ptr += y_stride * 16;
+ u_ptr += y_stride * 4;
+ v_ptr += y_stride * 4;
+
+ mi++;
+ }
+ }
+
+ /* Color in frame reference blocks */
+ if ((flags & VP9D_DEBUG_CLR_FRM_REF_BLKS) &&
+ ppflags->display_ref_frame_flag) {
+ int y, x;
+ YV12_BUFFER_CONFIG *post = &cm->post_proc_buffer;
+ int width = post->y_width;
+ int height = post->y_height;
+ uint8_t *y_ptr = cm->post_proc_buffer.y_buffer;
+ uint8_t *u_ptr = cm->post_proc_buffer.u_buffer;
+ uint8_t *v_ptr = cm->post_proc_buffer.v_buffer;
+ int y_stride = cm->post_proc_buffer.y_stride;
+ MODE_INFO *mi = cm->mi;
+
+ for (y = 0; y < height; y += 16) {
+ for (x = 0; x < width; x += 16) {
+ int Y = 0, U = 0, V = 0;
+
+ if (ppflags->display_ref_frame_flag & (1 << mi->mbmi.ref_frame)) {
+ Y = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][0];
+ U = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][1];
+ V = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][2];
+
+ vp9_blend_mb_outer(y_ptr + x, u_ptr + (x >> 1), v_ptr + (x >> 1),
+ Y, U, V, 0xc000, y_stride);
+ }
+
+ mi++;
+ }
+ y_ptr += y_stride * 16;
+ u_ptr += y_stride * 4;
+ v_ptr += y_stride * 4;
+
+ mi++;
+ }
+ }
+#endif
+
+ *dest = cm->post_proc_buffer;
+
+ /* handle problem with extending borders */
+ dest->y_width = cm->width;
+ dest->y_height = cm->height;
+ dest->uv_width = dest->y_width >> cm->subsampling_x;
+ dest->uv_height = dest->y_height >> cm->subsampling_y;
+
+ return 0;
+}
diff --git a/libvpx/vp9/common/vp9_postproc.h b/libvpx/vp9/common/vp9_postproc.h
new file mode 100644
index 0000000..c63beae
--- /dev/null
+++ b/libvpx/vp9/common/vp9_postproc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_POSTPROC_H_
+#define VP9_COMMON_VP9_POSTPROC_H_
+
+#include "vpx_ports/mem.h"
+
+struct postproc_state {
+ int last_q;
+ int last_noise;
+ char noise[3072];
+ DECLARE_ALIGNED(16, char, blackclamp[16]);
+ DECLARE_ALIGNED(16, char, whiteclamp[16]);
+ DECLARE_ALIGNED(16, char, bothclamp[16]);
+};
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_ppflags.h"
+
+int vp9_post_proc_frame(struct VP9Common *cm,
+ YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *flags);
+
+void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q);
+
+void vp9_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q);
+
+#endif // VP9_COMMON_VP9_POSTPROC_H_
diff --git a/libvpx/vp9/common/vp9_ppflags.h b/libvpx/vp9/common/vp9_ppflags.h
new file mode 100644
index 0000000..561c930
--- /dev/null
+++ b/libvpx/vp9/common/vp9_ppflags.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_PPFLAGS_H_
+#define VP9_COMMON_VP9_PPFLAGS_H_
+
+enum {
+ VP9D_NOFILTERING = 0,
+ VP9D_DEBLOCK = 1 << 0,
+ VP9D_DEMACROBLOCK = 1 << 1,
+ VP9D_ADDNOISE = 1 << 2,
+ VP9D_DEBUG_TXT_FRAME_INFO = 1 << 3,
+ VP9D_DEBUG_TXT_MBLK_MODES = 1 << 4,
+ VP9D_DEBUG_TXT_DC_DIFF = 1 << 5,
+ VP9D_DEBUG_TXT_RATE_INFO = 1 << 6,
+ VP9D_DEBUG_DRAW_MV = 1 << 7,
+ VP9D_DEBUG_CLR_BLK_MODES = 1 << 8,
+ VP9D_DEBUG_CLR_FRM_REF_BLKS = 1 << 9
+};
+
+typedef struct {
+ int post_proc_flag;
+ int deblocking_level;
+ int noise_level;
+ int display_ref_frame_flag;
+ int display_mb_modes_flag;
+ int display_b_modes_flag;
+ int display_mv_flag;
+} vp9_ppflags_t;
+
+#endif // VP9_COMMON_VP9_PPFLAGS_H_
diff --git a/libvpx/vpx_scale/scale_mode.h b/libvpx/vp9/common/vp9_pragmas.h
index 5581385..f079161 100644
--- a/libvpx/vpx_scale/scale_mode.h
+++ b/libvpx/vp9/common/vp9_pragmas.h
@@ -8,21 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#ifndef VP9_COMMON_VP9_PRAGMAS_H_
+#define VP9_COMMON_VP9_PRAGMAS_H_
-/****************************************************************************
-*
-*****************************************************************************
-*/
-
-#ifndef SCALE_MODE_H
-#define SCALE_MODE_H
-
-typedef enum {
- MAINTAIN_ASPECT_RATIO = 0x0,
- SCALE_TO_FIT = 0x1,
- CENTER = 0x2,
- OTHER = 0x3
-} SCALE_MODE;
-
+#ifdef __INTEL_COMPILER
+#pragma warning(disable:997 1011 170)
+#endif
+#ifdef _MSC_VER
+#pragma warning(disable:4799)
#endif
+
+#endif // VP9_COMMON_VP9_PRAGMAS_H_
diff --git a/libvpx/vp9/common/vp9_pred_common.c b/libvpx/vp9/common/vp9_pred_common.c
new file mode 100644
index 0000000..81fbf1f
--- /dev/null
+++ b/libvpx/vp9/common/vp9_pred_common.c
@@ -0,0 +1,416 @@
+
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_treecoder.h"
+
+// Returns a context number for the given MB prediction signal
+unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ // Note:
+ // The mode info data structure has a one element border above and to the
+ // left of the entries corresponding to real macroblocks.
+ // The prediction flags in these dummy entries are initialized to 0.
+ // left
+ const int left_mv_pred = left_in_image ? is_inter_mode(left_mi->mbmi.mode)
+ : 0;
+ const int left_interp = left_in_image && left_mv_pred
+ ? left_mi->mbmi.interp_filter
+ : SWITCHABLE_FILTERS;
+
+ // above
+ const int above_mv_pred = above_in_image ? is_inter_mode(above_mi->mbmi.mode)
+ : 0;
+ const int above_interp = above_in_image && above_mv_pred
+ ? above_mi->mbmi.interp_filter
+ : SWITCHABLE_FILTERS;
+
+ if (left_interp == above_interp)
+ return left_interp;
+ else if (left_interp == SWITCHABLE_FILTERS &&
+ above_interp != SWITCHABLE_FILTERS)
+ return above_interp;
+ else if (left_interp != SWITCHABLE_FILTERS &&
+ above_interp == SWITCHABLE_FILTERS)
+ return left_interp;
+ else
+ return SWITCHABLE_FILTERS;
+}
+
+// Returns a context number for the given MB prediction signal
+unsigned char vp9_get_pred_context_intra_inter(const MACROBLOCKD *xd) {
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1;
+ const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1;
+
+ // The mode info data structure has a one element border above and to the
+ // left of the entries corresponding to real macroblocks.
+ // The prediction flags in these dummy entries are initialized to 0.
+ // 0 - inter/inter, inter/--, --/inter, --/--
+ // 1 - intra/inter, inter/intra
+ // 2 - intra/--, --/intra
+ // 3 - intra/intra
+ if (above_in_image && left_in_image) // both edges available
+ return left_intra && above_intra ? 3
+ : left_intra || above_intra;
+ else if (above_in_image || left_in_image) // one edge available
+ return 2 * (above_in_image ? above_intra : left_intra);
+ else
+ return 0;
+}
+
+// Returns a context number for the given MB prediction signal
+unsigned char vp9_get_pred_context_comp_inter_inter(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ int pred_context;
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ // Note:
+ // The mode info data structure has a one element border above and to the
+ // left of the entries corresponding to real macroblocks.
+ // The prediction flags in these dummy entries are initialized to 0.
+ if (above_in_image && left_in_image) { // both edges available
+ if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi))
+ // neither edge uses comp pred (0/1)
+ pred_context = (above_mbmi->ref_frame[0] == cm->comp_fixed_ref) ^
+ (left_mbmi->ref_frame[0] == cm->comp_fixed_ref);
+ else if (!has_second_ref(above_mbmi))
+ // one of two edges uses comp pred (2/3)
+ pred_context = 2 + (above_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
+ !is_inter_block(above_mbmi));
+ else if (!has_second_ref(left_mbmi))
+ // one of two edges uses comp pred (2/3)
+ pred_context = 2 + (left_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
+ !is_inter_block(left_mbmi));
+ else // both edges use comp pred (4)
+ pred_context = 4;
+ } else if (above_in_image || left_in_image) { // one edge available
+ const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
+
+ if (!has_second_ref(edge_mbmi))
+ // edge does not use comp pred (0/1)
+ pred_context = edge_mbmi->ref_frame[0] == cm->comp_fixed_ref;
+ else
+ // edge uses comp pred (3)
+ pred_context = 3;
+ } else { // no edges available (1)
+ pred_context = 1;
+ }
+ assert(pred_context >= 0 && pred_context < COMP_INTER_CONTEXTS);
+ return pred_context;
+}
+
+// Returns a context number for the given MB prediction signal
+unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ int pred_context;
+ const MODE_INFO * const above_mi = xd->mi_8x8[-cm->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1;
+ const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1;
+ // Note:
+ // The mode info data structure has a one element border above and to the
+ // left of the entries corresponding to real macroblocks.
+ // The prediction flags in these dummy entries are initialized to 0.
+ const int fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
+ const int var_ref_idx = !fix_ref_idx;
+
+ if (above_in_image && left_in_image) { // both edges available
+ if (above_intra && left_intra) { // intra/intra (2)
+ pred_context = 2;
+ } else if (above_intra || left_intra) { // intra/inter
+ const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
+
+ if (!has_second_ref(edge_mbmi)) // single pred (1/3)
+ pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]);
+ else // comp pred (1/3)
+ pred_context = 1 + 2 * (edge_mbmi->ref_frame[var_ref_idx]
+ != cm->comp_var_ref[1]);
+ } else { // inter/inter
+ const int l_sg = !has_second_ref(left_mbmi);
+ const int a_sg = !has_second_ref(above_mbmi);
+ MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0]
+ : above_mbmi->ref_frame[var_ref_idx];
+ MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0]
+ : left_mbmi->ref_frame[var_ref_idx];
+
+ if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) {
+ pred_context = 0;
+ } else if (l_sg && a_sg) { // single/single
+ if ((vrfa == cm->comp_fixed_ref && vrfl == cm->comp_var_ref[0]) ||
+ (vrfl == cm->comp_fixed_ref && vrfa == cm->comp_var_ref[0]))
+ pred_context = 4;
+ else if (vrfa == vrfl)
+ pred_context = 3;
+ else
+ pred_context = 1;
+ } else if (l_sg || a_sg) { // single/comp
+ MV_REFERENCE_FRAME vrfc = l_sg ? vrfa : vrfl;
+ MV_REFERENCE_FRAME rfs = a_sg ? vrfa : vrfl;
+ if (vrfc == cm->comp_var_ref[1] && rfs != cm->comp_var_ref[1])
+ pred_context = 1;
+ else if (rfs == cm->comp_var_ref[1] && vrfc != cm->comp_var_ref[1])
+ pred_context = 2;
+ else
+ pred_context = 4;
+ } else if (vrfa == vrfl) { // comp/comp
+ pred_context = 4;
+ } else {
+ pred_context = 2;
+ }
+ }
+ } else if (above_in_image || left_in_image) { // one edge available
+ const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
+
+ if (!is_inter_block(edge_mbmi)) {
+ pred_context = 2;
+ } else {
+ if (has_second_ref(edge_mbmi))
+ pred_context = 4 * (edge_mbmi->ref_frame[var_ref_idx]
+ != cm->comp_var_ref[1]);
+ else
+ pred_context = 3 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]);
+ }
+ } else { // no edges available (2)
+ pred_context = 2;
+ }
+ assert(pred_context >= 0 && pred_context < REF_CONTEXTS);
+
+ return pred_context;
+}
+
+unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
+ int pred_context;
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1;
+ const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1;
+ // Note:
+ // The mode info data structure has a one element border above and to the
+ // left of the entries corresponding to real macroblocks.
+ // The prediction flags in these dummy entries are initialized to 0.
+ if (above_in_image && left_in_image) { // both edges available
+ if (above_intra && left_intra) { // intra/intra
+ pred_context = 2;
+ } else if (above_intra || left_intra) { // intra/inter or inter/intra
+ const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
+ if (!has_second_ref(edge_mbmi))
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == LAST_FRAME);
+ else
+ pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME ||
+ edge_mbmi->ref_frame[1] == LAST_FRAME);
+ } else { // inter/inter
+ if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi)) {
+ pred_context = 2 * (above_mbmi->ref_frame[0] == LAST_FRAME) +
+ 2 * (left_mbmi->ref_frame[0] == LAST_FRAME);
+ } else if (has_second_ref(above_mbmi) && has_second_ref(left_mbmi)) {
+ pred_context = 1 + (above_mbmi->ref_frame[0] == LAST_FRAME ||
+ above_mbmi->ref_frame[1] == LAST_FRAME ||
+ left_mbmi->ref_frame[0] == LAST_FRAME ||
+ left_mbmi->ref_frame[1] == LAST_FRAME);
+ } else {
+ const MV_REFERENCE_FRAME rfs = !has_second_ref(above_mbmi) ?
+ above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0];
+ const MV_REFERENCE_FRAME crf1 = has_second_ref(above_mbmi) ?
+ above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0];
+ const MV_REFERENCE_FRAME crf2 = has_second_ref(above_mbmi) ?
+ above_mbmi->ref_frame[1] : left_mbmi->ref_frame[1];
+
+ if (rfs == LAST_FRAME)
+ pred_context = 3 + (crf1 == LAST_FRAME || crf2 == LAST_FRAME);
+ else
+ pred_context = crf1 == LAST_FRAME || crf2 == LAST_FRAME;
+ }
+ }
+ } else if (above_in_image || left_in_image) { // one edge available
+ const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
+ if (!is_inter_block(edge_mbmi)) { // intra
+ pred_context = 2;
+ } else { // inter
+ if (!has_second_ref(edge_mbmi))
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == LAST_FRAME);
+ else
+ pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME ||
+ edge_mbmi->ref_frame[1] == LAST_FRAME);
+ }
+ } else { // no edges available
+ pred_context = 2;
+ }
+
+ assert(pred_context >= 0 && pred_context < REF_CONTEXTS);
+ return pred_context;
+}
+
+unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
+ int pred_context;
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1;
+ const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1;
+
+ // Note:
+ // The mode info data structure has a one element border above and to the
+ // left of the entries corresponding to real macroblocks.
+ // The prediction flags in these dummy entries are initialized to 0.
+ if (above_in_image && left_in_image) { // both edges available
+ if (above_intra && left_intra) { // intra/intra
+ pred_context = 2;
+ } else if (above_intra || left_intra) { // intra/inter or inter/intra
+ const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
+ if (!has_second_ref(edge_mbmi)) {
+ if (edge_mbmi->ref_frame[0] == LAST_FRAME)
+ pred_context = 3;
+ else
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME);
+ } else {
+ pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME ||
+ edge_mbmi->ref_frame[1] == GOLDEN_FRAME);
+ }
+ } else { // inter/inter
+ if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi)) {
+ if (above_mbmi->ref_frame[0] == LAST_FRAME &&
+ left_mbmi->ref_frame[0] == LAST_FRAME) {
+ pred_context = 3;
+ } else if (above_mbmi->ref_frame[0] == LAST_FRAME ||
+ left_mbmi->ref_frame[0] == LAST_FRAME) {
+ const MB_MODE_INFO *edge_mbmi =
+ above_mbmi->ref_frame[0] == LAST_FRAME ? left_mbmi : above_mbmi;
+
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME);
+ } else {
+ pred_context = 2 * (above_mbmi->ref_frame[0] == GOLDEN_FRAME) +
+ 2 * (left_mbmi->ref_frame[0] == GOLDEN_FRAME);
+ }
+ } else if (has_second_ref(above_mbmi) && has_second_ref(left_mbmi)) {
+ if (above_mbmi->ref_frame[0] == left_mbmi->ref_frame[0] &&
+ above_mbmi->ref_frame[1] == left_mbmi->ref_frame[1])
+ pred_context = 3 * (above_mbmi->ref_frame[0] == GOLDEN_FRAME ||
+ above_mbmi->ref_frame[1] == GOLDEN_FRAME ||
+ left_mbmi->ref_frame[0] == GOLDEN_FRAME ||
+ left_mbmi->ref_frame[1] == GOLDEN_FRAME);
+ else
+ pred_context = 2;
+ } else {
+ const MV_REFERENCE_FRAME rfs = !has_second_ref(above_mbmi) ?
+ above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0];
+ const MV_REFERENCE_FRAME crf1 = has_second_ref(above_mbmi) ?
+ above_mbmi->ref_frame[0] : left_mbmi->ref_frame[0];
+ const MV_REFERENCE_FRAME crf2 = has_second_ref(above_mbmi) ?
+ above_mbmi->ref_frame[1] : left_mbmi->ref_frame[1];
+
+ if (rfs == GOLDEN_FRAME)
+ pred_context = 3 + (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME);
+ else if (rfs == ALTREF_FRAME)
+ pred_context = crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME;
+ else
+ pred_context = 1 + 2 * (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME);
+ }
+ }
+ } else if (above_in_image || left_in_image) { // one edge available
+ const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
+
+ if (!is_inter_block(edge_mbmi) ||
+ (edge_mbmi->ref_frame[0] == LAST_FRAME && !has_second_ref(edge_mbmi)))
+ pred_context = 2;
+ else if (!has_second_ref(edge_mbmi))
+ pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME);
+ else
+ pred_context = 3 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME ||
+ edge_mbmi->ref_frame[1] == GOLDEN_FRAME);
+ } else { // no edges available (2)
+ pred_context = 2;
+ }
+ assert(pred_context >= 0 && pred_context < REF_CONTEXTS);
+ return pred_context;
+}
+
+// Returns a context number for the given MB prediction signal
+// The mode info data structure has a one element border above and to the
+// left of the entries corresponding to real blocks.
+// The prediction flags in these dummy entries are initialized to 0.
+unsigned char vp9_get_pred_context_tx_size(const MACROBLOCKD *xd) {
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int max_tx_size = max_txsize_lookup[xd->mi_8x8[0]->mbmi.sb_type];
+ int above_context = max_tx_size;
+ int left_context = max_tx_size;
+
+ if (above_in_image)
+ above_context = above_mbmi->skip_coeff ? max_tx_size
+ : above_mbmi->tx_size;
+
+ if (left_in_image)
+ left_context = left_mbmi->skip_coeff ? max_tx_size
+ : left_mbmi->tx_size;
+
+ if (!left_in_image)
+ left_context = above_context;
+
+ if (!above_in_image)
+ above_context = left_context;
+
+ return above_context + left_context > max_tx_size;
+}
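+
+/* Example (illustrative, assuming the TX_SIZE enum runs TX_4X4 == 0 up to
+ * TX_32X32 == 3): for a 32x32 block, two neighbours coded with 4x4
+ * transforms give 0 + 0 <= 3, context 0; two coded at 32x32 give
+ * 3 + 3 > 3, context 1. Skipped or missing neighbours count as
+ * max_tx_size, biasing toward context 1. */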
+
+void vp9_set_pred_flag_seg_id(MACROBLOCKD *xd, uint8_t pred_flag) {
+ xd->this_mi->mbmi.seg_id_predicted = pred_flag;
+}
+
+void vp9_set_pred_flag_mbskip(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+ uint8_t pred_flag) {
+ xd->this_mi->mbmi.skip_coeff = pred_flag;
+}
+
+int vp9_get_segment_id(VP9_COMMON *cm, const uint8_t *segment_ids,
+ BLOCK_SIZE bsize, int mi_row, int mi_col) {
+ const int mi_offset = mi_row * cm->mi_cols + mi_col;
+ const int bw = 1 << mi_width_log2(bsize);
+ const int bh = 1 << mi_height_log2(bsize);
+ const int xmis = MIN(cm->mi_cols - mi_col, bw);
+ const int ymis = MIN(cm->mi_rows - mi_row, bh);
+ int x, y, segment_id = INT_MAX;
+
+ for (y = 0; y < ymis; y++)
+ for (x = 0; x < xmis; x++)
+ segment_id = MIN(segment_id,
+ segment_ids[mi_offset + y * cm->mi_cols + x]);
+
+ assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
+ return segment_id;
+}
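+
+/* Example (illustrative): a block covering a 2x2 group of mi units whose
+ * map entries are {3, 1, 2, 5} gets segment_id == 1 -- the minimum over
+ * every covered unit, which keeps the result well defined when a block
+ * straddles a segment boundary. */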
diff --git a/libvpx/vp9/common/vp9_pred_common.h b/libvpx/vp9/common/vp9_pred_common.h
new file mode 100644
index 0000000..47ca8ab
--- /dev/null
+++ b/libvpx/vp9/common/vp9_pred_common.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_PRED_COMMON_H_
+#define VP9_COMMON_VP9_PRED_COMMON_H_
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+int vp9_get_segment_id(VP9_COMMON *cm, const uint8_t *segment_ids,
+ BLOCK_SIZE bsize, int mi_row, int mi_col);
+
+
+static INLINE int vp9_get_pred_context_seg_id(const MACROBLOCKD *xd) {
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const int above_sip = above_mi ? above_mi->mbmi.seg_id_predicted : 0;
+ const int left_sip = left_mi ? left_mi->mbmi.seg_id_predicted : 0;
+
+ return above_sip + (xd->left_available ? left_sip : 0);
+}
+
+static INLINE vp9_prob vp9_get_pred_prob_seg_id(struct segmentation *seg,
+ const MACROBLOCKD *xd) {
+ return seg->pred_probs[vp9_get_pred_context_seg_id(xd)];
+}
+
+void vp9_set_pred_flag_seg_id(MACROBLOCKD *xd, uint8_t pred_flag);
+
+static INLINE int vp9_get_pred_context_mbskip(const MACROBLOCKD *xd) {
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const int above_skip_coeff = above_mi ? above_mi->mbmi.skip_coeff : 0;
+ const int left_skip_coeff = left_mi ? left_mi->mbmi.skip_coeff : 0;
+
+ return above_skip_coeff + (xd->left_available ? left_skip_coeff : 0);
+}
+
+static INLINE vp9_prob vp9_get_pred_prob_mbskip(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ return cm->fc.mbskip_probs[vp9_get_pred_context_mbskip(xd)];
+}
+
+static INLINE unsigned char vp9_get_pred_flag_mbskip(const MACROBLOCKD *xd) {
+ return xd->this_mi->mbmi.skip_coeff;
+}
+
+void vp9_set_pred_flag_mbskip(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+ uint8_t pred_flag);
+
+unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
+
+unsigned char vp9_get_pred_context_intra_inter(const MACROBLOCKD *xd);
+
+static INLINE vp9_prob vp9_get_pred_prob_intra_inter(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ const int pred_context = vp9_get_pred_context_intra_inter(xd);
+ return cm->fc.intra_inter_prob[pred_context];
+}
+
+unsigned char vp9_get_pred_context_comp_inter_inter(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd);
+
+
+static INLINE vp9_prob vp9_get_pred_prob_comp_inter_inter(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ const int pred_context = vp9_get_pred_context_comp_inter_inter(cm, xd);
+ return cm->fc.comp_inter_prob[pred_context];
+}
+
+unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd);
+
+static INLINE vp9_prob vp9_get_pred_prob_comp_ref_p(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ const int pred_context = vp9_get_pred_context_comp_ref_p(cm, xd);
+ return cm->fc.comp_ref_prob[pred_context];
+}
+
+unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
+
+static INLINE vp9_prob vp9_get_pred_prob_single_ref_p1(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ const int pred_context = vp9_get_pred_context_single_ref_p1(xd);
+ return cm->fc.single_ref_prob[pred_context][0];
+}
+
+unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
+
+static INLINE vp9_prob vp9_get_pred_prob_single_ref_p2(const VP9_COMMON *cm,
+ const MACROBLOCKD *xd) {
+ const int pred_context = vp9_get_pred_context_single_ref_p2(xd);
+ return cm->fc.single_ref_prob[pred_context][1];
+}
+
+unsigned char vp9_get_pred_context_tx_size(const MACROBLOCKD *xd);
+
+static const vp9_prob *get_tx_probs(BLOCK_SIZE bsize, uint8_t context,
+ const struct tx_probs *tx_probs) {
+ if (bsize < BLOCK_16X16)
+ return tx_probs->p8x8[context];
+ else if (bsize < BLOCK_32X32)
+ return tx_probs->p16x16[context];
+ else
+ return tx_probs->p32x32[context];
+}
+
+static const vp9_prob *get_tx_probs2(const MACROBLOCKD *xd,
+ const struct tx_probs *tx_probs,
+ const MODE_INFO *m) {
+ const BLOCK_SIZE bsize = m->mbmi.sb_type;
+ const int context = vp9_get_pred_context_tx_size(xd);
+ return get_tx_probs(bsize, context, tx_probs);
+}
+
+static void update_tx_counts(BLOCK_SIZE bsize, uint8_t context,
+ TX_SIZE tx_size, struct tx_counts *tx_counts) {
+ if (bsize >= BLOCK_32X32)
+ tx_counts->p32x32[context][tx_size]++;
+ else if (bsize >= BLOCK_16X16)
+ tx_counts->p16x16[context][tx_size]++;
+ else
+ tx_counts->p8x8[context][tx_size]++;
+}
+
+#endif // VP9_COMMON_VP9_PRED_COMMON_H_
diff --git a/libvpx/vp9/common/vp9_quant_common.c b/libvpx/vp9/common/vp9_quant_common.c
new file mode 100644
index 0000000..bc40854
--- /dev/null
+++ b/libvpx/vp9/common/vp9_quant_common.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_seg_common.h"
+
+#if 1
+static const int16_t dc_qlookup[QINDEX_RANGE] = {
+ 4, 8, 8, 9, 10, 11, 12, 12,
+ 13, 14, 15, 16, 17, 18, 19, 19,
+ 20, 21, 22, 23, 24, 25, 26, 26,
+ 27, 28, 29, 30, 31, 32, 32, 33,
+ 34, 35, 36, 37, 38, 38, 39, 40,
+ 41, 42, 43, 43, 44, 45, 46, 47,
+ 48, 48, 49, 50, 51, 52, 53, 53,
+ 54, 55, 56, 57, 57, 58, 59, 60,
+ 61, 62, 62, 63, 64, 65, 66, 66,
+ 67, 68, 69, 70, 70, 71, 72, 73,
+ 74, 74, 75, 76, 77, 78, 78, 79,
+ 80, 81, 81, 82, 83, 84, 85, 85,
+ 87, 88, 90, 92, 93, 95, 96, 98,
+ 99, 101, 102, 104, 105, 107, 108, 110,
+ 111, 113, 114, 116, 117, 118, 120, 121,
+ 123, 125, 127, 129, 131, 134, 136, 138,
+ 140, 142, 144, 146, 148, 150, 152, 154,
+ 156, 158, 161, 164, 166, 169, 172, 174,
+ 177, 180, 182, 185, 187, 190, 192, 195,
+ 199, 202, 205, 208, 211, 214, 217, 220,
+ 223, 226, 230, 233, 237, 240, 243, 247,
+ 250, 253, 257, 261, 265, 269, 272, 276,
+ 280, 284, 288, 292, 296, 300, 304, 309,
+ 313, 317, 322, 326, 330, 335, 340, 344,
+ 349, 354, 359, 364, 369, 374, 379, 384,
+ 389, 395, 400, 406, 411, 417, 423, 429,
+ 435, 441, 447, 454, 461, 467, 475, 482,
+ 489, 497, 505, 513, 522, 530, 539, 549,
+ 559, 569, 579, 590, 602, 614, 626, 640,
+ 654, 668, 684, 700, 717, 736, 755, 775,
+ 796, 819, 843, 869, 896, 925, 955, 988,
+ 1022, 1058, 1098, 1139, 1184, 1232, 1282, 1336,
+};
+
+static const int16_t ac_qlookup[QINDEX_RANGE] = {
+ 4, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118,
+ 120, 122, 124, 126, 128, 130, 132, 134,
+ 136, 138, 140, 142, 144, 146, 148, 150,
+ 152, 155, 158, 161, 164, 167, 170, 173,
+ 176, 179, 182, 185, 188, 191, 194, 197,
+ 200, 203, 207, 211, 215, 219, 223, 227,
+ 231, 235, 239, 243, 247, 251, 255, 260,
+ 265, 270, 275, 280, 285, 290, 295, 300,
+ 305, 311, 317, 323, 329, 335, 341, 347,
+ 353, 359, 366, 373, 380, 387, 394, 401,
+ 408, 416, 424, 432, 440, 448, 456, 465,
+ 474, 483, 492, 501, 510, 520, 530, 540,
+ 550, 560, 571, 582, 593, 604, 615, 627,
+ 639, 651, 663, 676, 689, 702, 715, 729,
+ 743, 757, 771, 786, 801, 816, 832, 848,
+ 864, 881, 898, 915, 933, 951, 969, 988,
+ 1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151,
+ 1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343,
+ 1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567,
+ 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
+};
+
+void vp9_init_quant_tables(void) { }
+#else
+static int16_t dc_qlookup[QINDEX_RANGE];
+static int16_t ac_qlookup[QINDEX_RANGE];
+
+#define ACDC_MIN 8
+
+// TODO(dkovalev): move to common and reuse
+static double poly3(double a, double b, double c, double d, double x) {
+ return a*x*x*x + b*x*x + c*x + d;
+}
+
+void vp9_init_quant_tables(void) {
+ int i, val = 4;
+
+ // A "real" q of 1.0 forces lossless mode.
+ // In practice, non-lossless Qs between 1.0 and 2.0 (represented here by
+ // integer values from 5-7) give poor rd results (lower psnr and often
+ // larger size than the lossless encode). To block out those "not very
+ // useful" values we increment the ac and dc q lookup values by 4 after
+ // position 0.
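+ // For example, the loop below yields ac_qlookup[1] = 8 and dc_qlookup[1] =
+ // MAX(8, poly3(..., 8)) = 8, matching the hard-coded tables above; since
+ // val grows by ~2% per step, the quantizer roughly doubles every 35 steps
+ // of qindex.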
+ ac_qlookup[0] = val;
+ dc_qlookup[0] = val;
+ val += 4;
+
+ for (i = 1; i < QINDEX_RANGE; i++) {
+ const int ac_val = val;
+
+ val = (int)(val * 1.01975);
+ if (val == ac_val)
+ ++val;
+
+ ac_qlookup[i] = (int16_t)ac_val;
+ dc_qlookup[i] = (int16_t)MAX(ACDC_MIN, poly3(0.000000305, -0.00065, 0.9,
+ 0.5, ac_val));
+ }
+}
+#endif
+
+int16_t vp9_dc_quant(int qindex, int delta) {
+ return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
+}
+
+int16_t vp9_ac_quant(int qindex, int delta) {
+ return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
+}
+
+int vp9_get_qindex(struct segmentation *seg, int segment_id, int base_qindex) {
+ if (vp9_segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) {
+ const int data = vp9_get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
+ return seg->abs_delta == SEGMENT_ABSDATA ?
+ data : // Abs value
+ clamp(base_qindex + data, 0, MAXQ); // Delta value
+ } else {
+ return base_qindex;
+ }
+}
+
diff --git a/libvpx/vp9/common/vp9_quant_common.h b/libvpx/vp9/common/vp9_quant_common.h
new file mode 100644
index 0000000..83f2fb6
--- /dev/null
+++ b/libvpx/vp9/common/vp9_quant_common.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_QUANT_COMMON_H_
+#define VP9_COMMON_VP9_QUANT_COMMON_H_
+
+#include "vp9/common/vp9_blockd.h"
+
+#define MINQ 0
+#define MAXQ 255
+#define QINDEX_RANGE (MAXQ - MINQ + 1)
+#define QINDEX_BITS 8
+
+void vp9_init_quant_tables(void);
+
+int16_t vp9_dc_quant(int qindex, int delta);
+int16_t vp9_ac_quant(int qindex, int delta);
+
+int vp9_get_qindex(struct segmentation *seg, int segment_id, int base_qindex);
+
+#endif // VP9_COMMON_VP9_QUANT_COMMON_H_
diff --git a/libvpx/vp9/common/vp9_reconinter.c b/libvpx/vp9/common/vp9_reconinter.c
new file mode 100644
index 0000000..dc1d46c
--- /dev/null
+++ b/libvpx/vp9/common/vp9_reconinter.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "./vpx_scale_rtcd.h"
+#include "./vpx_config.h"
+
+#include "vpx/vpx_integer.h"
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_filter.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_reconintra.h"
+
+void vp9_setup_interp_filters(MACROBLOCKD *xd,
+ INTERPOLATIONFILTERTYPE mcomp_filter_type,
+ VP9_COMMON *cm) {
+ if (xd->mi_8x8 && xd->this_mi) {
+ MB_MODE_INFO * mbmi = &xd->this_mi->mbmi;
+
+ set_scale_factors(xd, mbmi->ref_frame[0] - 1, mbmi->ref_frame[1] - 1,
+ cm->active_ref_scale);
+ } else {
+ set_scale_factors(xd, -1, -1, cm->active_ref_scale);
+ }
+
+ switch (mcomp_filter_type) {
+ case EIGHTTAP:
+ case SWITCHABLE:
+ xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8;
+ break;
+ case EIGHTTAP_SMOOTH:
+ xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8lp;
+ break;
+ case EIGHTTAP_SHARP:
+ xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8s;
+ break;
+ case BILINEAR:
+ xd->subpix.filter_x = xd->subpix.filter_y = vp9_bilinear_filters;
+ break;
+ }
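+ // The filter tables are expected to be 256-byte aligned; the assert below
+ // checks the low 8 address bits of the table just selected.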
+ assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0);
+}
+
+void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const MV *src_mv,
+ const struct scale_factors *scale,
+ int w, int h, int ref,
+ const struct subpix_fn_table *subpix,
+ enum mv_precision precision) {
+ const int is_q4 = precision == MV_PRECISION_Q4;
+ const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row << 1,
+ is_q4 ? src_mv->col : src_mv->col << 1 };
+ const MV32 mv = scale->scale_mv(&mv_q4, scale);
+ const int subpel_x = mv.col & SUBPEL_MASK;
+ const int subpel_y = mv.row & SUBPEL_MASK;
+
+ src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
+ scale->predict[subpel_x != 0][subpel_y != 0][ref](
+ src, src_stride, dst, dst_stride,
+ subpix->filter_x[subpel_x], scale->x_step_q4,
+ subpix->filter_y[subpel_y], scale->y_step_q4,
+ w, h);
+}
+
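+// Divides a summed quarter-pel MV component by 4 (averaging the four
+// sub-block MVs below), rounding to nearest with ties away from zero:
+// e.g. 11 -> (11 + 2) / 4 = 3 and -11 -> (-11 - 2) / 4 = -3.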
+static INLINE int round_mv_comp_q4(int value) {
+ return (value < 0 ? value - 2 : value + 2) / 4;
+}
+
+static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {
+ MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +
+ mi->bmi[1].as_mv[idx].as_mv.row +
+ mi->bmi[2].as_mv[idx].as_mv.row +
+ mi->bmi[3].as_mv[idx].as_mv.row),
+ round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col +
+ mi->bmi[1].as_mv[idx].as_mv.col +
+ mi->bmi[2].as_mv[idx].as_mv.col +
+ mi->bmi[3].as_mv[idx].as_mv.col) };
+ return res;
+}
+
+// TODO(jkoleszar): yet another mv clamping function :-(
+MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
+ int bw, int bh, int ss_x, int ss_y) {
+ // If the MV points so far into the UMV border that no visible pixels
+ // are used for reconstruction, the subpel part of the MV can be
+ // discarded and the MV limited to 16 pixels with equivalent results.
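+ // The (1 - ss) shifts below convert the 1/8-pel luma-space MV into a
+ // 1/16-pel (q4) MV in this plane's possibly subsampled coordinates.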
+ const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS;
+ const int spel_right = spel_left - SUBPEL_SHIFTS;
+ const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS;
+ const int spel_bottom = spel_top - SUBPEL_SHIFTS;
+ MV clamped_mv = {
+ src_mv->row << (1 - ss_y),
+ src_mv->col << (1 - ss_x)
+ };
+ assert(ss_x <= 1);
+ assert(ss_y <= 1);
+
+ clamp_mv(&clamped_mv, (xd->mb_to_left_edge << (1 - ss_x)) - spel_left,
+ (xd->mb_to_right_edge << (1 - ss_x)) + spel_right,
+ (xd->mb_to_top_edge << (1 - ss_y)) - spel_top,
+ (xd->mb_to_bottom_edge << (1 - ss_y)) + spel_bottom);
+
+ return clamped_mv;
+}
+
+struct build_inter_predictors_args {
+ MACROBLOCKD *xd;
+ int x, y;
+};
+
+static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize,
+ int pred_w, int pred_h,
+ void *argv) {
+ const struct build_inter_predictors_args* const arg = argv;
+ MACROBLOCKD *const xd = arg->xd;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ const int bwl = b_width_log2(bsize) - pd->subsampling_x;
+ const int bw = 4 << bwl;
+ const int bh = plane_block_height(bsize, pd);
+ const int x = 4 * (block & ((1 << bwl) - 1));
+ const int y = 4 * (block >> bwl);
+ const MODE_INFO *mi = xd->this_mi;
+ const int use_second_ref = mi->mbmi.ref_frame[1] > 0;
+ int ref;
+
+ assert(x < bw);
+ assert(y < bh);
+ assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_w == bw);
+ assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_h == bh);
+
+ for (ref = 0; ref < 1 + use_second_ref; ++ref) {
+ struct scale_factors *const scale = &xd->scale_factor[ref];
+ struct buf_2d *const pre_buf = &pd->pre[ref];
+ struct buf_2d *const dst_buf = &pd->dst;
+
+ const uint8_t *const pre = pre_buf->buf + scaled_buffer_offset(x, y,
+ pre_buf->stride, scale);
+
+ uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
+
+ // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
+ // same MV (the average of the 4 luma MVs) but we could do something
+ // smarter for non-4:2:0. Just punt for now, pending the changes to get
+ // rid of SPLITMV mode entirely.
+ const MV mv = mi->mbmi.sb_type < BLOCK_8X8
+ ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv
+ : mi_mv_pred_q4(mi, ref))
+ : mi->mbmi.mv[ref].as_mv;
+
+ // TODO(jkoleszar): This clamping is done in the incorrect place for the
+ // scaling case. It needs to be done on the scaled MV, not the pre-scaling
+ // MV. Note, however, that it performs the subsampling-aware scaling so
+ // that the result is always q4.
+ const MV res_mv = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
+ pd->subsampling_x,
+ pd->subsampling_y);
+
+ scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
+ vp9_build_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+ &res_mv, scale,
+ 4 << pred_w, 4 << pred_h, ref,
+ &xd->subpix, MV_PRECISION_Q4);
+ }
+}
+
+// TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could
+// calculate the subsampled BLOCK_SIZE, but that type isn't defined for
+// sizes smaller than 16x16 yet.
+typedef void (*foreach_predicted_block_visitor)(int plane, int block,
+ BLOCK_SIZE bsize,
+ int pred_w, int pred_h,
+ void *arg);
+static INLINE void foreach_predicted_block_in_plane(
+ const MACROBLOCKD* const xd, BLOCK_SIZE bsize, int plane,
+ foreach_predicted_block_visitor visit, void *arg) {
+ int i, x, y;
+
+ // Block width/height in units of 4x4 blocks, log2 ("*_b"):
+ // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8.
+ // bwl/bhl below are those sizes after plane subsampling.
+ const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
+
+ // size of the predictor to use.
+ int pred_w, pred_h;
+
+ if (xd->this_mi->mbmi.sb_type < BLOCK_8X8) {
+ assert(bsize == BLOCK_8X8);
+ pred_w = 0;
+ pred_h = 0;
+ } else {
+ pred_w = bwl;
+ pred_h = bhl;
+ }
+ assert(pred_w <= bwl);
+ assert(pred_h <= bhl);
+
+ // visit each subblock in raster order
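+ // (i counts 4x4 blocks within the plane: each visit consumes 1 << pred_w
+ // columns, and the correction after the inner loop advances i past the
+ // 1 << pred_h rows of 4x4 blocks the predictor row just covered.)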
+ i = 0;
+ for (y = 0; y < 1 << bhl; y += 1 << pred_h) {
+ for (x = 0; x < 1 << bwl; x += 1 << pred_w) {
+ visit(plane, i, bsize, pred_w, pred_h, arg);
+ i += 1 << pred_w;
+ }
+ i += (1 << (bwl + pred_h)) - (1 << bwl);
+ }
+}
+
+static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+ int mi_row, int mi_col,
+ int plane_from, int plane_to) {
+ int plane;
+ for (plane = plane_from; plane <= plane_to; ++plane) {
+ struct build_inter_predictors_args args = {
+ xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
+ };
+ foreach_predicted_block_in_plane(xd, bsize, plane, build_inter_predictors,
+ &args);
+ }
+}
+
+void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
+}
+void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
+ MAX_MB_PLANE - 1);
+}
+void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
+ MAX_MB_PLANE - 1);
+}
+
+// TODO(dkovalev): find better place for this function
+void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
+ const int ref = cm->active_ref_idx[i];
+ struct scale_factors *const sf = &cm->active_ref_scale[i];
+ if (ref >= NUM_YV12_BUFFERS) {
+ vp9_zero(*sf);
+ } else {
+ YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
+ vp9_setup_scale_factors_for_frame(sf,
+ fb->y_crop_width, fb->y_crop_height,
+ cm->width, cm->height);
+
+ if (vp9_is_scaled(sf))
+ vp9_extend_frame_borders(fb, cm->subsampling_x, cm->subsampling_y);
+ }
+}
+
diff --git a/libvpx/vp9/common/vp9_reconinter.h b/libvpx/vp9/common/vp9_reconinter.h
new file mode 100644
index 0000000..504b793
--- /dev/null
+++ b/libvpx/vp9/common/vp9_reconinter.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_RECONINTER_H_
+#define VP9_COMMON_VP9_RECONINTER_H_
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+struct subpix_fn_table;
+void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
+
+void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
+
+void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
+
+void vp9_setup_interp_filters(MACROBLOCKD *xd,
+ INTERPOLATIONFILTERTYPE filter,
+ VP9_COMMON *cm);
+
+void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const MV *mv_q3,
+ const struct scale_factors *scale,
+ int w, int h, int do_avg,
+ const struct subpix_fn_table *subpix,
+ enum mv_precision precision);
+
+static int scaled_buffer_offset(int x_offset, int y_offset, int stride,
+ const struct scale_factors *scale) {
+ const int x = scale ? scale->scale_value_x(x_offset, scale) : x_offset;
+ const int y = scale ? scale->scale_value_y(y_offset, scale) : y_offset;
+ return y * stride + x;
+}
+
+static void setup_pred_plane(struct buf_2d *dst,
+ uint8_t *src, int stride,
+ int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ int subsampling_x, int subsampling_y) {
+ const int x = (MI_SIZE * mi_col) >> subsampling_x;
+ const int y = (MI_SIZE * mi_row) >> subsampling_y;
+ dst->buf = src + scaled_buffer_offset(x, y, stride, scale);
+ dst->stride = stride;
+}
+
+// TODO(jkoleszar): audit all uses of this that don't set mb_row, mb_col
+static void setup_dst_planes(MACROBLOCKD *xd,
+ const YV12_BUFFER_CONFIG *src,
+ int mi_row, int mi_col) {
+ uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ struct macroblockd_plane *pd = &xd->plane[i];
+ setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL,
+ pd->subsampling_x, pd->subsampling_y);
+ }
+}
+
+static void setup_pre_planes(MACROBLOCKD *xd, int i,
+ const YV12_BUFFER_CONFIG *src,
+ int mi_row, int mi_col,
+ const struct scale_factors *sf) {
+ if (src) {
+ int j;
+ uint8_t* buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+
+ for (j = 0; j < MAX_MB_PLANE; ++j) {
+ struct macroblockd_plane *pd = &xd->plane[j];
+ setup_pred_plane(&pd->pre[i], buffers[j], strides[j],
+ mi_row, mi_col, sf, pd->subsampling_x, pd->subsampling_y);
+ }
+ }
+}
+
+static void set_scale_factors(MACROBLOCKD *xd, int ref0, int ref1,
+ struct scale_factors sf[MAX_REF_FRAMES]) {
+ xd->scale_factor[0] = sf[ref0 >= 0 ? ref0 : 0];
+ xd->scale_factor[1] = sf[ref1 >= 0 ? ref1 : 0];
+}
+
+void vp9_setup_scale_factors(VP9_COMMON *cm, int i);
+
+#endif // VP9_COMMON_VP9_RECONINTER_H_
diff --git a/libvpx/vp9/common/vp9_reconintra.c b/libvpx/vp9/common/vp9_reconintra.c
new file mode 100644
index 0000000..4a451b9
--- /dev/null
+++ b/libvpx/vp9/common/vp9_reconintra.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/vpx_once.h"
+
+#include "vp9_rtcd.h"
+
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+const TX_TYPE mode2txfm_map[MB_MODE_COUNT] = {
+ DCT_DCT, // DC
+ ADST_DCT, // V
+ DCT_ADST, // H
+ DCT_DCT, // D45
+ ADST_ADST, // D135
+ ADST_DCT, // D117
+ DCT_ADST, // D153
+ DCT_ADST, // D207
+ ADST_DCT, // D63
+ ADST_ADST, // TM
+ DCT_DCT, // NEARESTMV
+ DCT_DCT, // NEARMV
+ DCT_DCT, // ZEROMV
+ DCT_DCT // NEWMV
+};
+
+#define intra_pred_sized(type, size) \
+ void vp9_##type##_predictor_##size##x##size##_c(uint8_t *dst, \
+ ptrdiff_t stride, \
+ const uint8_t *above, \
+ const uint8_t *left) { \
+ type##_predictor(dst, stride, size, above, left); \
+ }
+
+#define intra_pred_allsizes(type) \
+ intra_pred_sized(type, 4) \
+ intra_pred_sized(type, 8) \
+ intra_pred_sized(type, 16) \
+ intra_pred_sized(type, 32)
+
+static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r, c;
+
+ // first column
+ for (r = 0; r < bs - 1; ++r)
+ dst[r * stride] = ROUND_POWER_OF_TWO(left[r] + left[r + 1], 1);
+ dst[(bs - 1) * stride] = left[bs - 1];
+ dst++;
+
+ // second column
+ for (r = 0; r < bs - 2; ++r)
+ dst[r * stride] = ROUND_POWER_OF_TWO(left[r] + left[r + 1] * 2 +
+ left[r + 2], 2);
+ dst[(bs - 2) * stride] = ROUND_POWER_OF_TWO(left[bs - 2] +
+ left[bs - 1] * 3, 2);
+ dst[(bs - 1) * stride] = left[bs - 1];
+ dst++;
+
+ // rest of last row
+ for (c = 0; c < bs - 2; ++c)
+ dst[(bs - 1) * stride + c] = left[bs - 1];
+
+ for (r = bs - 2; r >= 0; --r)
+ for (c = 0; c < bs - 2; ++c)
+ dst[r * stride + c] = dst[(r + 1) * stride + c - 2];
+}
+intra_pred_allsizes(d207)
+
+static INLINE void d63_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r, c;
+ for (r = 0; r < bs; ++r) {
+ for (c = 0; c < bs; ++c)
+ dst[c] = r & 1 ? ROUND_POWER_OF_TWO(above[r/2 + c] +
+ above[r/2 + c + 1] * 2 +
+ above[r/2 + c + 2], 2)
+ : ROUND_POWER_OF_TWO(above[r/2 + c] +
+ above[r/2 + c + 1], 1);
+ dst += stride;
+ }
+}
+intra_pred_allsizes(d63)
+
+static INLINE void d45_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r, c;
+ for (r = 0; r < bs; ++r) {
+ for (c = 0; c < bs; ++c)
+ dst[c] = r + c + 2 < bs * 2 ? ROUND_POWER_OF_TWO(above[r + c] +
+ above[r + c + 1] * 2 +
+ above[r + c + 2], 2)
+ : above[bs * 2 - 1];
+ dst += stride;
+ }
+}
+intra_pred_allsizes(d45)
+
+static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r, c;
+
+ // first row
+ for (c = 0; c < bs; c++)
+ dst[c] = ROUND_POWER_OF_TWO(above[c - 1] + above[c], 1);
+ dst += stride;
+
+ // second row
+ dst[0] = ROUND_POWER_OF_TWO(left[0] + above[-1] * 2 + above[0], 2);
+ for (c = 1; c < bs; c++)
+ dst[c] = ROUND_POWER_OF_TWO(above[c - 2] + above[c - 1] * 2 + above[c], 2);
+ dst += stride;
+
+ // the rest of the first column
+ dst[0] = ROUND_POWER_OF_TWO(above[-1] + left[0] * 2 + left[1], 2);
+ for (r = 3; r < bs; ++r)
+ dst[(r - 2) * stride] = ROUND_POWER_OF_TWO(left[r - 3] + left[r - 2] * 2 +
+ left[r - 1], 2);
+
+ // the rest of the block
+ for (r = 2; r < bs; ++r) {
+ for (c = 1; c < bs; c++)
+ dst[c] = dst[-2 * stride + c - 1];
+ dst += stride;
+ }
+}
+intra_pred_allsizes(d117)
+
+static INLINE void d135_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r, c;
+ dst[0] = ROUND_POWER_OF_TWO(left[0] + above[-1] * 2 + above[0], 2);
+ for (c = 1; c < bs; c++)
+ dst[c] = ROUND_POWER_OF_TWO(above[c - 2] + above[c - 1] * 2 + above[c], 2);
+
+ dst[stride] = ROUND_POWER_OF_TWO(above[-1] + left[0] * 2 + left[1], 2);
+ for (r = 2; r < bs; ++r)
+ dst[r * stride] = ROUND_POWER_OF_TWO(left[r - 2] + left[r - 1] * 2 +
+ left[r], 2);
+
+ dst += stride;
+ for (r = 1; r < bs; ++r) {
+ for (c = 1; c < bs; c++)
+ dst[c] = dst[-stride + c - 1];
+ dst += stride;
+ }
+}
+intra_pred_allsizes(d135)
+
+static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r, c;
+ dst[0] = ROUND_POWER_OF_TWO(above[-1] + left[0], 1);
+ for (r = 1; r < bs; r++)
+ dst[r * stride] = ROUND_POWER_OF_TWO(left[r - 1] + left[r], 1);
+ dst++;
+
+ dst[0] = ROUND_POWER_OF_TWO(left[0] + above[-1] * 2 + above[0], 2);
+ dst[stride] = ROUND_POWER_OF_TWO(above[-1] + left[0] * 2 + left[1], 2);
+ for (r = 2; r < bs; r++)
+ dst[r * stride] = ROUND_POWER_OF_TWO(left[r - 2] + left[r - 1] * 2 +
+ left[r], 2);
+ dst++;
+
+ for (c = 0; c < bs - 2; c++)
+ dst[c] = ROUND_POWER_OF_TWO(above[c - 1] + above[c] * 2 + above[c + 1], 2);
+ dst += stride;
+
+ for (r = 1; r < bs; ++r) {
+ for (c = 0; c < bs - 2; c++)
+ dst[c] = dst[-stride + c - 2];
+ dst += stride;
+ }
+}
+intra_pred_allsizes(d153)
+
+static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r;
+
+ for (r = 0; r < bs; r++) {
+ vpx_memcpy(dst, above, bs);
+ dst += stride;
+ }
+}
+intra_pred_allsizes(v)
+
+static INLINE void h_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r;
+
+ for (r = 0; r < bs; r++) {
+ vpx_memset(dst, left[r], bs);
+ dst += stride;
+ }
+}
+intra_pred_allsizes(h)
+
+static INLINE void tm_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r, c;
+ int ytop_left = above[-1];
+
+ for (r = 0; r < bs; r++) {
+ for (c = 0; c < bs; c++)
+ dst[c] = clip_pixel(left[r] + above[c] - ytop_left);
+ dst += stride;
+ }
+}
+intra_pred_allsizes(tm)
+
+static INLINE void dc_128_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int r;
+
+ for (r = 0; r < bs; r++) {
+ vpx_memset(dst, 128, bs);
+ dst += stride;
+ }
+}
+intra_pred_allsizes(dc_128)
+
+static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above,
+ const uint8_t *left) {
+ int i, r, expected_dc, sum = 0;
+
+ for (i = 0; i < bs; i++)
+ sum += left[i];
+ expected_dc = (sum + (bs >> 1)) / bs;
+
+ for (r = 0; r < bs; r++) {
+ vpx_memset(dst, expected_dc, bs);
+ dst += stride;
+ }
+}
+intra_pred_allsizes(dc_left)
+
+static INLINE void dc_top_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int i, r, expected_dc, sum = 0;
+
+ for (i = 0; i < bs; i++)
+ sum += above[i];
+ expected_dc = (sum + (bs >> 1)) / bs;
+
+ for (r = 0; r < bs; r++) {
+ vpx_memset(dst, expected_dc, bs);
+ dst += stride;
+ }
+}
+intra_pred_allsizes(dc_top)
+
+static INLINE void dc_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+ const uint8_t *above, const uint8_t *left) {
+ int i, r, expected_dc, sum = 0;
+ const int count = 2 * bs;
+
+ for (i = 0; i < bs; i++) {
+ sum += above[i];
+ sum += left[i];
+ }
+
+ expected_dc = (sum + (count >> 1)) / count;
+
+ for (r = 0; r < bs; r++) {
+ vpx_memset(dst, expected_dc, bs);
+ dst += stride;
+ }
+}
+intra_pred_allsizes(dc)
+#undef intra_pred_allsizes
+
+typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left);
+
+static intra_pred_fn pred[INTRA_MODES][4];
+static intra_pred_fn dc_pred[2][2][4];
+
+static void init_intra_pred_fn_ptrs(void) {
+#define intra_pred_allsizes(l, type) \
+ l[0] = vp9_##type##_predictor_4x4; \
+ l[1] = vp9_##type##_predictor_8x8; \
+ l[2] = vp9_##type##_predictor_16x16; \
+ l[3] = vp9_##type##_predictor_32x32
+
+ intra_pred_allsizes(pred[V_PRED], v);
+ intra_pred_allsizes(pred[H_PRED], h);
+ intra_pred_allsizes(pred[D207_PRED], d207);
+ intra_pred_allsizes(pred[D45_PRED], d45);
+ intra_pred_allsizes(pred[D63_PRED], d63);
+ intra_pred_allsizes(pred[D117_PRED], d117);
+ intra_pred_allsizes(pred[D135_PRED], d135);
+ intra_pred_allsizes(pred[D153_PRED], d153);
+ intra_pred_allsizes(pred[TM_PRED], tm);
+
+ intra_pred_allsizes(dc_pred[0][0], dc_128);
+ intra_pred_allsizes(dc_pred[0][1], dc_top);
+ intra_pred_allsizes(dc_pred[1][0], dc_left);
+ intra_pred_allsizes(dc_pred[1][1], dc);
+
+#undef intra_pred_allsizes
+}
+
+static void build_intra_predictors(const uint8_t *ref, int ref_stride,
+ uint8_t *dst, int dst_stride,
+ MB_PREDICTION_MODE mode, TX_SIZE tx_size,
+ int up_available, int left_available,
+ int right_available) {
+ int i;
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, left_col, 64);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 128 + 16);
+ uint8_t *above_row = above_data + 16;
+ const uint8_t *const_above_row = above_row;
+ const int bs = 4 << tx_size;
+
+ // 127 127 127 .. 127 127 127 127 127 127
+ // 129 A B .. Y Z
+ // 129 C D .. W X
+ // 129 E F .. U V
+ // 129 G H .. S T T T T T
+ // ..
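+ // 127 fills a missing above row and 129 a missing left column; when a
+ // neighbor is available its reconstructed pixels are used instead.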
+
+ once(init_intra_pred_fn_ptrs);
+
+ // left
+ if (left_available) {
+ for (i = 0; i < bs; i++)
+ left_col[i] = ref[i * ref_stride - 1];
+ } else {
+ vpx_memset(left_col, 129, bs);
+ }
+
+ // above
+ if (up_available) {
+ const uint8_t *above_ref = ref - ref_stride;
+ if (bs == 4 && right_available && left_available) {
+ const_above_row = above_ref;
+ } else {
+ vpx_memcpy(above_row, above_ref, bs);
+ if (bs == 4 && right_available)
+ vpx_memcpy(above_row + bs, above_ref + bs, bs);
+ else
+ vpx_memset(above_row + bs, above_row[bs - 1], bs);
+ above_row[-1] = left_available ? above_ref[-1] : 129;
+ }
+ } else {
+ vpx_memset(above_row, 127, bs * 2);
+ above_row[-1] = 127;
+ }
+
+ // predict
+ if (mode == DC_PRED) {
+ dc_pred[left_available][up_available][tx_size](dst, dst_stride,
+ const_above_row, left_col);
+ } else {
+ pred[mode][tx_size](dst, dst_stride, const_above_row, left_col);
+ }
+}
+
+void vp9_predict_intra_block(MACROBLOCKD *xd, int block_idx, int bwl_in,
+ TX_SIZE tx_size, int mode,
+ const uint8_t *ref, int ref_stride,
+ uint8_t *dst, int dst_stride) {
+ const int bwl = bwl_in - tx_size;
+ const int wmask = (1 << bwl) - 1;
+ const int have_top = (block_idx >> bwl) || xd->up_available;
+ const int have_left = (block_idx & wmask) || xd->left_available;
+ const int have_right = ((block_idx & wmask) != wmask);
+
+ assert(bwl >= 0);
+ build_intra_predictors(ref, ref_stride, dst, dst_stride, mode, tx_size,
+ have_top, have_left, have_right);
+}
diff --git a/libvpx/vp9/common/vp9_reconintra.h b/libvpx/vp9/common/vp9_reconintra.h
new file mode 100644
index 0000000..e9d0dbf
--- /dev/null
+++ b/libvpx/vp9/common/vp9_reconintra.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_RECONINTRA_H_
+#define VP9_COMMON_VP9_RECONINTRA_H_
+
+#include "vpx/vpx_integer.h"
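+// Selects the tx_size probability vector for the largest transform class
+// the block size allows: 8x8 (bsize < 16x16), 16x16 (bsize < 32x32), or
+// 32x32 (everything else).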
+#include "vp9/common/vp9_blockd.h"
+
+void vp9_predict_intra_block(MACROBLOCKD *xd, int block_idx, int bwl_in,
+ TX_SIZE tx_size, int mode,
+ const uint8_t *ref, int ref_stride,
+ uint8_t *dst, int dst_stride);
+#endif // VP9_COMMON_VP9_RECONINTRA_H_
diff --git a/libvpx/vp9/common/vp9_rtcd.c b/libvpx/vp9/common/vp9_rtcd.c
new file mode 100644
index 0000000..72613ae
--- /dev/null
+++ b/libvpx/vp9/common/vp9_rtcd.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vpx_config.h"
+#define RTCD_C
+#include "vp9_rtcd.h"
+#include "vpx_ports/vpx_once.h"
+
+void vpx_scale_rtcd(void);
+
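+// setup_rtcd_internal() is generated into vp9_rtcd.h by build/make/rtcd.sh;
+// once() guarantees the function-pointer setup runs exactly one time.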
+void vp9_rtcd(void) {
+ vpx_scale_rtcd();
+ once(setup_rtcd_internal);
+}
diff --git a/libvpx/vp9/common/vp9_rtcd_defs.sh b/libvpx/vp9/common/vp9_rtcd_defs.sh
new file mode 100644
index 0000000..042afbb
--- /dev/null
+++ b/libvpx/vp9/common/vp9_rtcd_defs.sh
@@ -0,0 +1,775 @@
+vp9_common_forward_decls() {
+cat <<EOF
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+EOF
+}
+forward_decls vp9_common_forward_decls
+
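+# Each 'prototype' line below declares an RTCD entry point with its C
+# signature; the matching 'specialize' line names the ISA-specific
+# implementations (suffixed _sse2, _neon, ...) that rtcd.sh resolves at
+# runtime.
+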
+# x86inc.asm doesn't work if PIC is enabled on 32-bit platforms, so no assembly there.
+[ "$CONFIG_USE_X86INC" = "yes" ] && mmx_x86inc=mmx && sse_x86inc=sse &&
+ sse2_x86inc=sse2 && ssse3_x86inc=ssse3
+
+# these variables are for functions that are 64-bit only.
+[ $arch = "x86_64" ] && mmx_x86_64=mmx && sse2_x86_64=sse2 && ssse3_x86_64=ssse3
+
+#
+# Dequant
+#
+
+prototype void vp9_idct_add_16x16 "int16_t *input, uint8_t *dest, int stride, int eob"
+specialize vp9_idct_add_16x16
+
+prototype void vp9_idct_add_8x8 "int16_t *input, uint8_t *dest, int stride, int eob"
+specialize vp9_idct_add_8x8
+
+prototype void vp9_idct_add "int16_t *input, uint8_t *dest, int stride, int eob"
+specialize vp9_idct_add
+
+prototype void vp9_idct_add_32x32 "int16_t *q, uint8_t *dst, int stride, int eob"
+specialize vp9_idct_add_32x32
+
+#
+# RECON
+#
+prototype void vp9_d207_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d207_predictor_4x4
+
+prototype void vp9_d45_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d45_predictor_4x4 $ssse3_x86inc
+
+prototype void vp9_d63_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d63_predictor_4x4
+
+prototype void vp9_h_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_h_predictor_4x4 $ssse3_x86inc
+
+prototype void vp9_d117_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d117_predictor_4x4
+
+prototype void vp9_d135_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d135_predictor_4x4
+
+prototype void vp9_d153_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d153_predictor_4x4
+
+prototype void vp9_v_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_v_predictor_4x4 $sse_x86inc
+
+prototype void vp9_tm_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_tm_predictor_4x4 $sse_x86inc
+
+prototype void vp9_dc_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_predictor_4x4 $sse_x86inc
+
+prototype void vp9_dc_top_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_top_predictor_4x4
+
+prototype void vp9_dc_left_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_left_predictor_4x4
+
+prototype void vp9_dc_128_predictor_4x4 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_128_predictor_4x4
+
+prototype void vp9_d207_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d207_predictor_8x8
+
+prototype void vp9_d45_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d45_predictor_8x8 $ssse3_x86inc
+
+prototype void vp9_d63_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d63_predictor_8x8
+
+prototype void vp9_h_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_h_predictor_8x8 $ssse3_x86inc
+
+prototype void vp9_d117_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d117_predictor_8x8
+
+prototype void vp9_d135_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d135_predictor_8x8
+
+prototype void vp9_d153_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d153_predictor_8x8
+
+prototype void vp9_v_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_v_predictor_8x8 $sse_x86inc
+
+prototype void vp9_tm_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_tm_predictor_8x8 $sse2_x86inc
+
+prototype void vp9_dc_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_predictor_8x8 $sse_x86inc
+
+prototype void vp9_dc_top_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_top_predictor_8x8
+
+prototype void vp9_dc_left_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_left_predictor_8x8
+
+prototype void vp9_dc_128_predictor_8x8 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_128_predictor_8x8
+
+prototype void vp9_d207_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d207_predictor_16x16
+
+prototype void vp9_d45_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d45_predictor_16x16 $ssse3_x86inc
+
+prototype void vp9_d63_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d63_predictor_16x16
+
+prototype void vp9_h_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_h_predictor_16x16 $ssse3_x86inc
+
+prototype void vp9_d117_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d117_predictor_16x16
+
+prototype void vp9_d135_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d135_predictor_16x16
+
+prototype void vp9_d153_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d153_predictor_16x16
+
+prototype void vp9_v_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_v_predictor_16x16 $sse2_x86inc
+
+prototype void vp9_tm_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_tm_predictor_16x16 $sse2_x86inc
+
+prototype void vp9_dc_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_predictor_16x16 $sse2_x86inc
+
+prototype void vp9_dc_top_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_top_predictor_16x16
+
+prototype void vp9_dc_left_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_left_predictor_16x16
+
+prototype void vp9_dc_128_predictor_16x16 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_128_predictor_16x16
+
+prototype void vp9_d207_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d207_predictor_32x32
+
+prototype void vp9_d45_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d45_predictor_32x32 $ssse3_x86inc
+
+prototype void vp9_d63_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d63_predictor_32x32
+
+prototype void vp9_h_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_h_predictor_32x32 $ssse3_x86inc
+
+prototype void vp9_d117_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d117_predictor_32x32
+
+prototype void vp9_d135_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d135_predictor_32x32
+
+prototype void vp9_d153_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_d153_predictor_32x32
+
+prototype void vp9_v_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_v_predictor_32x32 $sse2_x86inc
+
+prototype void vp9_tm_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_tm_predictor_32x32 $sse2_x86_64
+
+prototype void vp9_dc_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_predictor_32x32 $sse2_x86inc
+
+prototype void vp9_dc_top_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_top_predictor_32x32
+
+prototype void vp9_dc_left_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_left_predictor_32x32
+
+prototype void vp9_dc_128_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
+specialize vp9_dc_128_predictor_32x32
+
+if [ "$CONFIG_VP9_DECODER" = "yes" ]; then
+prototype void vp9_add_constant_residual_8x8 "const int16_t diff, uint8_t *dest, int stride"
+specialize vp9_add_constant_residual_8x8 sse2 neon
+
+prototype void vp9_add_constant_residual_16x16 "const int16_t diff, uint8_t *dest, int stride"
+specialize vp9_add_constant_residual_16x16 sse2 neon
+
+prototype void vp9_add_constant_residual_32x32 "const int16_t diff, uint8_t *dest, int stride"
+specialize vp9_add_constant_residual_32x32 sse2 neon
+fi
+
+#
+# Loopfilter
+#
+prototype void vp9_mb_lpf_vertical_edge_w "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"
+specialize vp9_mb_lpf_vertical_edge_w sse2 neon
+
+prototype void vp9_mbloop_filter_vertical_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"
+specialize vp9_mbloop_filter_vertical_edge sse2 neon
+
+prototype void vp9_loop_filter_vertical_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"
+specialize vp9_loop_filter_vertical_edge mmx neon
+
+prototype void vp9_mb_lpf_horizontal_edge_w "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"
+specialize vp9_mb_lpf_horizontal_edge_w sse2 neon
+
+prototype void vp9_mbloop_filter_horizontal_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"
+specialize vp9_mbloop_filter_horizontal_edge sse2 neon
+
+prototype void vp9_loop_filter_horizontal_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"
+specialize vp9_loop_filter_horizontal_edge mmx neon
+
+#
+# post proc
+#
+if [ "$CONFIG_VP9_POSTPROC" = "yes" ]; then
+prototype void vp9_mbpost_proc_down "uint8_t *dst, int pitch, int rows, int cols, int flimit"
+specialize vp9_mbpost_proc_down mmx sse2
+vp9_mbpost_proc_down_sse2=vp9_mbpost_proc_down_xmm
+
+prototype void vp9_mbpost_proc_across_ip "uint8_t *src, int pitch, int rows, int cols, int flimit"
+specialize vp9_mbpost_proc_across_ip sse2
+vp9_mbpost_proc_across_ip_sse2=vp9_mbpost_proc_across_ip_xmm
+
+prototype void vp9_post_proc_down_and_across "const uint8_t *src_ptr, uint8_t *dst_ptr, int src_pixels_per_line, int dst_pixels_per_line, int rows, int cols, int flimit"
+specialize vp9_post_proc_down_and_across mmx sse2
+vp9_post_proc_down_and_across_sse2=vp9_post_proc_down_and_across_xmm
+
+prototype void vp9_plane_add_noise "uint8_t *Start, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int Width, unsigned int Height, int Pitch"
+specialize vp9_plane_add_noise mmx sse2
+vp9_plane_add_noise_sse2=vp9_plane_add_noise_wmt
+fi
+
+prototype void vp9_blend_mb_inner "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride"
+specialize vp9_blend_mb_inner
+
+prototype void vp9_blend_mb_outer "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride"
+specialize vp9_blend_mb_outer
+
+prototype void vp9_blend_b "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride"
+specialize vp9_blend_b
+
+#
+# Sub Pixel Filters
+#
+prototype void vp9_convolve_copy "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve_copy $sse2_x86inc neon
+
+prototype void vp9_convolve_avg "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve_avg $sse2_x86inc neon
+
+prototype void vp9_convolve8 "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8 ssse3 neon
+
+prototype void vp9_convolve8_horiz "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_horiz ssse3 neon
+
+prototype void vp9_convolve8_vert "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_vert ssse3 neon
+
+prototype void vp9_convolve8_avg "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_avg ssse3 neon
+
+prototype void vp9_convolve8_avg_horiz "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_avg_horiz ssse3 neon
+
+prototype void vp9_convolve8_avg_vert "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_avg_vert ssse3 neon
+
+#
+# dct
+#
+prototype void vp9_short_idct4x4_1_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct4x4_1_add sse2 neon
+
+prototype void vp9_short_idct4x4_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct4x4_add sse2 neon
+
+prototype void vp9_short_idct8x8_1_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct8x8_1_add sse2 neon
+
+prototype void vp9_short_idct8x8_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct8x8_add sse2 neon
+
+prototype void vp9_short_idct10_8x8_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct10_8x8_add sse2 neon
+
+prototype void vp9_short_idct16x16_1_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct16x16_1_add sse2 neon
+
+prototype void vp9_short_idct16x16_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct16x16_add sse2 neon
+
+prototype void vp9_short_idct10_16x16_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct10_16x16_add sse2 neon
+
+prototype void vp9_short_idct32x32_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct32x32_add sse2 neon
+
+prototype void vp9_short_idct1_32x32 "int16_t *input, int16_t *output"
+specialize vp9_short_idct1_32x32
+
+prototype void vp9_short_iht4x4_add "int16_t *input, uint8_t *dest, int dest_stride, int tx_type"
+specialize vp9_short_iht4x4_add sse2 neon
+
+prototype void vp9_short_iht8x8_add "int16_t *input, uint8_t *dest, int dest_stride, int tx_type"
+specialize vp9_short_iht8x8_add sse2 neon
+
+prototype void vp9_short_iht16x16_add "int16_t *input, uint8_t *output, int pitch, int tx_type"
+specialize vp9_short_iht16x16_add sse2
+
+prototype void vp9_idct4_1d "int16_t *input, int16_t *output"
+specialize vp9_idct4_1d sse2
+# dct and add
+
+prototype void vp9_short_iwalsh4x4_1_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_iwalsh4x4_1_add
+
+prototype void vp9_short_iwalsh4x4_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_iwalsh4x4_add
+
+#
+# Encoder functions below this point.
+#
+if [ "$CONFIG_VP9_ENCODER" = "yes" ]; then
+
+# variance
+prototype unsigned int vp9_variance32x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance32x16 $sse2_x86inc
+
+prototype unsigned int vp9_variance16x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance16x32 $sse2_x86inc
+
+prototype unsigned int vp9_variance64x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance64x32 $sse2_x86inc
+
+prototype unsigned int vp9_variance32x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance32x64 $sse2_x86inc
+
+prototype unsigned int vp9_variance32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance32x32 $sse2_x86inc
+
+prototype unsigned int vp9_variance64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance64x64 $sse2_x86inc
+
+prototype unsigned int vp9_variance16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance16x16 mmx $sse2_x86inc
+
+prototype unsigned int vp9_variance16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance16x8 mmx $sse2_x86inc
+
+prototype unsigned int vp9_variance8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance8x16 mmx $sse2_x86inc
+
+prototype unsigned int vp9_variance8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance8x8 mmx $sse2_x86inc
+
+prototype void vp9_get_sse_sum_8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum"
+specialize vp9_get_sse_sum_8x8 sse2
+vp9_get_sse_sum_8x8_sse2=vp9_get8x8var_sse2
+
+prototype unsigned int vp9_variance8x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance8x4 $sse2_x86inc
+
+prototype unsigned int vp9_variance4x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance4x8 $sse2_x86inc
+
+prototype unsigned int vp9_variance4x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance4x4 mmx $sse2_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance64x64 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance64x64 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance32x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance32x64 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance32x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance32x64 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance64x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance64x32 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance64x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance64x32 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance32x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance32x16 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance32x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance32x16 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance16x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance16x32 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance16x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance16x32 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance32x32 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance32x32 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance16x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance16x16 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance16x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance16x16 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance8x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance8x16 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance8x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance8x16 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance16x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance16x8 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance16x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance16x8 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance8x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance8x8 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance8x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance8x8 $sse2_x86inc $ssse3_x86inc
+
+# TODO(jingning): need to convert 8x4/4x8 functions into mmx/sse form
+prototype unsigned int vp9_sub_pixel_variance8x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance8x4 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance8x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance8x4 $sse2_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance4x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance4x8 $sse_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_avg_variance4x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance4x8 $sse_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sub_pixel_variance4x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance4x4 $sse_x86inc $ssse3_x86inc
+#vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt
+
+prototype unsigned int vp9_sub_pixel_avg_variance4x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance4x4 $sse_x86inc $ssse3_x86inc
+
+prototype unsigned int vp9_sad64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad64x64 $sse2_x86inc
+
+prototype unsigned int vp9_sad32x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad32x64 $sse2_x86inc
+
+prototype unsigned int vp9_sad64x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad64x32 $sse2_x86inc
+
+prototype unsigned int vp9_sad32x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad32x16 $sse2_x86inc
+
+prototype unsigned int vp9_sad16x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad16x32 $sse2_x86inc
+
+prototype unsigned int vp9_sad32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad32x32 $sse2_x86inc
+
+prototype unsigned int vp9_sad16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad16x16 mmx $sse2_x86inc
+
+prototype unsigned int vp9_sad16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad16x8 mmx $sse2_x86inc
+
+prototype unsigned int vp9_sad8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad8x16 mmx $sse2_x86inc
+
+prototype unsigned int vp9_sad8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad8x8 mmx $sse2_x86inc
+
+prototype unsigned int vp9_sad8x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad8x4 $sse2_x86inc
+
+prototype unsigned int vp9_sad4x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad4x8 $sse_x86inc
+
+prototype unsigned int vp9_sad4x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad4x4 mmx $sse_x86inc
+
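Every vp9_sadMxN prototype above carries a trailing max_sad argument: the caller passes its best SAD so far so an implementation can abandon a block that is already worse. A minimal sketch of the idea (the helper name is hypothetical, and the plain-C fallbacks are free to ignore the bound entirely):

```c
#include <stdlib.h>           /* abs() */
#include "vpx/vpx_integer.h"  /* uint8_t */

/* Row-by-row SAD with an early-out bound; sketch only. */
static unsigned int sad_mxn_capped(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   int m, int n, unsigned int max_sad) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < n; ++r) {
    for (c = 0; c < m; ++c)
      sad += abs(src[c] - ref[c]);
    if (sad > max_sad)
      return sad;  /* already worse than the caller's best candidate */
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}
```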
+prototype unsigned int vp9_sad64x64_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad64x64_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad32x64_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad32x64_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad64x32_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad64x32_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad32x16_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad32x16_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad16x32_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad16x32_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad32x32_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad32x32_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad16x16_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad16x16_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad16x8_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad16x8_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad8x16_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad8x16_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad8x8_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad8x8_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad8x4_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad8x4_avg $sse2_x86inc
+
+prototype unsigned int vp9_sad4x8_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad4x8_avg $sse_x86inc
+
+prototype unsigned int vp9_sad4x4_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
+specialize vp9_sad4x4_avg $sse_x86inc
+
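The _avg variants just defined score a compound predictor instead of the raw reference: each reference pixel is first rounded-averaged with the matching pixel of second_pred. A sketch, assuming second_pred is a contiguous block whose stride equals the block width, as libvpx lays out its compound-prediction buffers:

```c
#include <stdlib.h>           /* abs() */
#include "vpx/vpx_integer.h"  /* uint8_t */

/* SAD against the average of ref and second_pred; sketch only. */
static unsigned int sad_mxn_avg(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                const uint8_t *second_pred, int m, int n) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < n; ++r) {
    for (c = 0; c < m; ++c) {
      const int pred = (ref[c] + second_pred[c] + 1) >> 1;  /* comp avg */
      sad += abs(src[c] - pred);
    }
    src += src_stride;
    ref += ref_stride;
    second_pred += m;  /* assumed contiguous, stride == block width */
  }
  return sad;
}
```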
+prototype unsigned int vp9_variance_halfpixvar16x16_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar16x16_h $sse2_x86inc
+
+prototype unsigned int vp9_variance_halfpixvar16x16_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar16x16_v $sse2_x86inc
+
+prototype unsigned int vp9_variance_halfpixvar16x16_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar16x16_hv $sse2_x86inc
+
+prototype unsigned int vp9_variance_halfpixvar64x64_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar64x64_h
+
+prototype unsigned int vp9_variance_halfpixvar64x64_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar64x64_v
+
+prototype unsigned int vp9_variance_halfpixvar64x64_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar64x64_hv
+
+prototype unsigned int vp9_variance_halfpixvar32x32_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar32x32_h
+
+prototype unsigned int vp9_variance_halfpixvar32x32_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar32x32_v
+
+prototype unsigned int vp9_variance_halfpixvar32x32_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar32x32_hv
+
+prototype void vp9_sad64x64x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad64x64x3
+
+prototype void vp9_sad32x32x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x32x3
+
+prototype void vp9_sad16x16x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x16x3 sse3 ssse3
+
+prototype void vp9_sad16x8x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x8x3 sse3 ssse3
+
+prototype void vp9_sad8x16x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x16x3 sse3
+
+prototype void vp9_sad8x8x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x8x3 sse3
+
+prototype void vp9_sad4x4x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad4x4x3 sse3
+
+prototype void vp9_sad64x64x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad64x64x8
+
+prototype void vp9_sad32x32x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad32x32x8
+
+prototype void vp9_sad16x16x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad16x16x8 sse4
+
+prototype void vp9_sad16x8x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad16x8x8 sse4
+
+prototype void vp9_sad8x16x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad8x16x8 sse4
+
+prototype void vp9_sad8x8x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad8x8x8 sse4
+
+prototype void vp9_sad8x4x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad8x4x8
+
+prototype void vp9_sad4x8x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad4x8x8
+
+prototype void vp9_sad4x4x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad4x4x8 sse4
+
+prototype void vp9_sad64x64x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad64x64x4d sse2
+
+prototype void vp9_sad32x64x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x64x4d sse2
+
+prototype void vp9_sad64x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad64x32x4d sse2
+
+prototype void vp9_sad32x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x16x4d sse2
+
+prototype void vp9_sad16x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x32x4d sse2
+
+prototype void vp9_sad32x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x32x4d sse2
+
+prototype void vp9_sad16x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x16x4d sse2
+
+prototype void vp9_sad16x8x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x8x4d sse2
+
+prototype void vp9_sad8x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x16x4d sse2
+
+prototype void vp9_sad8x8x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x8x4d sse2
+
+# TODO(jingning): need to convert these 4x8/8x4 functions into sse2 form
+prototype void vp9_sad8x4x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x4x4d sse2
+
+prototype void vp9_sad4x8x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad4x8x4d sse
+
+prototype void vp9_sad4x4x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad4x4x4d sse
+
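The x3/x8 entry points above evaluate the source block against several reference positions one pel apart, while the x4d flavor takes four independent reference pointers, matching how the diamond search batches its candidates. A generic fallback is just a loop over a single-block kernel; this sketch reuses sad_mx_n_c from vp9_sadmxn.h, which is added later in this change:

```c
#include "vp9/common/vp9_sadmxn.h"  /* sad_mx_n_c() */

/* Four candidate references scored in one call; sketch only. */
static void sad_mxn_x4d_sketch(const uint8_t *src, int src_stride,
                               const uint8_t *const ref[4], int ref_stride,
                               int m, int n, unsigned int sad_array[4]) {
  int i;
  for (i = 0; i < 4; ++i)  /* one SAD per candidate reference */
    sad_array[i] = sad_mx_n_c(src, src_stride, ref[i], ref_stride, m, n);
}
```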
+#prototype unsigned int vp9_sub_pixel_mse16x16 "const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse"
+#specialize vp9_sub_pixel_mse16x16 sse2 mmx
+
+prototype unsigned int vp9_mse16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"
+specialize vp9_mse16x16 mmx $sse2_x86inc
+
+prototype unsigned int vp9_mse8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"
+specialize vp9_mse8x16
+
+prototype unsigned int vp9_mse16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"
+specialize vp9_mse16x8
+
+prototype unsigned int vp9_mse8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"
+specialize vp9_mse8x8
+
+prototype unsigned int vp9_sub_pixel_mse64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_mse64x64
+
+prototype unsigned int vp9_sub_pixel_mse32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_mse32x32
+
+prototype unsigned int vp9_get_mb_ss "const int16_t *"
+specialize vp9_get_mb_ss mmx sse2
+# ENCODEMB INVOKE
+
+prototype int64_t vp9_block_error "int16_t *coeff, int16_t *dqcoeff, intptr_t block_size, int64_t *ssz"
+specialize vp9_block_error $sse2_x86inc
+
+prototype void vp9_subtract_block "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride"
+specialize vp9_subtract_block $sse2_x86inc
+
+prototype void vp9_quantize_b "int16_t *coeff_ptr, intptr_t n_coeffs, int skip_block, int16_t *zbin_ptr, int16_t *round_ptr, int16_t *quant_ptr, int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"
+specialize vp9_quantize_b $ssse3_x86_64
+
+prototype void vp9_quantize_b_32x32 "int16_t *coeff_ptr, intptr_t n_coeffs, int skip_block, int16_t *zbin_ptr, int16_t *round_ptr, int16_t *quant_ptr, int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"
+specialize vp9_quantize_b_32x32 $ssse3_x86_64
+
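vp9_quantize_b is a dead-zone quantizer: coefficients whose magnitude falls below the zero-bin threshold (zbin_ptr, widened by zbin_oq_value) are forced to zero, and the rest are rounded and scaled by fixed-point multipliers. A greatly simplified per-coefficient sketch; the real routine also applies the second quant_shift stage, walks coefficients in scan order, and tracks the end-of-block position:

```c
#include <stdlib.h>  /* abs() */

/* One coefficient of a dead-zone quantizer; sketch only. */
static int16_t quantize_coeff_sketch(int16_t coeff, int zbin, int round,
                                     int quant, int dequant,
                                     int16_t *dqcoeff) {
  const int sign = coeff < 0 ? -1 : 1;
  const int x = abs(coeff);
  int q = 0;
  if (x >= zbin)                      /* outside the dead zone */
    q = ((x + round) * quant) >> 16;  /* Q16 fixed-point multiply */
  *dqcoeff = (int16_t)(sign * q * dequant);  /* reconstruction value */
  return (int16_t)(sign * q);
}
```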
+#
+# Structured Similarity (SSIM)
+#
+if [ "$CONFIG_INTERNAL_STATS" = "yes" ]; then
+ prototype void vp9_ssim_parms_8x8 "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+ specialize vp9_ssim_parms_8x8 $sse2_x86_64
+
+ prototype void vp9_ssim_parms_16x16 "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+ specialize vp9_ssim_parms_16x16 $sse2_x86_64
+fi
+
+# fdct functions
+prototype void vp9_short_fht4x4 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type"
+specialize vp9_short_fht4x4 sse2
+
+prototype void vp9_short_fht8x8 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type"
+specialize vp9_short_fht8x8 sse2
+
+prototype void vp9_short_fht16x16 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type"
+specialize vp9_short_fht16x16 sse2
+
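The short_fht functions are VP9's forward hybrid transforms: tx_type selects which 1-D transform (DCT or ADST) runs in each direction of the separable 2-D transform. The values follow the VP9 transform-type enumeration:

```c
typedef enum {
  DCT_DCT   = 0,  /* DCT both vertically and horizontally */
  ADST_DCT  = 1,  /* ADST vertically, DCT horizontally */
  DCT_ADST  = 2,  /* DCT vertically, ADST horizontally */
  ADST_ADST = 3   /* ADST both vertically and horizontally */
} TX_TYPE;
```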
+prototype void vp9_short_fdct8x8 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct8x8 sse2
+
+prototype void vp9_short_fdct4x4 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct4x4 sse2
+
+prototype void vp9_short_fdct8x4 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct8x4 sse2
+
+prototype void vp9_short_fdct32x32 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct32x32 sse2
+
+prototype void vp9_short_fdct32x32_rd "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct32x32_rd sse2
+
+prototype void vp9_short_fdct16x16 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct16x16 sse2
+
+prototype void vp9_short_walsh4x4 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_walsh4x4
+
+prototype void vp9_short_walsh8x4 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_walsh8x4
+
+#
+# Motion search
+#
+prototype int vp9_full_search_sad "struct macroblock *x, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv, int n"
+specialize vp9_full_search_sad sse3 sse4_1
+vp9_full_search_sad_sse3=vp9_full_search_sadx3
+vp9_full_search_sad_sse4_1=vp9_full_search_sadx8
+
+prototype int vp9_refining_search_sad "struct macroblock *x, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
+specialize vp9_refining_search_sad sse3
+vp9_refining_search_sad_sse3=vp9_refining_search_sadx4
+
+prototype int vp9_diamond_search_sad "struct macroblock *x, union int_mv *ref_mv, union int_mv *best_mv, int search_param, int sad_per_bit, int *num00, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
+specialize vp9_diamond_search_sad sse3
+vp9_diamond_search_sad_sse3=vp9_diamond_search_sadx4
+
+prototype void vp9_temporal_filter_apply "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_size, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count"
+specialize vp9_temporal_filter_apply sse2
+
+prototype void vp9_yv12_copy_partial_frame "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc, int fraction"
+specialize vp9_yv12_copy_partial_frame
+
+
+fi
+# end encoder functions
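This whole defs file is shell sourced by build/make/rtcd.sh: prototype records a function's signature, specialize lists the SIMD flavors a target builds (variables like $sse2_x86inc are set by configure so x86 assembly can be excluded per target), and plain assignments such as vp9_full_search_sad_sse3=vp9_full_search_sadx3 bind a differently named implementation to a slot. A sketch of what a single entry becomes in a generated vp9_rtcd.h header (the exact macro plumbing varies by target; the _c/_mmx/_sse2 suffix rule is the generator's convention):

```c
unsigned int vp9_sad16x16_c(const uint8_t *src_ptr, int source_stride,
                            const uint8_t *ref_ptr, int ref_stride,
                            unsigned int max_sad);
unsigned int vp9_sad16x16_mmx(const uint8_t *src_ptr, int source_stride,
                              const uint8_t *ref_ptr, int ref_stride,
                              unsigned int max_sad);
unsigned int vp9_sad16x16_sse2(const uint8_t *src_ptr, int source_stride,
                               const uint8_t *ref_ptr, int ref_stride,
                               unsigned int max_sad);
/* Dispatch pointer, aimed at the best available flavor by the rtcd
 * setup routine after runtime CPU detection. */
RTCD_EXTERN unsigned int (*vp9_sad16x16)(const uint8_t *src_ptr,
                                         int source_stride,
                                         const uint8_t *ref_ptr,
                                         int ref_stride,
                                         unsigned int max_sad);
```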
diff --git a/libvpx/vp9/common/vp9_sadmxn.h b/libvpx/vp9/common/vp9_sadmxn.h
new file mode 100644
index 0000000..b2dfd63
--- /dev/null
+++ b/libvpx/vp9/common/vp9_sadmxn.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_SADMXN_H_
+#define VP9_COMMON_VP9_SADMXN_H_
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+
+static INLINE unsigned int sad_mx_n_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ int m,
+ int n) {
+ int r, c;
+ unsigned int sad = 0;
+
+ for (r = 0; r < n; r++) {
+ for (c = 0; c < m; c++) {
+ sad += abs(src_ptr[c] - ref_ptr[c]);
+ }
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ }
+
+ return sad;
+}
+
+#endif // VP9_COMMON_VP9_SADMXN_H_
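sad_mx_n_c is the generic kernel; the per-size C fallbacks named in the rtcd defs are thin wrappers that pin m and n. A hypothetical wrapper shape (the plain C versions compute the full SAD and discard the bound):

```c
unsigned int vp9_sad8x8_c(const uint8_t *src_ptr, int source_stride,
                          const uint8_t *ref_ptr, int ref_stride,
                          unsigned int max_sad) {
  (void)max_sad;  /* no early-out in the reference implementation */
  return sad_mx_n_c(src_ptr, source_stride, ref_ptr, ref_stride, 8, 8);
}
```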
diff --git a/libvpx/vp9/common/vp9_scale.c b/libvpx/vp9/common/vp9_scale.c
new file mode 100644
index 0000000..989206c
--- /dev/null
+++ b/libvpx/vp9/common/vp9_scale.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_filter.h"
+#include "vp9/common/vp9_scale.h"
+
+static INLINE int scaled_x(int val, const struct scale_factors *scale) {
+ return val * scale->x_scale_fp >> REF_SCALE_SHIFT;
+}
+
+static INLINE int scaled_y(int val, const struct scale_factors *scale) {
+ return val * scale->y_scale_fp >> REF_SCALE_SHIFT;
+}
+
+static int unscaled_value(int val, const struct scale_factors *scale) {
+ (void) scale;
+ return val;
+}
+
+static MV32 scaled_mv(const MV *mv, const struct scale_factors *scale) {
+ const MV32 res = {
+ scaled_y(mv->row, scale) + scale->y_offset_q4,
+ scaled_x(mv->col, scale) + scale->x_offset_q4
+ };
+ return res;
+}
+
+static MV32 unscaled_mv(const MV *mv, const struct scale_factors *scale) {
+ const MV32 res = {
+ mv->row,
+ mv->col
+ };
+ return res;
+}
+
+static void set_offsets_with_scaling(struct scale_factors *scale,
+ int row, int col) {
+ scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale) & SUBPEL_MASK;
+ scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale) & SUBPEL_MASK;
+}
+
+static void set_offsets_without_scaling(struct scale_factors *scale,
+ int row, int col) {
+ scale->x_offset_q4 = 0;
+ scale->y_offset_q4 = 0;
+}
+
+static int get_fixed_point_scale_factor(int other_size, int this_size) {
+ // Calculate scaling factor once for each reference frame
+ // and use fixed point scaling factors in decoding and encoding routines.
+ // Hardware implementations can calculate scale factor in device driver
+ // and use multiplication and shifting on hardware instead of division.
+ return (other_size << REF_SCALE_SHIFT) / this_size;
+}
+
+static int check_scale_factors(int other_w, int other_h,
+ int this_w, int this_h) {
+ return 2 * this_w >= other_w &&
+ 2 * this_h >= other_h &&
+ this_w <= 16 * other_w &&
+ this_h <= 16 * other_h;
+}
+
+void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+ int other_w, int other_h,
+ int this_w, int this_h) {
+ if (!check_scale_factors(other_w, other_h, this_w, this_h)) {
+ scale->x_scale_fp = REF_INVALID_SCALE;
+ scale->y_scale_fp = REF_INVALID_SCALE;
+ return;
+ }
+
+ scale->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
+ scale->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
+ scale->x_step_q4 = scaled_x(16, scale);
+ scale->y_step_q4 = scaled_y(16, scale);
+ scale->x_offset_q4 = 0; // calculated per block
+ scale->y_offset_q4 = 0; // calculated per block
+
+ if (vp9_is_scaled(scale)) {
+ scale->scale_value_x = scaled_x;
+ scale->scale_value_y = scaled_y;
+ scale->set_scaled_offsets = set_offsets_with_scaling;
+ scale->scale_mv = scaled_mv;
+ } else {
+ scale->scale_value_x = unscaled_value;
+ scale->scale_value_y = unscaled_value;
+ scale->set_scaled_offsets = set_offsets_without_scaling;
+ scale->scale_mv = unscaled_mv;
+ }
+
+ // TODO(agrange): Investigate the best choice of functions to use here
+ // for EIGHTTAP_SMOOTH. Since it is not interpolating, need to choose what
+ // to do at full-pel offsets. The current selection, where the filter is
+ // applied in one direction only, and not at all for 0,0, seems to give the
+ // best quality, but it may be worth trying an additional mode that does
+ // do the filtering on full-pel.
+ if (scale->x_step_q4 == 16) {
+ if (scale->y_step_q4 == 16) {
+ // No scaling in either direction.
+ scale->predict[0][0][0] = vp9_convolve_copy;
+ scale->predict[0][0][1] = vp9_convolve_avg;
+ scale->predict[0][1][0] = vp9_convolve8_vert;
+ scale->predict[0][1][1] = vp9_convolve8_avg_vert;
+ scale->predict[1][0][0] = vp9_convolve8_horiz;
+ scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+ } else {
+ // No scaling in x direction. Must always scale in the y direction.
+ scale->predict[0][0][0] = vp9_convolve8_vert;
+ scale->predict[0][0][1] = vp9_convolve8_avg_vert;
+ scale->predict[0][1][0] = vp9_convolve8_vert;
+ scale->predict[0][1][1] = vp9_convolve8_avg_vert;
+ scale->predict[1][0][0] = vp9_convolve8;
+ scale->predict[1][0][1] = vp9_convolve8_avg;
+ }
+ } else {
+ if (scale->y_step_q4 == 16) {
+ // No scaling in the y direction. Must always scale in the x direction.
+ scale->predict[0][0][0] = vp9_convolve8_horiz;
+ scale->predict[0][0][1] = vp9_convolve8_avg_horiz;
+ scale->predict[0][1][0] = vp9_convolve8;
+ scale->predict[0][1][1] = vp9_convolve8_avg;
+ scale->predict[1][0][0] = vp9_convolve8_horiz;
+ scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+ } else {
+ // Must always scale in both directions.
+ scale->predict[0][0][0] = vp9_convolve8;
+ scale->predict[0][0][1] = vp9_convolve8_avg;
+ scale->predict[0][1][0] = vp9_convolve8;
+ scale->predict[0][1][1] = vp9_convolve8_avg;
+ scale->predict[1][0][0] = vp9_convolve8;
+ scale->predict[1][0][1] = vp9_convolve8_avg;
+ }
+ }
+ // 2D subpel motion always gets filtered in both directions
+ scale->predict[1][1][0] = vp9_convolve8;
+ scale->predict[1][1][1] = vp9_convolve8_avg;
+}
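A worked example of the fixed-point arithmetic (REF_SCALE_SHIFT is 14, so 1 << 14 represents 1.0): a reference frame twice the width of the current frame gives

```c
int x_scale_fp = (1920 << 14) / 960;       /* 32768, i.e. 2.0 in Q14      */
int x_step_q4  = (16 * x_scale_fp) >> 14;  /* 32: two full pels of source */
                                           /* consumed per output pel     */
/* x_step_q4 != 16, so the selection logic above installs the scaling
 * convolve functions for the x direction. */
```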
diff --git a/libvpx/vp9/common/vp9_scale.h b/libvpx/vp9/common/vp9_scale.h
new file mode 100644
index 0000000..7a720d0
--- /dev/null
+++ b/libvpx/vp9/common/vp9_scale.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_SCALE_H_
+#define VP9_COMMON_VP9_SCALE_H_
+
+#include "vp9/common/vp9_mv.h"
+#include "vp9/common/vp9_convolve.h"
+
+#define REF_SCALE_SHIFT 14
+#define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
+#define REF_INVALID_SCALE -1
+
+struct scale_factors {
+ int x_scale_fp; // horizontal fixed point scale factor
+ int y_scale_fp; // vertical fixed point scale factor
+ int x_offset_q4;
+ int x_step_q4;
+ int y_offset_q4;
+ int y_step_q4;
+
+ int (*scale_value_x)(int val, const struct scale_factors *scale);
+ int (*scale_value_y)(int val, const struct scale_factors *scale);
+ void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col);
+ MV32 (*scale_mv)(const MV *mv, const struct scale_factors *scale);
+
+ convolve_fn_t predict[2][2][2]; // horiz, vert, avg
+};
+
+void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+ int other_w, int other_h,
+ int this_w, int this_h);
+
+static int vp9_is_valid_scale(const struct scale_factors *sf) {
+ return sf->x_scale_fp != REF_INVALID_SCALE &&
+ sf->y_scale_fp != REF_INVALID_SCALE;
+}
+
+static int vp9_is_scaled(const struct scale_factors *sf) {
+ return sf->x_scale_fp != REF_NO_SCALE ||
+ sf->y_scale_fp != REF_NO_SCALE;
+}
+
+#endif // VP9_COMMON_VP9_SCALE_H_
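The predict table is indexed by whether the motion has a fractional x component, a fractional y component, and whether the result should be averaged into the destination (the "horiz, vert, avg" comment above). A usage sketch with illustrative names; the callback signature is assumed to be the convolve_fn_t from vp9_convolve.h:

```c
static void predict_block_sketch(const struct scale_factors *scale,
                                 const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 int subpel_x, int subpel_y, int do_avg,
                                 const int16_t *filter_x,
                                 const int16_t *filter_y, int w, int h) {
  /* [has fractional x][has fractional y][average into dst] */
  scale->predict[subpel_x != 0][subpel_y != 0][do_avg](
      src, src_stride, dst, dst_stride,
      filter_x, scale->x_step_q4, filter_y, scale->y_step_q4, w, h);
}
```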
diff --git a/libvpx/vp9/common/vp9_seg_common.c b/libvpx/vp9/common/vp9_seg_common.c
new file mode 100644
index 0000000..6bfd8f8
--- /dev/null
+++ b/libvpx/vp9/common/vp9_seg_common.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_quant_common.h"
+
+static const int seg_feature_data_signed[SEG_LVL_MAX] = { 1, 1, 0, 0 };
+
+static const int seg_feature_data_max[SEG_LVL_MAX] = {
+ MAXQ, MAX_LOOP_FILTER, 3, 0 };
+
+// These functions provide access to new segment level features.
+// Eventually these functions may be "optimized out" but for the moment,
+// the coding mechanism is still subject to change so these provide a
+// convenient single point of change.
+
+int vp9_segfeature_active(const struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ return seg->enabled &&
+ (seg->feature_mask[segment_id] & (1 << feature_id));
+}
+
+void vp9_clearall_segfeatures(struct segmentation *seg) {
+ vp9_zero(seg->feature_data);
+ vp9_zero(seg->feature_mask);
+}
+
+void vp9_enable_segfeature(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ seg->feature_mask[segment_id] |= 1 << feature_id;
+}
+
+void vp9_disable_segfeature(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ seg->feature_mask[segment_id] &= ~(1 << feature_id);
+}
+
+int vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
+ return seg_feature_data_max[feature_id];
+}
+
+int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
+ return seg_feature_data_signed[feature_id];
+}
+
+void vp9_clear_segdata(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ seg->feature_data[segment_id][feature_id] = 0;
+}
+
+void vp9_set_segdata(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id, int seg_data) {
+ assert(seg_data <= seg_feature_data_max[feature_id]);
+ if (seg_data < 0) {
+ assert(seg_feature_data_signed[feature_id]);
+ assert(-seg_data <= seg_feature_data_max[feature_id]);
+ }
+
+ seg->feature_data[segment_id][feature_id] = seg_data;
+}
+
+int vp9_get_segdata(const struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ return seg->feature_data[segment_id][feature_id];
+}
+
+
+const vp9_tree_index vp9_segment_tree[14] = {
+ 2, 4, 6, 8, 10, 12,
+ 0, -1, -2, -3, -4, -5, -6, -7
+};
+
+
+// TBD? Functions to read and write segment data with range / validity checking
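A usage sketch tying the accessors together: give segment 1 its own quantizer delta and query it back. Values are illustrative; SEG_LVL_ALT_Q is a signed feature, so the negative delta passes the asserts in vp9_set_segdata:

```c
struct segmentation seg = { 0 };
seg.enabled = 1;
vp9_enable_segfeature(&seg, 1, SEG_LVL_ALT_Q);
vp9_set_segdata(&seg, 1, SEG_LVL_ALT_Q, -16);  /* delta against base Q */
if (vp9_segfeature_active(&seg, 1, SEG_LVL_ALT_Q)) {
  const int qdelta = vp9_get_segdata(&seg, 1, SEG_LVL_ALT_Q);
  /* ... fold qdelta into the segment's dequantizer setup ... */
}
```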
diff --git a/libvpx/vp9/common/vp9_seg_common.h b/libvpx/vp9/common/vp9_seg_common.h
new file mode 100644
index 0000000..f22239b
--- /dev/null
+++ b/libvpx/vp9/common/vp9_seg_common.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_SEG_COMMON_H_
+#define VP9_COMMON_VP9_SEG_COMMON_H_
+
+#include "vp9/common/vp9_treecoder.h"
+
+#define SEGMENT_DELTADATA 0
+#define SEGMENT_ABSDATA 1
+
+#define MAX_SEGMENTS 8
+#define SEG_TREE_PROBS (MAX_SEGMENTS-1)
+
+#define PREDICTION_PROBS 3
+
+// Segment level features.
+typedef enum {
+ SEG_LVL_ALT_Q = 0, // Use alternate Quantizer ....
+ SEG_LVL_ALT_LF = 1, // Use alternate loop filter value...
+ SEG_LVL_REF_FRAME = 2, // Optional Segment reference frame
+ SEG_LVL_SKIP = 3, // Optional Segment (0,0) + skip mode
+ SEG_LVL_MAX = 4 // Number of features supported
+} SEG_LVL_FEATURES;
+
+
+struct segmentation {
+ uint8_t enabled;
+ uint8_t update_map;
+ uint8_t update_data;
+ uint8_t abs_delta;
+ uint8_t temporal_update;
+
+ vp9_prob tree_probs[SEG_TREE_PROBS];
+ vp9_prob pred_probs[PREDICTION_PROBS];
+
+ int16_t feature_data[MAX_SEGMENTS][SEG_LVL_MAX];
+ unsigned int feature_mask[MAX_SEGMENTS];
+};
+
+int vp9_segfeature_active(const struct segmentation *seg,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+void vp9_clearall_segfeatures(struct segmentation *seg);
+
+void vp9_enable_segfeature(struct segmentation *seg,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+void vp9_disable_segfeature(struct segmentation *seg,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+int vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
+
+int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
+
+void vp9_clear_segdata(struct segmentation *seg,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+void vp9_set_segdata(struct segmentation *seg,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id,
+ int seg_data);
+
+int vp9_get_segdata(const struct segmentation *seg,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+extern const vp9_tree_index vp9_segment_tree[14];
+
+#endif // VP9_COMMON_VP9_SEG_COMMON_H_
+
diff --git a/libvpx/vp9/common/vp9_subpelvar.h b/libvpx/vp9/common/vp9_subpelvar.h
new file mode 100644
index 0000000..fe75481
--- /dev/null
+++ b/libvpx/vp9/common/vp9_subpelvar.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_SUBPELVAR_H_
+#define VP9_COMMON_VP9_SUBPELVAR_H_
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_convolve.h"
+
+static void variance(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ int w,
+ int h,
+ unsigned int *sse,
+ int *sum) {
+ int i, j;
+ int diff;
+
+ *sum = 0;
+ *sse = 0;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ diff = src_ptr[j] - ref_ptr[j];
+ *sum += diff;
+ *sse += diff * diff;
+ }
+
+ src_ptr += source_stride;
+ ref_ptr += recon_stride;
+ }
+}
+
+/****************************************************************************
+ *
+ * ROUTINE : var_filter_block2d_bil_first_pass
+ *
+ * INPUTS : uint8_t *src_ptr : Pointer to source block.
+ * uint32_t src_pixels_per_line : Stride of input block.
+ * uint32_t pixel_step : Offset between filter input samples (see notes).
+ * uint32_t output_height : Input block height.
+ * uint32_t output_width : Input block width.
+ * const int16_t *vp9_filter : Array of 2 bi-linear filter taps.
+ *
+ * OUTPUTS : uint16_t *output_ptr : Pointer to filtered block.
+ *
+ * RETURNS : void
+ *
+ * FUNCTION : Applies a 1-D 2-tap bi-linear filter to the source block in
+ * either horizontal or vertical direction to produce the
+ * filtered output block. Used to implement first-pass
+ * of 2-D separable filter.
+ *
+ * SPECIAL NOTES : Produces uint16_t output to retain precision for next pass.
+ * Two filter taps should sum to VP9_FILTER_WEIGHT.
+ * pixel_step defines whether the filter is applied
+ * horizontally (pixel_step=1) or vertically (pixel_step=stride).
+ * It defines the offset required to move from one input
+ * to the next.
+ *
+ ****************************************************************************/
+static void var_filter_block2d_bil_first_pass(const uint8_t *src_ptr,
+ uint16_t *output_ptr,
+ unsigned int src_pixels_per_line,
+ int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const int16_t *vp9_filter) {
+ unsigned int i, j;
+
+ for (i = 0; i < output_height; i++) {
+ for (j = 0; j < output_width; j++) {
+ output_ptr[j] = ROUND_POWER_OF_TWO((int)src_ptr[0] * vp9_filter[0] +
+ (int)src_ptr[pixel_step] * vp9_filter[1],
+ FILTER_BITS);
+
+ src_ptr++;
+ }
+
+ // Next row...
+ src_ptr += src_pixels_per_line - output_width;
+ output_ptr += output_width;
+ }
+}
+
+/****************************************************************************
+ *
+ * ROUTINE : var_filter_block2d_bil_second_pass
+ *
+ * INPUTS : const uint16_t *src_ptr : Pointer to source block.
+ * uint32_t src_pixels_per_line : Stride of input block.
+ * uint32_t pixel_step : Offset between filter input samples (see notes).
+ * uint32_t output_height : Input block height.
+ * uint32_t output_width : Input block width.
+ * const int16_t *vp9_filter : Array of 2 bi-linear filter taps.
+ *
+ * OUTPUTS : uint8_t *output_ptr : Pointer to filtered block.
+ *
+ * RETURNS : void
+ *
+ * FUNCTION : Applies a 1-D 2-tap bi-linear filter to the source block in
+ * either horizontal or vertical direction to produce the
+ * filtered output block. Used to implement second-pass
+ * of 2-D separable filter.
+ *
+ * SPECIAL NOTES : Requires 16-bit input as produced by var_filter_block2d_bil_first_pass.
+ * Two filter taps should sum to VP9_FILTER_WEIGHT.
+ * pixel_step defines whether the filter is applied
+ * horizontally (pixel_step=1) or vertically (pixel_step=stride).
+ * It defines the offset required to move from one input
+ * to the next.
+ *
+ ****************************************************************************/
+static void var_filter_block2d_bil_second_pass(const uint16_t *src_ptr,
+ uint8_t *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const int16_t *vp9_filter) {
+ unsigned int i, j;
+
+ for (i = 0; i < output_height; i++) {
+ for (j = 0; j < output_width; j++) {
+ output_ptr[j] = ROUND_POWER_OF_TWO((int)src_ptr[0] * vp9_filter[0] +
+ (int)src_ptr[pixel_step] * vp9_filter[1],
+ FILTER_BITS);
+ src_ptr++;
+ }
+
+ src_ptr += src_pixels_per_line - output_width;
+ output_ptr += output_width;
+ }
+}
+
+#endif // VP9_COMMON_VP9_SUBPELVAR_H_
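The two passes compose into the sub-pixel variance functions prototyped in the rtcd defs earlier in this change. An 8x8 sketch; bilinear_filters is assumed here to be a [16][2] table of two-tap filters indexed by the q4 offset:

```c
static unsigned int subpel_variance8x8_sketch(
    const uint8_t *src, int src_stride, int xoffset, int yoffset,
    const uint8_t *ref, int ref_stride, unsigned int *sse) {
  uint16_t fdata[9 * 8];  /* one extra row feeds the vertical pass */
  uint8_t temp[8 * 8];
  int sum;

  /* Horizontal pass (pixel_step = 1), then vertical pass over the
   * intermediate block (pixel_step = its stride, 8). */
  var_filter_block2d_bil_first_pass(src, fdata, src_stride, 1, 9, 8,
                                    bilinear_filters[xoffset]);
  var_filter_block2d_bil_second_pass(fdata, temp, 8, 8, 8, 8,
                                     bilinear_filters[yoffset]);
  variance(temp, 8, ref, ref_stride, 8, 8, sse, &sum);
  return *sse - (unsigned int)(((int64_t)sum * sum) >> 6);  /* mean^2 * 64 */
}
```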
diff --git a/libvpx/vp9/common/vp9_systemdependent.h b/libvpx/vp9/common/vp9_systemdependent.h
new file mode 100644
index 0000000..cc909e2
--- /dev/null
+++ b/libvpx/vp9/common/vp9_systemdependent.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_SYSTEMDEPENDENT_H_
+#define VP9_COMMON_VP9_SYSTEMDEPENDENT_H_
+
+#ifdef _MSC_VER
+#include <math.h>
+#endif
+
+#include "./vpx_config.h"
+#if ARCH_X86 || ARCH_X86_64
+void vpx_reset_mmx_state(void);
+#define vp9_clear_system_state() vpx_reset_mmx_state()
+#else
+#define vp9_clear_system_state()
+#endif
+
+#ifdef _MSC_VER
+// round is not defined in MSVC
+static int round(double x) {
+ if (x < 0)
+ return (int)ceil(x - 0.5);
+ else
+ return (int)floor(x + 0.5);
+}
+#endif
+
+struct VP9Common;
+void vp9_machine_specific_config(struct VP9Common *cm);
+
+#endif // VP9_COMMON_VP9_SYSTEMDEPENDENT_H_
diff --git a/libvpx/vp9/common/vp9_tapify.py b/libvpx/vp9/common/vp9_tapify.py
new file mode 100644
index 0000000..99529cf
--- /dev/null
+++ b/libvpx/vp9/common/vp9_tapify.py
@@ -0,0 +1,106 @@
+"""
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+"""
+#!/usr/bin/env python
+import sys,string,os,re,math,numpy
+scale = 2**16
+def dist(p1,p2):
+ x1,y1 = p1
+ x2,y2 = p2
+ if x1==x2 and y1==y2 :
+ return 1.0
+ return 1/ math.sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2))
+
+def gettaps(p):
+ def l(b):
+ return int(math.floor(b))
+ def h(b):
+ return int(math.ceil(b))
+ def t(b,p,s):
+ return int((scale*dist(b,p)+s/2)/s)
+ r,c = p
+ ul=[l(r),l(c)]
+ ur=[l(r),h(c)]
+ ll=[h(r),l(c)]
+ lr=[h(r),h(c)]
+ sum = dist(ul,p)+dist(ur,p)+dist(ll,p)+dist(lr,p)
+ t4 = scale - t(ul,p,sum) - t(ur,p,sum) - t(ll,p,sum);
+ return [[ul,t(ul,p,sum)],[ur,t(ur,p,sum)],
+ [ll,t(ll,p,sum)],[lr,t4]]
+
+def print_mb_taps(angle,blocksize):
+ theta = angle / 57.2957795;
+ affine = [[math.cos(theta),-math.sin(theta)],
+ [math.sin(theta),math.cos(theta)]]
+ radius = (float(blocksize)-1)/2
+ print " // angle of",angle,"degrees"
+ for y in range(blocksize) :
+ for x in range(blocksize) :
+ r,c = numpy.dot(affine,[y-radius, x-radius])
+ tps = gettaps([r+radius,c+radius])
+ for t in tps :
+ p,t = t
+ tr,tc = p
+ print " %2d, %2d, %5d, " % (tr,tc,t,),
+ print " // %2d,%2d " % (y,x)
+
+i=float(sys.argv[1])
+while i <= float(sys.argv[2]) :
+ print_mb_taps(i,float(sys.argv[4]))
+ i=i+float(sys.argv[3])
+"""
+
+taps = []
+pt=dict()
+ptr=dict()
+for y in range(16) :
+ for x in range(16) :
+ r,c = numpy.dot(affine,[y-7.5, x-7.5])
+ tps = gettaps([r+7.5,c+7.5])
+ j=0
+ for tp in tps :
+ p,i = tp
+ r,c = p
+ pt[y,x,j]= [p,i]
+ try:
+ ptr[r,j,c].append([y,x])
+ except:
+ ptr[r,j,c]=[[y,x]]
+ j = j+1
+
+for key in sorted(pt.keys()) :
+ print key,pt[key]
+
+lr = -99
+lj = -99
+lc = 0
+
+shuf=""
+mask=""
+for r,j,c in sorted(ptr.keys()) :
+ for y,x in ptr[r,j,c] :
+ if lr != r or lj != j :
+ print "shuf_"+str(lr)+"_"+str(lj)+"_"+shuf.ljust(16,"0"), lc
+ shuf=""
+ lc = 0
+ for i in range(lc,c-1) :
+ shuf = shuf +"0"
+ shuf = shuf + hex(x)[2]
+ lc =c
+ break
+ lr = r
+ lj = j
+# print r,j,c,ptr[r,j,c]
+# print
+
+for r,j,c in sorted(ptr.keys()) :
+ for y,x in ptr[r,j,c] :
+ print r,j,c,y,x
+ break
+"""
diff --git a/libvpx/vp9/common/vp9_textblit.c b/libvpx/vp9/common/vp9_textblit.c
new file mode 100644
index 0000000..60e95e0
--- /dev/null
+++ b/libvpx/vp9/common/vp9_textblit.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include "vp9/common/vp9_textblit.h"
+
+static const int font[] = {
+ 0x0, 0x5C00, 0x8020, 0xAFABEA, 0xD7EC0, 0x1111111, 0x1855740, 0x18000,
+ 0x45C0, 0x74400, 0x51140, 0x23880, 0xC4000, 0x21080, 0x80000, 0x111110,
+ 0xE9D72E, 0x87E40, 0x12AD732, 0xAAD62A, 0x4F94C4, 0x4D6B7, 0x456AA,
+ 0x3E8423, 0xAAD6AA, 0xAAD6A2, 0x2800, 0x2A00, 0x8A880, 0x52940, 0x22A20,
+ 0x15422, 0x6AD62E, 0x1E4A53E, 0xAAD6BF, 0x8C62E, 0xE8C63F, 0x118D6BF,
+ 0x1094BF, 0xCAC62E, 0x1F2109F, 0x118FE31, 0xF8C628, 0x8A89F, 0x108421F,
+ 0x1F1105F, 0x1F4105F, 0xE8C62E, 0x2294BF, 0x164C62E, 0x12694BF, 0x8AD6A2,
+ 0x10FC21, 0x1F8421F, 0x744107, 0xF8220F, 0x1151151, 0x117041, 0x119D731,
+ 0x47E0, 0x1041041, 0xFC400, 0x10440, 0x1084210, 0x820
+};
+
+static void plot(int x, int y, unsigned char *image, int pitch) {
+ image[x + y * pitch] ^= 255;
+}
+
+void vp9_blit_text(const char *msg, unsigned char *address, const int pitch) {
+ int letter_bitmap;
+ unsigned char *output_pos = address;
+ int colpos = 0;
+
+ while (msg[colpos] != 0) {
+ char letter = msg[colpos];
+ int fontcol, fontrow;
+
+ if (letter <= 'Z' && letter >= ' ')
+ letter_bitmap = font[letter - ' '];
+ else if (letter <= 'z' && letter >= 'a')
+ letter_bitmap = font[letter - 'a' + 'A' - ' '];
+ else
+ letter_bitmap = font[0];
+
+ for (fontcol = 6; fontcol >= 0; fontcol--)
+ for (fontrow = 0; fontrow < 5; fontrow++)
+ output_pos[fontrow * pitch + fontcol] =
+ ((letter_bitmap >> (fontcol * 5)) & (1 << fontrow) ? 255 : 0);
+
+ output_pos += 7;
+ colpos++;
+ }
+}
+
+
+
+/* Bresenham line algorithm */
+void vp9_blit_line(int x0, int x1, int y0, int y1, unsigned char *image,
+ int pitch) {
+ int steep = abs(y1 - y0) > abs(x1 - x0);
+ int deltax, deltay;
+ int error, ystep, y, x;
+
+ if (steep) {
+ int t;
+ t = x0;
+ x0 = y0;
+ y0 = t;
+
+ t = x1;
+ x1 = y1;
+ y1 = t;
+ }
+
+ if (x0 > x1) {
+ int t;
+ t = x0;
+ x0 = x1;
+ x1 = t;
+
+ t = y0;
+ y0 = y1;
+ y1 = t;
+ }
+
+ deltax = x1 - x0;
+ deltay = abs(y1 - y0);
+ error = deltax / 2;
+
+ y = y0;
+
+ if (y0 < y1)
+ ystep = 1;
+ else
+ ystep = -1;
+
+ if (steep) {
+ for (x = x0; x <= x1; x++) {
+ plot(y, x, image, pitch);
+
+ error = error - deltay;
+ if (error < 0) {
+ y = y + ystep;
+ error = error + deltax;
+ }
+ }
+ } else {
+ for (x = x0; x <= x1; x++) {
+ plot(x, y, image, pitch);
+
+ error = error - deltay;
+ if (error < 0) {
+ y = y + ystep;
+ error = error + deltax;
+ }
+ }
+ }
+}
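Both helpers write straight into an 8-bit pixel plane, so they can stamp debug annotations onto a decoded frame. A usage sketch; the plane pointer and stride stand for whatever luma buffer is at hand:

```c
#include "vp9/common/vp9_textblit.h"

static void overlay_debug_sketch(unsigned char *y_plane, int y_stride) {
  vp9_blit_text("TILE 0", y_plane, y_stride);      /* 5x7 bitmap font    */
  vp9_blit_line(0, 63, 0, 63, y_plane, y_stride);  /* XORs along the line */
}
```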
diff --git a/libvpx/vp9/common/vp9_textblit.h b/libvpx/vp9/common/vp9_textblit.h
new file mode 100644
index 0000000..c968628
--- /dev/null
+++ b/libvpx/vp9/common/vp9_textblit.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_TEXTBLIT_H_
+#define VP9_COMMON_VP9_TEXTBLIT_H_
+
+void vp9_blit_text(const char *msg, unsigned char *address, int pitch);
+
+void vp9_blit_line(int x0, int x1, int y0, int y1, unsigned char *image,
+ int pitch);
+
+#endif // VP9_COMMON_VP9_TEXTBLIT_H_
diff --git a/libvpx/vp9/common/vp9_tile_common.c b/libvpx/vp9/common/vp9_tile_common.c
new file mode 100644
index 0000000..1791c1a
--- /dev/null
+++ b/libvpx/vp9/common/vp9_tile_common.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_tile_common.h"
+
+#define MIN_TILE_WIDTH_B64 4
+#define MAX_TILE_WIDTH_B64 64
+
+static int to_sbs(int n_mis) {
+ return mi_cols_aligned_to_sb(n_mis) >> MI_BLOCK_SIZE_LOG2;
+}
+
+static void vp9_get_tile_offsets(int *min_tile_off, int *max_tile_off,
+ int tile_idx, int log2_n_tiles, int n_mis) {
+ const int n_sbs = to_sbs(n_mis);
+ const int sb_off1 = (tile_idx * n_sbs) >> log2_n_tiles;
+ const int sb_off2 = ((tile_idx + 1) * n_sbs) >> log2_n_tiles;
+
+ *min_tile_off = MIN(sb_off1 << 3, n_mis);
+ *max_tile_off = MIN(sb_off2 << 3, n_mis);
+}
+
+void vp9_get_tile_col_offsets(VP9_COMMON *cm, int tile_col_idx) {
+ vp9_get_tile_offsets(&cm->cur_tile_mi_col_start, &cm->cur_tile_mi_col_end,
+ tile_col_idx, cm->log2_tile_cols, cm->mi_cols);
+}
+
+void vp9_get_tile_row_offsets(VP9_COMMON *cm, int tile_row_idx) {
+ vp9_get_tile_offsets(&cm->cur_tile_mi_row_start, &cm->cur_tile_mi_row_end,
+ tile_row_idx, cm->log2_tile_rows, cm->mi_rows);
+}
+
+
+void vp9_get_tile_n_bits(int mi_cols,
+ int *min_log2_tile_cols, int *max_log2_tile_cols) {
+ const int sb_cols = to_sbs(mi_cols);
+ int min_log2_n_tiles, max_log2_n_tiles;
+
+ for (max_log2_n_tiles = 0;
+ (sb_cols >> max_log2_n_tiles) >= MIN_TILE_WIDTH_B64;
+ max_log2_n_tiles++) {}
+ max_log2_n_tiles--;
+ if (max_log2_n_tiles < 0)
+ max_log2_n_tiles = 0;
+
+ for (min_log2_n_tiles = 0;
+ (MAX_TILE_WIDTH_B64 << min_log2_n_tiles) < sb_cols;
+ min_log2_n_tiles++) {}
+
+ assert(min_log2_n_tiles <= max_log2_n_tiles);
+
+ *min_log2_tile_cols = min_log2_n_tiles;
+ *max_log2_tile_cols = max_log2_n_tiles;
+}
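A worked example of the offset math: with mi_cols = 100 (an 800-pixel-wide frame, a mode-info unit being 8 pixels) and two tile columns (log2_n_tiles = 1):

```c
const int n_sbs  = (100 + 7) >> 3;           /* 13, as to_sbs() computes  */
const int t0_end = ((0 + 1) * n_sbs) >> 1;   /* 6 sbs -> mi column 48     */
const int t1_end = ((1 + 1) * n_sbs) >> 1;   /* 13 sbs -> 104, then MIN   */
                                             /* clamps the end to 100     */
```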
diff --git a/libvpx/vp9/common/vp9_tile_common.h b/libvpx/vp9/common/vp9_tile_common.h
new file mode 100644
index 0000000..6d14560
--- /dev/null
+++ b/libvpx/vp9/common/vp9_tile_common.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_TILE_COMMON_H_
+#define VP9_COMMON_VP9_TILE_COMMON_H_
+
+#include "vp9/common/vp9_onyxc_int.h"
+
+void vp9_get_tile_col_offsets(VP9_COMMON *cm, int tile_col_idx);
+
+void vp9_get_tile_row_offsets(VP9_COMMON *cm, int tile_row_idx);
+
+void vp9_get_tile_n_bits(int mi_cols,
+ int *min_log2_tile_cols, int *max_log2_tile_cols);
+
+#endif // VP9_COMMON_VP9_TILE_COMMON_H_
diff --git a/libvpx/vp9/common/vp9_treecoder.c b/libvpx/vp9/common/vp9_treecoder.c
new file mode 100644
index 0000000..2e21a5b
--- /dev/null
+++ b/libvpx/vp9/common/vp9_treecoder.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "vp9/common/vp9_treecoder.h"
+
+static void tree2tok(struct vp9_token *const p, vp9_tree t,
+ int i, int v, int l) {
+ v += v;
+ ++l;
+
+ do {
+ const vp9_tree_index j = t[i++];
+
+ if (j <= 0) {
+ p[-j].value = v;
+ p[-j].len = l;
+ } else
+ tree2tok(p, t, j, v, l);
+ } while (++v & 1);
+}
+
+void vp9_tokens_from_tree(struct vp9_token *p, vp9_tree t) {
+ tree2tok(p, t, 0, 0, 0);
+}
+
+void vp9_tokens_from_tree_offset(struct vp9_token *p, vp9_tree t,
+ int offset) {
+ tree2tok(p - offset, t, 0, 0, 0);
+}
+
+static unsigned int convert_distribution(unsigned int i,
+ vp9_tree tree,
+ vp9_prob probs[],
+ unsigned int branch_ct[][2],
+ const unsigned int num_events[],
+ unsigned int tok0_offset) {
+ unsigned int left, right;
+
+ if (tree[i] <= 0) {
+ left = num_events[-tree[i] - tok0_offset];
+ } else {
+ left = convert_distribution(tree[i], tree, probs, branch_ct,
+ num_events, tok0_offset);
+ }
+ if (tree[i + 1] <= 0)
+ right = num_events[-tree[i + 1] - tok0_offset];
+ else
+ right = convert_distribution(tree[i + 1], tree, probs, branch_ct,
+ num_events, tok0_offset);
+
+ probs[i>>1] = get_binary_prob(left, right);
+ branch_ct[i>>1][0] = left;
+ branch_ct[i>>1][1] = right;
+ return left + right;
+}
+
+void vp9_tree_probs_from_distribution(
+ vp9_tree tree,
+ vp9_prob probs [ /* n-1 */ ],
+ unsigned int branch_ct [ /* n-1 */ ] [2],
+ const unsigned int num_events[ /* n */ ],
+ unsigned int tok0_offset) {
+ convert_distribution(0, tree, probs, branch_ct, num_events, tok0_offset);
+}
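The inverse operation, walking a tree while reading bits, is what the decoder does with these tables; vp9_segment_tree from vp9_seg_common.c above is one such 8-leaf tree. A sketch with a placeholder bit source standing in for the arithmetic decoder:

```c
static int read_tree_sketch(vp9_tree tree, int (*read_bit)(void)) {
  vp9_tree_index i = 0;
  while ((i = tree[i + read_bit()]) > 0)
    continue;  /* positive index: descend to the next node pair */
  return -i;   /* index <= 0 encodes the leaf value as -index   */
}
```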
diff --git a/libvpx/vp9/common/vp9_treecoder.h b/libvpx/vp9/common/vp9_treecoder.h
new file mode 100644
index 0000000..31182c3
--- /dev/null
+++ b/libvpx/vp9/common/vp9_treecoder.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_TREECODER_H_
+#define VP9_COMMON_VP9_TREECODER_H_
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
+
+typedef uint8_t vp9_prob;
+
+#define vp9_prob_half ((vp9_prob) 128)
+
+typedef int8_t vp9_tree_index;
+
+#define vp9_complement(x) (255 - x)
+
+/* We build coding trees compactly in arrays.
+ Each node of the tree is a pair of vp9_tree_indices.
+ Array index often references a corresponding probability table.
+ Index <= 0 means done encoding/decoding and value = -Index,
+ Index > 0 means need another bit, specification at index.
+ Nonnegative indices are always even; processing begins at node 0. */
+
+typedef const vp9_tree_index vp9_tree[], *vp9_tree_p;
+
+struct vp9_token {
+ int value;
+ int len;
+};
+
+/* Construct encoding array from tree. */
+
+void vp9_tokens_from_tree(struct vp9_token*, vp9_tree);
+void vp9_tokens_from_tree_offset(struct vp9_token*, vp9_tree, int offset);
+
+/* Convert array of token occurrence counts into a table of probabilities
+ for the associated binary encoding tree. Also writes count of branches
+ taken for each node on the tree; this facilitates decisions as to
+ probability updates. */
+
+void vp9_tree_probs_from_distribution(vp9_tree tree,
+ vp9_prob probs[ /* n - 1 */ ],
+ unsigned int branch_ct[ /* n - 1 */ ][2],
+ const unsigned int num_events[ /* n */ ],
+ unsigned int tok0_offset);
+
+static INLINE vp9_prob clip_prob(int p) {
+ return (p > 255) ? 255u : (p < 1) ? 1u : p;
+}
+
+// int64 is not needed for normal frame level calculations.
+// However, when outputting entropy stats accumulated over many frames
+// or even clips we can overflow int math.
+#ifdef ENTROPY_STATS
+static INLINE vp9_prob get_prob(int num, int den) {
+ return (den == 0) ? 128u : clip_prob(((int64_t)num * 256 + (den >> 1)) / den);
+}
+#else
+static INLINE vp9_prob get_prob(int num, int den) {
+ return (den == 0) ? 128u : clip_prob((num * 256 + (den >> 1)) / den);
+}
+#endif
+
+static INLINE vp9_prob get_binary_prob(int n0, int n1) {
+ return get_prob(n0, n0 + n1);
+}
+
+/* this function assumes prob1 and prob2 are already within [1,255] range */
+static INLINE vp9_prob weighted_prob(int prob1, int prob2, int factor) {
+ return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8);
+}
+
+static INLINE vp9_prob merge_probs(vp9_prob pre_prob, vp9_prob prob,
+ const unsigned int ct[2],
+ unsigned int count_sat,
+ unsigned int max_update_factor) {
+ const unsigned int count = MIN(ct[0] + ct[1], count_sat);
+ const unsigned int factor = max_update_factor * count / count_sat;
+ return weighted_prob(pre_prob, prob, factor);
+}
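+
+// For example (hypothetical inputs), with count_sat = 20 and
+// max_update_factor = 128, ct = {12, 4} gives count = 16 and
+// factor = 128 * 16 / 20 = 102, so the result is
+// ROUND_POWER_OF_TWO(pre_prob * 154 + prob * 102, 8).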
+
+static INLINE vp9_prob merge_probs2(vp9_prob pre_prob,
+ const unsigned int ct[2],
+ unsigned int count_sat,
+ unsigned int max_update_factor) {
+ return merge_probs(pre_prob, get_binary_prob(ct[0], ct[1]), ct, count_sat,
+ max_update_factor);
+}
+
+
+#endif // VP9_COMMON_VP9_TREECODER_H_
diff --git a/libvpx/vp9/common/x86/vp9_asm_stubs.c b/libvpx/vp9/common/x86/vp9_asm_stubs.c
new file mode 100644
index 0000000..3f1c198
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_asm_stubs.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vpx_ports/mem.h"
+///////////////////////////////////////////////////////////////////////////
+// the mmx function that does the bilinear filtering and var calculation //
+// in one pass //
+///////////////////////////////////////////////////////////////////////////
+DECLARE_ALIGNED(16, const short, vp9_bilinear_filters_mmx[16][8]) = {
+ { 128, 128, 128, 128, 0, 0, 0, 0 },
+ { 120, 120, 120, 120, 8, 8, 8, 8 },
+ { 112, 112, 112, 112, 16, 16, 16, 16 },
+ { 104, 104, 104, 104, 24, 24, 24, 24 },
+ { 96, 96, 96, 96, 32, 32, 32, 32 },
+ { 88, 88, 88, 88, 40, 40, 40, 40 },
+ { 80, 80, 80, 80, 48, 48, 48, 48 },
+ { 72, 72, 72, 72, 56, 56, 56, 56 },
+ { 64, 64, 64, 64, 64, 64, 64, 64 },
+ { 56, 56, 56, 56, 72, 72, 72, 72 },
+ { 48, 48, 48, 48, 80, 80, 80, 80 },
+ { 40, 40, 40, 40, 88, 88, 88, 88 },
+ { 32, 32, 32, 32, 96, 96, 96, 96 },
+ { 24, 24, 24, 24, 104, 104, 104, 104 },
+ { 16, 16, 16, 16, 112, 112, 112, 112 },
+ { 8, 8, 8, 8, 120, 120, 120, 120 }
+};
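+// Each row i holds the two bilinear taps {128 - 8 * i, 8 * i}, replicated
+// four times; the taps in every row sum to 128 (7-bit precision).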
+
+#if HAVE_SSSE3
+void vp9_filter_block1d16_v8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d16_h8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d8_v8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d8_h8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d4_v8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d4_h8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d16_v8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d16_h8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d8_v8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d8_h8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d4_v8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d4_h8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ /* Ensure the filter can be compressed to int16_t. */
+ if (x_step_q4 == 16 && filter_x[3] != 128) {
+ while (w >= 16) {
+ vp9_filter_block1d16_h8_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 16;
+ dst += 16;
+ w -= 16;
+ }
+ while (w >= 8) {
+ vp9_filter_block1d8_h8_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ }
+ while (w >= 4) {
+ vp9_filter_block1d4_h8_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 4;
+ dst += 4;
+ w -= 4;
+ }
+ }
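+ /* Any remaining width (w not a multiple of 4), or an unsupported
+ x_step_q4/filter combination, falls back to the C reference path. */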
+ if (w) {
+ vp9_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ }
+}
+
+void vp9_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ if (y_step_q4 == 16 && filter_y[3] != 128) {
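+ /* The 8-tap vertical kernels read 3 rows above and 4 rows below each
+ output row, hence the src - src_stride * 3 starting offset. */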
+ while (w >= 16) {
+ vp9_filter_block1d16_v8_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 16;
+ dst += 16;
+ w -= 16;
+ }
+ while (w >= 8) {
+ vp9_filter_block1d8_v8_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ }
+ while (w >= 4) {
+ vp9_filter_block1d4_v8_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 4;
+ dst += 4;
+ w -= 4;
+ }
+ }
+ if (w) {
+ vp9_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ }
+}
+
+void vp9_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ if (x_step_q4 == 16 && filter_x[3] != 128) {
+ while (w >= 16) {
+ vp9_filter_block1d16_h8_avg_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 16;
+ dst += 16;
+ w -= 16;
+ }
+ while (w >= 8) {
+ vp9_filter_block1d8_h8_avg_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ }
+ while (w >= 4) {
+ vp9_filter_block1d4_h8_avg_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 4;
+ dst += 4;
+ w -= 4;
+ }
+ }
+ if (w) {
+ vp9_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ }
+}
+
+void vp9_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ if (y_step_q4 == 16 && filter_y[3] != 128) {
+ while (w >= 16) {
+ vp9_filter_block1d16_v8_avg_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 16;
+ dst += 16;
+ w -= 16;
+ }
+ while (w >= 8) {
+ vp9_filter_block1d8_v8_avg_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ }
+ while (w >= 4) {
+ vp9_filter_block1d4_v8_avg_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 4;
+ dst += 4;
+ w -= 4;
+ }
+ }
+ if (w) {
+ vp9_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ }
+}
+
+void vp9_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64*71);
+
+ assert(w <= 64);
+ assert(h <= 64);
+ if (x_step_q4 == 16 && y_step_q4 == 16) {
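+ /* Separable 2-D filter: the horizontal pass writes h + 7 rows into the
+ 64-wide intermediate buffer (3 rows above and 4 below the block, as
+ required by the 8-tap vertical filter); the vertical pass then reads
+ from row 3 of that buffer. */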
+ vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h + 7);
+ vp9_convolve8_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+ } else {
+ vp9_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+ }
+}
+
+void vp9_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64*71);
+
+ assert(w <= 64);
+ assert(h <= 64);
+ if (x_step_q4 == 16 && y_step_q4 == 16) {
+ vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h + 7);
+ vp9_convolve8_avg_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ } else {
+ vp9_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+ }
+}
+#endif
diff --git a/libvpx/vp9/common/x86/vp9_copy_sse2.asm b/libvpx/vp9/common/x86/vp9_copy_sse2.asm
new file mode 100644
index 0000000..dd522c6
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_copy_sse2.asm
@@ -0,0 +1,152 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
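+; Width-dispatched block copy/average kernels: %1 is "copy" or "avg"; the
+; avg variant pavgb-blends each source block with the existing dst pixels.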
+%macro convolve_fn 1
+INIT_XMM sse2
+cglobal convolve_%1, 4, 7, 4, src, src_stride, dst, dst_stride, \
+ fx, fxs, fy, fys, w, h
+ mov r4d, dword wm
+ cmp r4d, 4
+ je .w4
+ cmp r4d, 8
+ je .w8
+ cmp r4d, 16
+ je .w16
+ cmp r4d, 32
+ je .w32
+
+ mov r4d, dword hm
+.loop64:
+ movu m0, [srcq]
+ movu m1, [srcq+16]
+ movu m2, [srcq+32]
+ movu m3, [srcq+48]
+ add srcq, src_strideq
+%ifidn %1, avg
+ pavgb m0, [dstq]
+ pavgb m1, [dstq+16]
+ pavgb m2, [dstq+32]
+ pavgb m3, [dstq+48]
+%endif
+ mova [dstq ], m0
+ mova [dstq+16], m1
+ mova [dstq+32], m2
+ mova [dstq+48], m3
+ add dstq, dst_strideq
+ dec r4d
+ jnz .loop64
+ RET
+
+.w32:
+ mov r4d, dword hm
+.loop32:
+ movu m0, [srcq]
+ movu m1, [srcq+16]
+ movu m2, [srcq+src_strideq]
+ movu m3, [srcq+src_strideq+16]
+ lea srcq, [srcq+src_strideq*2]
+%ifidn %1, avg
+ pavgb m0, [dstq]
+ pavgb m1, [dstq +16]
+ pavgb m2, [dstq+dst_strideq]
+ pavgb m3, [dstq+dst_strideq+16]
+%endif
+ mova [dstq ], m0
+ mova [dstq +16], m1
+ mova [dstq+dst_strideq ], m2
+ mova [dstq+dst_strideq+16], m3
+ lea dstq, [dstq+dst_strideq*2]
+ sub r4d, 2
+ jnz .loop32
+ RET
+
+.w16:
+ mov r4d, dword hm
+ lea r5q, [src_strideq*3]
+ lea r6q, [dst_strideq*3]
+.loop16:
+ movu m0, [srcq]
+ movu m1, [srcq+src_strideq]
+ movu m2, [srcq+src_strideq*2]
+ movu m3, [srcq+r5q]
+ lea srcq, [srcq+src_strideq*4]
+%ifidn %1, avg
+ pavgb m0, [dstq]
+ pavgb m1, [dstq+dst_strideq]
+ pavgb m2, [dstq+dst_strideq*2]
+ pavgb m3, [dstq+r6q]
+%endif
+ mova [dstq ], m0
+ mova [dstq+dst_strideq ], m1
+ mova [dstq+dst_strideq*2], m2
+ mova [dstq+r6q ], m3
+ lea dstq, [dstq+dst_strideq*4]
+ sub r4d, 4
+ jnz .loop16
+ RET
+
+INIT_MMX sse
+.w8:
+ mov r4d, dword hm
+ lea r5q, [src_strideq*3]
+ lea r6q, [dst_strideq*3]
+.loop8:
+ movu m0, [srcq]
+ movu m1, [srcq+src_strideq]
+ movu m2, [srcq+src_strideq*2]
+ movu m3, [srcq+r5q]
+ lea srcq, [srcq+src_strideq*4]
+%ifidn %1, avg
+ pavgb m0, [dstq]
+ pavgb m1, [dstq+dst_strideq]
+ pavgb m2, [dstq+dst_strideq*2]
+ pavgb m3, [dstq+r6q]
+%endif
+ mova [dstq ], m0
+ mova [dstq+dst_strideq ], m1
+ mova [dstq+dst_strideq*2], m2
+ mova [dstq+r6q ], m3
+ lea dstq, [dstq+dst_strideq*4]
+ sub r4d, 4
+ jnz .loop8
+ RET
+
+.w4:
+ mov r4d, dword hm
+ lea r5q, [src_strideq*3]
+ lea r6q, [dst_strideq*3]
+.loop4:
+ movh m0, [srcq]
+ movh m1, [srcq+src_strideq]
+ movh m2, [srcq+src_strideq*2]
+ movh m3, [srcq+r5q]
+ lea srcq, [srcq+src_strideq*4]
+%ifidn %1, avg
+ pavgb m0, [dstq]
+ pavgb m1, [dstq+dst_strideq]
+ pavgb m2, [dstq+dst_strideq*2]
+ pavgb m3, [dstq+r6q]
+%endif
+ movh [dstq ], m0
+ movh [dstq+dst_strideq ], m1
+ movh [dstq+dst_strideq*2], m2
+ movh [dstq+r6q ], m3
+ lea dstq, [dstq+dst_strideq*4]
+ sub r4d, 4
+ jnz .loop4
+ RET
+%endmacro
+
+convolve_fn copy
+convolve_fn avg
diff --git a/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c b/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c
new file mode 100644
index 0000000..8f740f4
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c
@@ -0,0 +1,3551 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_idct.h"
+
+void vp9_short_idct4x4_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i eight = _mm_set1_epi16(8);
+ const __m128i cst = _mm_setr_epi16((int16_t)cospi_16_64, (int16_t)cospi_16_64,
+ (int16_t)cospi_16_64, (int16_t)-cospi_16_64,
+ (int16_t)cospi_24_64, (int16_t)-cospi_8_64,
+ (int16_t)cospi_8_64, (int16_t)cospi_24_64);
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ __m128i input0, input1, input2, input3;
+
+ // Rows
+ input0 = _mm_loadl_epi64((__m128i *)input);
+ input1 = _mm_loadl_epi64((__m128i *)(input + 4));
+ input2 = _mm_loadl_epi64((__m128i *)(input + 8));
+ input3 = _mm_loadl_epi64((__m128i *)(input + 12));
+
+ // Construct i3, i1, i3, i1, i2, i0, i2, i0
+ input0 = _mm_shufflelo_epi16(input0, 0xd8);
+ input1 = _mm_shufflelo_epi16(input1, 0xd8);
+ input2 = _mm_shufflelo_epi16(input2, 0xd8);
+ input3 = _mm_shufflelo_epi16(input3, 0xd8);
+
+ input0 = _mm_unpacklo_epi32(input0, input0);
+ input1 = _mm_unpacklo_epi32(input1, input1);
+ input2 = _mm_unpacklo_epi32(input2, input2);
+ input3 = _mm_unpacklo_epi32(input3, input3);
+
+ // Stage 1
+ input0 = _mm_madd_epi16(input0, cst);
+ input1 = _mm_madd_epi16(input1, cst);
+ input2 = _mm_madd_epi16(input2, cst);
+ input3 = _mm_madd_epi16(input3, cst);
+
+ input0 = _mm_add_epi32(input0, rounding);
+ input1 = _mm_add_epi32(input1, rounding);
+ input2 = _mm_add_epi32(input2, rounding);
+ input3 = _mm_add_epi32(input3, rounding);
+
+ input0 = _mm_srai_epi32(input0, DCT_CONST_BITS);
+ input1 = _mm_srai_epi32(input1, DCT_CONST_BITS);
+ input2 = _mm_srai_epi32(input2, DCT_CONST_BITS);
+ input3 = _mm_srai_epi32(input3, DCT_CONST_BITS);
+
+ // Stage 2
+ input0 = _mm_packs_epi32(input0, zero);
+ input1 = _mm_packs_epi32(input1, zero);
+ input2 = _mm_packs_epi32(input2, zero);
+ input3 = _mm_packs_epi32(input3, zero);
+
+ // Transpose
+ input1 = _mm_unpacklo_epi16(input0, input1);
+ input3 = _mm_unpacklo_epi16(input2, input3);
+ input0 = _mm_unpacklo_epi32(input1, input3);
+ input1 = _mm_unpackhi_epi32(input1, input3);
+
+ // Switch columns 2 and 3; we then get:
+ // input2: column 1, column 0; input3: column 2, column 3.
+ input1 = _mm_shuffle_epi32(input1, 0x4e);
+ input2 = _mm_add_epi16(input0, input1);
+ input3 = _mm_sub_epi16(input0, input1);
+
+ // Columns
+ // Construct i3, i1, i3, i1, i2, i0, i2, i0
+ input0 = _mm_shufflelo_epi16(input2, 0xd8);
+ input1 = _mm_shufflehi_epi16(input2, 0xd8);
+ input2 = _mm_shufflehi_epi16(input3, 0xd8);
+ input3 = _mm_shufflelo_epi16(input3, 0xd8);
+
+ input0 = _mm_unpacklo_epi32(input0, input0);
+ input1 = _mm_unpackhi_epi32(input1, input1);
+ input2 = _mm_unpackhi_epi32(input2, input2);
+ input3 = _mm_unpacklo_epi32(input3, input3);
+
+ // Stage 1
+ input0 = _mm_madd_epi16(input0, cst);
+ input1 = _mm_madd_epi16(input1, cst);
+ input2 = _mm_madd_epi16(input2, cst);
+ input3 = _mm_madd_epi16(input3, cst);
+
+ input0 = _mm_add_epi32(input0, rounding);
+ input1 = _mm_add_epi32(input1, rounding);
+ input2 = _mm_add_epi32(input2, rounding);
+ input3 = _mm_add_epi32(input3, rounding);
+
+ input0 = _mm_srai_epi32(input0, DCT_CONST_BITS);
+ input1 = _mm_srai_epi32(input1, DCT_CONST_BITS);
+ input2 = _mm_srai_epi32(input2, DCT_CONST_BITS);
+ input3 = _mm_srai_epi32(input3, DCT_CONST_BITS);
+
+ // Stage 2
+ input0 = _mm_packs_epi32(input0, zero);
+ input1 = _mm_packs_epi32(input1, zero);
+ input2 = _mm_packs_epi32(input2, zero);
+ input3 = _mm_packs_epi32(input3, zero);
+
+ // Transpose
+ input1 = _mm_unpacklo_epi16(input0, input1);
+ input3 = _mm_unpacklo_epi16(input2, input3);
+ input0 = _mm_unpacklo_epi32(input1, input3);
+ input1 = _mm_unpackhi_epi32(input1, input3);
+
+ // Switch columns 2 and 3; we then get:
+ // input2: column 1, column 0; input3: column 2, column 3.
+ input1 = _mm_shuffle_epi32(input1, 0x4e);
+ input2 = _mm_add_epi16(input0, input1);
+ input3 = _mm_sub_epi16(input0, input1);
+
+ // Final round and shift
+ input2 = _mm_add_epi16(input2, eight);
+ input3 = _mm_add_epi16(input3, eight);
+
+ input2 = _mm_srai_epi16(input2, 4);
+ input3 = _mm_srai_epi16(input3, 4);
+
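+// RECON_AND_STORE4X4: widen four dest bytes to 16 bits, add the residual
+// row, saturate back to 8-bit pixels, store, and advance dest by stride.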
+#define RECON_AND_STORE4X4(dest, in_x) \
+ { \
+ __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); \
+ d0 = _mm_unpacklo_epi8(d0, zero); \
+ d0 = _mm_add_epi16(in_x, d0); \
+ d0 = _mm_packus_epi16(d0, d0); \
+ *(int *)dest = _mm_cvtsi128_si32(d0); \
+ dest += stride; \
+ }
+
+ input0 = _mm_srli_si128(input2, 8);
+ input1 = _mm_srli_si128(input3, 8);
+
+ RECON_AND_STORE4X4(dest, input2);
+ RECON_AND_STORE4X4(dest, input0);
+ RECON_AND_STORE4X4(dest, input1);
+ RECON_AND_STORE4X4(dest, input3);
+}
+
+void vp9_short_idct4x4_1_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ __m128i dc_value;
+ const __m128i zero = _mm_setzero_si128();
+ int a;
+
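+ // DC-only block: both 1-D idct passes reduce to scaling input[0] by
+ // cospi_16_64, so compute the reconstructed DC value once and splat it.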
+ a = dct_const_round_shift(input[0] * cospi_16_64);
+ a = dct_const_round_shift(a * cospi_16_64);
+ a = ROUND_POWER_OF_TWO(a, 4);
+
+ dc_value = _mm_set1_epi16(a);
+
+ RECON_AND_STORE4X4(dest, dc_value);
+ RECON_AND_STORE4X4(dest, dc_value);
+ RECON_AND_STORE4X4(dest, dc_value);
+ RECON_AND_STORE4X4(dest, dc_value);
+}
+
+void vp9_idct4_1d_sse2(int16_t *input, int16_t *output) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i c1 = _mm_setr_epi16((int16_t)cospi_16_64, (int16_t)cospi_16_64,
+ (int16_t)cospi_16_64, (int16_t)-cospi_16_64,
+ (int16_t)cospi_24_64, (int16_t)-cospi_8_64,
+ (int16_t)cospi_8_64, (int16_t)cospi_24_64);
+ const __m128i c2 = _mm_setr_epi16(1, 1, 1, 1, 1, -1, 1, -1);
+
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ __m128i in, temp;
+
+ // Load input data.
+ in = _mm_loadl_epi64((__m128i *)input);
+
+ // Construct i3, i1, i3, i1, i2, i0, i2, i0
+ in = _mm_shufflelo_epi16(in, 0xd8);
+ in = _mm_unpacklo_epi32(in, in);
+
+ // Stage 1
+ in = _mm_madd_epi16(in, c1);
+ in = _mm_add_epi32(in, rounding);
+ in = _mm_srai_epi32(in, DCT_CONST_BITS);
+ in = _mm_packs_epi32(in, zero);
+
+ // Stage 2
+ temp = _mm_shufflelo_epi16(in, 0x9c);
+ in = _mm_shufflelo_epi16(in, 0xc9);
+ in = _mm_unpacklo_epi64(temp, in);
+ in = _mm_madd_epi16(in, c2);
+ in = _mm_packs_epi32(in, zero);
+
+ // Store results
+ _mm_storel_epi64((__m128i *)output, in);
+}
+
+static INLINE void transpose_4x4(__m128i *res) {
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(res[2], res[3]);
+ res[0] = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ res[2] = _mm_unpackhi_epi32(tr0_0, tr0_1);
+
+ res[1] = _mm_unpackhi_epi64(res[0], res[0]);
+ res[3] = _mm_unpackhi_epi64(res[2], res[2]);
+}
+
+void idct4_1d_sse2(__m128i *in) {
+ const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ __m128i u[8], v[8];
+
+ transpose_4x4(in);
+ // stage 1
+ u[0] = _mm_unpacklo_epi16(in[0], in[2]);
+ u[1] = _mm_unpacklo_epi16(in[1], in[3]);
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
+ v[1] = _mm_madd_epi16(u[0], k__cospi_p16_m16);
+ v[2] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+
+ u[0] = _mm_packs_epi32(v[0], v[2]);
+ u[1] = _mm_packs_epi32(v[1], v[3]);
+ u[2] = _mm_unpackhi_epi64(u[0], u[0]);
+ u[3] = _mm_unpackhi_epi64(u[1], u[1]);
+
+ // stage 2
+ in[0] = _mm_add_epi16(u[0], u[3]);
+ in[1] = _mm_add_epi16(u[1], u[2]);
+ in[2] = _mm_sub_epi16(u[1], u[2]);
+ in[3] = _mm_sub_epi16(u[0], u[3]);
+}
+
+void iadst4_1d_sse2(__m128i *in) {
+ const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
+ const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9);
+ const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9);
+ const __m128i k__sinpi_p03_m04 = pair_set_epi16(sinpi_3_9, -sinpi_4_9);
+ const __m128i k__sinpi_p03_p03 = _mm_set1_epi16(sinpi_3_9);
+ const __m128i kZero = _mm_set1_epi16(0);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ __m128i u[8], v[8], in7;
+
+ transpose_4x4(in);
+ in7 = _mm_add_epi16(in[0], in[3]);
+ in7 = _mm_sub_epi16(in7, in[2]);
+
+ u[0] = _mm_unpacklo_epi16(in[0], in[2]);
+ u[1] = _mm_unpacklo_epi16(in[1], in[3]);
+ u[2] = _mm_unpacklo_epi16(in7, kZero);
+ u[3] = _mm_unpacklo_epi16(in[1], kZero);
+
+ v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p04); // s0 + s3
+ v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p02); // s2 + s5
+ v[2] = _mm_madd_epi16(u[2], k__sinpi_p03_p03); // x2
+ v[3] = _mm_madd_epi16(u[0], k__sinpi_p02_m01); // s1 - s4
+ v[4] = _mm_madd_epi16(u[1], k__sinpi_p03_m04); // s2 - s6
+ v[5] = _mm_madd_epi16(u[3], k__sinpi_p03_p03); // s2
+
+ u[0] = _mm_add_epi32(v[0], v[1]);
+ u[1] = _mm_add_epi32(v[3], v[4]);
+ u[2] = v[2];
+ u[3] = _mm_add_epi32(u[0], u[1]);
+ u[4] = _mm_slli_epi32(v[5], 2);
+ u[5] = _mm_add_epi32(u[3], v[5]);
+ u[6] = _mm_sub_epi32(u[5], u[4]);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+
+ in[0] = _mm_packs_epi32(u[0], u[2]);
+ in[1] = _mm_packs_epi32(u[1], u[3]);
+ in[2] = _mm_unpackhi_epi64(in[0], in[0]);
+ in[3] = _mm_unpackhi_epi64(in[1], in[1]);
+}
+
+void vp9_short_iht4x4_add_sse2(int16_t *input, uint8_t *dest, int stride,
+ int tx_type) {
+ __m128i in[4];
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i eight = _mm_set1_epi16(8);
+
+ in[0] = _mm_loadl_epi64((__m128i *)input);
+ in[1] = _mm_loadl_epi64((__m128i *)(input + 4));
+ in[2] = _mm_loadl_epi64((__m128i *)(input + 8));
+ in[3] = _mm_loadl_epi64((__m128i *)(input + 12));
+
+ switch (tx_type) {
+ case 0: // DCT_DCT
+ idct4_1d_sse2(in);
+ idct4_1d_sse2(in);
+ break;
+ case 1: // ADST_DCT
+ idct4_1d_sse2(in);
+ iadst4_1d_sse2(in);
+ break;
+ case 2: // DCT_ADST
+ iadst4_1d_sse2(in);
+ idct4_1d_sse2(in);
+ break;
+ case 3: // ADST_ADST
+ iadst4_1d_sse2(in);
+ iadst4_1d_sse2(in);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ // Final round and shift
+ in[0] = _mm_add_epi16(in[0], eight);
+ in[1] = _mm_add_epi16(in[1], eight);
+ in[2] = _mm_add_epi16(in[2], eight);
+ in[3] = _mm_add_epi16(in[3], eight);
+
+ in[0] = _mm_srai_epi16(in[0], 4);
+ in[1] = _mm_srai_epi16(in[1], 4);
+ in[2] = _mm_srai_epi16(in[2], 4);
+ in[3] = _mm_srai_epi16(in[3], 4);
+
+ RECON_AND_STORE4X4(dest, in[0]);
+ RECON_AND_STORE4X4(dest, in[1]);
+ RECON_AND_STORE4X4(dest, in[2]);
+ RECON_AND_STORE4X4(dest, in[3]);
+}
+
+#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, \
+ out0, out1, out2, out3, out4, out5, out6, out7) \
+ { \
+ const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
+ const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
+ const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \
+ const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \
+ const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \
+ const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \
+ const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5); \
+ const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7); \
+ \
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); \
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); \
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); \
+ \
+ out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
+ out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
+ out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
+ out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
+ out4 = _mm_unpacklo_epi64(tr1_1, tr1_5); \
+ out5 = _mm_unpackhi_epi64(tr1_1, tr1_5); \
+ out6 = _mm_unpacklo_epi64(tr1_3, tr1_7); \
+ out7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \
+ }
+
+#define TRANSPOSE_4X8(in0, in1, in2, in3, in4, in5, in6, in7, \
+ out0, out1, out2, out3, out4, out5, out6, out7) \
+ { \
+ const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
+ const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
+ const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \
+ const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \
+ \
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
+ \
+ out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
+ out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
+ out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
+ out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
+ out4 = out5 = out6 = out7 = zero; \
+ }
+
+#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
+ const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
+ const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \
+ const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \
+ \
+ in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \
+ in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \
+ in2 = _mm_unpacklo_epi32(tr0_2, tr0_3); /* i5 i4 */ \
+ in3 = _mm_unpackhi_epi32(tr0_2, tr0_3); /* i7 i6 */ \
+ }
+
+// Macro: multiply interleaved 16-bit elements by constant pairs, round and
+// shift the 32-bit products, then pack the results back to 16-bit lanes.
+#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, \
+ cst0, cst1, cst2, cst3, res0, res1, res2, res3) \
+ { \
+ tmp0 = _mm_madd_epi16(lo_0, cst0); \
+ tmp1 = _mm_madd_epi16(hi_0, cst0); \
+ tmp2 = _mm_madd_epi16(lo_0, cst1); \
+ tmp3 = _mm_madd_epi16(hi_0, cst1); \
+ tmp4 = _mm_madd_epi16(lo_1, cst2); \
+ tmp5 = _mm_madd_epi16(hi_1, cst2); \
+ tmp6 = _mm_madd_epi16(lo_1, cst3); \
+ tmp7 = _mm_madd_epi16(hi_1, cst3); \
+ \
+ tmp0 = _mm_add_epi32(tmp0, rounding); \
+ tmp1 = _mm_add_epi32(tmp1, rounding); \
+ tmp2 = _mm_add_epi32(tmp2, rounding); \
+ tmp3 = _mm_add_epi32(tmp3, rounding); \
+ tmp4 = _mm_add_epi32(tmp4, rounding); \
+ tmp5 = _mm_add_epi32(tmp5, rounding); \
+ tmp6 = _mm_add_epi32(tmp6, rounding); \
+ tmp7 = _mm_add_epi32(tmp7, rounding); \
+ \
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
+ tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
+ tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
+ \
+ res0 = _mm_packs_epi32(tmp0, tmp1); \
+ res1 = _mm_packs_epi32(tmp2, tmp3); \
+ res2 = _mm_packs_epi32(tmp4, tmp5); \
+ res3 = _mm_packs_epi32(tmp6, tmp7); \
+ }
+
+#define IDCT8x8_1D \
+ /* Stage1 */ \
+ { \
+ const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7); \
+ const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7); \
+ const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5); \
+ const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5); \
+ \
+ MULTIPLICATION_AND_ADD(lo_17, hi_17, lo_35, hi_35, stg1_0, \
+ stg1_1, stg1_2, stg1_3, stp1_4, \
+ stp1_7, stp1_5, stp1_6) \
+ } \
+ \
+ /* Stage2 */ \
+ { \
+ const __m128i lo_04 = _mm_unpacklo_epi16(in0, in4); \
+ const __m128i hi_04 = _mm_unpackhi_epi16(in0, in4); \
+ const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6); \
+ const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6); \
+ \
+ MULTIPLICATION_AND_ADD(lo_04, hi_04, lo_26, hi_26, stg2_0, \
+ stg2_1, stg2_2, stg2_3, stp2_0, \
+ stp2_1, stp2_2, stp2_3) \
+ \
+ stp2_4 = _mm_adds_epi16(stp1_4, stp1_5); \
+ stp2_5 = _mm_subs_epi16(stp1_4, stp1_5); \
+ stp2_6 = _mm_subs_epi16(stp1_7, stp1_6); \
+ stp2_7 = _mm_adds_epi16(stp1_7, stp1_6); \
+ } \
+ \
+ /* Stage3 */ \
+ { \
+ const __m128i lo_56 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
+ const __m128i hi_56 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
+ \
+ stp1_0 = _mm_adds_epi16(stp2_0, stp2_3); \
+ stp1_1 = _mm_adds_epi16(stp2_1, stp2_2); \
+ stp1_2 = _mm_subs_epi16(stp2_1, stp2_2); \
+ stp1_3 = _mm_subs_epi16(stp2_0, stp2_3); \
+ \
+ tmp0 = _mm_madd_epi16(lo_56, stg2_1); \
+ tmp1 = _mm_madd_epi16(hi_56, stg2_1); \
+ tmp2 = _mm_madd_epi16(lo_56, stg2_0); \
+ tmp3 = _mm_madd_epi16(hi_56, stg2_0); \
+ \
+ tmp0 = _mm_add_epi32(tmp0, rounding); \
+ tmp1 = _mm_add_epi32(tmp1, rounding); \
+ tmp2 = _mm_add_epi32(tmp2, rounding); \
+ tmp3 = _mm_add_epi32(tmp3, rounding); \
+ \
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+ \
+ stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
+ stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
+ } \
+ \
+ /* Stage4 */ \
+ in0 = _mm_adds_epi16(stp1_0, stp2_7); \
+ in1 = _mm_adds_epi16(stp1_1, stp1_6); \
+ in2 = _mm_adds_epi16(stp1_2, stp1_5); \
+ in3 = _mm_adds_epi16(stp1_3, stp2_4); \
+ in4 = _mm_subs_epi16(stp1_3, stp2_4); \
+ in5 = _mm_subs_epi16(stp1_2, stp1_5); \
+ in6 = _mm_subs_epi16(stp1_1, stp1_6); \
+ in7 = _mm_subs_epi16(stp1_0, stp2_7);
+
+#define RECON_AND_STORE(dest, in_x) \
+ { \
+ __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
+ d0 = _mm_unpacklo_epi8(d0, zero); \
+ d0 = _mm_add_epi16(in_x, d0); \
+ d0 = _mm_packus_epi16(d0, d0); \
+ _mm_storel_epi64((__m128i *)(dest), d0); \
+ dest += stride; \
+ }
+
+void vp9_short_idct8x8_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i final_rounding = _mm_set1_epi16(1<<4);
+ const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int i;
+
+ // Load input data.
+ in0 = _mm_load_si128((__m128i *)input);
+ in1 = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in2 = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in3 = _mm_load_si128((__m128i *)(input + 8 * 3));
+ in4 = _mm_load_si128((__m128i *)(input + 8 * 4));
+ in5 = _mm_load_si128((__m128i *)(input + 8 * 5));
+ in6 = _mm_load_si128((__m128i *)(input + 8 * 6));
+ in7 = _mm_load_si128((__m128i *)(input + 8 * 7));
+
+ // 2-D
+ for (i = 0; i < 2; i++) {
+ // 8x8 Transpose is copied from vp9_short_fdct8x8_sse2()
+ TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+
+ // 4-stage 1D idct8x8
+ IDCT8x8_1D
+ }
+
+ // Final rounding and shift
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 5);
+ in1 = _mm_srai_epi16(in1, 5);
+ in2 = _mm_srai_epi16(in2, 5);
+ in3 = _mm_srai_epi16(in3, 5);
+ in4 = _mm_srai_epi16(in4, 5);
+ in5 = _mm_srai_epi16(in5, 5);
+ in6 = _mm_srai_epi16(in6, 5);
+ in7 = _mm_srai_epi16(in7, 5);
+
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+}
+
+void vp9_short_idct8x8_1_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ __m128i dc_value;
+ const __m128i zero = _mm_setzero_si128();
+ int a;
+
+ a = dct_const_round_shift(input[0] * cospi_16_64);
+ a = dct_const_round_shift(a * cospi_16_64);
+ a = ROUND_POWER_OF_TWO(a, 5);
+
+ dc_value = _mm_set1_epi16(a);
+
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+}
+
+// perform 8x8 transpose
+static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
+ const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);
+
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+
+ res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
+ res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
+ res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
+ res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
+ res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
+ res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
+ res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
+ res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
+}
+
+void idct8_1d_sse2(__m128i *in) {
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ in0 = in[0];
+ in1 = in[1];
+ in2 = in[2];
+ in3 = in[3];
+ in4 = in[4];
+ in5 = in[5];
+ in6 = in[6];
+ in7 = in[7];
+
+ // 8x8 Transpose is copied from vp9_short_fdct8x8_sse2()
+ TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+
+ // 4-stage 1D idct8x8
+ IDCT8x8_1D
+ in[0] = in0;
+ in[1] = in1;
+ in[2] = in2;
+ in[3] = in3;
+ in[4] = in4;
+ in[5] = in5;
+ in[6] = in6;
+ in[7] = in7;
+}
+
+void iadst8_1d_sse2(__m128i *in) {
+ const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
+ const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
+ const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64);
+ const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64);
+ const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__const_0 = _mm_set1_epi16(0);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
+ __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15;
+ __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15;
+ __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
+ __m128i s0, s1, s2, s3, s4, s5, s6, s7;
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+
+ // transpose
+ array_transpose_8x8(in, in);
+
+ // properly aligned for butterfly input
+ in0 = in[7];
+ in1 = in[0];
+ in2 = in[5];
+ in3 = in[2];
+ in4 = in[3];
+ in5 = in[4];
+ in6 = in[1];
+ in7 = in[6];
+
+ // column transformation
+ // stage 1
+ // interleave and multiply/add into 32-bit integers
+ s0 = _mm_unpacklo_epi16(in0, in1);
+ s1 = _mm_unpackhi_epi16(in0, in1);
+ s2 = _mm_unpacklo_epi16(in2, in3);
+ s3 = _mm_unpackhi_epi16(in2, in3);
+ s4 = _mm_unpacklo_epi16(in4, in5);
+ s5 = _mm_unpackhi_epi16(in4, in5);
+ s6 = _mm_unpacklo_epi16(in6, in7);
+ s7 = _mm_unpackhi_epi16(in6, in7);
+
+ u0 = _mm_madd_epi16(s0, k__cospi_p02_p30);
+ u1 = _mm_madd_epi16(s1, k__cospi_p02_p30);
+ u2 = _mm_madd_epi16(s0, k__cospi_p30_m02);
+ u3 = _mm_madd_epi16(s1, k__cospi_p30_m02);
+ u4 = _mm_madd_epi16(s2, k__cospi_p10_p22);
+ u5 = _mm_madd_epi16(s3, k__cospi_p10_p22);
+ u6 = _mm_madd_epi16(s2, k__cospi_p22_m10);
+ u7 = _mm_madd_epi16(s3, k__cospi_p22_m10);
+ u8 = _mm_madd_epi16(s4, k__cospi_p18_p14);
+ u9 = _mm_madd_epi16(s5, k__cospi_p18_p14);
+ u10 = _mm_madd_epi16(s4, k__cospi_p14_m18);
+ u11 = _mm_madd_epi16(s5, k__cospi_p14_m18);
+ u12 = _mm_madd_epi16(s6, k__cospi_p26_p06);
+ u13 = _mm_madd_epi16(s7, k__cospi_p26_p06);
+ u14 = _mm_madd_epi16(s6, k__cospi_p06_m26);
+ u15 = _mm_madd_epi16(s7, k__cospi_p06_m26);
+
+ // addition
+ w0 = _mm_add_epi32(u0, u8);
+ w1 = _mm_add_epi32(u1, u9);
+ w2 = _mm_add_epi32(u2, u10);
+ w3 = _mm_add_epi32(u3, u11);
+ w4 = _mm_add_epi32(u4, u12);
+ w5 = _mm_add_epi32(u5, u13);
+ w6 = _mm_add_epi32(u6, u14);
+ w7 = _mm_add_epi32(u7, u15);
+ w8 = _mm_sub_epi32(u0, u8);
+ w9 = _mm_sub_epi32(u1, u9);
+ w10 = _mm_sub_epi32(u2, u10);
+ w11 = _mm_sub_epi32(u3, u11);
+ w12 = _mm_sub_epi32(u4, u12);
+ w13 = _mm_sub_epi32(u5, u13);
+ w14 = _mm_sub_epi32(u6, u14);
+ w15 = _mm_sub_epi32(u7, u15);
+
+ // shift and rounding
+ v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
+ v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
+ v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
+ v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
+ v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
+ v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
+ v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
+ v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
+ v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING);
+ v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING);
+ v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING);
+ v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING);
+ v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING);
+ v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING);
+ v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING);
+ v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING);
+
+ u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ u8 = _mm_srai_epi32(v8, DCT_CONST_BITS);
+ u9 = _mm_srai_epi32(v9, DCT_CONST_BITS);
+ u10 = _mm_srai_epi32(v10, DCT_CONST_BITS);
+ u11 = _mm_srai_epi32(v11, DCT_CONST_BITS);
+ u12 = _mm_srai_epi32(v12, DCT_CONST_BITS);
+ u13 = _mm_srai_epi32(v13, DCT_CONST_BITS);
+ u14 = _mm_srai_epi32(v14, DCT_CONST_BITS);
+ u15 = _mm_srai_epi32(v15, DCT_CONST_BITS);
+
+ // back to 16-bit and pack 8 integers into __m128i
+ in[0] = _mm_packs_epi32(u0, u1);
+ in[1] = _mm_packs_epi32(u2, u3);
+ in[2] = _mm_packs_epi32(u4, u5);
+ in[3] = _mm_packs_epi32(u6, u7);
+ in[4] = _mm_packs_epi32(u8, u9);
+ in[5] = _mm_packs_epi32(u10, u11);
+ in[6] = _mm_packs_epi32(u12, u13);
+ in[7] = _mm_packs_epi32(u14, u15);
+
+ // stage 2
+ s0 = _mm_add_epi16(in[0], in[2]);
+ s1 = _mm_add_epi16(in[1], in[3]);
+ s2 = _mm_sub_epi16(in[0], in[2]);
+ s3 = _mm_sub_epi16(in[1], in[3]);
+ u0 = _mm_unpacklo_epi16(in[4], in[5]);
+ u1 = _mm_unpackhi_epi16(in[4], in[5]);
+ u2 = _mm_unpacklo_epi16(in[6], in[7]);
+ u3 = _mm_unpackhi_epi16(in[6], in[7]);
+
+ v0 = _mm_madd_epi16(u0, k__cospi_p08_p24);
+ v1 = _mm_madd_epi16(u1, k__cospi_p08_p24);
+ v2 = _mm_madd_epi16(u0, k__cospi_p24_m08);
+ v3 = _mm_madd_epi16(u1, k__cospi_p24_m08);
+ v4 = _mm_madd_epi16(u2, k__cospi_m24_p08);
+ v5 = _mm_madd_epi16(u3, k__cospi_m24_p08);
+ v6 = _mm_madd_epi16(u2, k__cospi_p08_p24);
+ v7 = _mm_madd_epi16(u3, k__cospi_p08_p24);
+
+ w0 = _mm_add_epi32(v0, v4);
+ w1 = _mm_add_epi32(v1, v5);
+ w2 = _mm_add_epi32(v2, v6);
+ w3 = _mm_add_epi32(v3, v7);
+ w4 = _mm_sub_epi32(v0, v4);
+ w5 = _mm_sub_epi32(v1, v5);
+ w6 = _mm_sub_epi32(v2, v6);
+ w7 = _mm_sub_epi32(v3, v7);
+
+ v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
+ v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
+ v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
+ v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
+ v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
+ v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
+ v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
+ v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
+
+ u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+
+ // back to 16-bit integers
+ s4 = _mm_packs_epi32(u0, u1);
+ s5 = _mm_packs_epi32(u2, u3);
+ s6 = _mm_packs_epi32(u4, u5);
+ s7 = _mm_packs_epi32(u6, u7);
+
+ // stage 3
+ u0 = _mm_unpacklo_epi16(s2, s3);
+ u1 = _mm_unpackhi_epi16(s2, s3);
+ u2 = _mm_unpacklo_epi16(s6, s7);
+ u3 = _mm_unpackhi_epi16(s6, s7);
+
+ v0 = _mm_madd_epi16(u0, k__cospi_p16_p16);
+ v1 = _mm_madd_epi16(u1, k__cospi_p16_p16);
+ v2 = _mm_madd_epi16(u0, k__cospi_p16_m16);
+ v3 = _mm_madd_epi16(u1, k__cospi_p16_m16);
+ v4 = _mm_madd_epi16(u2, k__cospi_p16_p16);
+ v5 = _mm_madd_epi16(u3, k__cospi_p16_p16);
+ v6 = _mm_madd_epi16(u2, k__cospi_p16_m16);
+ v7 = _mm_madd_epi16(u3, k__cospi_p16_m16);
+
+ u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
+ u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
+ u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
+ u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
+ u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
+ u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
+ u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
+ u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
+
+ v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
+ v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
+ v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
+ v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
+ v4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
+ v5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
+ v6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
+ v7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
+
+ s2 = _mm_packs_epi32(v0, v1);
+ s3 = _mm_packs_epi32(v2, v3);
+ s6 = _mm_packs_epi32(v4, v5);
+ s7 = _mm_packs_epi32(v6, v7);
+
+ in[0] = s0;
+ in[1] = _mm_sub_epi16(k__const_0, s4);
+ in[2] = s6;
+ in[3] = _mm_sub_epi16(k__const_0, s2);
+ in[4] = s3;
+ in[5] = _mm_sub_epi16(k__const_0, s7);
+ in[6] = s5;
+ in[7] = _mm_sub_epi16(k__const_0, s1);
+}
+
+
+void vp9_short_iht8x8_add_sse2(int16_t *input, uint8_t *dest, int stride,
+ int tx_type) {
+ __m128i in[8];
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i final_rounding = _mm_set1_epi16(1<<4);
+
+ // load input data
+ in[0] = _mm_load_si128((__m128i *)input);
+ in[1] = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in[2] = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in[3] = _mm_load_si128((__m128i *)(input + 8 * 3));
+ in[4] = _mm_load_si128((__m128i *)(input + 8 * 4));
+ in[5] = _mm_load_si128((__m128i *)(input + 8 * 5));
+ in[6] = _mm_load_si128((__m128i *)(input + 8 * 6));
+ in[7] = _mm_load_si128((__m128i *)(input + 8 * 7));
+
+ switch (tx_type) {
+ case 0: // DCT_DCT
+ idct8_1d_sse2(in);
+ idct8_1d_sse2(in);
+ break;
+ case 1: // ADST_DCT
+ idct8_1d_sse2(in);
+ iadst8_1d_sse2(in);
+ break;
+ case 2: // DCT_ADST
+ iadst8_1d_sse2(in);
+ idct8_1d_sse2(in);
+ break;
+ case 3: // ADST_ADST
+ iadst8_1d_sse2(in);
+ iadst8_1d_sse2(in);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ // Final rounding and shift
+ in[0] = _mm_adds_epi16(in[0], final_rounding);
+ in[1] = _mm_adds_epi16(in[1], final_rounding);
+ in[2] = _mm_adds_epi16(in[2], final_rounding);
+ in[3] = _mm_adds_epi16(in[3], final_rounding);
+ in[4] = _mm_adds_epi16(in[4], final_rounding);
+ in[5] = _mm_adds_epi16(in[5], final_rounding);
+ in[6] = _mm_adds_epi16(in[6], final_rounding);
+ in[7] = _mm_adds_epi16(in[7], final_rounding);
+
+ in[0] = _mm_srai_epi16(in[0], 5);
+ in[1] = _mm_srai_epi16(in[1], 5);
+ in[2] = _mm_srai_epi16(in[2], 5);
+ in[3] = _mm_srai_epi16(in[3], 5);
+ in[4] = _mm_srai_epi16(in[4], 5);
+ in[5] = _mm_srai_epi16(in[5], 5);
+ in[6] = _mm_srai_epi16(in[6], 5);
+ in[7] = _mm_srai_epi16(in[7], 5);
+
+ RECON_AND_STORE(dest, in[0]);
+ RECON_AND_STORE(dest, in[1]);
+ RECON_AND_STORE(dest, in[2]);
+ RECON_AND_STORE(dest, in[3]);
+ RECON_AND_STORE(dest, in[4]);
+ RECON_AND_STORE(dest, in[5]);
+ RECON_AND_STORE(dest, in[6]);
+ RECON_AND_STORE(dest, in[7]);
+}
+
+void vp9_short_idct10_8x8_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i final_rounding = _mm_set1_epi16(1<<4);
+ const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i stg3_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
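+ // "idct10" variant: at most the first 10 coefficients (in scan order)
+ // are nonzero, so every nonzero value lies in the top four rows.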
+ // Rows. Load 4-row input data.
+ in0 = _mm_load_si128((__m128i *)input);
+ in1 = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in2 = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in3 = _mm_load_si128((__m128i *)(input + 8 * 3));
+
+ // 8x4 Transpose
+ TRANSPOSE_8X4(in0, in1, in2, in3, in0, in1, in2, in3)
+
+ // Stage1
+ {
+ const __m128i lo_17 = _mm_unpackhi_epi16(in0, in3);
+ const __m128i lo_35 = _mm_unpackhi_epi16(in1, in2);
+
+ tmp0 = _mm_madd_epi16(lo_17, stg1_0);
+ tmp2 = _mm_madd_epi16(lo_17, stg1_1);
+ tmp4 = _mm_madd_epi16(lo_35, stg1_2);
+ tmp6 = _mm_madd_epi16(lo_35, stg1_3);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+ stp1_4 = _mm_packs_epi32(tmp0, zero);
+ stp1_7 = _mm_packs_epi32(tmp2, zero);
+ stp1_5 = _mm_packs_epi32(tmp4, zero);
+ stp1_6 = _mm_packs_epi32(tmp6, zero);
+ }
+
+ // Stage2
+ {
+ const __m128i lo_04 = _mm_unpacklo_epi16(in0, in2);
+ const __m128i lo_26 = _mm_unpacklo_epi16(in1, in3);
+
+ tmp0 = _mm_madd_epi16(lo_04, stg2_0);
+ tmp2 = _mm_madd_epi16(lo_04, stg2_1);
+ tmp4 = _mm_madd_epi16(lo_26, stg2_2);
+ tmp6 = _mm_madd_epi16(lo_26, stg2_3);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+ stp2_0 = _mm_packs_epi32(tmp0, zero);
+ stp2_1 = _mm_packs_epi32(tmp2, zero);
+ stp2_2 = _mm_packs_epi32(tmp4, zero);
+ stp2_3 = _mm_packs_epi32(tmp6, zero);
+
+ stp2_4 = _mm_adds_epi16(stp1_4, stp1_5);
+ stp2_5 = _mm_subs_epi16(stp1_4, stp1_5);
+ stp2_6 = _mm_subs_epi16(stp1_7, stp1_6);
+ stp2_7 = _mm_adds_epi16(stp1_7, stp1_6);
+ }
+
+ // Stage3
+ {
+ const __m128i lo_56 = _mm_unpacklo_epi16(stp2_5, stp2_6);
+ stp1_0 = _mm_adds_epi16(stp2_0, stp2_3);
+ stp1_1 = _mm_adds_epi16(stp2_1, stp2_2);
+ stp1_2 = _mm_subs_epi16(stp2_1, stp2_2);
+ stp1_3 = _mm_subs_epi16(stp2_0, stp2_3);
+
+ tmp0 = _mm_madd_epi16(lo_56, stg3_0);
+ tmp2 = _mm_madd_epi16(lo_56, stg2_0); // stg3_1 = stg2_0
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+
+ stp1_5 = _mm_packs_epi32(tmp0, zero);
+ stp1_6 = _mm_packs_epi32(tmp2, zero);
+ }
+
+ // Stage4
+ in0 = _mm_adds_epi16(stp1_0, stp2_7);
+ in1 = _mm_adds_epi16(stp1_1, stp1_6);
+ in2 = _mm_adds_epi16(stp1_2, stp1_5);
+ in3 = _mm_adds_epi16(stp1_3, stp2_4);
+ in4 = _mm_subs_epi16(stp1_3, stp2_4);
+ in5 = _mm_subs_epi16(stp1_2, stp1_5);
+ in6 = _mm_subs_epi16(stp1_1, stp1_6);
+ in7 = _mm_subs_epi16(stp1_0, stp2_7);
+
+ // Columns. 4x8 Transpose
+ TRANSPOSE_4X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7)
+
+ // 1D idct8x8
+ IDCT8x8_1D
+
+ // Final rounding and shift
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 5);
+ in1 = _mm_srai_epi16(in1, 5);
+ in2 = _mm_srai_epi16(in2, 5);
+ in3 = _mm_srai_epi16(in3, 5);
+ in4 = _mm_srai_epi16(in4, 5);
+ in5 = _mm_srai_epi16(in5, 5);
+ in6 = _mm_srai_epi16(in6, 5);
+ in7 = _mm_srai_epi16(in7, 5);
+
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+}
+
+#define IDCT16x16_1D \
+ /* Stage2 */ \
+ { \
+ const __m128i lo_1_15 = _mm_unpacklo_epi16(in1, in15); \
+ const __m128i hi_1_15 = _mm_unpackhi_epi16(in1, in15); \
+ const __m128i lo_9_7 = _mm_unpacklo_epi16(in9, in7); \
+ const __m128i hi_9_7 = _mm_unpackhi_epi16(in9, in7); \
+ const __m128i lo_5_11 = _mm_unpacklo_epi16(in5, in11); \
+ const __m128i hi_5_11 = _mm_unpackhi_epi16(in5, in11); \
+ const __m128i lo_13_3 = _mm_unpacklo_epi16(in13, in3); \
+ const __m128i hi_13_3 = _mm_unpackhi_epi16(in13, in3); \
+ \
+ MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, \
+ stg2_0, stg2_1, stg2_2, stg2_3, \
+ stp2_8, stp2_15, stp2_9, stp2_14) \
+ \
+ MULTIPLICATION_AND_ADD(lo_5_11, hi_5_11, lo_13_3, hi_13_3, \
+ stg2_4, stg2_5, stg2_6, stg2_7, \
+ stp2_10, stp2_13, stp2_11, stp2_12) \
+ } \
+ \
+ /* Stage3 */ \
+ { \
+ const __m128i lo_2_14 = _mm_unpacklo_epi16(in2, in14); \
+ const __m128i hi_2_14 = _mm_unpackhi_epi16(in2, in14); \
+ const __m128i lo_10_6 = _mm_unpacklo_epi16(in10, in6); \
+ const __m128i hi_10_6 = _mm_unpackhi_epi16(in10, in6); \
+ \
+ MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, \
+ stg3_0, stg3_1, stg3_2, stg3_3, \
+ stp1_4, stp1_7, stp1_5, stp1_6) \
+ \
+ stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9); \
+ stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \
+ stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \
+ stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \
+ \
+ stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13); \
+ stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \
+ stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \
+ stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \
+ } \
+ \
+ /* Stage4 */ \
+ { \
+ const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8); \
+ const __m128i hi_0_8 = _mm_unpackhi_epi16(in0, in8); \
+ const __m128i lo_4_12 = _mm_unpacklo_epi16(in4, in12); \
+ const __m128i hi_4_12 = _mm_unpackhi_epi16(in4, in12); \
+ \
+ const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
+ const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
+ \
+ MULTIPLICATION_AND_ADD(lo_0_8, hi_0_8, lo_4_12, hi_4_12, \
+ stg4_0, stg4_1, stg4_2, stg4_3, \
+ stp2_0, stp2_1, stp2_2, stp2_3) \
+ \
+ stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \
+ stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \
+ stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \
+ stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \
+ \
+ MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \
+ stg4_4, stg4_5, stg4_6, stg4_7, \
+ stp2_9, stp2_14, stp2_10, stp2_13) \
+ } \
+ \
+ /* Stage5 */ \
+ { \
+ const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
+ const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
+ \
+ stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \
+ stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \
+ stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \
+ stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \
+ \
+ tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
+ tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
+ tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
+ tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
+ \
+ tmp0 = _mm_add_epi32(tmp0, rounding); \
+ tmp1 = _mm_add_epi32(tmp1, rounding); \
+ tmp2 = _mm_add_epi32(tmp2, rounding); \
+ tmp3 = _mm_add_epi32(tmp3, rounding); \
+ \
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+ \
+ stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
+ stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
+ \
+ stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); \
+ stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \
+ stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \
+ stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \
+ \
+ stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \
+ stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \
+ stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \
+ stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \
+ } \
+ \
+ /* Stage6 */ \
+ { \
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
+ const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
+ const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
+ \
+ stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \
+ stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
+ stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
+ stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \
+ stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \
+ stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
+ stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
+ stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \
+ \
+ MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
+ stg6_0, stg4_0, stg6_0, stg4_0, \
+ stp2_10, stp2_13, stp2_11, stp2_12) \
+ }
+
+void vp9_short_idct16x16_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i final_rounding = _mm_set1_epi16(1<<5);
+ const __m128i zero = _mm_setzero_si128();
+
+ const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64);
+ const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64);
+ const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64);
+
+ const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);
+
+ const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+ const __m128i stg4_7 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+
+ const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+ __m128i in0 = zero, in1 = zero, in2 = zero, in3 = zero, in4 = zero,
+ in5 = zero, in6 = zero, in7 = zero, in8 = zero, in9 = zero,
+ in10 = zero, in11 = zero, in12 = zero, in13 = zero,
+ in14 = zero, in15 = zero;
+ __m128i l0 = zero, l1 = zero, l2 = zero, l3 = zero, l4 = zero, l5 = zero,
+ l6 = zero, l7 = zero, l8 = zero, l9 = zero, l10 = zero, l11 = zero,
+ l12 = zero, l13 = zero, l14 = zero, l15 = zero;
+ __m128i r0 = zero, r1 = zero, r2 = zero, r3 = zero, r4 = zero, r5 = zero,
+ r6 = zero, r7 = zero, r8 = zero, r9 = zero, r10 = zero, r11 = zero,
+ r12 = zero, r13 = zero, r14 = zero, r15 = zero;
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
+ stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+ stp1_8_0, stp1_12_0;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
+ stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int i;
+
+ // We work on an 8x16 block each time, and loop 4 times for the 2-D 16x16 IDCT.
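+ // Passes 0 and 1 are the row transforms: pass 0 consumes coefficient rows
+ // 0-7 into l0-l15, pass 1 consumes rows 8-15 (input += 128) into r0-r15.
+ // Passes 2 and 3 are the column transforms over the left and right 8
+ // columns, rebuilt by transposing the saved halves; only these last two
+ // passes round, reconstruct and store pixels.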
+ for (i = 0; i < 4; i++) {
+ // 1-D idct
+ if (i < 2) {
+ if (i == 1) input += 128;
+
+ // Load input data.
+ in0 = _mm_load_si128((__m128i *)input);
+ in8 = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in1 = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in9 = _mm_load_si128((__m128i *)(input + 8 * 3));
+ in2 = _mm_load_si128((__m128i *)(input + 8 * 4));
+ in10 = _mm_load_si128((__m128i *)(input + 8 * 5));
+ in3 = _mm_load_si128((__m128i *)(input + 8 * 6));
+ in11 = _mm_load_si128((__m128i *)(input + 8 * 7));
+ in4 = _mm_load_si128((__m128i *)(input + 8 * 8));
+ in12 = _mm_load_si128((__m128i *)(input + 8 * 9));
+ in5 = _mm_load_si128((__m128i *)(input + 8 * 10));
+ in13 = _mm_load_si128((__m128i *)(input + 8 * 11));
+ in6 = _mm_load_si128((__m128i *)(input + 8 * 12));
+ in14 = _mm_load_si128((__m128i *)(input + 8 * 13));
+ in7 = _mm_load_si128((__m128i *)(input + 8 * 14));
+ in15 = _mm_load_si128((__m128i *)(input + 8 * 15));
+
+ TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE_8X8(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+ in10, in11, in12, in13, in14, in15);
+ }
+
+ if (i == 2) {
+ TRANSPOSE_8X8(l0, l1, l2, l3, l4, l5, l6, l7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE_8X8(r0, r1, r2, r3, r4, r5, r6, r7, in8, in9, in10, in11, in12,
+ in13, in14, in15);
+ }
+
+ if (i == 3) {
+ TRANSPOSE_8X8(l8, l9, l10, l11, l12, l13, l14, l15, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE_8X8(r8, r9, r10, r11, r12, r13, r14, r15, in8, in9, in10, in11,
+ in12, in13, in14, in15);
+ }
+
+ IDCT16x16_1D
+
+ // Stage7
+ if (i == 0) {
+ // Left 8x16
+ l0 = _mm_add_epi16(stp2_0, stp1_15);
+ l1 = _mm_add_epi16(stp2_1, stp1_14);
+ l2 = _mm_add_epi16(stp2_2, stp2_13);
+ l3 = _mm_add_epi16(stp2_3, stp2_12);
+ l4 = _mm_add_epi16(stp2_4, stp2_11);
+ l5 = _mm_add_epi16(stp2_5, stp2_10);
+ l6 = _mm_add_epi16(stp2_6, stp1_9);
+ l7 = _mm_add_epi16(stp2_7, stp1_8);
+ l8 = _mm_sub_epi16(stp2_7, stp1_8);
+ l9 = _mm_sub_epi16(stp2_6, stp1_9);
+ l10 = _mm_sub_epi16(stp2_5, stp2_10);
+ l11 = _mm_sub_epi16(stp2_4, stp2_11);
+ l12 = _mm_sub_epi16(stp2_3, stp2_12);
+ l13 = _mm_sub_epi16(stp2_2, stp2_13);
+ l14 = _mm_sub_epi16(stp2_1, stp1_14);
+ l15 = _mm_sub_epi16(stp2_0, stp1_15);
+ } else if (i == 1) {
+ // Right 8x16
+ r0 = _mm_add_epi16(stp2_0, stp1_15);
+ r1 = _mm_add_epi16(stp2_1, stp1_14);
+ r2 = _mm_add_epi16(stp2_2, stp2_13);
+ r3 = _mm_add_epi16(stp2_3, stp2_12);
+ r4 = _mm_add_epi16(stp2_4, stp2_11);
+ r5 = _mm_add_epi16(stp2_5, stp2_10);
+ r6 = _mm_add_epi16(stp2_6, stp1_9);
+ r7 = _mm_add_epi16(stp2_7, stp1_8);
+ r8 = _mm_sub_epi16(stp2_7, stp1_8);
+ r9 = _mm_sub_epi16(stp2_6, stp1_9);
+ r10 = _mm_sub_epi16(stp2_5, stp2_10);
+ r11 = _mm_sub_epi16(stp2_4, stp2_11);
+ r12 = _mm_sub_epi16(stp2_3, stp2_12);
+ r13 = _mm_sub_epi16(stp2_2, stp2_13);
+ r14 = _mm_sub_epi16(stp2_1, stp1_14);
+ r15 = _mm_sub_epi16(stp2_0, stp1_15);
+ } else {
+ // Column passes (i == 2, 3): finish the 2-D transform, then round,
+ // shift, reconstruct and store.
+ in0 = _mm_add_epi16(stp2_0, stp1_15);
+ in1 = _mm_add_epi16(stp2_1, stp1_14);
+ in2 = _mm_add_epi16(stp2_2, stp2_13);
+ in3 = _mm_add_epi16(stp2_3, stp2_12);
+ in4 = _mm_add_epi16(stp2_4, stp2_11);
+ in5 = _mm_add_epi16(stp2_5, stp2_10);
+ in6 = _mm_add_epi16(stp2_6, stp1_9);
+ in7 = _mm_add_epi16(stp2_7, stp1_8);
+ in8 = _mm_sub_epi16(stp2_7, stp1_8);
+ in9 = _mm_sub_epi16(stp2_6, stp1_9);
+ in10 = _mm_sub_epi16(stp2_5, stp2_10);
+ in11 = _mm_sub_epi16(stp2_4, stp2_11);
+ in12 = _mm_sub_epi16(stp2_3, stp2_12);
+ in13 = _mm_sub_epi16(stp2_2, stp2_13);
+ in14 = _mm_sub_epi16(stp2_1, stp1_14);
+ in15 = _mm_sub_epi16(stp2_0, stp1_15);
+
+ // Final rounding and shift
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+ in8 = _mm_adds_epi16(in8, final_rounding);
+ in9 = _mm_adds_epi16(in9, final_rounding);
+ in10 = _mm_adds_epi16(in10, final_rounding);
+ in11 = _mm_adds_epi16(in11, final_rounding);
+ in12 = _mm_adds_epi16(in12, final_rounding);
+ in13 = _mm_adds_epi16(in13, final_rounding);
+ in14 = _mm_adds_epi16(in14, final_rounding);
+ in15 = _mm_adds_epi16(in15, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 6);
+ in1 = _mm_srai_epi16(in1, 6);
+ in2 = _mm_srai_epi16(in2, 6);
+ in3 = _mm_srai_epi16(in3, 6);
+ in4 = _mm_srai_epi16(in4, 6);
+ in5 = _mm_srai_epi16(in5, 6);
+ in6 = _mm_srai_epi16(in6, 6);
+ in7 = _mm_srai_epi16(in7, 6);
+ in8 = _mm_srai_epi16(in8, 6);
+ in9 = _mm_srai_epi16(in9, 6);
+ in10 = _mm_srai_epi16(in10, 6);
+ in11 = _mm_srai_epi16(in11, 6);
+ in12 = _mm_srai_epi16(in12, 6);
+ in13 = _mm_srai_epi16(in13, 6);
+ in14 = _mm_srai_epi16(in14, 6);
+ in15 = _mm_srai_epi16(in15, 6);
+
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+ RECON_AND_STORE(dest, in8);
+ RECON_AND_STORE(dest, in9);
+ RECON_AND_STORE(dest, in10);
+ RECON_AND_STORE(dest, in11);
+ RECON_AND_STORE(dest, in12);
+ RECON_AND_STORE(dest, in13);
+ RECON_AND_STORE(dest, in14);
+ RECON_AND_STORE(dest, in15);
+
+ dest += 8 - (stride * 16);
+ }
+ }
+}
+
+void vp9_short_idct16x16_1_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ __m128i dc_value;
+ const __m128i zero = _mm_setzero_si128();
+ int a, i;
+
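+ // A DC-only block reduces to scaling input[0] by cospi_16_64 once per
+ // dimension (each time through dct_const_round_shift) followed by the
+ // final 6-bit rounded shift; the resulting constant is broadcast and
+ // added to all 256 pixels below.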
+ a = dct_const_round_shift(input[0] * cospi_16_64);
+ a = dct_const_round_shift(a * cospi_16_64);
+ a = ROUND_POWER_OF_TWO(a, 6);
+
+ dc_value = _mm_set1_epi16(a);
+
+ for (i = 0; i < 2; ++i) {
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ dest += 8 - (stride * 16);
+ }
+}
+
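+// Transpose a 16x16 block stored as two 8-column halves (res0 = left,
+// res1 = right): each 8x8 quadrant is transposed and the two off-diagonal
+// quadrants are swapped through tbuf.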
+static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
+ __m128i tbuf[8];
+ array_transpose_8x8(res0, res0);
+ array_transpose_8x8(res1, tbuf);
+ array_transpose_8x8(res0 + 8, res1);
+ array_transpose_8x8(res1 + 8, res1 + 8);
+
+ res0[8] = tbuf[0];
+ res0[9] = tbuf[1];
+ res0[10] = tbuf[2];
+ res0[11] = tbuf[3];
+ res0[12] = tbuf[4];
+ res0[13] = tbuf[5];
+ res0[14] = tbuf[6];
+ res0[15] = tbuf[7];
+}
+
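+// Every rotation below follows the same fixed-point idiom: unpack two 16-bit
+// vectors into interleaved pairs, _mm_madd_epi16 against packed (cos, sin)
+// constants, add DCT_CONST_ROUNDING and shift right by DCT_CONST_BITS, then
+// pack back to 16 bits, i.e. the vector counterpart of
+// dct_const_round_shift().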
+void iadst16_1d_8col(__m128i *in) {
+ // Perform the 1-D 16-point ADST on 8 columns.
+ __m128i s[16], x[16], u[32], v[32];
+ const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
+ const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64);
+ const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64);
+ const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64);
+ const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64);
+ const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64);
+ const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64);
+ const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64);
+ const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64);
+ const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64);
+ const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64);
+ const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64);
+ const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64);
+ const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64);
+ const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64);
+ const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64);
+ const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);
+ const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64);
+ const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64);
+ const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64);
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i kZero = _mm_set1_epi16(0);
+
+ u[0] = _mm_unpacklo_epi16(in[15], in[0]);
+ u[1] = _mm_unpackhi_epi16(in[15], in[0]);
+ u[2] = _mm_unpacklo_epi16(in[13], in[2]);
+ u[3] = _mm_unpackhi_epi16(in[13], in[2]);
+ u[4] = _mm_unpacklo_epi16(in[11], in[4]);
+ u[5] = _mm_unpackhi_epi16(in[11], in[4]);
+ u[6] = _mm_unpacklo_epi16(in[9], in[6]);
+ u[7] = _mm_unpackhi_epi16(in[9], in[6]);
+ u[8] = _mm_unpacklo_epi16(in[7], in[8]);
+ u[9] = _mm_unpackhi_epi16(in[7], in[8]);
+ u[10] = _mm_unpacklo_epi16(in[5], in[10]);
+ u[11] = _mm_unpackhi_epi16(in[5], in[10]);
+ u[12] = _mm_unpacklo_epi16(in[3], in[12]);
+ u[13] = _mm_unpackhi_epi16(in[3], in[12]);
+ u[14] = _mm_unpacklo_epi16(in[1], in[14]);
+ u[15] = _mm_unpackhi_epi16(in[1], in[14]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13);
+ v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15);
+ v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15);
+ v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17);
+ v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17);
+ v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11);
+ v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11);
+ v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21);
+ v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21);
+ v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07);
+ v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07);
+ v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25);
+ v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25);
+ v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03);
+ v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03);
+ v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29);
+ v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29);
+
+ u[0] = _mm_add_epi32(v[0], v[16]);
+ u[1] = _mm_add_epi32(v[1], v[17]);
+ u[2] = _mm_add_epi32(v[2], v[18]);
+ u[3] = _mm_add_epi32(v[3], v[19]);
+ u[4] = _mm_add_epi32(v[4], v[20]);
+ u[5] = _mm_add_epi32(v[5], v[21]);
+ u[6] = _mm_add_epi32(v[6], v[22]);
+ u[7] = _mm_add_epi32(v[7], v[23]);
+ u[8] = _mm_add_epi32(v[8], v[24]);
+ u[9] = _mm_add_epi32(v[9], v[25]);
+ u[10] = _mm_add_epi32(v[10], v[26]);
+ u[11] = _mm_add_epi32(v[11], v[27]);
+ u[12] = _mm_add_epi32(v[12], v[28]);
+ u[13] = _mm_add_epi32(v[13], v[29]);
+ u[14] = _mm_add_epi32(v[14], v[30]);
+ u[15] = _mm_add_epi32(v[15], v[31]);
+ u[16] = _mm_sub_epi32(v[0], v[16]);
+ u[17] = _mm_sub_epi32(v[1], v[17]);
+ u[18] = _mm_sub_epi32(v[2], v[18]);
+ u[19] = _mm_sub_epi32(v[3], v[19]);
+ u[20] = _mm_sub_epi32(v[4], v[20]);
+ u[21] = _mm_sub_epi32(v[5], v[21]);
+ u[22] = _mm_sub_epi32(v[6], v[22]);
+ u[23] = _mm_sub_epi32(v[7], v[23]);
+ u[24] = _mm_sub_epi32(v[8], v[24]);
+ u[25] = _mm_sub_epi32(v[9], v[25]);
+ u[26] = _mm_sub_epi32(v[10], v[26]);
+ u[27] = _mm_sub_epi32(v[11], v[27]);
+ u[28] = _mm_sub_epi32(v[12], v[28]);
+ u[29] = _mm_sub_epi32(v[13], v[29]);
+ u[30] = _mm_sub_epi32(v[14], v[30]);
+ u[31] = _mm_sub_epi32(v[15], v[31]);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+ v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+ v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
+ v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+ v[16] = _mm_add_epi32(u[16], k__DCT_CONST_ROUNDING);
+ v[17] = _mm_add_epi32(u[17], k__DCT_CONST_ROUNDING);
+ v[18] = _mm_add_epi32(u[18], k__DCT_CONST_ROUNDING);
+ v[19] = _mm_add_epi32(u[19], k__DCT_CONST_ROUNDING);
+ v[20] = _mm_add_epi32(u[20], k__DCT_CONST_ROUNDING);
+ v[21] = _mm_add_epi32(u[21], k__DCT_CONST_ROUNDING);
+ v[22] = _mm_add_epi32(u[22], k__DCT_CONST_ROUNDING);
+ v[23] = _mm_add_epi32(u[23], k__DCT_CONST_ROUNDING);
+ v[24] = _mm_add_epi32(u[24], k__DCT_CONST_ROUNDING);
+ v[25] = _mm_add_epi32(u[25], k__DCT_CONST_ROUNDING);
+ v[26] = _mm_add_epi32(u[26], k__DCT_CONST_ROUNDING);
+ v[27] = _mm_add_epi32(u[27], k__DCT_CONST_ROUNDING);
+ v[28] = _mm_add_epi32(u[28], k__DCT_CONST_ROUNDING);
+ v[29] = _mm_add_epi32(u[29], k__DCT_CONST_ROUNDING);
+ v[30] = _mm_add_epi32(u[30], k__DCT_CONST_ROUNDING);
+ v[31] = _mm_add_epi32(u[31], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+ u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+ u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
+ u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+ u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+ u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+ u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+ u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+ u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+ u[16] = _mm_srai_epi32(v[16], DCT_CONST_BITS);
+ u[17] = _mm_srai_epi32(v[17], DCT_CONST_BITS);
+ u[18] = _mm_srai_epi32(v[18], DCT_CONST_BITS);
+ u[19] = _mm_srai_epi32(v[19], DCT_CONST_BITS);
+ u[20] = _mm_srai_epi32(v[20], DCT_CONST_BITS);
+ u[21] = _mm_srai_epi32(v[21], DCT_CONST_BITS);
+ u[22] = _mm_srai_epi32(v[22], DCT_CONST_BITS);
+ u[23] = _mm_srai_epi32(v[23], DCT_CONST_BITS);
+ u[24] = _mm_srai_epi32(v[24], DCT_CONST_BITS);
+ u[25] = _mm_srai_epi32(v[25], DCT_CONST_BITS);
+ u[26] = _mm_srai_epi32(v[26], DCT_CONST_BITS);
+ u[27] = _mm_srai_epi32(v[27], DCT_CONST_BITS);
+ u[28] = _mm_srai_epi32(v[28], DCT_CONST_BITS);
+ u[29] = _mm_srai_epi32(v[29], DCT_CONST_BITS);
+ u[30] = _mm_srai_epi32(v[30], DCT_CONST_BITS);
+ u[31] = _mm_srai_epi32(v[31], DCT_CONST_BITS);
+
+ s[0] = _mm_packs_epi32(u[0], u[1]);
+ s[1] = _mm_packs_epi32(u[2], u[3]);
+ s[2] = _mm_packs_epi32(u[4], u[5]);
+ s[3] = _mm_packs_epi32(u[6], u[7]);
+ s[4] = _mm_packs_epi32(u[8], u[9]);
+ s[5] = _mm_packs_epi32(u[10], u[11]);
+ s[6] = _mm_packs_epi32(u[12], u[13]);
+ s[7] = _mm_packs_epi32(u[14], u[15]);
+ s[8] = _mm_packs_epi32(u[16], u[17]);
+ s[9] = _mm_packs_epi32(u[18], u[19]);
+ s[10] = _mm_packs_epi32(u[20], u[21]);
+ s[11] = _mm_packs_epi32(u[22], u[23]);
+ s[12] = _mm_packs_epi32(u[24], u[25]);
+ s[13] = _mm_packs_epi32(u[26], u[27]);
+ s[14] = _mm_packs_epi32(u[28], u[29]);
+ s[15] = _mm_packs_epi32(u[30], u[31]);
+
+ // stage 2
+ u[0] = _mm_unpacklo_epi16(s[8], s[9]);
+ u[1] = _mm_unpackhi_epi16(s[8], s[9]);
+ u[2] = _mm_unpacklo_epi16(s[10], s[11]);
+ u[3] = _mm_unpackhi_epi16(s[10], s[11]);
+ u[4] = _mm_unpacklo_epi16(s[12], s[13]);
+ u[5] = _mm_unpackhi_epi16(s[12], s[13]);
+ u[6] = _mm_unpacklo_epi16(s[14], s[15]);
+ u[7] = _mm_unpackhi_epi16(s[14], s[15]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12);
+
+ u[0] = _mm_add_epi32(v[0], v[8]);
+ u[1] = _mm_add_epi32(v[1], v[9]);
+ u[2] = _mm_add_epi32(v[2], v[10]);
+ u[3] = _mm_add_epi32(v[3], v[11]);
+ u[4] = _mm_add_epi32(v[4], v[12]);
+ u[5] = _mm_add_epi32(v[5], v[13]);
+ u[6] = _mm_add_epi32(v[6], v[14]);
+ u[7] = _mm_add_epi32(v[7], v[15]);
+ u[8] = _mm_sub_epi32(v[0], v[8]);
+ u[9] = _mm_sub_epi32(v[1], v[9]);
+ u[10] = _mm_sub_epi32(v[2], v[10]);
+ u[11] = _mm_sub_epi32(v[3], v[11]);
+ u[12] = _mm_sub_epi32(v[4], v[12]);
+ u[13] = _mm_sub_epi32(v[5], v[13]);
+ u[14] = _mm_sub_epi32(v[6], v[14]);
+ u[15] = _mm_sub_epi32(v[7], v[15]);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+ v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+ v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
+ v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+ u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+ u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
+ u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+ u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+ u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+ u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+ u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+ u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+
+ x[0] = _mm_add_epi16(s[0], s[4]);
+ x[1] = _mm_add_epi16(s[1], s[5]);
+ x[2] = _mm_add_epi16(s[2], s[6]);
+ x[3] = _mm_add_epi16(s[3], s[7]);
+ x[4] = _mm_sub_epi16(s[0], s[4]);
+ x[5] = _mm_sub_epi16(s[1], s[5]);
+ x[6] = _mm_sub_epi16(s[2], s[6]);
+ x[7] = _mm_sub_epi16(s[3], s[7]);
+ x[8] = _mm_packs_epi32(u[0], u[1]);
+ x[9] = _mm_packs_epi32(u[2], u[3]);
+ x[10] = _mm_packs_epi32(u[4], u[5]);
+ x[11] = _mm_packs_epi32(u[6], u[7]);
+ x[12] = _mm_packs_epi32(u[8], u[9]);
+ x[13] = _mm_packs_epi32(u[10], u[11]);
+ x[14] = _mm_packs_epi32(u[12], u[13]);
+ x[15] = _mm_packs_epi32(u[14], u[15]);
+
+ // stage 3
+ u[0] = _mm_unpacklo_epi16(x[4], x[5]);
+ u[1] = _mm_unpackhi_epi16(x[4], x[5]);
+ u[2] = _mm_unpacklo_epi16(x[6], x[7]);
+ u[3] = _mm_unpackhi_epi16(x[6], x[7]);
+ u[4] = _mm_unpacklo_epi16(x[12], x[13]);
+ u[5] = _mm_unpackhi_epi16(x[12], x[13]);
+ u[6] = _mm_unpacklo_epi16(x[14], x[15]);
+ u[7] = _mm_unpackhi_epi16(x[14], x[15]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24);
+
+ u[0] = _mm_add_epi32(v[0], v[4]);
+ u[1] = _mm_add_epi32(v[1], v[5]);
+ u[2] = _mm_add_epi32(v[2], v[6]);
+ u[3] = _mm_add_epi32(v[3], v[7]);
+ u[4] = _mm_sub_epi32(v[0], v[4]);
+ u[5] = _mm_sub_epi32(v[1], v[5]);
+ u[6] = _mm_sub_epi32(v[2], v[6]);
+ u[7] = _mm_sub_epi32(v[3], v[7]);
+ u[8] = _mm_add_epi32(v[8], v[12]);
+ u[9] = _mm_add_epi32(v[9], v[13]);
+ u[10] = _mm_add_epi32(v[10], v[14]);
+ u[11] = _mm_add_epi32(v[11], v[15]);
+ u[12] = _mm_sub_epi32(v[8], v[12]);
+ u[13] = _mm_sub_epi32(v[9], v[13]);
+ u[14] = _mm_sub_epi32(v[10], v[14]);
+ u[15] = _mm_sub_epi32(v[11], v[15]);
+
+ u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+ u[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+ u[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
+ u[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ u[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ u[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ u[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ u[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ u[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+ v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+ v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+ v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+ v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+ v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+ v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+ v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+ v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
+ s[0] = _mm_add_epi16(x[0], x[2]);
+ s[1] = _mm_add_epi16(x[1], x[3]);
+ s[2] = _mm_sub_epi16(x[0], x[2]);
+ s[3] = _mm_sub_epi16(x[1], x[3]);
+ s[4] = _mm_packs_epi32(v[0], v[1]);
+ s[5] = _mm_packs_epi32(v[2], v[3]);
+ s[6] = _mm_packs_epi32(v[4], v[5]);
+ s[7] = _mm_packs_epi32(v[6], v[7]);
+ s[8] = _mm_add_epi16(x[8], x[10]);
+ s[9] = _mm_add_epi16(x[9], x[11]);
+ s[10] = _mm_sub_epi16(x[8], x[10]);
+ s[11] = _mm_sub_epi16(x[9], x[11]);
+ s[12] = _mm_packs_epi32(v[8], v[9]);
+ s[13] = _mm_packs_epi32(v[10], v[11]);
+ s[14] = _mm_packs_epi32(v[12], v[13]);
+ s[15] = _mm_packs_epi32(v[14], v[15]);
+
+ // stage 4
+ u[0] = _mm_unpacklo_epi16(s[2], s[3]);
+ u[1] = _mm_unpackhi_epi16(s[2], s[3]);
+ u[2] = _mm_unpacklo_epi16(s[6], s[7]);
+ u[3] = _mm_unpackhi_epi16(s[6], s[7]);
+ u[4] = _mm_unpacklo_epi16(s[10], s[11]);
+ u[5] = _mm_unpackhi_epi16(s[10], s[11]);
+ u[6] = _mm_unpacklo_epi16(s[14], s[15]);
+ u[7] = _mm_unpackhi_epi16(s[14], s[15]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_m16_m16);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_m16_m16);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_p16_p16);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_p16_p16);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_m16_p16);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_m16_p16);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_m16_m16);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_m16_m16);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_p16_m16);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_p16_m16);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+ u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
+ u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
+ u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
+ u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
+ u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
+ u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
+ u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
+ u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+ v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+ v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+ v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+ v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+ v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+ v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+ v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+ v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
+ in[0] = s[0];
+ in[1] = _mm_sub_epi16(kZero, s[8]);
+ in[2] = s[12];
+ in[3] = _mm_sub_epi16(kZero, s[4]);
+ in[4] = _mm_packs_epi32(v[4], v[5]);
+ in[5] = _mm_packs_epi32(v[12], v[13]);
+ in[6] = _mm_packs_epi32(v[8], v[9]);
+ in[7] = _mm_packs_epi32(v[0], v[1]);
+ in[8] = _mm_packs_epi32(v[2], v[3]);
+ in[9] = _mm_packs_epi32(v[10], v[11]);
+ in[10] = _mm_packs_epi32(v[14], v[15]);
+ in[11] = _mm_packs_epi32(v[6], v[7]);
+ in[12] = s[5];
+ in[13] = _mm_sub_epi16(kZero, s[13]);
+ in[14] = s[9];
+ in[15] = _mm_sub_epi16(kZero, s[1]);
+}
+
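+// idct16_1d_8col mirrors the scalar idct16: stage 1 reorders the inputs,
+// stages 2-6 run the butterfly/rotation network, and stage 7 produces the
+// final adds/subs, eight columns at a time.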
+void idct16_1d_8col(__m128i *in) {
+ const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
+ const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64);
+ const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
+ const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64);
+ const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ __m128i v[16], u[16], s[16], t[16];
+
+ // stage 1
+ s[0] = in[0];
+ s[1] = in[8];
+ s[2] = in[4];
+ s[3] = in[12];
+ s[4] = in[2];
+ s[5] = in[10];
+ s[6] = in[6];
+ s[7] = in[14];
+ s[8] = in[1];
+ s[9] = in[9];
+ s[10] = in[5];
+ s[11] = in[13];
+ s[12] = in[3];
+ s[13] = in[11];
+ s[14] = in[7];
+ s[15] = in[15];
+
+ // stage 2
+ u[0] = _mm_unpacklo_epi16(s[8], s[15]);
+ u[1] = _mm_unpackhi_epi16(s[8], s[15]);
+ u[2] = _mm_unpacklo_epi16(s[9], s[14]);
+ u[3] = _mm_unpackhi_epi16(s[9], s[14]);
+ u[4] = _mm_unpacklo_epi16(s[10], s[13]);
+ u[5] = _mm_unpackhi_epi16(s[10], s[13]);
+ u[6] = _mm_unpacklo_epi16(s[11], s[12]);
+ u[7] = _mm_unpackhi_epi16(s[11], s[12]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p30_m02);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p30_m02);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p02_p30);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p02_p30);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p14_m18);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p14_m18);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p18_p14);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p18_p14);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_p22_m10);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_p22_m10);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_p10_p22);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_p10_p22);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_p06_m26);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_p06_m26);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_p26_p06);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_p26_p06);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+ u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
+ u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
+ u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
+ u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
+ u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
+ u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
+ u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
+ u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+ u[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+ u[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+ u[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+ u[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+ u[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+ u[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+ u[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+ u[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
+ s[8] = _mm_packs_epi32(u[0], u[1]);
+ s[15] = _mm_packs_epi32(u[2], u[3]);
+ s[9] = _mm_packs_epi32(u[4], u[5]);
+ s[14] = _mm_packs_epi32(u[6], u[7]);
+ s[10] = _mm_packs_epi32(u[8], u[9]);
+ s[13] = _mm_packs_epi32(u[10], u[11]);
+ s[11] = _mm_packs_epi32(u[12], u[13]);
+ s[12] = _mm_packs_epi32(u[14], u[15]);
+
+ // stage 3
+ t[0] = s[0];
+ t[1] = s[1];
+ t[2] = s[2];
+ t[3] = s[3];
+ u[0] = _mm_unpacklo_epi16(s[4], s[7]);
+ u[1] = _mm_unpackhi_epi16(s[4], s[7]);
+ u[2] = _mm_unpacklo_epi16(s[5], s[6]);
+ u[3] = _mm_unpackhi_epi16(s[5], s[6]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p28_m04);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p28_m04);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p04_p28);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p04_p28);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p12_m20);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p12_m20);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p20_p12);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p20_p12);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+
+ t[4] = _mm_packs_epi32(u[0], u[1]);
+ t[7] = _mm_packs_epi32(u[2], u[3]);
+ t[5] = _mm_packs_epi32(u[4], u[5]);
+ t[6] = _mm_packs_epi32(u[6], u[7]);
+ t[8] = _mm_add_epi16(s[8], s[9]);
+ t[9] = _mm_sub_epi16(s[8], s[9]);
+ t[10] = _mm_sub_epi16(s[11], s[10]);
+ t[11] = _mm_add_epi16(s[10], s[11]);
+ t[12] = _mm_add_epi16(s[12], s[13]);
+ t[13] = _mm_sub_epi16(s[12], s[13]);
+ t[14] = _mm_sub_epi16(s[15], s[14]);
+ t[15] = _mm_add_epi16(s[14], s[15]);
+
+ // stage 4
+ u[0] = _mm_unpacklo_epi16(t[0], t[1]);
+ u[1] = _mm_unpackhi_epi16(t[0], t[1]);
+ u[2] = _mm_unpacklo_epi16(t[2], t[3]);
+ u[3] = _mm_unpackhi_epi16(t[2], t[3]);
+ u[4] = _mm_unpacklo_epi16(t[9], t[14]);
+ u[5] = _mm_unpackhi_epi16(t[9], t[14]);
+ u[6] = _mm_unpacklo_epi16(t[10], t[13]);
+ u[7] = _mm_unpackhi_epi16(t[10], t[13]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p24_m08);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p24_m08);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_m08_p24);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_m08_p24);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_p24_p08);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_p24_p08);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_m24_m08);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_m24_m08);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_m08_p24);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_m08_p24);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+ u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
+ u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
+ u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
+ u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
+ u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
+ u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
+ u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
+ u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+ u[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+ u[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+ u[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+ u[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+ u[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+ u[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+ u[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+ u[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
+ s[0] = _mm_packs_epi32(u[0], u[1]);
+ s[1] = _mm_packs_epi32(u[2], u[3]);
+ s[2] = _mm_packs_epi32(u[4], u[5]);
+ s[3] = _mm_packs_epi32(u[6], u[7]);
+ s[4] = _mm_add_epi16(t[4], t[5]);
+ s[5] = _mm_sub_epi16(t[4], t[5]);
+ s[6] = _mm_sub_epi16(t[7], t[6]);
+ s[7] = _mm_add_epi16(t[6], t[7]);
+ s[8] = t[8];
+ s[15] = t[15];
+ s[9] = _mm_packs_epi32(u[8], u[9]);
+ s[14] = _mm_packs_epi32(u[10], u[11]);
+ s[10] = _mm_packs_epi32(u[12], u[13]);
+ s[13] = _mm_packs_epi32(u[14], u[15]);
+ s[11] = t[11];
+ s[12] = t[12];
+
+ // stage 5
+ t[0] = _mm_add_epi16(s[0], s[3]);
+ t[1] = _mm_add_epi16(s[1], s[2]);
+ t[2] = _mm_sub_epi16(s[1], s[2]);
+ t[3] = _mm_sub_epi16(s[0], s[3]);
+ t[4] = s[4];
+ t[7] = s[7];
+
+ u[0] = _mm_unpacklo_epi16(s[5], s[6]);
+ u[1] = _mm_unpackhi_epi16(s[5], s[6]);
+ v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ t[5] = _mm_packs_epi32(u[0], u[1]);
+ t[6] = _mm_packs_epi32(u[2], u[3]);
+
+ t[8] = _mm_add_epi16(s[8], s[11]);
+ t[9] = _mm_add_epi16(s[9], s[10]);
+ t[10] = _mm_sub_epi16(s[9], s[10]);
+ t[11] = _mm_sub_epi16(s[8], s[11]);
+ t[12] = _mm_sub_epi16(s[15], s[12]);
+ t[13] = _mm_sub_epi16(s[14], s[13]);
+ t[14] = _mm_add_epi16(s[13], s[14]);
+ t[15] = _mm_add_epi16(s[12], s[15]);
+
+ // stage 6
+ s[0] = _mm_add_epi16(t[0], t[7]);
+ s[1] = _mm_add_epi16(t[1], t[6]);
+ s[2] = _mm_add_epi16(t[2], t[5]);
+ s[3] = _mm_add_epi16(t[3], t[4]);
+ s[4] = _mm_sub_epi16(t[3], t[4]);
+ s[5] = _mm_sub_epi16(t[2], t[5]);
+ s[6] = _mm_sub_epi16(t[1], t[6]);
+ s[7] = _mm_sub_epi16(t[0], t[7]);
+ s[8] = t[8];
+ s[9] = t[9];
+
+ u[0] = _mm_unpacklo_epi16(t[10], t[13]);
+ u[1] = _mm_unpackhi_epi16(t[10], t[13]);
+ u[2] = _mm_unpacklo_epi16(t[11], t[12]);
+ u[3] = _mm_unpackhi_epi16(t[11], t[12]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+
+ s[10] = _mm_packs_epi32(u[0], u[1]);
+ s[13] = _mm_packs_epi32(u[2], u[3]);
+ s[11] = _mm_packs_epi32(u[4], u[5]);
+ s[12] = _mm_packs_epi32(u[6], u[7]);
+ s[14] = t[14];
+ s[15] = t[15];
+
+ // stage 7
+ in[0] = _mm_add_epi16(s[0], s[15]);
+ in[1] = _mm_add_epi16(s[1], s[14]);
+ in[2] = _mm_add_epi16(s[2], s[13]);
+ in[3] = _mm_add_epi16(s[3], s[12]);
+ in[4] = _mm_add_epi16(s[4], s[11]);
+ in[5] = _mm_add_epi16(s[5], s[10]);
+ in[6] = _mm_add_epi16(s[6], s[9]);
+ in[7] = _mm_add_epi16(s[7], s[8]);
+ in[8] = _mm_sub_epi16(s[7], s[8]);
+ in[9] = _mm_sub_epi16(s[6], s[9]);
+ in[10] = _mm_sub_epi16(s[5], s[10]);
+ in[11] = _mm_sub_epi16(s[4], s[11]);
+ in[12] = _mm_sub_epi16(s[3], s[12]);
+ in[13] = _mm_sub_epi16(s[2], s[13]);
+ in[14] = _mm_sub_epi16(s[1], s[14]);
+ in[15] = _mm_sub_epi16(s[0], s[15]);
+}
+
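+// Each 1-D wrapper transposes the 16x16 block first so the transform runs
+// along what were rows, then applies the 8-column kernel to each half;
+// two wrapper calls in sequence therefore yield the full 2-D transform.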
+void idct16_1d_sse2(__m128i *in0, __m128i *in1) {
+ array_transpose_16x16(in0, in1);
+ idct16_1d_8col(in0);
+ idct16_1d_8col(in1);
+}
+
+void iadst16_1d_sse2(__m128i *in0, __m128i *in1) {
+ array_transpose_16x16(in0, in1);
+ iadst16_1d_8col(in0);
+ iadst16_1d_8col(in1);
+}
+
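+// Load one 8-column half of a 16x16 coefficient block; rows are 16 int16_t
+// apart, and the caller advances input by 8 for the right half.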
+static INLINE void load_buffer_8x16(int16_t *input, __m128i *in) {
+ in[0] = _mm_load_si128((__m128i *)(input + 0 * 16));
+ in[1] = _mm_load_si128((__m128i *)(input + 1 * 16));
+ in[2] = _mm_load_si128((__m128i *)(input + 2 * 16));
+ in[3] = _mm_load_si128((__m128i *)(input + 3 * 16));
+ in[4] = _mm_load_si128((__m128i *)(input + 4 * 16));
+ in[5] = _mm_load_si128((__m128i *)(input + 5 * 16));
+ in[6] = _mm_load_si128((__m128i *)(input + 6 * 16));
+ in[7] = _mm_load_si128((__m128i *)(input + 7 * 16));
+
+ in[8] = _mm_load_si128((__m128i *)(input + 8 * 16));
+ in[9] = _mm_load_si128((__m128i *)(input + 9 * 16));
+ in[10] = _mm_load_si128((__m128i *)(input + 10 * 16));
+ in[11] = _mm_load_si128((__m128i *)(input + 11 * 16));
+ in[12] = _mm_load_si128((__m128i *)(input + 12 * 16));
+ in[13] = _mm_load_si128((__m128i *)(input + 13 * 16));
+ in[14] = _mm_load_si128((__m128i *)(input + 14 * 16));
+ in[15] = _mm_load_si128((__m128i *)(input + 15 * 16));
+}
+
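+// Add the rounding bias (1 << 5) and shift right by 6, i.e.
+// ROUND_POWER_OF_TWO(x, 6) per lane, then clamp each result onto the
+// destination via RECON_AND_STORE.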
+static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
+ const __m128i final_rounding = _mm_set1_epi16(1<<5);
+ const __m128i zero = _mm_setzero_si128();
+ // Final rounding and shift
+ in[0] = _mm_adds_epi16(in[0], final_rounding);
+ in[1] = _mm_adds_epi16(in[1], final_rounding);
+ in[2] = _mm_adds_epi16(in[2], final_rounding);
+ in[3] = _mm_adds_epi16(in[3], final_rounding);
+ in[4] = _mm_adds_epi16(in[4], final_rounding);
+ in[5] = _mm_adds_epi16(in[5], final_rounding);
+ in[6] = _mm_adds_epi16(in[6], final_rounding);
+ in[7] = _mm_adds_epi16(in[7], final_rounding);
+ in[8] = _mm_adds_epi16(in[8], final_rounding);
+ in[9] = _mm_adds_epi16(in[9], final_rounding);
+ in[10] = _mm_adds_epi16(in[10], final_rounding);
+ in[11] = _mm_adds_epi16(in[11], final_rounding);
+ in[12] = _mm_adds_epi16(in[12], final_rounding);
+ in[13] = _mm_adds_epi16(in[13], final_rounding);
+ in[14] = _mm_adds_epi16(in[14], final_rounding);
+ in[15] = _mm_adds_epi16(in[15], final_rounding);
+
+ in[0] = _mm_srai_epi16(in[0], 6);
+ in[1] = _mm_srai_epi16(in[1], 6);
+ in[2] = _mm_srai_epi16(in[2], 6);
+ in[3] = _mm_srai_epi16(in[3], 6);
+ in[4] = _mm_srai_epi16(in[4], 6);
+ in[5] = _mm_srai_epi16(in[5], 6);
+ in[6] = _mm_srai_epi16(in[6], 6);
+ in[7] = _mm_srai_epi16(in[7], 6);
+ in[8] = _mm_srai_epi16(in[8], 6);
+ in[9] = _mm_srai_epi16(in[9], 6);
+ in[10] = _mm_srai_epi16(in[10], 6);
+ in[11] = _mm_srai_epi16(in[11], 6);
+ in[12] = _mm_srai_epi16(in[12], 6);
+ in[13] = _mm_srai_epi16(in[13], 6);
+ in[14] = _mm_srai_epi16(in[14], 6);
+ in[15] = _mm_srai_epi16(in[15], 6);
+
+ RECON_AND_STORE(dest, in[0]);
+ RECON_AND_STORE(dest, in[1]);
+ RECON_AND_STORE(dest, in[2]);
+ RECON_AND_STORE(dest, in[3]);
+ RECON_AND_STORE(dest, in[4]);
+ RECON_AND_STORE(dest, in[5]);
+ RECON_AND_STORE(dest, in[6]);
+ RECON_AND_STORE(dest, in[7]);
+ RECON_AND_STORE(dest, in[8]);
+ RECON_AND_STORE(dest, in[9]);
+ RECON_AND_STORE(dest, in[10]);
+ RECON_AND_STORE(dest, in[11]);
+ RECON_AND_STORE(dest, in[12]);
+ RECON_AND_STORE(dest, in[13]);
+ RECON_AND_STORE(dest, in[14]);
+ RECON_AND_STORE(dest, in[15]);
+}
+
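+// The hybrid transform chooses the two 1-D transforms independently: the
+// first call below handles one direction and the second (after the internal
+// transpose) the other, per tx_type.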
+void vp9_short_iht16x16_add_sse2(int16_t *input, uint8_t *dest, int stride,
+ int tx_type) {
+ __m128i in0[16], in1[16];
+
+ load_buffer_8x16(input, in0);
+ input += 8;
+ load_buffer_8x16(input, in1);
+
+ switch (tx_type) {
+ case 0: // DCT_DCT
+ idct16_1d_sse2(in0, in1);
+ idct16_1d_sse2(in0, in1);
+ break;
+ case 1: // ADST_DCT
+ idct16_1d_sse2(in0, in1);
+ iadst16_1d_sse2(in0, in1);
+ break;
+ case 2: // DCT_ADST
+ iadst16_1d_sse2(in0, in1);
+ idct16_1d_sse2(in0, in1);
+ break;
+ case 3: // ADST_ADST
+ iadst16_1d_sse2(in0, in1);
+ iadst16_1d_sse2(in0, in1);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ write_buffer_8x16(dest, in0, stride);
+ dest += 8;
+ write_buffer_8x16(dest, in1, stride);
+}
+
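+// Short-circuit variant for blocks where, as the idct10 name suggests, the
+// few nonzero coefficients all sit in the top-left 4x4 corner: only four
+// input rows are loaded, the transposes are 8x4, and terms known to be zero
+// are dropped from the stages.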
+void vp9_short_idct10_16x16_add_sse2(int16_t *input, uint8_t *dest,
+ int stride) {
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i final_rounding = _mm_set1_epi16(1<<5);
+ const __m128i zero = _mm_setzero_si128();
+
+ const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64);
+ const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64);
+ const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64);
+
+ const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);
+
+ const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+ const __m128i stg4_7 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+
+ const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+ __m128i in0 = zero, in1 = zero, in2 = zero, in3 = zero, in4 = zero,
+ in5 = zero, in6 = zero, in7 = zero, in8 = zero, in9 = zero,
+ in10 = zero, in11 = zero, in12 = zero, in13 = zero,
+ in14 = zero, in15 = zero;
+ __m128i l0 = zero, l1 = zero, l2 = zero, l3 = zero, l4 = zero, l5 = zero,
+ l6 = zero, l7 = zero, l8 = zero, l9 = zero, l10 = zero, l11 = zero,
+ l12 = zero, l13 = zero, l14 = zero, l15 = zero;
+
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
+ stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+ stp1_8_0, stp1_12_0;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
+ stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int i;
+ // 1-D IDCT. Load input data (only coefficient rows 0-3 are used).
+ in0 = _mm_load_si128((__m128i *)input);
+ in8 = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in1 = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in9 = _mm_load_si128((__m128i *)(input + 8 * 3));
+ in2 = _mm_load_si128((__m128i *)(input + 8 * 4));
+ in10 = _mm_load_si128((__m128i *)(input + 8 * 5));
+ in3 = _mm_load_si128((__m128i *)(input + 8 * 6));
+ in11 = _mm_load_si128((__m128i *)(input + 8 * 7));
+
+ TRANSPOSE_8X4(in0, in1, in2, in3, in0, in1, in2, in3);
+ TRANSPOSE_8X4(in8, in9, in10, in11, in8, in9, in10, in11);
+
+ // Stage2
+ {
+ const __m128i lo_1_15 = _mm_unpackhi_epi16(in0, in11);
+ const __m128i lo_9_7 = _mm_unpackhi_epi16(in8, in3);
+ const __m128i lo_5_11 = _mm_unpackhi_epi16(in2, in9);
+ const __m128i lo_13_3 = _mm_unpackhi_epi16(in10, in1);
+
+ tmp0 = _mm_madd_epi16(lo_1_15, stg2_0);
+ tmp2 = _mm_madd_epi16(lo_1_15, stg2_1);
+ tmp4 = _mm_madd_epi16(lo_9_7, stg2_2);
+ tmp6 = _mm_madd_epi16(lo_9_7, stg2_3);
+ tmp1 = _mm_madd_epi16(lo_5_11, stg2_4);
+ tmp3 = _mm_madd_epi16(lo_5_11, stg2_5);
+ tmp5 = _mm_madd_epi16(lo_13_3, stg2_6);
+ tmp7 = _mm_madd_epi16(lo_13_3, stg2_7);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+ tmp1 = _mm_add_epi32(tmp1, rounding);
+ tmp3 = _mm_add_epi32(tmp3, rounding);
+ tmp5 = _mm_add_epi32(tmp5, rounding);
+ tmp7 = _mm_add_epi32(tmp7, rounding);
+
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+ tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
+ tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
+
+ stp2_8 = _mm_packs_epi32(tmp0, zero);
+ stp2_15 = _mm_packs_epi32(tmp2, zero);
+ stp2_9 = _mm_packs_epi32(tmp4, zero);
+ stp2_14 = _mm_packs_epi32(tmp6, zero);
+
+ stp2_10 = _mm_packs_epi32(tmp1, zero);
+ stp2_13 = _mm_packs_epi32(tmp3, zero);
+ stp2_11 = _mm_packs_epi32(tmp5, zero);
+ stp2_12 = _mm_packs_epi32(tmp7, zero);
+ }
+
+ // Stage3
+ {
+ const __m128i lo_2_14 = _mm_unpacklo_epi16(in1, in11);
+ const __m128i lo_10_6 = _mm_unpacklo_epi16(in9, in3);
+
+ tmp0 = _mm_madd_epi16(lo_2_14, stg3_0);
+ tmp2 = _mm_madd_epi16(lo_2_14, stg3_1);
+ tmp4 = _mm_madd_epi16(lo_10_6, stg3_2);
+ tmp6 = _mm_madd_epi16(lo_10_6, stg3_3);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+ stp1_4 = _mm_packs_epi32(tmp0, zero);
+ stp1_7 = _mm_packs_epi32(tmp2, zero);
+ stp1_5 = _mm_packs_epi32(tmp4, zero);
+ stp1_6 = _mm_packs_epi32(tmp6, zero);
+
+ stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9);
+ stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);
+ stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);
+ stp1_11 = _mm_add_epi16(stp2_11, stp2_10);
+
+ stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13);
+ stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);
+ stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);
+ stp1_15 = _mm_add_epi16(stp2_15, stp2_14);
+ }
+
+ // Stage4
+ {
+ const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8);
+ const __m128i lo_4_12 = _mm_unpacklo_epi16(in2, in10);
+ const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+
+ tmp0 = _mm_madd_epi16(lo_0_8, stg4_0);
+ tmp2 = _mm_madd_epi16(lo_0_8, stg4_1);
+ tmp4 = _mm_madd_epi16(lo_4_12, stg4_2);
+ tmp6 = _mm_madd_epi16(lo_4_12, stg4_3);
+ tmp1 = _mm_madd_epi16(lo_9_14, stg4_4);
+ tmp3 = _mm_madd_epi16(lo_9_14, stg4_5);
+ tmp5 = _mm_madd_epi16(lo_10_13, stg4_6);
+ tmp7 = _mm_madd_epi16(lo_10_13, stg4_7);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+ tmp1 = _mm_add_epi32(tmp1, rounding);
+ tmp3 = _mm_add_epi32(tmp3, rounding);
+ tmp5 = _mm_add_epi32(tmp5, rounding);
+ tmp7 = _mm_add_epi32(tmp7, rounding);
+
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+ tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
+ tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
+
+ stp2_0 = _mm_packs_epi32(tmp0, zero);
+ stp2_1 = _mm_packs_epi32(tmp2, zero);
+ stp2_2 = _mm_packs_epi32(tmp4, zero);
+ stp2_3 = _mm_packs_epi32(tmp6, zero);
+ stp2_9 = _mm_packs_epi32(tmp1, zero);
+ stp2_14 = _mm_packs_epi32(tmp3, zero);
+ stp2_10 = _mm_packs_epi32(tmp5, zero);
+ stp2_13 = _mm_packs_epi32(tmp7, zero);
+
+ stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
+ stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
+ stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);
+ stp2_7 = _mm_add_epi16(stp1_7, stp1_6);
+ }
+
+ // Stage5 and Stage6
+ {
+ stp1_0 = _mm_add_epi16(stp2_0, stp2_3);
+ stp1_1 = _mm_add_epi16(stp2_1, stp2_2);
+ stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
+ stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);
+
+ stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);
+ stp1_9 = _mm_add_epi16(stp2_9, stp2_10);
+ stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);
+ stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11);
+
+ stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0);
+ stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);
+ stp1_14 = _mm_add_epi16(stp2_14, stp2_13);
+ stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0);
+ }
+
+ // Stage6
+ {
+ const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+ const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);
+
+ tmp1 = _mm_madd_epi16(lo_6_5, stg4_1);
+ tmp3 = _mm_madd_epi16(lo_6_5, stg4_0);
+ tmp0 = _mm_madd_epi16(lo_10_13, stg6_0);
+ tmp2 = _mm_madd_epi16(lo_10_13, stg4_0);
+ tmp4 = _mm_madd_epi16(lo_11_12, stg6_0);
+ tmp6 = _mm_madd_epi16(lo_11_12, stg4_0);
+
+ tmp1 = _mm_add_epi32(tmp1, rounding);
+ tmp3 = _mm_add_epi32(tmp3, rounding);
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+ stp1_5 = _mm_packs_epi32(tmp1, zero);
+ stp1_6 = _mm_packs_epi32(tmp3, zero);
+ stp2_10 = _mm_packs_epi32(tmp0, zero);
+ stp2_13 = _mm_packs_epi32(tmp2, zero);
+ stp2_11 = _mm_packs_epi32(tmp4, zero);
+ stp2_12 = _mm_packs_epi32(tmp6, zero);
+
+ stp2_0 = _mm_add_epi16(stp1_0, stp2_7);
+ stp2_1 = _mm_add_epi16(stp1_1, stp1_6);
+ stp2_2 = _mm_add_epi16(stp1_2, stp1_5);
+ stp2_3 = _mm_add_epi16(stp1_3, stp2_4);
+ stp2_4 = _mm_sub_epi16(stp1_3, stp2_4);
+ stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);
+ stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);
+ stp2_7 = _mm_sub_epi16(stp1_0, stp2_7);
+ }
+
+ // Stage7. Left 8x16 only.
+ l0 = _mm_add_epi16(stp2_0, stp1_15);
+ l1 = _mm_add_epi16(stp2_1, stp1_14);
+ l2 = _mm_add_epi16(stp2_2, stp2_13);
+ l3 = _mm_add_epi16(stp2_3, stp2_12);
+ l4 = _mm_add_epi16(stp2_4, stp2_11);
+ l5 = _mm_add_epi16(stp2_5, stp2_10);
+ l6 = _mm_add_epi16(stp2_6, stp1_9);
+ l7 = _mm_add_epi16(stp2_7, stp1_8);
+ l8 = _mm_sub_epi16(stp2_7, stp1_8);
+ l9 = _mm_sub_epi16(stp2_6, stp1_9);
+ l10 = _mm_sub_epi16(stp2_5, stp2_10);
+ l11 = _mm_sub_epi16(stp2_4, stp2_11);
+ l12 = _mm_sub_epi16(stp2_3, stp2_12);
+ l13 = _mm_sub_epi16(stp2_2, stp2_13);
+ l14 = _mm_sub_epi16(stp2_1, stp1_14);
+ l15 = _mm_sub_epi16(stp2_0, stp1_15);
+
+  // 2-D idct: process two 8x16 blocks.
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ TRANSPOSE_4X8(l0, l1, l2, l3, l4, l5, l6, l7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+
+ if (i == 1)
+ TRANSPOSE_4X8(l8, l9, l10, l11, l12, l13, l14, l15, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+
+ in8 = in9 = in10 = in11 = in12 = in13 = in14 = in15 = zero;
+
+ IDCT16x16_1D
+
+ // Stage7
+ in0 = _mm_add_epi16(stp2_0, stp1_15);
+ in1 = _mm_add_epi16(stp2_1, stp1_14);
+ in2 = _mm_add_epi16(stp2_2, stp2_13);
+ in3 = _mm_add_epi16(stp2_3, stp2_12);
+ in4 = _mm_add_epi16(stp2_4, stp2_11);
+ in5 = _mm_add_epi16(stp2_5, stp2_10);
+ in6 = _mm_add_epi16(stp2_6, stp1_9);
+ in7 = _mm_add_epi16(stp2_7, stp1_8);
+ in8 = _mm_sub_epi16(stp2_7, stp1_8);
+ in9 = _mm_sub_epi16(stp2_6, stp1_9);
+ in10 = _mm_sub_epi16(stp2_5, stp2_10);
+ in11 = _mm_sub_epi16(stp2_4, stp2_11);
+ in12 = _mm_sub_epi16(stp2_3, stp2_12);
+ in13 = _mm_sub_epi16(stp2_2, stp2_13);
+ in14 = _mm_sub_epi16(stp2_1, stp1_14);
+ in15 = _mm_sub_epi16(stp2_0, stp1_15);
+
+ // Final rounding and shift
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+ in8 = _mm_adds_epi16(in8, final_rounding);
+ in9 = _mm_adds_epi16(in9, final_rounding);
+ in10 = _mm_adds_epi16(in10, final_rounding);
+ in11 = _mm_adds_epi16(in11, final_rounding);
+ in12 = _mm_adds_epi16(in12, final_rounding);
+ in13 = _mm_adds_epi16(in13, final_rounding);
+ in14 = _mm_adds_epi16(in14, final_rounding);
+ in15 = _mm_adds_epi16(in15, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 6);
+ in1 = _mm_srai_epi16(in1, 6);
+ in2 = _mm_srai_epi16(in2, 6);
+ in3 = _mm_srai_epi16(in3, 6);
+ in4 = _mm_srai_epi16(in4, 6);
+ in5 = _mm_srai_epi16(in5, 6);
+ in6 = _mm_srai_epi16(in6, 6);
+ in7 = _mm_srai_epi16(in7, 6);
+ in8 = _mm_srai_epi16(in8, 6);
+ in9 = _mm_srai_epi16(in9, 6);
+ in10 = _mm_srai_epi16(in10, 6);
+ in11 = _mm_srai_epi16(in11, 6);
+ in12 = _mm_srai_epi16(in12, 6);
+ in13 = _mm_srai_epi16(in13, 6);
+ in14 = _mm_srai_epi16(in14, 6);
+ in15 = _mm_srai_epi16(in15, 6);
+
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+ RECON_AND_STORE(dest, in8);
+ RECON_AND_STORE(dest, in9);
+ RECON_AND_STORE(dest, in10);
+ RECON_AND_STORE(dest, in11);
+ RECON_AND_STORE(dest, in12);
+ RECON_AND_STORE(dest, in13);
+ RECON_AND_STORE(dest, in14);
+ RECON_AND_STORE(dest, in15);
+
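+    // Each RECON_AND_STORE above advances dest by one stride; step back up
+    // to the top row of the next 8-pixel-wide strip.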
+ dest += 8 - (stride * 16);
+ }
+}
+
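+// Load eight 16-bit dequantized coefficients into reg, then advance the
+// input pointer to the next group of eight.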
+#define LOAD_DQCOEFF(reg, input) \
+  { \
+    reg = _mm_load_si128((__m128i *) input); \
+    input += 8; \
+  }
+
+void vp9_short_idct32x32_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
+
+ // idct constants for each stage
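+  // Each stgN_M packs a 16-bit constant pair (a, b) so that one
+  // _mm_madd_epi16 on interleaved inputs (x, y) yields a * x + b * y
+  // per 32-bit lane.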
+ const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
+ const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
+ const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64);
+ const __m128i stg1_3 = pair_set_epi16(cospi_17_64, cospi_15_64);
+ const __m128i stg1_4 = pair_set_epi16(cospi_23_64, -cospi_9_64);
+ const __m128i stg1_5 = pair_set_epi16(cospi_9_64, cospi_23_64);
+ const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64);
+ const __m128i stg1_7 = pair_set_epi16(cospi_25_64, cospi_7_64);
+ const __m128i stg1_8 = pair_set_epi16(cospi_27_64, -cospi_5_64);
+ const __m128i stg1_9 = pair_set_epi16(cospi_5_64, cospi_27_64);
+ const __m128i stg1_10 = pair_set_epi16(cospi_11_64, -cospi_21_64);
+ const __m128i stg1_11 = pair_set_epi16(cospi_21_64, cospi_11_64);
+ const __m128i stg1_12 = pair_set_epi16(cospi_19_64, -cospi_13_64);
+ const __m128i stg1_13 = pair_set_epi16(cospi_13_64, cospi_19_64);
+ const __m128i stg1_14 = pair_set_epi16(cospi_3_64, -cospi_29_64);
+ const __m128i stg1_15 = pair_set_epi16(cospi_29_64, cospi_3_64);
+
+ const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64);
+ const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64);
+ const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64);
+
+ const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);
+ const __m128i stg3_4 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i stg3_5 = pair_set_epi16(cospi_28_64, cospi_4_64);
+ const __m128i stg3_6 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m128i stg3_8 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i stg3_9 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i stg3_10 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
+
+ const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+
+ const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in12,
+ in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23,
+ in24, in25, in26, in27, in28, in29, in30, in31;
+ __m128i col[128];
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
+ stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+ stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22,
+ stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29,
+ stp1_30, stp1_31;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
+ stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
+ stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22,
+ stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29,
+ stp2_30, stp2_31;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int i, j, i32;
+ __m128i zero_idx[16];
+ int zero_flag[2];
+
+  // We work on an 8x32 block each time, looping 8 times for the 2-D 32x32
+  // idct.
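+  // Iterations 0-3 run the first 1-D idct on the dequantized input and keep
+  // the intermediate results in col[]; iterations 4-7 run the second 1-D
+  // idct on col[] and reconstruct the result into dest.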
+ for (i = 0; i < 8; i++) {
+ i32 = (i << 5);
+ if (i < 4) {
+ // First 1-D idct
+ // Load input data.
+ LOAD_DQCOEFF(in0, input);
+ LOAD_DQCOEFF(in8, input);
+ LOAD_DQCOEFF(in16, input);
+ LOAD_DQCOEFF(in24, input);
+ LOAD_DQCOEFF(in1, input);
+ LOAD_DQCOEFF(in9, input);
+ LOAD_DQCOEFF(in17, input);
+ LOAD_DQCOEFF(in25, input);
+ LOAD_DQCOEFF(in2, input);
+ LOAD_DQCOEFF(in10, input);
+ LOAD_DQCOEFF(in18, input);
+ LOAD_DQCOEFF(in26, input);
+ LOAD_DQCOEFF(in3, input);
+ LOAD_DQCOEFF(in11, input);
+ LOAD_DQCOEFF(in19, input);
+ LOAD_DQCOEFF(in27, input);
+
+ LOAD_DQCOEFF(in4, input);
+ LOAD_DQCOEFF(in12, input);
+ LOAD_DQCOEFF(in20, input);
+ LOAD_DQCOEFF(in28, input);
+ LOAD_DQCOEFF(in5, input);
+ LOAD_DQCOEFF(in13, input);
+ LOAD_DQCOEFF(in21, input);
+ LOAD_DQCOEFF(in29, input);
+ LOAD_DQCOEFF(in6, input);
+ LOAD_DQCOEFF(in14, input);
+ LOAD_DQCOEFF(in22, input);
+ LOAD_DQCOEFF(in30, input);
+ LOAD_DQCOEFF(in7, input);
+ LOAD_DQCOEFF(in15, input);
+ LOAD_DQCOEFF(in23, input);
+ LOAD_DQCOEFF(in31, input);
+
+      // Check whether all entries are zero.
+ zero_idx[0] = _mm_or_si128(in0, in1);
+ zero_idx[1] = _mm_or_si128(in2, in3);
+ zero_idx[2] = _mm_or_si128(in4, in5);
+ zero_idx[3] = _mm_or_si128(in6, in7);
+ zero_idx[4] = _mm_or_si128(in8, in9);
+ zero_idx[5] = _mm_or_si128(in10, in11);
+ zero_idx[6] = _mm_or_si128(in12, in13);
+ zero_idx[7] = _mm_or_si128(in14, in15);
+ zero_idx[8] = _mm_or_si128(in16, in17);
+ zero_idx[9] = _mm_or_si128(in18, in19);
+ zero_idx[10] = _mm_or_si128(in20, in21);
+ zero_idx[11] = _mm_or_si128(in22, in23);
+ zero_idx[12] = _mm_or_si128(in24, in25);
+ zero_idx[13] = _mm_or_si128(in26, in27);
+ zero_idx[14] = _mm_or_si128(in28, in29);
+ zero_idx[15] = _mm_or_si128(in30, in31);
+
+ zero_idx[0] = _mm_or_si128(zero_idx[0], zero_idx[1]);
+ zero_idx[1] = _mm_or_si128(zero_idx[2], zero_idx[3]);
+ zero_idx[2] = _mm_or_si128(zero_idx[4], zero_idx[5]);
+ zero_idx[3] = _mm_or_si128(zero_idx[6], zero_idx[7]);
+ zero_idx[4] = _mm_or_si128(zero_idx[8], zero_idx[9]);
+ zero_idx[5] = _mm_or_si128(zero_idx[10], zero_idx[11]);
+ zero_idx[6] = _mm_or_si128(zero_idx[12], zero_idx[13]);
+ zero_idx[7] = _mm_or_si128(zero_idx[14], zero_idx[15]);
+
+ zero_idx[8] = _mm_or_si128(zero_idx[0], zero_idx[1]);
+ zero_idx[9] = _mm_or_si128(zero_idx[2], zero_idx[3]);
+ zero_idx[10] = _mm_or_si128(zero_idx[4], zero_idx[5]);
+ zero_idx[11] = _mm_or_si128(zero_idx[6], zero_idx[7]);
+ zero_idx[12] = _mm_or_si128(zero_idx[8], zero_idx[9]);
+ zero_idx[13] = _mm_or_si128(zero_idx[10], zero_idx[11]);
+ zero_idx[14] = _mm_or_si128(zero_idx[12], zero_idx[13]);
+
+ zero_idx[0] = _mm_unpackhi_epi64(zero_idx[14], zero_idx[14]);
+ zero_idx[1] = _mm_or_si128(zero_idx[0], zero_idx[14]);
+ zero_idx[2] = _mm_srli_epi64(zero_idx[1], 32);
+ zero_flag[0] = _mm_cvtsi128_si32(zero_idx[1]);
+ zero_flag[1] = _mm_cvtsi128_si32(zero_idx[2]);
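+      // zero_flag[0]/[1] now hold the low/high 32 bits of the OR of all 32
+      // input vectors; if both are zero, this 8x32 strip is all zero and
+      // its transform can be skipped.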
+
+ if (!zero_flag[0] && !zero_flag[1]) {
+ col[i32 + 0] = _mm_setzero_si128();
+ col[i32 + 1] = _mm_setzero_si128();
+ col[i32 + 2] = _mm_setzero_si128();
+ col[i32 + 3] = _mm_setzero_si128();
+ col[i32 + 4] = _mm_setzero_si128();
+ col[i32 + 5] = _mm_setzero_si128();
+ col[i32 + 6] = _mm_setzero_si128();
+ col[i32 + 7] = _mm_setzero_si128();
+ col[i32 + 8] = _mm_setzero_si128();
+ col[i32 + 9] = _mm_setzero_si128();
+ col[i32 + 10] = _mm_setzero_si128();
+ col[i32 + 11] = _mm_setzero_si128();
+ col[i32 + 12] = _mm_setzero_si128();
+ col[i32 + 13] = _mm_setzero_si128();
+ col[i32 + 14] = _mm_setzero_si128();
+ col[i32 + 15] = _mm_setzero_si128();
+ col[i32 + 16] = _mm_setzero_si128();
+ col[i32 + 17] = _mm_setzero_si128();
+ col[i32 + 18] = _mm_setzero_si128();
+ col[i32 + 19] = _mm_setzero_si128();
+ col[i32 + 20] = _mm_setzero_si128();
+ col[i32 + 21] = _mm_setzero_si128();
+ col[i32 + 22] = _mm_setzero_si128();
+ col[i32 + 23] = _mm_setzero_si128();
+ col[i32 + 24] = _mm_setzero_si128();
+ col[i32 + 25] = _mm_setzero_si128();
+ col[i32 + 26] = _mm_setzero_si128();
+ col[i32 + 27] = _mm_setzero_si128();
+ col[i32 + 28] = _mm_setzero_si128();
+ col[i32 + 29] = _mm_setzero_si128();
+ col[i32 + 30] = _mm_setzero_si128();
+ col[i32 + 31] = _mm_setzero_si128();
+ continue;
+ }
+
+ // Transpose 32x8 block to 8x32 block
+ TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE_8X8(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+ in10, in11, in12, in13, in14, in15);
+ TRANSPOSE_8X8(in16, in17, in18, in19, in20, in21, in22, in23, in16, in17,
+ in18, in19, in20, in21, in22, in23);
+ TRANSPOSE_8X8(in24, in25, in26, in27, in28, in29, in30, in31, in24, in25,
+ in26, in27, in28, in29, in30, in31);
+ } else {
+ // Second 1-D idct
+ j = i - 4;
+
+ // Transpose 32x8 block to 8x32 block
+ TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
+ col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
+ col[j * 8 + 6], col[j * 8 + 7], in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ j += 4;
+ TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
+ col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
+ col[j * 8 + 6], col[j * 8 + 7], in8, in9, in10,
+ in11, in12, in13, in14, in15);
+ j += 4;
+ TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
+ col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
+ col[j * 8 + 6], col[j * 8 + 7], in16, in17, in18,
+ in19, in20, in21, in22, in23);
+ j += 4;
+ TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
+ col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
+ col[j * 8 + 6], col[j * 8 + 7], in24, in25, in26, in27,
+ in28, in29, in30, in31);
+ }
+
+ // Stage1
+ {
+ const __m128i lo_1_31 = _mm_unpacklo_epi16(in1, in31);
+ const __m128i hi_1_31 = _mm_unpackhi_epi16(in1, in31);
+ const __m128i lo_17_15 = _mm_unpacklo_epi16(in17, in15);
+ const __m128i hi_17_15 = _mm_unpackhi_epi16(in17, in15);
+
+ const __m128i lo_9_23 = _mm_unpacklo_epi16(in9, in23);
+ const __m128i hi_9_23 = _mm_unpackhi_epi16(in9, in23);
+      const __m128i lo_25_7 = _mm_unpacklo_epi16(in25, in7);
+ const __m128i hi_25_7 = _mm_unpackhi_epi16(in25, in7);
+
+ const __m128i lo_5_27 = _mm_unpacklo_epi16(in5, in27);
+ const __m128i hi_5_27 = _mm_unpackhi_epi16(in5, in27);
+ const __m128i lo_21_11 = _mm_unpacklo_epi16(in21, in11);
+ const __m128i hi_21_11 = _mm_unpackhi_epi16(in21, in11);
+
+ const __m128i lo_13_19 = _mm_unpacklo_epi16(in13, in19);
+ const __m128i hi_13_19 = _mm_unpackhi_epi16(in13, in19);
+ const __m128i lo_29_3 = _mm_unpacklo_epi16(in29, in3);
+ const __m128i hi_29_3 = _mm_unpackhi_epi16(in29, in3);
+
+ MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0,
+ stg1_1, stg1_2, stg1_3, stp1_16, stp1_31,
+ stp1_17, stp1_30)
+ MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4,
+ stg1_5, stg1_6, stg1_7, stp1_18, stp1_29,
+ stp1_19, stp1_28)
+ MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8,
+ stg1_9, stg1_10, stg1_11, stp1_20, stp1_27,
+ stp1_21, stp1_26)
+ MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12,
+ stg1_13, stg1_14, stg1_15, stp1_22, stp1_25,
+ stp1_23, stp1_24)
+ }
+
+ // Stage2
+ {
+ const __m128i lo_2_30 = _mm_unpacklo_epi16(in2, in30);
+ const __m128i hi_2_30 = _mm_unpackhi_epi16(in2, in30);
+ const __m128i lo_18_14 = _mm_unpacklo_epi16(in18, in14);
+ const __m128i hi_18_14 = _mm_unpackhi_epi16(in18, in14);
+
+ const __m128i lo_10_22 = _mm_unpacklo_epi16(in10, in22);
+ const __m128i hi_10_22 = _mm_unpackhi_epi16(in10, in22);
+ const __m128i lo_26_6 = _mm_unpacklo_epi16(in26, in6);
+ const __m128i hi_26_6 = _mm_unpackhi_epi16(in26, in6);
+
+ MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0,
+ stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9,
+ stp2_14)
+ MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4,
+ stg2_5, stg2_6, stg2_7, stp2_10, stp2_13,
+ stp2_11, stp2_12)
+
+ stp2_16 = _mm_add_epi16(stp1_16, stp1_17);
+ stp2_17 = _mm_sub_epi16(stp1_16, stp1_17);
+ stp2_18 = _mm_sub_epi16(stp1_19, stp1_18);
+ stp2_19 = _mm_add_epi16(stp1_19, stp1_18);
+
+ stp2_20 = _mm_add_epi16(stp1_20, stp1_21);
+ stp2_21 = _mm_sub_epi16(stp1_20, stp1_21);
+ stp2_22 = _mm_sub_epi16(stp1_23, stp1_22);
+ stp2_23 = _mm_add_epi16(stp1_23, stp1_22);
+
+ stp2_24 = _mm_add_epi16(stp1_24, stp1_25);
+ stp2_25 = _mm_sub_epi16(stp1_24, stp1_25);
+ stp2_26 = _mm_sub_epi16(stp1_27, stp1_26);
+ stp2_27 = _mm_add_epi16(stp1_27, stp1_26);
+
+ stp2_28 = _mm_add_epi16(stp1_28, stp1_29);
+ stp2_29 = _mm_sub_epi16(stp1_28, stp1_29);
+ stp2_30 = _mm_sub_epi16(stp1_31, stp1_30);
+ stp2_31 = _mm_add_epi16(stp1_31, stp1_30);
+ }
+
+ // Stage3
+ {
+ const __m128i lo_4_28 = _mm_unpacklo_epi16(in4, in28);
+ const __m128i hi_4_28 = _mm_unpackhi_epi16(in4, in28);
+ const __m128i lo_20_12 = _mm_unpacklo_epi16(in20, in12);
+ const __m128i hi_20_12 = _mm_unpackhi_epi16(in20, in12);
+
+ const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30);
+ const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30);
+ const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);
+ const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);
+
+ const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
+ const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
+ const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);
+ const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);
+
+ MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0,
+ stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5,
+ stp1_6)
+
+ stp1_8 = _mm_add_epi16(stp2_8, stp2_9);
+ stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);
+ stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);
+ stp1_11 = _mm_add_epi16(stp2_11, stp2_10);
+ stp1_12 = _mm_add_epi16(stp2_12, stp2_13);
+ stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);
+ stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);
+ stp1_15 = _mm_add_epi16(stp2_15, stp2_14);
+
+ MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4,
+ stg3_5, stg3_6, stg3_4, stp1_17, stp1_30,
+ stp1_18, stp1_29)
+ MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8,
+ stg3_9, stg3_10, stg3_8, stp1_21, stp1_26,
+ stp1_22, stp1_25)
+
+ stp1_16 = stp2_16;
+ stp1_31 = stp2_31;
+ stp1_19 = stp2_19;
+ stp1_20 = stp2_20;
+ stp1_23 = stp2_23;
+ stp1_24 = stp2_24;
+ stp1_27 = stp2_27;
+ stp1_28 = stp2_28;
+ }
+
+ // Stage4
+ {
+ const __m128i lo_0_16 = _mm_unpacklo_epi16(in0, in16);
+ const __m128i hi_0_16 = _mm_unpackhi_epi16(in0, in16);
+ const __m128i lo_8_24 = _mm_unpacklo_epi16(in8, in24);
+ const __m128i hi_8_24 = _mm_unpackhi_epi16(in8, in24);
+
+ const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);
+ const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);
+
+ MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0,
+ stg4_1, stg4_2, stg4_3, stp2_0, stp2_1,
+ stp2_2, stp2_3)
+
+ stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
+ stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
+ stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);
+ stp2_7 = _mm_add_epi16(stp1_7, stp1_6);
+
+ MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,
+ stg4_5, stg4_6, stg4_4, stp2_9, stp2_14,
+ stp2_10, stp2_13)
+
+ stp2_8 = stp1_8;
+ stp2_15 = stp1_15;
+ stp2_11 = stp1_11;
+ stp2_12 = stp1_12;
+
+ stp2_16 = _mm_add_epi16(stp1_16, stp1_19);
+ stp2_17 = _mm_add_epi16(stp1_17, stp1_18);
+ stp2_18 = _mm_sub_epi16(stp1_17, stp1_18);
+ stp2_19 = _mm_sub_epi16(stp1_16, stp1_19);
+ stp2_20 = _mm_sub_epi16(stp1_23, stp1_20);
+ stp2_21 = _mm_sub_epi16(stp1_22, stp1_21);
+ stp2_22 = _mm_add_epi16(stp1_22, stp1_21);
+ stp2_23 = _mm_add_epi16(stp1_23, stp1_20);
+
+ stp2_24 = _mm_add_epi16(stp1_24, stp1_27);
+ stp2_25 = _mm_add_epi16(stp1_25, stp1_26);
+ stp2_26 = _mm_sub_epi16(stp1_25, stp1_26);
+ stp2_27 = _mm_sub_epi16(stp1_24, stp1_27);
+ stp2_28 = _mm_sub_epi16(stp1_31, stp1_28);
+ stp2_29 = _mm_sub_epi16(stp1_30, stp1_29);
+ stp2_30 = _mm_add_epi16(stp1_29, stp1_30);
+ stp2_31 = _mm_add_epi16(stp1_28, stp1_31);
+ }
+
+ // Stage5
+ {
+ const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);
+ const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);
+ const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);
+ const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);
+
+ const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28);
+ const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28);
+ const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);
+ const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);
+
+ const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
+ const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
+
+ stp1_0 = _mm_add_epi16(stp2_0, stp2_3);
+ stp1_1 = _mm_add_epi16(stp2_1, stp2_2);
+ stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
+ stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);
+
+ tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);
+ tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);
+ tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);
+ tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp1 = _mm_add_epi32(tmp1, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp3 = _mm_add_epi32(tmp3, rounding);
+
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+
+ stp1_5 = _mm_packs_epi32(tmp0, tmp1);
+ stp1_6 = _mm_packs_epi32(tmp2, tmp3);
+
+ stp1_4 = stp2_4;
+ stp1_7 = stp2_7;
+
+ stp1_8 = _mm_add_epi16(stp2_8, stp2_11);
+ stp1_9 = _mm_add_epi16(stp2_9, stp2_10);
+ stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);
+ stp1_11 = _mm_sub_epi16(stp2_8, stp2_11);
+ stp1_12 = _mm_sub_epi16(stp2_15, stp2_12);
+ stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);
+ stp1_14 = _mm_add_epi16(stp2_14, stp2_13);
+ stp1_15 = _mm_add_epi16(stp2_15, stp2_12);
+
+ stp1_16 = stp2_16;
+ stp1_17 = stp2_17;
+
+ MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4,
+ stg4_5, stg4_4, stg4_5, stp1_18, stp1_29,
+ stp1_19, stp1_28)
+ MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6,
+ stg4_4, stg4_6, stg4_4, stp1_20, stp1_27,
+ stp1_21, stp1_26)
+
+ stp1_22 = stp2_22;
+ stp1_23 = stp2_23;
+ stp1_24 = stp2_24;
+ stp1_25 = stp2_25;
+ stp1_30 = stp2_30;
+ stp1_31 = stp2_31;
+ }
+
+ // Stage6
+ {
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);
+ const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);
+ const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);
+
+ stp2_0 = _mm_add_epi16(stp1_0, stp1_7);
+ stp2_1 = _mm_add_epi16(stp1_1, stp1_6);
+ stp2_2 = _mm_add_epi16(stp1_2, stp1_5);
+ stp2_3 = _mm_add_epi16(stp1_3, stp1_4);
+ stp2_4 = _mm_sub_epi16(stp1_3, stp1_4);
+ stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);
+ stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);
+ stp2_7 = _mm_sub_epi16(stp1_0, stp1_7);
+
+ stp2_8 = stp1_8;
+ stp2_9 = stp1_9;
+ stp2_14 = stp1_14;
+ stp2_15 = stp1_15;
+
+ MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12,
+ stg6_0, stg4_0, stg6_0, stg4_0, stp2_10,
+ stp2_13, stp2_11, stp2_12)
+
+ stp2_16 = _mm_add_epi16(stp1_16, stp1_23);
+ stp2_17 = _mm_add_epi16(stp1_17, stp1_22);
+ stp2_18 = _mm_add_epi16(stp1_18, stp1_21);
+ stp2_19 = _mm_add_epi16(stp1_19, stp1_20);
+ stp2_20 = _mm_sub_epi16(stp1_19, stp1_20);
+ stp2_21 = _mm_sub_epi16(stp1_18, stp1_21);
+ stp2_22 = _mm_sub_epi16(stp1_17, stp1_22);
+ stp2_23 = _mm_sub_epi16(stp1_16, stp1_23);
+
+ stp2_24 = _mm_sub_epi16(stp1_31, stp1_24);
+ stp2_25 = _mm_sub_epi16(stp1_30, stp1_25);
+ stp2_26 = _mm_sub_epi16(stp1_29, stp1_26);
+ stp2_27 = _mm_sub_epi16(stp1_28, stp1_27);
+ stp2_28 = _mm_add_epi16(stp1_27, stp1_28);
+ stp2_29 = _mm_add_epi16(stp1_26, stp1_29);
+ stp2_30 = _mm_add_epi16(stp1_25, stp1_30);
+ stp2_31 = _mm_add_epi16(stp1_24, stp1_31);
+ }
+
+ // Stage7
+ {
+ const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);
+ const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);
+ const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
+ const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
+
+ const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);
+ const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);
+ const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24);
+ const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24);
+
+ stp1_0 = _mm_add_epi16(stp2_0, stp2_15);
+ stp1_1 = _mm_add_epi16(stp2_1, stp2_14);
+ stp1_2 = _mm_add_epi16(stp2_2, stp2_13);
+ stp1_3 = _mm_add_epi16(stp2_3, stp2_12);
+ stp1_4 = _mm_add_epi16(stp2_4, stp2_11);
+ stp1_5 = _mm_add_epi16(stp2_5, stp2_10);
+ stp1_6 = _mm_add_epi16(stp2_6, stp2_9);
+ stp1_7 = _mm_add_epi16(stp2_7, stp2_8);
+ stp1_8 = _mm_sub_epi16(stp2_7, stp2_8);
+ stp1_9 = _mm_sub_epi16(stp2_6, stp2_9);
+ stp1_10 = _mm_sub_epi16(stp2_5, stp2_10);
+ stp1_11 = _mm_sub_epi16(stp2_4, stp2_11);
+ stp1_12 = _mm_sub_epi16(stp2_3, stp2_12);
+ stp1_13 = _mm_sub_epi16(stp2_2, stp2_13);
+ stp1_14 = _mm_sub_epi16(stp2_1, stp2_14);
+ stp1_15 = _mm_sub_epi16(stp2_0, stp2_15);
+
+ stp1_16 = stp2_16;
+ stp1_17 = stp2_17;
+ stp1_18 = stp2_18;
+ stp1_19 = stp2_19;
+
+ MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0,
+ stg4_0, stg6_0, stg4_0, stp1_20, stp1_27,
+ stp1_21, stp1_26)
+ MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0,
+ stg4_0, stg6_0, stg4_0, stp1_22, stp1_25,
+ stp1_23, stp1_24)
+
+ stp1_28 = stp2_28;
+ stp1_29 = stp2_29;
+ stp1_30 = stp2_30;
+ stp1_31 = stp2_31;
+ }
+
+ // final stage
+ if (i < 4) {
+      // 1-D: store the 32 intermediate results for this 8x32 block.
+ col[i32 + 0] = _mm_add_epi16(stp1_0, stp1_31);
+ col[i32 + 1] = _mm_add_epi16(stp1_1, stp1_30);
+ col[i32 + 2] = _mm_add_epi16(stp1_2, stp1_29);
+ col[i32 + 3] = _mm_add_epi16(stp1_3, stp1_28);
+ col[i32 + 4] = _mm_add_epi16(stp1_4, stp1_27);
+ col[i32 + 5] = _mm_add_epi16(stp1_5, stp1_26);
+ col[i32 + 6] = _mm_add_epi16(stp1_6, stp1_25);
+ col[i32 + 7] = _mm_add_epi16(stp1_7, stp1_24);
+ col[i32 + 8] = _mm_add_epi16(stp1_8, stp1_23);
+ col[i32 + 9] = _mm_add_epi16(stp1_9, stp1_22);
+ col[i32 + 10] = _mm_add_epi16(stp1_10, stp1_21);
+ col[i32 + 11] = _mm_add_epi16(stp1_11, stp1_20);
+ col[i32 + 12] = _mm_add_epi16(stp1_12, stp1_19);
+ col[i32 + 13] = _mm_add_epi16(stp1_13, stp1_18);
+ col[i32 + 14] = _mm_add_epi16(stp1_14, stp1_17);
+ col[i32 + 15] = _mm_add_epi16(stp1_15, stp1_16);
+ col[i32 + 16] = _mm_sub_epi16(stp1_15, stp1_16);
+ col[i32 + 17] = _mm_sub_epi16(stp1_14, stp1_17);
+ col[i32 + 18] = _mm_sub_epi16(stp1_13, stp1_18);
+ col[i32 + 19] = _mm_sub_epi16(stp1_12, stp1_19);
+ col[i32 + 20] = _mm_sub_epi16(stp1_11, stp1_20);
+ col[i32 + 21] = _mm_sub_epi16(stp1_10, stp1_21);
+ col[i32 + 22] = _mm_sub_epi16(stp1_9, stp1_22);
+ col[i32 + 23] = _mm_sub_epi16(stp1_8, stp1_23);
+ col[i32 + 24] = _mm_sub_epi16(stp1_7, stp1_24);
+ col[i32 + 25] = _mm_sub_epi16(stp1_6, stp1_25);
+ col[i32 + 26] = _mm_sub_epi16(stp1_5, stp1_26);
+ col[i32 + 27] = _mm_sub_epi16(stp1_4, stp1_27);
+ col[i32 + 28] = _mm_sub_epi16(stp1_3, stp1_28);
+ col[i32 + 29] = _mm_sub_epi16(stp1_2, stp1_29);
+ col[i32 + 30] = _mm_sub_epi16(stp1_1, stp1_30);
+ col[i32 + 31] = _mm_sub_epi16(stp1_0, stp1_31);
+ } else {
+ const __m128i zero = _mm_setzero_si128();
+
+      // 2-D: calculate the final results and store them to the destination.
+ in0 = _mm_add_epi16(stp1_0, stp1_31);
+ in1 = _mm_add_epi16(stp1_1, stp1_30);
+ in2 = _mm_add_epi16(stp1_2, stp1_29);
+ in3 = _mm_add_epi16(stp1_3, stp1_28);
+ in4 = _mm_add_epi16(stp1_4, stp1_27);
+ in5 = _mm_add_epi16(stp1_5, stp1_26);
+ in6 = _mm_add_epi16(stp1_6, stp1_25);
+ in7 = _mm_add_epi16(stp1_7, stp1_24);
+ in8 = _mm_add_epi16(stp1_8, stp1_23);
+ in9 = _mm_add_epi16(stp1_9, stp1_22);
+ in10 = _mm_add_epi16(stp1_10, stp1_21);
+ in11 = _mm_add_epi16(stp1_11, stp1_20);
+ in12 = _mm_add_epi16(stp1_12, stp1_19);
+ in13 = _mm_add_epi16(stp1_13, stp1_18);
+ in14 = _mm_add_epi16(stp1_14, stp1_17);
+ in15 = _mm_add_epi16(stp1_15, stp1_16);
+ in16 = _mm_sub_epi16(stp1_15, stp1_16);
+ in17 = _mm_sub_epi16(stp1_14, stp1_17);
+ in18 = _mm_sub_epi16(stp1_13, stp1_18);
+ in19 = _mm_sub_epi16(stp1_12, stp1_19);
+ in20 = _mm_sub_epi16(stp1_11, stp1_20);
+ in21 = _mm_sub_epi16(stp1_10, stp1_21);
+ in22 = _mm_sub_epi16(stp1_9, stp1_22);
+ in23 = _mm_sub_epi16(stp1_8, stp1_23);
+ in24 = _mm_sub_epi16(stp1_7, stp1_24);
+ in25 = _mm_sub_epi16(stp1_6, stp1_25);
+ in26 = _mm_sub_epi16(stp1_5, stp1_26);
+ in27 = _mm_sub_epi16(stp1_4, stp1_27);
+ in28 = _mm_sub_epi16(stp1_3, stp1_28);
+ in29 = _mm_sub_epi16(stp1_2, stp1_29);
+ in30 = _mm_sub_epi16(stp1_1, stp1_30);
+ in31 = _mm_sub_epi16(stp1_0, stp1_31);
+
+ // Final rounding and shift
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+ in8 = _mm_adds_epi16(in8, final_rounding);
+ in9 = _mm_adds_epi16(in9, final_rounding);
+ in10 = _mm_adds_epi16(in10, final_rounding);
+ in11 = _mm_adds_epi16(in11, final_rounding);
+ in12 = _mm_adds_epi16(in12, final_rounding);
+ in13 = _mm_adds_epi16(in13, final_rounding);
+ in14 = _mm_adds_epi16(in14, final_rounding);
+ in15 = _mm_adds_epi16(in15, final_rounding);
+ in16 = _mm_adds_epi16(in16, final_rounding);
+ in17 = _mm_adds_epi16(in17, final_rounding);
+ in18 = _mm_adds_epi16(in18, final_rounding);
+ in19 = _mm_adds_epi16(in19, final_rounding);
+ in20 = _mm_adds_epi16(in20, final_rounding);
+ in21 = _mm_adds_epi16(in21, final_rounding);
+ in22 = _mm_adds_epi16(in22, final_rounding);
+ in23 = _mm_adds_epi16(in23, final_rounding);
+ in24 = _mm_adds_epi16(in24, final_rounding);
+ in25 = _mm_adds_epi16(in25, final_rounding);
+ in26 = _mm_adds_epi16(in26, final_rounding);
+ in27 = _mm_adds_epi16(in27, final_rounding);
+ in28 = _mm_adds_epi16(in28, final_rounding);
+ in29 = _mm_adds_epi16(in29, final_rounding);
+ in30 = _mm_adds_epi16(in30, final_rounding);
+ in31 = _mm_adds_epi16(in31, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 6);
+ in1 = _mm_srai_epi16(in1, 6);
+ in2 = _mm_srai_epi16(in2, 6);
+ in3 = _mm_srai_epi16(in3, 6);
+ in4 = _mm_srai_epi16(in4, 6);
+ in5 = _mm_srai_epi16(in5, 6);
+ in6 = _mm_srai_epi16(in6, 6);
+ in7 = _mm_srai_epi16(in7, 6);
+ in8 = _mm_srai_epi16(in8, 6);
+ in9 = _mm_srai_epi16(in9, 6);
+ in10 = _mm_srai_epi16(in10, 6);
+ in11 = _mm_srai_epi16(in11, 6);
+ in12 = _mm_srai_epi16(in12, 6);
+ in13 = _mm_srai_epi16(in13, 6);
+ in14 = _mm_srai_epi16(in14, 6);
+ in15 = _mm_srai_epi16(in15, 6);
+ in16 = _mm_srai_epi16(in16, 6);
+ in17 = _mm_srai_epi16(in17, 6);
+ in18 = _mm_srai_epi16(in18, 6);
+ in19 = _mm_srai_epi16(in19, 6);
+ in20 = _mm_srai_epi16(in20, 6);
+ in21 = _mm_srai_epi16(in21, 6);
+ in22 = _mm_srai_epi16(in22, 6);
+ in23 = _mm_srai_epi16(in23, 6);
+ in24 = _mm_srai_epi16(in24, 6);
+ in25 = _mm_srai_epi16(in25, 6);
+ in26 = _mm_srai_epi16(in26, 6);
+ in27 = _mm_srai_epi16(in27, 6);
+ in28 = _mm_srai_epi16(in28, 6);
+ in29 = _mm_srai_epi16(in29, 6);
+ in30 = _mm_srai_epi16(in30, 6);
+ in31 = _mm_srai_epi16(in31, 6);
+
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+ RECON_AND_STORE(dest, in8);
+ RECON_AND_STORE(dest, in9);
+ RECON_AND_STORE(dest, in10);
+ RECON_AND_STORE(dest, in11);
+ RECON_AND_STORE(dest, in12);
+ RECON_AND_STORE(dest, in13);
+ RECON_AND_STORE(dest, in14);
+ RECON_AND_STORE(dest, in15);
+ RECON_AND_STORE(dest, in16);
+ RECON_AND_STORE(dest, in17);
+ RECON_AND_STORE(dest, in18);
+ RECON_AND_STORE(dest, in19);
+ RECON_AND_STORE(dest, in20);
+ RECON_AND_STORE(dest, in21);
+ RECON_AND_STORE(dest, in22);
+ RECON_AND_STORE(dest, in23);
+ RECON_AND_STORE(dest, in24);
+ RECON_AND_STORE(dest, in25);
+ RECON_AND_STORE(dest, in26);
+ RECON_AND_STORE(dest, in27);
+ RECON_AND_STORE(dest, in28);
+ RECON_AND_STORE(dest, in29);
+ RECON_AND_STORE(dest, in30);
+ RECON_AND_STORE(dest, in31);
+
+ dest += 8 - (stride * 32);
+ }
+ }
+}
diff --git a/libvpx/vp9/common/x86/vp9_intrapred_sse2.asm b/libvpx/vp9/common/x86/vp9_intrapred_sse2.asm
new file mode 100644
index 0000000..980b8b9
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_intrapred_sse2.asm
@@ -0,0 +1,341 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION_RODATA
+pw_4: times 8 dw 4
+pw_8: times 8 dw 8
+pw_16: times 8 dw 16
+pw_32: times 8 dw 32
+
+SECTION .text
+
+INIT_MMX sse
+cglobal dc_predictor_4x4, 4, 4, 2, dst, stride, above, left
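+  ; dc = (sum(above[0..3]) + sum(left[0..3]) + 4) >> 3, broadcast to every
+  ; pixel of the 4x4 block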
+ pxor m1, m1
+ movd m0, [aboveq]
+ punpckldq m0, [leftq]
+ psadbw m0, m1
+ paddw m0, [pw_4]
+ psraw m0, 3
+ pshufw m0, m0, 0x0
+ packuswb m0, m0
+ movd [dstq ], m0
+ movd [dstq+strideq], m0
+ lea dstq, [dstq+strideq*2]
+ movd [dstq ], m0
+ movd [dstq+strideq], m0
+ RET
+
+INIT_MMX sse
+cglobal dc_predictor_8x8, 4, 4, 3, dst, stride, above, left
+ pxor m1, m1
+ movq m0, [aboveq]
+ movq m2, [leftq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ psadbw m0, m1
+ psadbw m2, m1
+ paddw m0, m2
+ paddw m0, [pw_8]
+ psraw m0, 4
+ pshufw m0, m0, 0x0
+ packuswb m0, m0
+ movq [dstq ], m0
+ movq [dstq+strideq ], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ movq [dstq ], m0
+ movq [dstq+strideq ], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ RET
+
+INIT_XMM sse2
+cglobal dc_predictor_16x16, 4, 4, 3, dst, stride, above, left
+ pxor m1, m1
+ mova m0, [aboveq]
+ mova m2, [leftq]
+ DEFINE_ARGS dst, stride, stride3, lines4
+ lea stride3q, [strideq*3]
+ mov lines4d, 4
+ psadbw m0, m1
+ psadbw m2, m1
+ paddw m0, m2
+ movhlps m2, m0
+ paddw m0, m2
+ paddw m0, [pw_16]
+ psraw m0, 5
+ pshuflw m0, m0, 0x0
+ punpcklqdq m0, m0
+ packuswb m0, m0
+.loop:
+ mova [dstq ], m0
+ mova [dstq+strideq ], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ dec lines4d
+ jnz .loop
+ REP_RET
+
+INIT_XMM sse2
+cglobal dc_predictor_32x32, 4, 4, 5, dst, stride, above, left
+ pxor m1, m1
+ mova m0, [aboveq]
+ mova m2, [aboveq+16]
+ mova m3, [leftq]
+ mova m4, [leftq+16]
+ DEFINE_ARGS dst, stride, stride3, lines4
+ lea stride3q, [strideq*3]
+ mov lines4d, 8
+ psadbw m0, m1
+ psadbw m2, m1
+ psadbw m3, m1
+ psadbw m4, m1
+ paddw m0, m2
+ paddw m0, m3
+ paddw m0, m4
+ movhlps m2, m0
+ paddw m0, m2
+ paddw m0, [pw_32]
+ psraw m0, 6
+ pshuflw m0, m0, 0x0
+ punpcklqdq m0, m0
+ packuswb m0, m0
+.loop:
+ mova [dstq ], m0
+ mova [dstq +16], m0
+ mova [dstq+strideq ], m0
+ mova [dstq+strideq +16], m0
+ mova [dstq+strideq*2 ], m0
+ mova [dstq+strideq*2+16], m0
+ mova [dstq+stride3q ], m0
+ mova [dstq+stride3q +16], m0
+ lea dstq, [dstq+strideq*4]
+ dec lines4d
+ jnz .loop
+ REP_RET
+
+INIT_MMX sse
+cglobal v_predictor_4x4, 3, 3, 1, dst, stride, above
+ movd m0, [aboveq]
+ movd [dstq ], m0
+ movd [dstq+strideq], m0
+ lea dstq, [dstq+strideq*2]
+ movd [dstq ], m0
+ movd [dstq+strideq], m0
+ RET
+
+INIT_MMX sse
+cglobal v_predictor_8x8, 3, 3, 1, dst, stride, above
+ movq m0, [aboveq]
+ DEFINE_ARGS dst, stride, stride3
+ lea stride3q, [strideq*3]
+ movq [dstq ], m0
+ movq [dstq+strideq ], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ movq [dstq ], m0
+ movq [dstq+strideq ], m0
+ movq [dstq+strideq*2], m0
+ movq [dstq+stride3q ], m0
+ RET
+
+INIT_XMM sse2
+cglobal v_predictor_16x16, 3, 4, 1, dst, stride, above
+ mova m0, [aboveq]
+ DEFINE_ARGS dst, stride, stride3, nlines4
+ lea stride3q, [strideq*3]
+ mov nlines4d, 4
+.loop:
+ mova [dstq ], m0
+ mova [dstq+strideq ], m0
+ mova [dstq+strideq*2], m0
+ mova [dstq+stride3q ], m0
+ lea dstq, [dstq+strideq*4]
+ dec nlines4d
+ jnz .loop
+ REP_RET
+
+INIT_XMM sse2
+cglobal v_predictor_32x32, 3, 4, 2, dst, stride, above
+ mova m0, [aboveq]
+ mova m1, [aboveq+16]
+ DEFINE_ARGS dst, stride, stride3, nlines4
+ lea stride3q, [strideq*3]
+ mov nlines4d, 8
+.loop:
+ mova [dstq ], m0
+ mova [dstq +16], m1
+ mova [dstq+strideq ], m0
+ mova [dstq+strideq +16], m1
+ mova [dstq+strideq*2 ], m0
+ mova [dstq+strideq*2+16], m1
+ mova [dstq+stride3q ], m0
+ mova [dstq+stride3q +16], m1
+ lea dstq, [dstq+strideq*4]
+ dec nlines4d
+ jnz .loop
+ REP_RET
+
+INIT_MMX sse
+cglobal tm_predictor_4x4, 4, 4, 4, dst, stride, above, left
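+  ; TM ("TrueMotion") prediction: pred(x, y) = clip8(left[y] + above[x] -
+  ; above[-1]); two rows are produced per loop iteration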
+ pxor m1, m1
+ movd m2, [aboveq-1]
+ movd m0, [aboveq]
+ punpcklbw m2, m1
+ punpcklbw m0, m1
+ pshufw m2, m2, 0x0
+ DEFINE_ARGS dst, stride, line, left
+ mov lineq, -2
+ add leftq, 4
+ psubw m0, m2
+.loop:
+ movd m2, [leftq+lineq*2]
+ movd m3, [leftq+lineq*2+1]
+ punpcklbw m2, m1
+ punpcklbw m3, m1
+ pshufw m2, m2, 0x0
+ pshufw m3, m3, 0x0
+ paddw m2, m0
+ paddw m3, m0
+ packuswb m2, m2
+ packuswb m3, m3
+ movd [dstq ], m2
+ movd [dstq+strideq], m3
+ lea dstq, [dstq+strideq*2]
+ inc lineq
+ jnz .loop
+ REP_RET
+
+INIT_XMM sse2
+cglobal tm_predictor_8x8, 4, 4, 4, dst, stride, above, left
+ pxor m1, m1
+ movd m2, [aboveq-1]
+ movq m0, [aboveq]
+ punpcklbw m2, m1
+ punpcklbw m0, m1
+ pshuflw m2, m2, 0x0
+ DEFINE_ARGS dst, stride, line, left
+ mov lineq, -4
+ punpcklqdq m2, m2
+ add leftq, 8
+ psubw m0, m2
+.loop:
+ movd m2, [leftq+lineq*2]
+ movd m3, [leftq+lineq*2+1]
+ punpcklbw m2, m1
+ punpcklbw m3, m1
+ pshuflw m2, m2, 0x0
+ pshuflw m3, m3, 0x0
+ punpcklqdq m2, m2
+ punpcklqdq m3, m3
+ paddw m2, m0
+ paddw m3, m0
+ packuswb m2, m3
+ movq [dstq ], m2
+ movhps [dstq+strideq], m2
+ lea dstq, [dstq+strideq*2]
+ inc lineq
+ jnz .loop
+ REP_RET
+
+INIT_XMM sse2
+cglobal tm_predictor_16x16, 4, 4, 7, dst, stride, above, left
+ pxor m1, m1
+ movd m2, [aboveq-1]
+ mova m0, [aboveq]
+ punpcklbw m2, m1
+ punpckhbw m4, m0, m1
+ punpcklbw m0, m1
+ pshuflw m2, m2, 0x0
+ DEFINE_ARGS dst, stride, line, left
+ mov lineq, -8
+ punpcklqdq m2, m2
+ add leftq, 16
+ psubw m0, m2
+ psubw m4, m2
+.loop:
+ movd m2, [leftq+lineq*2]
+ movd m3, [leftq+lineq*2+1]
+ punpcklbw m2, m1
+ punpcklbw m3, m1
+ pshuflw m2, m2, 0x0
+ pshuflw m3, m3, 0x0
+ punpcklqdq m2, m2
+ punpcklqdq m3, m3
+ paddw m5, m2, m0
+ paddw m6, m3, m0
+ paddw m2, m4
+ paddw m3, m4
+ packuswb m5, m2
+ packuswb m6, m3
+ mova [dstq ], m5
+ mova [dstq+strideq], m6
+ lea dstq, [dstq+strideq*2]
+ inc lineq
+ jnz .loop
+ REP_RET
+
+%if ARCH_X86_64
+INIT_XMM sse2
+cglobal tm_predictor_32x32, 4, 4, 10, dst, stride, above, left
+ pxor m1, m1
+ movd m2, [aboveq-1]
+ mova m0, [aboveq]
+ mova m4, [aboveq+16]
+ punpcklbw m2, m1
+ punpckhbw m3, m0, m1
+ punpckhbw m5, m4, m1
+ punpcklbw m0, m1
+ punpcklbw m4, m1
+ pshuflw m2, m2, 0x0
+ DEFINE_ARGS dst, stride, line, left
+ mov lineq, -16
+ punpcklqdq m2, m2
+ add leftq, 32
+ psubw m0, m2
+ psubw m3, m2
+ psubw m4, m2
+ psubw m5, m2
+.loop:
+ movd m2, [leftq+lineq*2]
+ movd m6, [leftq+lineq*2+1]
+ punpcklbw m2, m1
+ punpcklbw m6, m1
+ pshuflw m2, m2, 0x0
+ pshuflw m6, m6, 0x0
+ punpcklqdq m2, m2
+ punpcklqdq m6, m6
+ paddw m7, m2, m0
+ paddw m8, m2, m3
+ paddw m9, m2, m4
+ paddw m2, m5
+ packuswb m7, m8
+ packuswb m9, m2
+ paddw m2, m6, m0
+ paddw m8, m6, m3
+ mova [dstq ], m7
+ paddw m7, m6, m4
+ paddw m6, m5
+ mova [dstq +16], m9
+ packuswb m2, m8
+ packuswb m7, m6
+ mova [dstq+strideq ], m2
+ mova [dstq+strideq+16], m7
+ lea dstq, [dstq+strideq*2]
+ inc lineq
+ jnz .loop
+ REP_RET
+%endif
diff --git a/libvpx/vp9/common/x86/vp9_intrapred_ssse3.asm b/libvpx/vp9/common/x86/vp9_intrapred_ssse3.asm
new file mode 100644
index 0000000..8ba26f3
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_intrapred_ssse3.asm
@@ -0,0 +1,291 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION_RODATA
+
+pb_1: times 16 db 1
+pw_2: times 8 dw 2
+pb_7m1: times 8 db 7, -1
+pb_15: times 16 db 15
+
+sh_b01234577: db 0, 1, 2, 3, 4, 5, 7, 7
+sh_b12345677: db 1, 2, 3, 4, 5, 6, 7, 7
+sh_b23456777: db 2, 3, 4, 5, 6, 7, 7, 7, 0, 0, 0, 0, 0, 0, 0, 0
+sh_b0123456777777777: db 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7
+sh_b1234567777777777: db 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+sh_b2345677777777777: db 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+sh_b2w01234577: db 0, -1, 1, -1, 2, -1, 3, -1, 4, -1, 5, -1, 7, -1, 7, -1
+sh_b2w12345677: db 1, -1, 2, -1, 3, -1, 4, -1, 5, -1, 6, -1, 7, -1, 7, -1
+sh_b2w23456777: db 2, -1, 3, -1, 4, -1, 5, -1, 6, -1, 7, -1, 7, -1, 7, -1
+sh_b2w01234567: db 0, -1, 1, -1, 2, -1, 3, -1, 4, -1, 5, -1, 6, -1, 7, -1
+sh_b2w12345678: db 1, -1, 2, -1, 3, -1, 4, -1, 5, -1, 6, -1, 7, -1, 8, -1
+sh_b2w23456789: db 2, -1, 3, -1, 4, -1, 5, -1, 6, -1, 7, -1, 8, -1, 9, -1
+sh_b2w89abcdef: db 8, -1, 9, -1, 10, -1, 11, -1, 12, -1, 13, -1, 14, -1, 15, -1
+sh_b2w9abcdeff: db 9, -1, 10, -1, 11, -1, 12, -1, 13, -1, 14, -1, 15, -1, 15, -1
+sh_b2wabcdefff: db 10, -1, 11, -1, 12, -1, 13, -1, 14, -1, 15, -1, 15, -1, 15, -1
+sh_b123456789abcdeff: db 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15
+sh_b23456789abcdefff: db 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15
+
+SECTION .text
+
+INIT_MMX ssse3
+cglobal h_predictor_4x4, 2, 4, 3, dst, stride, line, left
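+  ; H prediction: broadcast left[y] across row y, two rows per iteration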
+ movifnidn leftq, leftmp
+ add leftq, 4
+ mov lineq, -2
+ pxor m0, m0
+.loop:
+ movd m1, [leftq+lineq*2 ]
+ movd m2, [leftq+lineq*2+1]
+ pshufb m1, m0
+ pshufb m2, m0
+ movd [dstq ], m1
+ movd [dstq+strideq], m2
+ lea dstq, [dstq+strideq*2]
+ inc lineq
+ jnz .loop
+ REP_RET
+
+INIT_MMX ssse3
+cglobal h_predictor_8x8, 2, 4, 3, dst, stride, line, left
+ movifnidn leftq, leftmp
+ add leftq, 8
+ mov lineq, -4
+ pxor m0, m0
+.loop:
+ movd m1, [leftq+lineq*2 ]
+ movd m2, [leftq+lineq*2+1]
+ pshufb m1, m0
+ pshufb m2, m0
+ movq [dstq ], m1
+ movq [dstq+strideq], m2
+ lea dstq, [dstq+strideq*2]
+ inc lineq
+ jnz .loop
+ REP_RET
+
+INIT_XMM ssse3
+cglobal h_predictor_16x16, 2, 4, 3, dst, stride, line, left
+ movifnidn leftq, leftmp
+ add leftq, 16
+ mov lineq, -8
+ pxor m0, m0
+.loop:
+ movd m1, [leftq+lineq*2 ]
+ movd m2, [leftq+lineq*2+1]
+ pshufb m1, m0
+ pshufb m2, m0
+ mova [dstq ], m1
+ mova [dstq+strideq], m2
+ lea dstq, [dstq+strideq*2]
+ inc lineq
+ jnz .loop
+ REP_RET
+
+INIT_XMM ssse3
+cglobal h_predictor_32x32, 2, 4, 3, dst, stride, line, left
+ movifnidn leftq, leftmp
+ add leftq, 32
+ mov lineq, -16
+ pxor m0, m0
+.loop:
+ movd m1, [leftq+lineq*2 ]
+ movd m2, [leftq+lineq*2+1]
+ pshufb m1, m0
+ pshufb m2, m0
+ mova [dstq ], m1
+ mova [dstq +16], m1
+ mova [dstq+strideq ], m2
+ mova [dstq+strideq+16], m2
+ lea dstq, [dstq+strideq*2]
+ inc lineq
+ jnz .loop
+ REP_RET
+
+INIT_MMX ssse3
+cglobal d45_predictor_4x4, 3, 3, 4, dst, stride, above
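+  ; D45 prediction: each pixel is (a + 2 * b + c + 2) >> 2 over consecutive
+  ; above pixels; the pxor/pand/psubb sequence cancels the round-up bias of
+  ; the first pavgb so the combined result rounds like the full expression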
+ movq m0, [aboveq]
+ pshufb m2, m0, [sh_b23456777]
+ pshufb m1, m0, [sh_b01234577]
+ pshufb m0, [sh_b12345677]
+ pavgb m3, m2, m1
+ pxor m2, m1
+ pand m2, [pb_1]
+ psubb m3, m2
+ pavgb m0, m3
+
+ ; store 4 lines
+ movd [dstq ], m0
+ psrlq m0, 8
+ movd [dstq+strideq], m0
+ lea dstq, [dstq+strideq*2]
+ psrlq m0, 8
+ movd [dstq ], m0
+ psrlq m0, 8
+ movd [dstq+strideq], m0
+ RET
+
+INIT_MMX ssse3
+cglobal d45_predictor_8x8, 3, 3, 4, dst, stride, above
+ movq m0, [aboveq]
+ mova m1, [sh_b12345677]
+ DEFINE_ARGS dst, stride, stride3, line
+ lea stride3q, [strideq*3]
+ pshufb m2, m0, [sh_b23456777]
+ pavgb m3, m2, m0
+ pxor m2, m0
+ pshufb m0, m1
+ pand m2, [pb_1]
+ psubb m3, m2
+ pavgb m0, m3
+
+ ; store 4 lines
+ movq [dstq ], m0
+ pshufb m0, m1
+ movq [dstq+strideq ], m0
+ pshufb m0, m1
+ movq [dstq+strideq*2], m0
+ pshufb m0, m1
+ movq [dstq+stride3q ], m0
+ pshufb m0, m1
+ lea dstq, [dstq+strideq*4]
+
+ ; store next 4 lines
+ movq [dstq ], m0
+ pshufb m0, m1
+ movq [dstq+strideq ], m0
+ pshufb m0, m1
+ movq [dstq+strideq*2], m0
+ pshufb m0, m1
+ movq [dstq+stride3q ], m0
+ RET
+
+INIT_XMM ssse3
+cglobal d45_predictor_16x16, 3, 5, 4, dst, stride, above, dst8, line
+ mova m0, [aboveq]
+ DEFINE_ARGS dst, stride, stride3, dst8, line
+ lea stride3q, [strideq*3]
+ lea dst8q, [dstq+strideq*8]
+ mova m1, [sh_b123456789abcdeff]
+ pshufb m2, m0, [sh_b23456789abcdefff]
+ pavgb m3, m2, m0
+ pxor m2, m0
+ pshufb m0, m1
+ pand m2, [pb_1]
+ psubb m3, m2
+ pavgb m0, m3
+
+  ; rows 0-7 in full, plus the left half of rows 8-15 (4 rows of each per
+  ; iteration)
+ mov lined, 2
+.loop:
+ mova [dstq ], m0
+ movhps [dst8q ], m0
+ pshufb m0, m1
+ mova [dstq +strideq ], m0
+ movhps [dst8q+strideq ], m0
+ pshufb m0, m1
+ mova [dstq +strideq*2 ], m0
+ movhps [dst8q+strideq*2 ], m0
+ pshufb m0, m1
+ mova [dstq +stride3q ], m0
+ movhps [dst8q+stride3q ], m0
+ pshufb m0, m1
+ lea dstq, [dstq +strideq*4]
+ lea dst8q, [dst8q+strideq*4]
+ dec lined
+ jnz .loop
+
+ ; bottom-right 8x8 block
+ movhps [dstq +8], m0
+ movhps [dstq+strideq +8], m0
+ movhps [dstq+strideq*2+8], m0
+ movhps [dstq+stride3q +8], m0
+ lea dstq, [dstq+strideq*4]
+ movhps [dstq +8], m0
+ movhps [dstq+strideq +8], m0
+ movhps [dstq+strideq*2+8], m0
+ movhps [dstq+stride3q +8], m0
+ RET
+
+INIT_XMM ssse3
+cglobal d45_predictor_32x32, 3, 5, 7, dst, stride, above, dst16, line
+ mova m0, [aboveq]
+ mova m4, [aboveq+16]
+ DEFINE_ARGS dst, stride, stride3, dst16, line
+ lea stride3q, [strideq*3]
+ lea dst16q, [dstq +strideq*8]
+ lea dst16q, [dst16q+strideq*8]
+ mova m1, [sh_b123456789abcdeff]
+ pshufb m2, m4, [sh_b23456789abcdefff]
+ pavgb m3, m2, m4
+ pxor m2, m4
+ palignr m5, m4, m0, 1
+ palignr m6, m4, m0, 2
+ pshufb m4, m1
+ pand m2, [pb_1]
+ psubb m3, m2
+ pavgb m4, m3
+ pavgb m3, m0, m6
+ pxor m0, m6
+ pand m0, [pb_1]
+ psubb m3, m0
+ pavgb m5, m3
+
+  ; write rows 0-15 in full and the left 16 columns of rows 16-31
+ mov lined, 4
+.loop:
+ mova [dstq ], m5
+ mova [dstq +16], m4
+ mova [dst16q ], m4
+ palignr m3, m4, m5, 1
+ pshufb m4, m1
+ mova [dstq +strideq ], m3
+ mova [dstq +strideq +16], m4
+ mova [dst16q+strideq ], m4
+ palignr m5, m4, m3, 1
+ pshufb m4, m1
+ mova [dstq +strideq*2 ], m5
+ mova [dstq +strideq*2+16], m4
+ mova [dst16q+strideq*2 ], m4
+ palignr m3, m4, m5, 1
+ pshufb m4, m1
+ mova [dstq +stride3q ], m3
+ mova [dstq +stride3q +16], m4
+ mova [dst16q+stride3q ], m4
+ palignr m5, m4, m3, 1
+ pshufb m4, m1
+ lea dstq, [dstq +strideq*4]
+ lea dst16q, [dst16q+strideq*4]
+ dec lined
+ jnz .loop
+
+  ; fill the right 16 columns of rows 16-31
+ mova [dstq +16], m4
+ mova [dstq +strideq +16], m4
+ mova [dstq +strideq*2+16], m4
+ mova [dstq +stride3q +16], m4
+ lea dstq, [dstq +strideq*4]
+ mova [dstq +16], m4
+ mova [dstq +strideq +16], m4
+ mova [dstq +strideq*2+16], m4
+ mova [dstq +stride3q +16], m4
+ lea dstq, [dstq +strideq*4]
+ mova [dstq +16], m4
+ mova [dstq +strideq +16], m4
+ mova [dstq +strideq*2+16], m4
+ mova [dstq +stride3q +16], m4
+ lea dstq, [dstq +strideq*4]
+ mova [dstq +16], m4
+ mova [dstq +strideq +16], m4
+ mova [dstq +strideq*2+16], m4
+ mova [dstq +stride3q +16], m4
+ RET
diff --git a/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c b/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
new file mode 100644
index 0000000..fa4dd9b
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
@@ -0,0 +1,1282 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h> /* SSE2 */
+#include "vp9/common/vp9_loopfilter.h"
+#include "vpx_ports/emmintrin_compat.h"
+
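+// VP9 wide ("flat2") loop filter across a horizontal edge, 8 columns at a
+// time.  Each qNpN register packs row pN in its low 64 bits and row qN in
+// its high 64 bits, so one SSE2 op handles both sides of the edge.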
+static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s,
+ int p,
+ const unsigned char *_blimit,
+ const unsigned char *_limit,
+ const unsigned char *_thresh) {
+ __m128i mask, hev, flat, flat2;
+ const __m128i zero = _mm_set1_epi16(0);
+ const __m128i one = _mm_set1_epi8(1);
+ __m128i q7p7, q6p6, q5p5, q4p4, q3p3, q2p2, q1p1, q0p0, p0q0, p1q1;
+ __m128i abs_p1p0;
+ const unsigned int extended_thresh = _thresh[0] * 0x01010101u;
+ const unsigned int extended_limit = _limit[0] * 0x01010101u;
+ const unsigned int extended_blimit = _blimit[0] * 0x01010101u;
+ const __m128i thresh =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_thresh), 0);
+ const __m128i limit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_limit), 0);
+ const __m128i blimit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_blimit), 0);
+
+ q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * p));
+ q4p4 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q4p4),
+ (__m64 *)(s + 4 * p)));
+ q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
+ q3p3 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q3p3),
+ (__m64 *)(s + 3 * p)));
+ q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
+ q2p2 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q2p2),
+ (__m64 *)(s + 2 * p)));
+ q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
+ q1p1 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q1p1),
+ (__m64 *)(s + 1 * p)));
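+  // Shuffle control 78 == _MM_SHUFFLE(1, 0, 3, 2): swap the two 64-bit
+  // halves so the p rows can be differenced against the q rows.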
+ p1q1 = _mm_shuffle_epi32(q1p1, 78);
+ q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
+ q0p0 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q0p0),
+ (__m64 *)(s - 0 * p)));
+ p0q0 = _mm_shuffle_epi32(q0p0, 78);
+
+ {
+ __m128i abs_p1q1, abs_p0q0, abs_q1q0, fe, ff, work;
+ abs_p1p0 = _mm_or_si128(_mm_subs_epu8(q1p1, q0p0),
+ _mm_subs_epu8(q0p0, q1p1));
+ abs_q1q0 = _mm_srli_si128(abs_p1p0, 8);
+ fe = _mm_set1_epi8(0xfe);
+ ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
+ abs_p0q0 = _mm_or_si128(_mm_subs_epu8(q0p0, p0q0),
+ _mm_subs_epu8(p0q0, q0p0));
+ abs_p1q1 = _mm_or_si128(_mm_subs_epu8(q1p1, p1q1),
+ _mm_subs_epu8(p1q1, q1p1));
+ flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
+ hev = _mm_subs_epu8(flat, thresh);
+ hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
+
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
+ abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
+ // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+ mask = _mm_max_epu8(abs_p1p0, mask);
+ // mask |= (abs(p1 - p0) > limit) * -1;
+ // mask |= (abs(q1 - q0) > limit) * -1;
+
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2p2, q1p1),
+ _mm_subs_epu8(q1p1, q2p2)),
+ _mm_or_si128(_mm_subs_epu8(q3p3, q2p2),
+ _mm_subs_epu8(q2p2, q3p3)));
+ mask = _mm_max_epu8(work, mask);
+ mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8));
+ mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_cmpeq_epi8(mask, zero);
+ }
+
+ // lp filter
+ {
+ const __m128i t4 = _mm_set1_epi8(4);
+ const __m128i t3 = _mm_set1_epi8(3);
+ const __m128i t80 = _mm_set1_epi8(0x80);
+ const __m128i t1 = _mm_set1_epi16(0x1);
+ __m128i qs1ps1 = _mm_xor_si128(q1p1, t80);
+ __m128i qs0ps0 = _mm_xor_si128(q0p0, t80);
+ __m128i qs0 = _mm_xor_si128(p0q0, t80);
+ __m128i qs1 = _mm_xor_si128(p1q1, t80);
+ __m128i filt;
+ __m128i work_a;
+ __m128i filter1, filter2;
+ __m128i flat2_q6p6, flat2_q5p5, flat2_q4p4, flat2_q3p3, flat2_q2p2;
+ __m128i flat2_q1p1, flat2_q0p0, flat_q2p2, flat_q1p1, flat_q0p0;
+
+ filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev);
+ work_a = _mm_subs_epi8(qs0, qs0ps0);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ /* (vp9_filter + 3 * (qs0 - ps0)) & mask */
+ filt = _mm_and_si128(filt, mask);
+
+ filter1 = _mm_adds_epi8(filt, t4);
+ filter2 = _mm_adds_epi8(filt, t3);
+
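+    // filter1/filter2 are built in the high byte of each 16-bit lane by the
+    // unpack below, so shifting right by 0xB (8 + 3) is the signed >> 3 of
+    // the original 8-bit value.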
+ filter1 = _mm_unpacklo_epi8(zero, filter1);
+ filter1 = _mm_srai_epi16(filter1, 0xB);
+ filter2 = _mm_unpacklo_epi8(zero, filter2);
+ filter2 = _mm_srai_epi16(filter2, 0xB);
+
+ /* Filter1 >> 3 */
+ filt = _mm_packs_epi16(filter2, _mm_subs_epi16(zero, filter1));
+ qs0ps0 = _mm_xor_si128(_mm_adds_epi8(qs0ps0, filt), t80);
+
+ /* filt >> 1 */
+ filt = _mm_adds_epi16(filter1, t1);
+ filt = _mm_srai_epi16(filt, 1);
+ filt = _mm_andnot_si128(_mm_srai_epi16(_mm_unpacklo_epi8(zero, hev), 0x8),
+ filt);
+ filt = _mm_packs_epi16(filt, _mm_subs_epi16(zero, filt));
+ qs1ps1 = _mm_xor_si128(_mm_adds_epi8(qs1ps1, filt), t80);
+ // loopfilter done
+
+ {
+ __m128i work;
+ flat = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2p2, q0p0),
+ _mm_subs_epu8(q0p0, q2p2)),
+ _mm_or_si128(_mm_subs_epu8(q3p3, q0p0),
+ _mm_subs_epu8(q0p0, q3p3)));
+ flat = _mm_max_epu8(abs_p1p0, flat);
+ flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8));
+ flat = _mm_subs_epu8(flat, one);
+ flat = _mm_cmpeq_epi8(flat, zero);
+ flat = _mm_and_si128(flat, mask);
+
+ q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * p));
+ q5p5 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q5p5),
+ (__m64 *)(s + 5 * p)));
+
+ q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * p));
+ q6p6 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q6p6),
+ (__m64 *)(s + 6 * p)));
+
+ flat2 = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q4p4, q0p0),
+ _mm_subs_epu8(q0p0, q4p4)),
+ _mm_or_si128(_mm_subs_epu8(q5p5, q0p0),
+ _mm_subs_epu8(q0p0, q5p5)));
+
+ q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * p));
+ q7p7 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q7p7),
+ (__m64 *)(s + 7 * p)));
+
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q6p6, q0p0),
+ _mm_subs_epu8(q0p0, q6p6)),
+ _mm_or_si128(_mm_subs_epu8(q7p7, q0p0),
+ _mm_subs_epu8(q0p0, q7p7)));
+
+ flat2 = _mm_max_epu8(work, flat2);
+ flat2 = _mm_max_epu8(flat2, _mm_srli_si128(flat2, 8));
+ flat2 = _mm_subs_epu8(flat2, one);
+ flat2 = _mm_cmpeq_epi8(flat2, zero);
+ flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask
+ }
+
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ // flat and wide flat calculations
+ {
+ const __m128i eight = _mm_set1_epi16(8);
+ const __m128i four = _mm_set1_epi16(4);
+ __m128i p7_16, p6_16, p5_16, p4_16, p3_16, p2_16, p1_16, p0_16;
+ __m128i q7_16, q6_16, q5_16, q4_16, q3_16, q2_16, q1_16, q0_16;
+ __m128i pixelFilter_p, pixelFilter_q;
+ __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0;
+ __m128i sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q;
+
+      p7_16 = _mm_unpacklo_epi8(q7p7, zero);
+ p6_16 = _mm_unpacklo_epi8(q6p6, zero);
+ p5_16 = _mm_unpacklo_epi8(q5p5, zero);
+ p4_16 = _mm_unpacklo_epi8(q4p4, zero);
+ p3_16 = _mm_unpacklo_epi8(q3p3, zero);
+ p2_16 = _mm_unpacklo_epi8(q2p2, zero);
+ p1_16 = _mm_unpacklo_epi8(q1p1, zero);
+ p0_16 = _mm_unpacklo_epi8(q0p0, zero);
+ q0_16 = _mm_unpackhi_epi8(q0p0, zero);
+ q1_16 = _mm_unpackhi_epi8(q1p1, zero);
+ q2_16 = _mm_unpackhi_epi8(q2p2, zero);
+ q3_16 = _mm_unpackhi_epi8(q3p3, zero);
+ q4_16 = _mm_unpackhi_epi8(q4p4, zero);
+ q5_16 = _mm_unpackhi_epi8(q5p5, zero);
+ q6_16 = _mm_unpackhi_epi8(q6p6, zero);
+ q7_16 = _mm_unpackhi_epi8(q7p7, zero);
+
+ pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6_16, p5_16),
+ _mm_add_epi16(p4_16, p3_16));
+ pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6_16, q5_16),
+ _mm_add_epi16(q4_16, q3_16));
+
+ pixetFilter_p2p1p0 = _mm_add_epi16(p0_16, _mm_add_epi16(p2_16, p1_16));
+ pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
+
+ pixetFilter_q2q1q0 = _mm_add_epi16(q0_16, _mm_add_epi16(q2_16, q1_16));
+ pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
+ pixelFilter_p = _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p,
+ pixelFilter_q));
+ pixetFilter_p2p1p0 = _mm_add_epi16(four,
+ _mm_add_epi16(pixetFilter_p2p1p0,
+ pixetFilter_q2q1q0));
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
+ _mm_add_epi16(p7_16, p0_16)), 4);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
+ _mm_add_epi16(q7_16, q0_16)), 4);
+ flat2_q0p0 = _mm_packus_epi16(res_p, res_q);
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
+ _mm_add_epi16(p3_16, p0_16)), 3);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
+ _mm_add_epi16(q3_16, q0_16)), 3);
+
+ flat_q0p0 = _mm_packus_epi16(res_p, res_q);
+
+ sum_p7 = _mm_add_epi16(p7_16, p7_16);
+ sum_q7 = _mm_add_epi16(q7_16, q7_16);
+ sum_p3 = _mm_add_epi16(p3_16, p3_16);
+ sum_q3 = _mm_add_epi16(q3_16, q3_16);
+
+ pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6_16);
+ pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6_16);
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
+ _mm_add_epi16(sum_p7, p1_16)), 4);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
+ _mm_add_epi16(sum_q7, q1_16)), 4);
+ flat2_q1p1 = _mm_packus_epi16(res_p, res_q);
+
+ pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2_16);
+ pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2_16);
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
+ _mm_add_epi16(sum_p3, p1_16)), 3);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
+ _mm_add_epi16(sum_q3, q1_16)), 3);
+ flat_q1p1 = _mm_packus_epi16(res_p, res_q);
+
+ sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+ sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+ sum_p3 = _mm_add_epi16(sum_p3, p3_16);
+ sum_q3 = _mm_add_epi16(sum_q3, q3_16);
+
+ pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5_16);
+ pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5_16);
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
+ _mm_add_epi16(sum_p7, p2_16)), 4);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
+ _mm_add_epi16(sum_q7, q2_16)), 4);
+ flat2_q2p2 = _mm_packus_epi16(res_p, res_q);
+
+ pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1_16);
+ pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1_16);
+
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
+ _mm_add_epi16(sum_p3, p2_16)), 3);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
+ _mm_add_epi16(sum_q3, q2_16)), 3);
+ flat_q2p2 = _mm_packus_epi16(res_p, res_q);
+
+ sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+ sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+ pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4_16);
+ pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4_16);
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
+ _mm_add_epi16(sum_p7, p3_16)), 4);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
+ _mm_add_epi16(sum_q7, q3_16)), 4);
+ flat2_q3p3 = _mm_packus_epi16(res_p, res_q);
+
+ sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+ sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+ pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3_16);
+ pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3_16);
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
+ _mm_add_epi16(sum_p7, p4_16)), 4);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
+ _mm_add_epi16(sum_q7, q4_16)), 4);
+ flat2_q4p4 = _mm_packus_epi16(res_p, res_q);
+
+ sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+ sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+ pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2_16);
+ pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2_16);
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
+ _mm_add_epi16(sum_p7, p5_16)), 4);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
+ _mm_add_epi16(sum_q7, q5_16)), 4);
+ flat2_q5p5 = _mm_packus_epi16(res_p, res_q);
+
+ sum_p7 = _mm_add_epi16(sum_p7, p7_16);
+ sum_q7 = _mm_add_epi16(sum_q7, q7_16);
+ pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1_16);
+ pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1_16);
+ res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
+ _mm_add_epi16(sum_p7, p6_16)), 4);
+ res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
+ _mm_add_epi16(sum_q7, q6_16)), 4);
+ flat2_q6p6 = _mm_packus_epi16(res_p, res_q);
+ }
+ // wide flat
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ flat = _mm_shuffle_epi32(flat, 68);
+ flat2 = _mm_shuffle_epi32(flat2, 68);
+
+ q2p2 = _mm_andnot_si128(flat, q2p2);
+ flat_q2p2 = _mm_and_si128(flat, flat_q2p2);
+ q2p2 = _mm_or_si128(q2p2, flat_q2p2);
+
+ qs1ps1 = _mm_andnot_si128(flat, qs1ps1);
+ flat_q1p1 = _mm_and_si128(flat, flat_q1p1);
+ q1p1 = _mm_or_si128(qs1ps1, flat_q1p1);
+
+ qs0ps0 = _mm_andnot_si128(flat, qs0ps0);
+ flat_q0p0 = _mm_and_si128(flat, flat_q0p0);
+ q0p0 = _mm_or_si128(qs0ps0, flat_q0p0);
+
+ q6p6 = _mm_andnot_si128(flat2, q6p6);
+ flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6);
+ q6p6 = _mm_or_si128(q6p6, flat2_q6p6);
+ _mm_storel_epi64((__m128i *)(s - 7 * p), q6p6);
+ _mm_storeh_pi((__m64 *)(s + 6 * p), _mm_castsi128_ps(q6p6));
+
+ q5p5 = _mm_andnot_si128(flat2, q5p5);
+ flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5);
+ q5p5 = _mm_or_si128(q5p5, flat2_q5p5);
+ _mm_storel_epi64((__m128i *)(s - 6 * p), q5p5);
+ _mm_storeh_pi((__m64 *)(s + 5 * p), _mm_castsi128_ps(q5p5));
+
+ q4p4 = _mm_andnot_si128(flat2, q4p4);
+ flat2_q4p4 = _mm_and_si128(flat2, flat2_q4p4);
+ q4p4 = _mm_or_si128(q4p4, flat2_q4p4);
+ _mm_storel_epi64((__m128i *)(s - 5 * p), q4p4);
+ _mm_storeh_pi((__m64 *)(s + 4 * p), _mm_castsi128_ps(q4p4));
+
+ q3p3 = _mm_andnot_si128(flat2, q3p3);
+ flat2_q3p3 = _mm_and_si128(flat2, flat2_q3p3);
+ q3p3 = _mm_or_si128(q3p3, flat2_q3p3);
+ _mm_storel_epi64((__m128i *)(s - 4 * p), q3p3);
+ _mm_storeh_pi((__m64 *)(s + 3 * p), _mm_castsi128_ps(q3p3));
+
+ q2p2 = _mm_andnot_si128(flat2, q2p2);
+ flat2_q2p2 = _mm_and_si128(flat2, flat2_q2p2);
+ q2p2 = _mm_or_si128(q2p2, flat2_q2p2);
+ _mm_storel_epi64((__m128i *)(s - 3 * p), q2p2);
+ _mm_storeh_pi((__m64 *)(s + 2 * p), _mm_castsi128_ps(q2p2));
+
+ q1p1 = _mm_andnot_si128(flat2, q1p1);
+ flat2_q1p1 = _mm_and_si128(flat2, flat2_q1p1);
+ q1p1 = _mm_or_si128(q1p1, flat2_q1p1);
+ _mm_storel_epi64((__m128i *)(s - 2 * p), q1p1);
+ _mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(q1p1));
+
+ q0p0 = _mm_andnot_si128(flat2, q0p0);
+ flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0);
+ q0p0 = _mm_or_si128(q0p0, flat2_q0p0);
+ _mm_storel_epi64((__m128i *)(s - 1 * p), q0p0);
+ _mm_storeh_pi((__m64 *)(s - 0 * p), _mm_castsi128_ps(q0p0));
+ }
+}
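
The intrinsics above parallelize VP9's 4-tap filter over the p1..q1 samples in the signed (^ 0x80) domain. A minimal per-pixel sketch of that update, assuming 8-bit pixels, lane-wise 0/-1 values for mask and hev, and arithmetic right shift on signed values (helper names are illustrative, not the library's API):

    /* Sketch of the scalar logic the SIMD above computes 8 lanes at a time. */
    static signed char clamp8(int t) {
      return (signed char)(t < -128 ? -128 : (t > 127 ? 127 : t));
    }

    static void filter4_sketch(signed char mask, signed char hev,
                               unsigned char *op1, unsigned char *op0,
                               unsigned char *oq0, unsigned char *oq1) {
      const signed char ps1 = (signed char)(*op1 ^ 0x80);
      const signed char ps0 = (signed char)(*op0 ^ 0x80);
      const signed char qs0 = (signed char)(*oq0 ^ 0x80);
      const signed char qs1 = (signed char)(*oq1 ^ 0x80);
      signed char filter, filter1, filter2;

      filter = (signed char)(clamp8(ps1 - qs1) & hev);  /* inner term only under hev */
      filter = (signed char)(clamp8(filter + 3 * (qs0 - ps0)) & mask);
      filter1 = (signed char)(clamp8(filter + 4) >> 3); /* subtracted from q0 */
      filter2 = (signed char)(clamp8(filter + 3) >> 3); /* added to p0 */
      *oq0 = (unsigned char)(clamp8(qs0 - filter1) ^ 0x80);
      *op0 = (unsigned char)(clamp8(ps0 + filter2) ^ 0x80);
      filter = (signed char)(((filter1 + 1) >> 1) & ~hev); /* outer taps skip hev lanes */
      *oq1 = (unsigned char)(clamp8(qs1 - filter) ^ 0x80);
      *op1 = (unsigned char)(clamp8(ps1 + filter) ^ 0x80);
    }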
+
+static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s,
+ int p,
+ const unsigned char *_blimit,
+ const unsigned char *_limit,
+ const unsigned char *_thresh) {
+ DECLARE_ALIGNED(16, unsigned char, flat2_op[7][16]);
+ DECLARE_ALIGNED(16, unsigned char, flat2_oq[7][16]);
+
+ DECLARE_ALIGNED(16, unsigned char, flat_op[3][16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq[3][16]);
+
+ DECLARE_ALIGNED(16, unsigned char, ap[8][16]);
+ DECLARE_ALIGNED(16, unsigned char, aq[8][16]);
+
+
+ __m128i mask, hev, flat, flat2;
+ const __m128i zero = _mm_set1_epi16(0);
+ const __m128i one = _mm_set1_epi8(1);
+ __m128i p7, p6, p5;
+ __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4;
+ __m128i q5, q6, q7;
+ int i = 0;
+ const unsigned int extended_thresh = _thresh[0] * 0x01010101u;
+ const unsigned int extended_limit = _limit[0] * 0x01010101u;
+ const unsigned int extended_blimit = _blimit[0] * 0x01010101u;
+ const __m128i thresh =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_thresh), 0);
+ const __m128i limit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_limit), 0);
+ const __m128i blimit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_blimit), 0);
+
+ p4 = _mm_loadu_si128((__m128i *)(s - 5 * p));
+ p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
+ p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
+ p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
+ p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
+ q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
+ q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
+ q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
+ q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
+ q4 = _mm_loadu_si128((__m128i *)(s + 4 * p));
+
+ _mm_store_si128((__m128i *)ap[4], p4);
+ _mm_store_si128((__m128i *)ap[3], p3);
+ _mm_store_si128((__m128i *)ap[2], p2);
+ _mm_store_si128((__m128i *)ap[1], p1);
+ _mm_store_si128((__m128i *)ap[0], p0);
+ _mm_store_si128((__m128i *)aq[4], q4);
+ _mm_store_si128((__m128i *)aq[3], q3);
+ _mm_store_si128((__m128i *)aq[2], q2);
+ _mm_store_si128((__m128i *)aq[1], q1);
+ _mm_store_si128((__m128i *)aq[0], q0);
+
+
+ {
+ const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),
+ _mm_subs_epu8(p0, p1));
+ const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
+ _mm_subs_epu8(q0, q1));
+ const __m128i fe = _mm_set1_epi8(0xfe);
+ const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
+ __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
+ _mm_subs_epu8(q0, p0));
+ __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1),
+ _mm_subs_epu8(q1, p1));
+ __m128i work;
+ flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
+ hev = _mm_subs_epu8(flat, thresh);
+ hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
+
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
+ abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
+ // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+ mask = _mm_max_epu8(flat, mask);
+ // mask |= (abs(p1 - p0) > limit) * -1;
+ // mask |= (abs(q1 - q0) > limit) * -1;
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1),
+ _mm_subs_epu8(p1, p2)),
+ _mm_or_si128(_mm_subs_epu8(p3, p2),
+ _mm_subs_epu8(p2, p3)));
+ mask = _mm_max_epu8(work, mask);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1),
+ _mm_subs_epu8(q1, q2)),
+ _mm_or_si128(_mm_subs_epu8(q3, q2),
+ _mm_subs_epu8(q2, q3)));
+ mask = _mm_max_epu8(work, mask);
+ mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_cmpeq_epi8(mask, zero);
+ }
+
+ // lp filter
+ {
+ const __m128i t4 = _mm_set1_epi8(4);
+ const __m128i t3 = _mm_set1_epi8(3);
+ const __m128i t80 = _mm_set1_epi8(0x80);
+ const __m128i te0 = _mm_set1_epi8(0xe0);
+ const __m128i t1f = _mm_set1_epi8(0x1f);
+ const __m128i t1 = _mm_set1_epi8(0x1);
+ const __m128i t7f = _mm_set1_epi8(0x7f);
+
+ __m128i ps1 = _mm_xor_si128(p1, t80);
+ __m128i ps0 = _mm_xor_si128(p0, t80);
+ __m128i qs0 = _mm_xor_si128(q0, t80);
+ __m128i qs1 = _mm_xor_si128(q1, t80);
+ __m128i filt;
+ __m128i work_a;
+ __m128i filter1, filter2;
+
+ filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev);
+ work_a = _mm_subs_epi8(qs0, ps0);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ /* (vp9_filter + 3 * (qs0 - ps0)) & mask */
+ filt = _mm_and_si128(filt, mask);
+
+ filter1 = _mm_adds_epi8(filt, t4);
+ filter2 = _mm_adds_epi8(filt, t3);
+
+ /* Filter1 >> 3 */
+ work_a = _mm_cmpgt_epi8(zero, filter1);
+ filter1 = _mm_srli_epi16(filter1, 3);
+ work_a = _mm_and_si128(work_a, te0);
+ filter1 = _mm_and_si128(filter1, t1f);
+ filter1 = _mm_or_si128(filter1, work_a);
+ qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80);
+
+ /* Filter2 >> 3 */
+ work_a = _mm_cmpgt_epi8(zero, filter2);
+ filter2 = _mm_srli_epi16(filter2, 3);
+ work_a = _mm_and_si128(work_a, te0);
+ filter2 = _mm_and_si128(filter2, t1f);
+ filter2 = _mm_or_si128(filter2, work_a);
+ ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80);
+
+ /* filt >> 1 */
+ filt = _mm_adds_epi8(filter1, t1);
+ work_a = _mm_cmpgt_epi8(zero, filt);
+ filt = _mm_srli_epi16(filt, 1);
+ work_a = _mm_and_si128(work_a, t80);
+ filt = _mm_and_si128(filt, t7f);
+ filt = _mm_or_si128(filt, work_a);
+ filt = _mm_andnot_si128(hev, filt);
+ ps1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80);
+ qs1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80);
+ // loopfilter done
+
+ {
+ __m128i work;
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0),
+ _mm_subs_epu8(p0, p2)),
+ _mm_or_si128(_mm_subs_epu8(q2, q0),
+ _mm_subs_epu8(q0, q2)));
+ flat = _mm_max_epu8(work, flat);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p3, p0),
+ _mm_subs_epu8(p0, p3)),
+ _mm_or_si128(_mm_subs_epu8(q3, q0),
+ _mm_subs_epu8(q0, q3)));
+ flat = _mm_max_epu8(work, flat);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p4, p0),
+ _mm_subs_epu8(p0, p4)),
+ _mm_or_si128(_mm_subs_epu8(q4, q0),
+ _mm_subs_epu8(q0, q4)));
+ flat = _mm_subs_epu8(flat, one);
+ flat = _mm_cmpeq_epi8(flat, zero);
+ flat = _mm_and_si128(flat, mask);
+
+ p5 = _mm_loadu_si128((__m128i *)(s - 6 * p));
+ q5 = _mm_loadu_si128((__m128i *)(s + 5 * p));
+ flat2 = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p5, p0),
+ _mm_subs_epu8(p0, p5)),
+ _mm_or_si128(_mm_subs_epu8(q5, q0),
+ _mm_subs_epu8(q0, q5)));
+ _mm_store_si128((__m128i *)ap[5], p5);
+ _mm_store_si128((__m128i *)aq[5], q5);
+ flat2 = _mm_max_epu8(work, flat2);
+ p6 = _mm_loadu_si128((__m128i *)(s - 7 * p));
+ q6 = _mm_loadu_si128((__m128i *)(s + 6 * p));
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p6, p0),
+ _mm_subs_epu8(p0, p6)),
+ _mm_or_si128(_mm_subs_epu8(q6, q0),
+ _mm_subs_epu8(q0, q6)));
+ _mm_store_si128((__m128i *)ap[6], p6);
+ _mm_store_si128((__m128i *)aq[6], q6);
+ flat2 = _mm_max_epu8(work, flat2);
+
+ p7 = _mm_loadu_si128((__m128i *)(s - 8 * p));
+ q7 = _mm_loadu_si128((__m128i *)(s + 7 * p));
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p7, p0),
+ _mm_subs_epu8(p0, p7)),
+ _mm_or_si128(_mm_subs_epu8(q7, q0),
+ _mm_subs_epu8(q0, q7)));
+ _mm_store_si128((__m128i *)ap[7], p7);
+ _mm_store_si128((__m128i *)aq[7], q7);
+ flat2 = _mm_max_epu8(work, flat2);
+ flat2 = _mm_subs_epu8(flat2, one);
+ flat2 = _mm_cmpeq_epi8(flat2, zero);
+ flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask
+ }
+
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ // flat and wide flat calculations
+ {
+ const __m128i eight = _mm_set1_epi16(8);
+ const __m128i four = _mm_set1_epi16(4);
+ __m128i temp_flat2 = flat2;
+ unsigned char *src = s;
+ int i = 0;
+ do {
+ __m128i workp_shft;
+ __m128i a, b, c;
+
+ unsigned int off = i * 8;
+ p7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[7] + off)), zero);
+ p6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[6] + off)), zero);
+ p5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[5] + off)), zero);
+ p4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[4] + off)), zero);
+ p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[3] + off)), zero);
+ p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[2] + off)), zero);
+ p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[1] + off)), zero);
+ p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[0] + off)), zero);
+ q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[0] + off)), zero);
+ q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[1] + off)), zero);
+ q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[2] + off)), zero);
+ q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[3] + off)), zero);
+ q4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[4] + off)), zero);
+ q5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[5] + off)), zero);
+ q6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[6] + off)), zero);
+ q7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[7] + off)), zero);
+
+ c = _mm_sub_epi16(_mm_slli_epi16(p7, 3), p7); // p7 * 7
+ c = _mm_add_epi16(_mm_slli_epi16(p6, 1), _mm_add_epi16(p4, c));
+
+ b = _mm_add_epi16(_mm_add_epi16(p3, four), _mm_add_epi16(p3, p2));
+ a = _mm_add_epi16(p3, _mm_add_epi16(p2, p1));
+ a = _mm_add_epi16(_mm_add_epi16(p0, q0), a);
+
+        _mm_storel_epi64((__m128i *)&flat_op[2][i*8],
+                         _mm_packus_epi16(
+                             _mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(_mm_add_epi16(p5, eight), c);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[6][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q1, a);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p3, p2)), p1);
+        _mm_storel_epi64((__m128i *)&flat_op[1][i*8],
+                         _mm_packus_epi16(
+                             _mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p6)), p5);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[5][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q2, a);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p3, p1)), p0);
+        _mm_storel_epi64((__m128i *)&flat_op[0][i*8],
+                         _mm_packus_epi16(
+                             _mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p5)), p4);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[4][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q3, a);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p3, p0)), q0);
+        _mm_storel_epi64((__m128i *)&flat_oq[0][i*8],
+                         _mm_packus_epi16(
+                             _mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p4)), p3);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[3][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ b = _mm_add_epi16(q3, b);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p2, q0)), q1);
+        _mm_storel_epi64((__m128i *)&flat_oq[1][i*8],
+                         _mm_packus_epi16(
+                             _mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(q4, c);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p3)), p2);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[2][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ b = _mm_add_epi16(q3, b);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p1, q1)), q2);
+        _mm_storel_epi64((__m128i *)&flat_oq[2][i*8],
+                         _mm_packus_epi16(
+                             _mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+ a = _mm_add_epi16(q5, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p2)), p1);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[1][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q6, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p1)), p0);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[0][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p0)), q0);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[0][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p6, q0)), q1);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[1][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p5, q1)), q2);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[2][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p4, q2)), q3);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[3][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p3, q3)), q4);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[4][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p2, q4)), q5);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[5][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p1, q5)), q6);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[6][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ temp_flat2 = _mm_srli_si128(temp_flat2, 8);
+ src += 8;
+ } while (++i < 2);
+ }
+ // wide flat
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ work_a = _mm_load_si128((__m128i *)ap[2]);
+ p2 = _mm_load_si128((__m128i *)flat_op[2]);
+ work_a = _mm_andnot_si128(flat, work_a);
+ p2 = _mm_and_si128(flat, p2);
+ p2 = _mm_or_si128(work_a, p2);
+ _mm_store_si128((__m128i *)flat_op[2], p2);
+
+ p1 = _mm_load_si128((__m128i *)flat_op[1]);
+ work_a = _mm_andnot_si128(flat, ps1);
+ p1 = _mm_and_si128(flat, p1);
+ p1 = _mm_or_si128(work_a, p1);
+ _mm_store_si128((__m128i *)flat_op[1], p1);
+
+ p0 = _mm_load_si128((__m128i *)flat_op[0]);
+ work_a = _mm_andnot_si128(flat, ps0);
+ p0 = _mm_and_si128(flat, p0);
+ p0 = _mm_or_si128(work_a, p0);
+ _mm_store_si128((__m128i *)flat_op[0], p0);
+
+ q0 = _mm_load_si128((__m128i *)flat_oq[0]);
+ work_a = _mm_andnot_si128(flat, qs0);
+ q0 = _mm_and_si128(flat, q0);
+ q0 = _mm_or_si128(work_a, q0);
+ _mm_store_si128((__m128i *)flat_oq[0], q0);
+
+ q1 = _mm_load_si128((__m128i *)flat_oq[1]);
+ work_a = _mm_andnot_si128(flat, qs1);
+ q1 = _mm_and_si128(flat, q1);
+ q1 = _mm_or_si128(work_a, q1);
+ _mm_store_si128((__m128i *)flat_oq[1], q1);
+
+ work_a = _mm_load_si128((__m128i *)aq[2]);
+ q2 = _mm_load_si128((__m128i *)flat_oq[2]);
+ work_a = _mm_andnot_si128(flat, work_a);
+ q2 = _mm_and_si128(flat, q2);
+ q2 = _mm_or_si128(work_a, q2);
+ _mm_store_si128((__m128i *)flat_oq[2], q2);
+
+ // write out op6 - op3
+ {
+ unsigned char *dst = (s - 7 * p);
+ for (i = 6; i > 2; i--) {
+ __m128i flat2_output;
+ work_a = _mm_load_si128((__m128i *)ap[i]);
+ flat2_output = _mm_load_si128((__m128i *)flat2_op[i]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ flat2_output = _mm_and_si128(flat2, flat2_output);
+ work_a = _mm_or_si128(work_a, flat2_output);
+ _mm_storeu_si128((__m128i *)dst, work_a);
+ dst += p;
+ }
+ }
+
+ work_a = _mm_load_si128((__m128i *)flat_op[2]);
+ p2 = _mm_load_si128((__m128i *)flat2_op[2]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ p2 = _mm_and_si128(flat2, p2);
+ p2 = _mm_or_si128(work_a, p2);
+ _mm_storeu_si128((__m128i *)(s - 3 * p), p2);
+
+ work_a = _mm_load_si128((__m128i *)flat_op[1]);
+ p1 = _mm_load_si128((__m128i *)flat2_op[1]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ p1 = _mm_and_si128(flat2, p1);
+ p1 = _mm_or_si128(work_a, p1);
+ _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
+
+ work_a = _mm_load_si128((__m128i *)flat_op[0]);
+ p0 = _mm_load_si128((__m128i *)flat2_op[0]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ p0 = _mm_and_si128(flat2, p0);
+ p0 = _mm_or_si128(work_a, p0);
+ _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
+
+ work_a = _mm_load_si128((__m128i *)flat_oq[0]);
+ q0 = _mm_load_si128((__m128i *)flat2_oq[0]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ q0 = _mm_and_si128(flat2, q0);
+ q0 = _mm_or_si128(work_a, q0);
+ _mm_storeu_si128((__m128i *)(s - 0 * p), q0);
+
+ work_a = _mm_load_si128((__m128i *)flat_oq[1]);
+ q1 = _mm_load_si128((__m128i *)flat2_oq[1]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ q1 = _mm_and_si128(flat2, q1);
+ q1 = _mm_or_si128(work_a, q1);
+ _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
+
+ work_a = _mm_load_si128((__m128i *)flat_oq[2]);
+ q2 = _mm_load_si128((__m128i *)flat2_oq[2]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ q2 = _mm_and_si128(flat2, q2);
+ q2 = _mm_or_si128(work_a, q2);
+ _mm_storeu_si128((__m128i *)(s + 2 * p), q2);
+
+ // write out oq3 - oq7
+ {
+ unsigned char *dst = (s + 3 * p);
+ for (i = 3; i < 7; i++) {
+ __m128i flat2_output;
+ work_a = _mm_load_si128((__m128i *)aq[i]);
+ flat2_output = _mm_load_si128((__m128i *)flat2_oq[i]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ flat2_output = _mm_and_si128(flat2, flat2_output);
+ work_a = _mm_or_si128(work_a, flat2_output);
+ _mm_storeu_si128((__m128i *)dst, work_a);
+ dst += p;
+ }
+ }
+ }
+}
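
The do-loop above evaluates VP9's 15-tap wide filter (and, via b, the 7-tap flat filter) as running window sums: c carries the weighted wide window, a the shared inner sum, and each output is (a + c) >> 4 (or (a + b) >> 3), with the incoming sample added and the outgoing ones subtracted between taps. For orientation, the first wide output in scalar form (a sketch; the p[]/q[] arrays are hypothetical, indexed outward from the edge):

    /* flat2_op6 = (p7*7 + p6*2 + p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8) >> 4;
       each later tap drops one p-side sample and picks up the next q-side
       one, matching the add/subtract updates above. */
    static unsigned char wide_op6_sketch(const unsigned char *p,
                                         const unsigned char *q) {
      return (unsigned char)((p[7] * 7 + p[6] * 2 + p[5] + p[4] + p[3] +
                              p[2] + p[1] + p[0] + q[0] + 8) >> 4);
    }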
+
+void vp9_mb_lpf_horizontal_edge_w_sse2(unsigned char *s,
+ int p,
+ const unsigned char *_blimit,
+ const unsigned char *_limit,
+ const unsigned char *_thresh,
+ int count) {
+ if (count == 1)
+ mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, _thresh);
+ else
+ mb_lpf_horizontal_edge_w_sse2_16(s, p, _blimit, _limit, _thresh);
+}
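
Dispatch is on count alone: 1 selects the 8-pixel-wide kernel, anything else the 16-pixel one. A hypothetical call for a full 16-pixel horizontal edge at s with stride p:

    /* Illustrative only; blimit/limit/thresh point at the usual
       single-byte loop-filter parameters. */
    vp9_mb_lpf_horizontal_edge_w_sse2(s, p, blimit, limit, thresh, 2);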
+
+void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s,
+ int p,
+ const unsigned char *_blimit,
+ const unsigned char *_limit,
+ const unsigned char *_thresh,
+ int count) {
+ DECLARE_ALIGNED(16, unsigned char, flat_op2[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_op1[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_op0[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq2[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq1[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq0[16]);
+ __m128i mask, hev, flat;
+ const __m128i zero = _mm_set1_epi16(0);
+ __m128i p3, p2, p1, p0, q0, q1, q2, q3;
+ const unsigned int extended_thresh = _thresh[0] * 0x01010101u;
+ const unsigned int extended_limit = _limit[0] * 0x01010101u;
+ const unsigned int extended_blimit = _blimit[0] * 0x01010101u;
+ const __m128i thresh =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_thresh), 0);
+ const __m128i limit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_limit), 0);
+ const __m128i blimit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_blimit), 0);
+
+ (void)count;
+ p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
+ p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
+ p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
+ p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
+ q0 = _mm_loadl_epi64((__m128i *)(s - 0 * p));
+ q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p));
+ q2 = _mm_loadl_epi64((__m128i *)(s + 2 * p));
+ q3 = _mm_loadl_epi64((__m128i *)(s + 3 * p));
+ {
+ const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),
+ _mm_subs_epu8(p0, p1));
+ const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
+ _mm_subs_epu8(q0, q1));
+ const __m128i one = _mm_set1_epi8(1);
+ const __m128i fe = _mm_set1_epi8(0xfe);
+ const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
+ __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
+ _mm_subs_epu8(q0, p0));
+ __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1),
+ _mm_subs_epu8(q1, p1));
+ __m128i work;
+ flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
+ hev = _mm_subs_epu8(flat, thresh);
+ hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
+
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
+ abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
+ // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+ mask = _mm_max_epu8(flat, mask);
+ // mask |= (abs(p1 - p0) > limit) * -1;
+ // mask |= (abs(q1 - q0) > limit) * -1;
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1),
+ _mm_subs_epu8(p1, p2)),
+ _mm_or_si128(_mm_subs_epu8(p3, p2),
+ _mm_subs_epu8(p2, p3)));
+ mask = _mm_max_epu8(work, mask);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1),
+ _mm_subs_epu8(q1, q2)),
+ _mm_or_si128(_mm_subs_epu8(q3, q2),
+ _mm_subs_epu8(q2, q3)));
+ mask = _mm_max_epu8(work, mask);
+ mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_cmpeq_epi8(mask, zero);
+
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0),
+ _mm_subs_epu8(p0, p2)),
+ _mm_or_si128(_mm_subs_epu8(q2, q0),
+ _mm_subs_epu8(q0, q2)));
+ flat = _mm_max_epu8(work, flat);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p3, p0),
+ _mm_subs_epu8(p0, p3)),
+ _mm_or_si128(_mm_subs_epu8(q3, q0),
+ _mm_subs_epu8(q0, q3)));
+ flat = _mm_max_epu8(work, flat);
+ flat = _mm_subs_epu8(flat, one);
+ flat = _mm_cmpeq_epi8(flat, zero);
+ flat = _mm_and_si128(flat, mask);
+ }
+ {
+ const __m128i four = _mm_set1_epi16(4);
+ unsigned char *src = s;
+ {
+ __m128i workp_a, workp_b, workp_shft;
+ p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * p)), zero);
+ p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * p)), zero);
+ p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 2 * p)), zero);
+ p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 1 * p)), zero);
+ q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 0 * p)), zero);
+ q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 1 * p)), zero);
+ q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 2 * p)), zero);
+ q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 3 * p)), zero);
+
+ workp_a = _mm_add_epi16(_mm_add_epi16(p3, p3), _mm_add_epi16(p2, p1));
+ workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), p0);
+ workp_b = _mm_add_epi16(_mm_add_epi16(q0, p2), p3);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_op2[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_b = _mm_add_epi16(_mm_add_epi16(q0, q1), p1);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_op1[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q2);
+ workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p1), p0);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_op0[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q3);
+ workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p0), q0);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_oq0[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p2), q3);
+ workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q0), q1);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_oq1[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p1), q3);
+ workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q1), q2);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_oq2[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+ }
+ }
+ // lp filter
+ {
+ const __m128i t4 = _mm_set1_epi8(4);
+ const __m128i t3 = _mm_set1_epi8(3);
+ const __m128i t80 = _mm_set1_epi8(0x80);
+ const __m128i te0 = _mm_set1_epi8(0xe0);
+ const __m128i t1f = _mm_set1_epi8(0x1f);
+ const __m128i t1 = _mm_set1_epi8(0x1);
+ const __m128i t7f = _mm_set1_epi8(0x7f);
+
+ const __m128i ps1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * p)),
+ t80);
+ const __m128i ps0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * p)),
+ t80);
+ const __m128i qs0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * p)),
+ t80);
+ const __m128i qs1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * p)),
+ t80);
+ __m128i filt;
+ __m128i work_a;
+ __m128i filter1, filter2;
+
+ filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev);
+ work_a = _mm_subs_epi8(qs0, ps0);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ /* (vp9_filter + 3 * (qs0 - ps0)) & mask */
+ filt = _mm_and_si128(filt, mask);
+
+ filter1 = _mm_adds_epi8(filt, t4);
+ filter2 = _mm_adds_epi8(filt, t3);
+
+ /* Filter1 >> 3 */
+ work_a = _mm_cmpgt_epi8(zero, filter1);
+ filter1 = _mm_srli_epi16(filter1, 3);
+ work_a = _mm_and_si128(work_a, te0);
+ filter1 = _mm_and_si128(filter1, t1f);
+ filter1 = _mm_or_si128(filter1, work_a);
+
+ /* Filter2 >> 3 */
+ work_a = _mm_cmpgt_epi8(zero, filter2);
+ filter2 = _mm_srli_epi16(filter2, 3);
+ work_a = _mm_and_si128(work_a, te0);
+ filter2 = _mm_and_si128(filter2, t1f);
+ filter2 = _mm_or_si128(filter2, work_a);
+
+ /* filt >> 1 */
+ filt = _mm_adds_epi8(filter1, t1);
+ work_a = _mm_cmpgt_epi8(zero, filt);
+ filt = _mm_srli_epi16(filt, 1);
+ work_a = _mm_and_si128(work_a, t80);
+ filt = _mm_and_si128(filt, t7f);
+ filt = _mm_or_si128(filt, work_a);
+
+ filt = _mm_andnot_si128(hev, filt);
+
+ work_a = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80);
+ q0 = _mm_loadl_epi64((__m128i *)flat_oq0);
+ work_a = _mm_andnot_si128(flat, work_a);
+ q0 = _mm_and_si128(flat, q0);
+ q0 = _mm_or_si128(work_a, q0);
+
+ work_a = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80);
+ q1 = _mm_loadl_epi64((__m128i *)flat_oq1);
+ work_a = _mm_andnot_si128(flat, work_a);
+ q1 = _mm_and_si128(flat, q1);
+ q1 = _mm_or_si128(work_a, q1);
+
+ work_a = _mm_loadu_si128((__m128i *)(s + 2 * p));
+ q2 = _mm_loadl_epi64((__m128i *)flat_oq2);
+ work_a = _mm_andnot_si128(flat, work_a);
+ q2 = _mm_and_si128(flat, q2);
+ q2 = _mm_or_si128(work_a, q2);
+
+ work_a = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80);
+ p0 = _mm_loadl_epi64((__m128i *)flat_op0);
+ work_a = _mm_andnot_si128(flat, work_a);
+ p0 = _mm_and_si128(flat, p0);
+ p0 = _mm_or_si128(work_a, p0);
+
+ work_a = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80);
+ p1 = _mm_loadl_epi64((__m128i *)flat_op1);
+ work_a = _mm_andnot_si128(flat, work_a);
+ p1 = _mm_and_si128(flat, p1);
+ p1 = _mm_or_si128(work_a, p1);
+
+ work_a = _mm_loadu_si128((__m128i *)(s - 3 * p));
+ p2 = _mm_loadl_epi64((__m128i *)flat_op2);
+ work_a = _mm_andnot_si128(flat, work_a);
+ p2 = _mm_and_si128(flat, p2);
+ p2 = _mm_or_si128(work_a, p2);
+
+ _mm_storel_epi64((__m128i *)(s - 3 * p), p2);
+ _mm_storel_epi64((__m128i *)(s - 2 * p), p1);
+ _mm_storel_epi64((__m128i *)(s - 1 * p), p0);
+ _mm_storel_epi64((__m128i *)(s + 0 * p), q0);
+ _mm_storel_epi64((__m128i *)(s + 1 * p), q1);
+ _mm_storel_epi64((__m128i *)(s + 2 * p), q2);
+ }
+}
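
The flat_op*/flat_oq* values computed above are VP9's 7-tap filter over the p3..q3 window, each of the form (weighted 7-sample sum + 4) >> 3. The first output in scalar form, as a reference sketch:

    /* The remaining taps slide the window one sample at a time, mirroring
       the workp_a/workp_b add-subtract chain above. */
    static unsigned char flat_op2_sketch(unsigned char p3, unsigned char p2,
                                         unsigned char p1, unsigned char p0,
                                         unsigned char q0) {
      return (unsigned char)((3 * p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3);
    }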
+
+static INLINE void transpose8x16(unsigned char *in0, unsigned char *in1,
+ int in_p, unsigned char *out, int out_p) {
+ __m128i x0, x1, x2, x3, x4, x5, x6, x7;
+ __m128i x8, x9, x10, x11, x12, x13, x14, x15;
+
+ /* Read in 16 lines */
+ x0 = _mm_loadl_epi64((__m128i *)in0);
+ x8 = _mm_loadl_epi64((__m128i *)in1);
+ x1 = _mm_loadl_epi64((__m128i *)(in0 + in_p));
+ x9 = _mm_loadl_epi64((__m128i *)(in1 + in_p));
+ x2 = _mm_loadl_epi64((__m128i *)(in0 + 2 * in_p));
+ x10 = _mm_loadl_epi64((__m128i *)(in1 + 2 * in_p));
+ x3 = _mm_loadl_epi64((__m128i *)(in0 + 3*in_p));
+ x11 = _mm_loadl_epi64((__m128i *)(in1 + 3*in_p));
+ x4 = _mm_loadl_epi64((__m128i *)(in0 + 4*in_p));
+ x12 = _mm_loadl_epi64((__m128i *)(in1 + 4*in_p));
+ x5 = _mm_loadl_epi64((__m128i *)(in0 + 5*in_p));
+ x13 = _mm_loadl_epi64((__m128i *)(in1 + 5*in_p));
+ x6 = _mm_loadl_epi64((__m128i *)(in0 + 6*in_p));
+ x14 = _mm_loadl_epi64((__m128i *)(in1 + 6*in_p));
+ x7 = _mm_loadl_epi64((__m128i *)(in0 + 7*in_p));
+ x15 = _mm_loadl_epi64((__m128i *)(in1 + 7*in_p));
+
+ x0 = _mm_unpacklo_epi8(x0, x1);
+ x1 = _mm_unpacklo_epi8(x2, x3);
+ x2 = _mm_unpacklo_epi8(x4, x5);
+ x3 = _mm_unpacklo_epi8(x6, x7);
+
+ x8 = _mm_unpacklo_epi8(x8, x9);
+ x9 = _mm_unpacklo_epi8(x10, x11);
+ x10 = _mm_unpacklo_epi8(x12, x13);
+ x11 = _mm_unpacklo_epi8(x14, x15);
+
+ x4 = _mm_unpacklo_epi16(x0, x1);
+ x5 = _mm_unpacklo_epi16(x2, x3);
+ x12 = _mm_unpacklo_epi16(x8, x9);
+ x13 = _mm_unpacklo_epi16(x10, x11);
+
+ x6 = _mm_unpacklo_epi32(x4, x5);
+ x7 = _mm_unpackhi_epi32(x4, x5);
+ x14 = _mm_unpacklo_epi32(x12, x13);
+ x15 = _mm_unpackhi_epi32(x12, x13);
+
+ /* Store first 4-line result */
+ _mm_storeu_si128((__m128i *)out, _mm_unpacklo_epi64(x6, x14));
+ _mm_storeu_si128((__m128i *)(out + out_p), _mm_unpackhi_epi64(x6, x14));
+ _mm_storeu_si128((__m128i *)(out + 2 * out_p), _mm_unpacklo_epi64(x7, x15));
+ _mm_storeu_si128((__m128i *)(out + 3 * out_p), _mm_unpackhi_epi64(x7, x15));
+
+ x4 = _mm_unpackhi_epi16(x0, x1);
+ x5 = _mm_unpackhi_epi16(x2, x3);
+ x12 = _mm_unpackhi_epi16(x8, x9);
+ x13 = _mm_unpackhi_epi16(x10, x11);
+
+ x6 = _mm_unpacklo_epi32(x4, x5);
+ x7 = _mm_unpackhi_epi32(x4, x5);
+ x14 = _mm_unpacklo_epi32(x12, x13);
+ x15 = _mm_unpackhi_epi32(x12, x13);
+
+ /* Store second 4-line result */
+ _mm_storeu_si128((__m128i *)(out + 4 * out_p), _mm_unpacklo_epi64(x6, x14));
+ _mm_storeu_si128((__m128i *)(out + 5 * out_p), _mm_unpackhi_epi64(x6, x14));
+ _mm_storeu_si128((__m128i *)(out + 6 * out_p), _mm_unpacklo_epi64(x7, x15));
+ _mm_storeu_si128((__m128i *)(out + 7 * out_p), _mm_unpackhi_epi64(x7, x15));
+}
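
In scalar terms, transpose8x16 turns the two 8x8 byte tiles at in0 and in1 into the left and right halves of eight 16-byte output rows. A reference sketch of the same data movement:

    static void transpose8x16_c(const unsigned char *in0,
                                const unsigned char *in1, int in_p,
                                unsigned char *out, int out_p) {
      int r, c;
      for (c = 0; c < 8; ++c)      /* output row c is input column c */
        for (r = 0; r < 8; ++r) {
          out[c * out_p + r] = in0[r * in_p + c];
          out[c * out_p + 8 + r] = in1[r * in_p + c];
        }
    }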
+
+static INLINE void transpose(unsigned char *src[], int in_p,
+ unsigned char *dst[], int out_p,
+ int num_8x8_to_transpose) {
+ int idx8x8 = 0;
+ __m128i x0, x1, x2, x3, x4, x5, x6, x7;
+ do {
+ unsigned char *in = src[idx8x8];
+ unsigned char *out = dst[idx8x8];
+
+ x0 = _mm_loadl_epi64((__m128i *)(in + 0*in_p)); // 00 01 02 03 04 05 06 07
+ x1 = _mm_loadl_epi64((__m128i *)(in + 1*in_p)); // 10 11 12 13 14 15 16 17
+ x2 = _mm_loadl_epi64((__m128i *)(in + 2*in_p)); // 20 21 22 23 24 25 26 27
+ x3 = _mm_loadl_epi64((__m128i *)(in + 3*in_p)); // 30 31 32 33 34 35 36 37
+ x4 = _mm_loadl_epi64((__m128i *)(in + 4*in_p)); // 40 41 42 43 44 45 46 47
+ x5 = _mm_loadl_epi64((__m128i *)(in + 5*in_p)); // 50 51 52 53 54 55 56 57
+ x6 = _mm_loadl_epi64((__m128i *)(in + 6*in_p)); // 60 61 62 63 64 65 66 67
+ x7 = _mm_loadl_epi64((__m128i *)(in + 7*in_p)); // 70 71 72 73 74 75 76 77
+ // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
+ x0 = _mm_unpacklo_epi8(x0, x1);
+ // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
+ x1 = _mm_unpacklo_epi8(x2, x3);
+ // 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
+ x2 = _mm_unpacklo_epi8(x4, x5);
+ // 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
+ x3 = _mm_unpacklo_epi8(x6, x7);
+ // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+ x4 = _mm_unpacklo_epi16(x0, x1);
+ // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
+ x5 = _mm_unpacklo_epi16(x2, x3);
+ // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
+ x6 = _mm_unpacklo_epi32(x4, x5);
+ // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
+ x7 = _mm_unpackhi_epi32(x4, x5);
+
+ _mm_storel_pd((double *)(out + 0*out_p),
+ _mm_castsi128_pd(x6)); // 00 10 20 30 40 50 60 70
+ _mm_storeh_pd((double *)(out + 1*out_p),
+ _mm_castsi128_pd(x6)); // 01 11 21 31 41 51 61 71
+ _mm_storel_pd((double *)(out + 2*out_p),
+ _mm_castsi128_pd(x7)); // 02 12 22 32 42 52 62 72
+ _mm_storeh_pd((double *)(out + 3*out_p),
+ _mm_castsi128_pd(x7)); // 03 13 23 33 43 53 63 73
+
+ // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
+ x4 = _mm_unpackhi_epi16(x0, x1);
+ // 44 54 64 74 45 55 65 75 46 56 66 76 47 57 67 77
+ x5 = _mm_unpackhi_epi16(x2, x3);
+ // 04 14 24 34 44 54 64 74 05 15 25 35 45 55 65 75
+ x6 = _mm_unpacklo_epi32(x4, x5);
+ // 06 16 26 36 46 56 66 76 07 17 27 37 47 57 67 77
+ x7 = _mm_unpackhi_epi32(x4, x5);
+
+ _mm_storel_pd((double *)(out + 4*out_p),
+ _mm_castsi128_pd(x6)); // 04 14 24 34 44 54 64 74
+ _mm_storeh_pd((double *)(out + 5*out_p),
+ _mm_castsi128_pd(x6)); // 05 15 25 35 45 55 65 75
+ _mm_storel_pd((double *)(out + 6*out_p),
+ _mm_castsi128_pd(x7)); // 06 16 26 36 46 56 66 76
+ _mm_storeh_pd((double *)(out + 7*out_p),
+ _mm_castsi128_pd(x7)); // 07 17 27 37 47 57 67 77
+ } while (++idx8x8 < num_8x8_to_transpose);
+}
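
The pointer-array interface lets one call walk any number of independent 8x8 tiles. Hypothetically, staging the two tiles straddling a vertical edge at s into a 16-byte-pitch scratch buffer, the same pattern the vertical filters below use:

    unsigned char *src2[2] = { s - 8, s - 8 + 8 };      /* names illustrative */
    unsigned char *dst2[2] = { t_dst, t_dst + 8 * 16 };
    transpose(src2, p, dst2, 16, 2);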
+
+void vp9_mbloop_filter_vertical_edge_sse2(unsigned char *s,
+ int p,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh,
+ int count) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256);
+ unsigned char *src[2];
+ unsigned char *dst[2];
+
+ (void)count;
+ /* Transpose 16x16 */
+ transpose8x16(s - 8, s - 8 + p * 8, p, t_dst, 16);
+ transpose8x16(s, s + p * 8, p, t_dst + 16 * 8, 16);
+
+ /* Loop filtering */
+ vp9_mbloop_filter_horizontal_edge_sse2(t_dst + 8 * 16, 16, blimit, limit,
+ thresh, 1);
+ src[0] = t_dst + 3 * 16;
+ src[1] = t_dst + 3 * 16 + 8;
+
+ dst[0] = s - 5;
+ dst[1] = s - 5 + p * 8;
+
+ /* Transpose 16x8 */
+ transpose(src, 16, dst, p, 2);
+}
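
Both vertical-edge functions reuse the horizontal kernels by rotating the data: transpose a 16x16 neighborhood into t_dst, filter it horizontally (row 8 of t_dst corresponds to the original column at s), then transpose back only the 8x8 tiles covering columns the filter may modify. That is why the write-back above starts at t_dst + 3 * 16 and lands at s - 5: the changed columns p2..q2 fall inside that 8-column tile span.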
+
+void vp9_mb_lpf_vertical_edge_w_sse2(unsigned char *s,
+ int p,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256);
+ unsigned char *src[4];
+ unsigned char *dst[4];
+
+ dst[0] = t_dst;
+ dst[1] = t_dst + 8 * 16;
+
+ src[0] = s - 8;
+ src[1] = s - 8 + 8;
+
+ /* Transpose 16x16 */
+ transpose(src, p, dst, 16, 2);
+
+ /* Loop filtering */
+ vp9_mb_lpf_horizontal_edge_w_sse2(t_dst + 8 * 16, 16, blimit, limit,
+ thresh, 1);
+
+ src[0] = t_dst;
+ src[1] = t_dst + 8 * 16;
+
+ dst[0] = s - 8;
+ dst[1] = s - 8 + 8;
+
+ transpose(src, 16, dst, p, 2);
+}
diff --git a/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm b/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm
new file mode 100644
index 0000000..4ebb51b
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm
@@ -0,0 +1,626 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+
+;void vp9_loop_filter_horizontal_edge_mmx
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; int count
+;)
+global sym(vp9_loop_filter_horizontal_edge_mmx) PRIVATE
+sym(vp9_loop_filter_horizontal_edge_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 32 ; reserve 32 bytes
+ %define t0 [rsp + 0] ;__declspec(align(16)) char t0[8];
+ %define t1 [rsp + 16] ;__declspec(align(16)) char t1[8];
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixel_step ; destination pitch?
+
+ movsxd rcx, dword ptr arg(5) ;count
+.next8_h:
+ mov rdx, arg(3) ;limit
+ movq mm7, [rdx]
+ mov rdi, rsi ; rdi points to row +1 for indirect addressing
+ add rdi, rax
+
+ ; calculate breakout conditions
+ movq mm2, [rdi+2*rax] ; q3
+ movq mm1, [rsi+2*rax] ; q2
+ movq mm6, mm1 ; q2
+ psubusb mm1, mm2 ; q2-=q3
+ psubusb mm2, mm6 ; q3-=q2
+ por mm1, mm2 ; abs(q3-q2)
+ psubusb mm1, mm7 ;
+
+
+ movq mm4, [rsi+rax] ; q1
+ movq mm3, mm4 ; q1
+ psubusb mm4, mm6 ; q1-=q2
+ psubusb mm6, mm3 ; q2-=q1
+ por mm4, mm6 ; abs(q2-q1)
+
+ psubusb mm4, mm7
+ por mm1, mm4
+
+ movq mm4, [rsi] ; q0
+ movq mm0, mm4 ; q0
+ psubusb mm4, mm3 ; q0-=q1
+ psubusb mm3, mm0 ; q1-=q0
+ por mm4, mm3 ; abs(q0-q1)
+ movq t0, mm4 ; save to t0
+ psubusb mm4, mm7
+ por mm1, mm4
+
+
+ neg rax ; negate pitch to deal with above border
+
+ movq mm2, [rsi+4*rax] ; p3
+ movq mm4, [rdi+4*rax] ; p2
+ movq mm5, mm4 ; p2
+ psubusb mm4, mm2 ; p2-=p3
+ psubusb mm2, mm5 ; p3-=p2
+ por mm4, mm2 ; abs(p3 - p2)
+ psubusb mm4, mm7
+ por mm1, mm4
+
+
+ movq mm4, [rsi+2*rax] ; p1
+ movq mm3, mm4 ; p1
+ psubusb mm4, mm5 ; p1-=p2
+ psubusb mm5, mm3 ; p2-=p1
+ por mm4, mm5 ; abs(p2 - p1)
+ psubusb mm4, mm7
+ por mm1, mm4
+
+ movq mm2, mm3 ; p1
+
+ movq mm4, [rsi+rax] ; p0
+ movq mm5, mm4 ; p0
+ psubusb mm4, mm3 ; p0-=p1
+ psubusb mm3, mm5 ; p1-=p0
+ por mm4, mm3 ; abs(p1 - p0)
+ movq t1, mm4 ; save to t1
+ psubusb mm4, mm7
+ por mm1, mm4
+
+ movq mm3, [rdi] ; q1
+ movq mm4, mm3 ; q1
+ psubusb mm3, mm2 ; q1-=p1
+ psubusb mm2, mm4 ; p1-=q1
+ por mm2, mm3 ; abs(p1-q1)
+ pand mm2, [GLOBAL(tfe)] ; set lsb of each byte to zero
+ psrlw mm2, 1 ; abs(p1-q1)/2
+
+ movq mm6, mm5 ; p0
+ movq mm3, [rsi] ; q0
+ psubusb mm5, mm3 ; p0-=q0
+ psubusb mm3, mm6 ; q0-=p0
+ por mm5, mm3 ; abs(p0 - q0)
+ paddusb mm5, mm5 ; abs(p0-q0)*2
+ paddusb mm5, mm2 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+
+ mov rdx, arg(2) ;blimit ; get blimit
+ movq mm7, [rdx] ; blimit
+
+ psubusb mm5, mm7 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit
+ por mm1, mm5
+ pxor mm5, mm5
+ pcmpeqb mm1, mm5 ; mask mm1
+
+ ; calculate high edge variance
+ mov rdx, arg(4) ;thresh ; get thresh
+ movq mm7, [rdx] ;
+ movq mm4, t0 ; get abs (q1 - q0)
+ psubusb mm4, mm7
+ movq mm3, t1 ; get abs (p1 - p0)
+ psubusb mm3, mm7
+ paddb mm4, mm3 ; abs(q1 - q0) > thresh || abs(p1 - p0) > thresh
+
+ pcmpeqb mm4, mm5
+
+ pcmpeqb mm5, mm5
+ pxor mm4, mm5
+
+
+ ; start work on filters
+ movq mm2, [rsi+2*rax] ; p1
+ movq mm7, [rdi] ; q1
+ pxor mm2, [GLOBAL(t80)] ; p1 offset to convert to signed values
+ pxor mm7, [GLOBAL(t80)] ; q1 offset to convert to signed values
+ psubsb mm2, mm7 ; p1 - q1
+ pand mm2, mm4 ; high var mask (hvm)(p1 - q1)
+ pxor mm6, [GLOBAL(t80)] ; offset to convert to signed values
+ pxor mm0, [GLOBAL(t80)] ; offset to convert to signed values
+ movq mm3, mm0 ; q0
+ psubsb mm0, mm6 ; q0 - p0
+ paddsb mm2, mm0 ; 1 * (q0 - p0) + hvm(p1 - q1)
+ paddsb mm2, mm0 ; 2 * (q0 - p0) + hvm(p1 - q1)
+ paddsb mm2, mm0 ; 3 * (q0 - p0) + hvm(p1 - q1)
+ pand mm1, mm2 ; mask filter values we don't care about
+ movq mm2, mm1
+ paddsb mm1, [GLOBAL(t4)] ; 3* (q0 - p0) + hvm(p1 - q1) + 4
+ paddsb mm2, [GLOBAL(t3)] ; 3* (q0 - p0) + hvm(p1 - q1) + 3
+
+ pxor mm0, mm0 ;
+ pxor mm5, mm5
+ punpcklbw mm0, mm2 ;
+ punpckhbw mm5, mm2 ;
+ psraw mm0, 11 ;
+ psraw mm5, 11
+ packsswb mm0, mm5
+ movq mm2, mm0 ; (3* (q0 - p0) + hvm(p1 - q1) + 3) >> 3;
+
+ pxor mm0, mm0 ; 0
+ movq mm5, mm1 ; abcdefgh
+ punpcklbw mm0, mm1 ; e0f0g0h0
+ psraw mm0, 11 ; sign extended shift right by 3
+ pxor mm1, mm1 ; 0
+ punpckhbw mm1, mm5 ; a0b0c0d0
+ psraw mm1, 11 ; sign extended shift right by 3
+ movq mm5, mm0 ; save results
+
+ packsswb mm0, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>3
+ paddsw mm5, [GLOBAL(ones)]
+ paddsw mm1, [GLOBAL(ones)]
+ psraw mm5, 1 ; partial shifted one more time for 2nd tap
+ psraw mm1, 1 ; partial shifted one more time for 2nd tap
+ packsswb mm5, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>4
+ pandn mm4, mm5 ; high edge variance additive
+
+ paddsb mm6, mm2 ; p0+= p0 add
+ pxor mm6, [GLOBAL(t80)] ; unoffset
+ movq [rsi+rax], mm6 ; write back
+
+ movq mm6, [rsi+2*rax] ; p1
+ pxor mm6, [GLOBAL(t80)] ; reoffset
+ paddsb mm6, mm4 ; p1+= p1 add
+ pxor mm6, [GLOBAL(t80)] ; unoffset
+ movq [rsi+2*rax], mm6 ; write back
+
+ psubsb mm3, mm0 ; q0-= q0 add
+ pxor mm3, [GLOBAL(t80)] ; unoffset
+ movq [rsi], mm3 ; write back
+
+ psubsb mm7, mm4 ; q1-= q1 add
+ pxor mm7, [GLOBAL(t80)] ; unoffset
+ movq [rdi], mm7 ; write back
+
+ add rsi,8
+ neg rax
+ dec rcx
+ jnz .next8_h
+
+ add rsp, 32
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
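The punpcklbw-with-zero followed by psraw 11 used above is the classic idiom for an arithmetic right shift of signed bytes by 3: the byte lands in the high half of a 16-bit lane, so a 16-bit arithmetic shift by 8 + 3 sign-extends and divides in one step. A tiny self-contained check of the identity (assumes the compiler's >> on negative values is arithmetic, as on x86):

    #include <assert.h>

    int main(void) {
      int b;
      for (b = -128; b <= 127; ++b) {
        short lane = (short)(b * 256);    /* byte in the high half of a lane */
        assert((lane >> 11) == (b >> 3)); /* psraw by 11 == signed byte >> 3 */
      }
      return 0;
    }
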
+
+;void vp9_loop_filter_vertical_edge_mmx
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; int count
+;)
+global sym(vp9_loop_filter_vertical_edge_mmx) PRIVATE
+sym(vp9_loop_filter_vertical_edge_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 64 ; reserve 64 bytes
+ %define t0 [rsp + 0] ;__declspec(align(16)) char t0[8];
+ %define t1 [rsp + 16] ;__declspec(align(16)) char t1[8];
+ %define srct [rsp + 32] ;__declspec(align(16)) char srct[32];
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixel_step ; destination pitch?
+
+ lea rsi, [rsi + rax*4 - 4]
+
+ movsxd rcx, dword ptr arg(5) ;count
+.next8_v:
+ mov rdi, rsi ; rdi points to row +1 for indirect addressing
+ add rdi, rax
+
+
+ ;transpose
+ movq mm6, [rsi+2*rax] ; 67 66 65 64 63 62 61 60
+        movq        mm7,        mm6             ; 67 66 65 64 63 62 61 60
+
+ punpckhbw mm7, [rdi+2*rax] ; 77 67 76 66 75 65 74 64
+ punpcklbw mm6, [rdi+2*rax] ; 73 63 72 62 71 61 70 60
+
+ movq mm4, [rsi] ; 47 46 45 44 43 42 41 40
+ movq mm5, mm4 ; 47 46 45 44 43 42 41 40
+
+ punpckhbw mm5, [rsi+rax] ; 57 47 56 46 55 45 54 44
+ punpcklbw mm4, [rsi+rax] ; 53 43 52 42 51 41 50 40
+
+ movq mm3, mm5 ; 57 47 56 46 55 45 54 44
+ punpckhwd mm5, mm7 ; 77 67 57 47 76 66 56 46
+
+ punpcklwd mm3, mm7 ; 75 65 55 45 74 64 54 44
+ movq mm2, mm4 ; 53 43 52 42 51 41 50 40
+
+ punpckhwd mm4, mm6 ; 73 63 53 43 72 62 52 42
+ punpcklwd mm2, mm6 ; 71 61 51 41 70 60 50 40
+
+ neg rax
+ movq mm6, [rsi+rax*2] ; 27 26 25 24 23 22 21 20
+
+ movq mm1, mm6 ; 27 26 25 24 23 22 21 20
+        punpckhbw   mm6,        [rsi+rax]       ; 37 27 36 26 35 25 34 24
+
+ punpcklbw mm1, [rsi+rax] ; 33 23 32 22 31 21 30 20
+        movq        mm7,        [rsi+rax*4]     ; 07 06 05 04 03 02 01 00
+
+ punpckhbw mm7, [rdi+rax*4] ; 17 07 16 06 15 05 14 04
+ movq mm0, mm7 ; 17 07 16 06 15 05 14 04
+
+ punpckhwd mm7, mm6 ; 37 27 17 07 36 26 16 06
+ punpcklwd mm0, mm6 ; 35 25 15 05 34 24 14 04
+
+ movq mm6, mm7 ; 37 27 17 07 36 26 16 06
+ punpckhdq mm7, mm5 ; 77 67 57 47 37 27 17 07 = q3
+
+ punpckldq mm6, mm5 ; 76 66 56 46 36 26 16 06 = q2
+
+ movq mm5, mm6 ; 76 66 56 46 36 26 16 06
+ psubusb mm5, mm7 ; q2-q3
+
+ psubusb mm7, mm6 ; q3-q2
+        por         mm7,        mm5             ; mm7=abs(q3-q2)
+
+ movq mm5, mm0 ; 35 25 15 05 34 24 14 04
+ punpckhdq mm5, mm3 ; 75 65 55 45 35 25 15 05 = q1
+
+        punpckldq   mm0,        mm3             ; 74 64 54 44 34 24 14 04 = q0
+ movq mm3, mm5 ; 75 65 55 45 35 25 15 05 = q1
+
+ psubusb mm3, mm6 ; q1-q2
+ psubusb mm6, mm5 ; q2-q1
+
+ por mm6, mm3 ; mm6=abs(q2-q1)
+ lea rdx, srct
+
+ movq [rdx+24], mm5 ; save q1
+ movq [rdx+16], mm0 ; save q0
+
+ movq mm3, [rsi+rax*4] ; 07 06 05 04 03 02 01 00
+ punpcklbw mm3, [rdi+rax*4] ; 13 03 12 02 11 01 10 00
+
+ movq mm0, mm3 ; 13 03 12 02 11 01 10 00
+ punpcklwd mm0, mm1 ; 31 21 11 01 30 20 10 00
+
+ punpckhwd mm3, mm1 ; 33 23 13 03 32 22 12 02
+ movq mm1, mm0 ; 31 21 11 01 30 20 10 00
+
+ punpckldq mm0, mm2 ; 70 60 50 40 30 20 10 00 =p3
+ punpckhdq mm1, mm2 ; 71 61 51 41 31 21 11 01 =p2
+
+ movq mm2, mm1 ; 71 61 51 41 31 21 11 01 =p2
+ psubusb mm2, mm0 ; p2-p3
+
+ psubusb mm0, mm1 ; p3-p2
+ por mm0, mm2 ; mm0=abs(p3-p2)
+
+ movq mm2, mm3 ; 33 23 13 03 32 22 12 02
+ punpckldq mm2, mm4 ; 72 62 52 42 32 22 12 02 = p1
+
+ punpckhdq mm3, mm4 ; 73 63 53 43 33 23 13 03 = p0
+ movq [rdx+8], mm3 ; save p0
+
+ movq [rdx], mm2 ; save p1
+ movq mm5, mm2 ; mm5 = p1
+
+ psubusb mm2, mm1 ; p1-p2
+ psubusb mm1, mm5 ; p2-p1
+
+ por mm1, mm2 ; mm1=abs(p2-p1)
+ mov rdx, arg(3) ;limit
+
+ movq mm4, [rdx] ; mm4 = limit
+ psubusb mm7, mm4
+
+ psubusb mm0, mm4
+ psubusb mm1, mm4
+
+ psubusb mm6, mm4
+ por mm7, mm6
+
+ por mm0, mm1
+ por mm0, mm7 ; abs(q3-q2) > limit || abs(p3-p2) > limit ||abs(p2-p1) > limit || abs(q2-q1) > limit
+
+ movq mm1, mm5 ; p1
+
+ movq mm7, mm3 ; mm3=mm7=p0
+ psubusb mm7, mm5 ; p0 - p1
+
+ psubusb mm5, mm3 ; p1 - p0
+ por mm5, mm7 ; abs(p1-p0)
+
+ movq t0, mm5 ; save abs(p1-p0)
+ lea rdx, srct
+
+ psubusb mm5, mm4
+ por mm0, mm5 ; mm0=mask
+
+ movq mm5, [rdx+16] ; mm5=q0
+ movq mm7, [rdx+24] ; mm7=q1
+
+ movq mm6, mm5 ; mm6=q0
+ movq mm2, mm7 ; q1
+ psubusb mm5, mm7 ; q0-q1
+
+ psubusb mm7, mm6 ; q1-q0
+ por mm7, mm5 ; abs(q1-q0)
+
+ movq t1, mm7 ; save abs(q1-q0)
+ psubusb mm7, mm4
+
+ por mm0, mm7 ; mask
+
+ movq mm5, mm2 ; q1
+ psubusb mm5, mm1 ; q1-=p1
+ psubusb mm1, mm2 ; p1-=q1
+ por mm5, mm1 ; abs(p1-q1)
+ pand mm5, [GLOBAL(tfe)] ; set lsb of each byte to zero
+ psrlw mm5, 1 ; abs(p1-q1)/2
+
+ mov rdx, arg(2) ;blimit ;
+
+ movq mm4, [rdx] ;blimit
+ movq mm1, mm3 ; mm1=mm3=p0
+
+ movq mm7, mm6 ; mm7=mm6=q0
+ psubusb mm1, mm7 ; p0-q0
+
+ psubusb mm7, mm3 ; q0-p0
+ por mm1, mm7 ; abs(q0-p0)
+ paddusb mm1, mm1 ; abs(q0-p0)*2
+ paddusb mm1, mm5 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+
+ psubusb mm1, mm4 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit
+        por         mm1,        mm0             ; mask
+
+ pxor mm0, mm0
+ pcmpeqb mm1, mm0
+
+ ; calculate high edge variance
+ mov rdx, arg(4) ;thresh ; get thresh
+ movq mm7, [rdx]
+ ;
+ movq mm4, t0 ; get abs (q1 - q0)
+ psubusb mm4, mm7
+
+ movq mm3, t1 ; get abs (p1 - p0)
+ psubusb mm3, mm7
+
+ por mm4, mm3 ; abs(q1 - q0) > thresh || abs(p1 - p0) > thresh
+ pcmpeqb mm4, mm0
+
+ pcmpeqb mm0, mm0
+ pxor mm4, mm0
+
+
+
+ ; start work on filters
+ lea rdx, srct
+
+ movq mm2, [rdx] ; p1
+ movq mm7, [rdx+24] ; q1
+
+ movq mm6, [rdx+8] ; p0
+ movq mm0, [rdx+16] ; q0
+
+ pxor mm2, [GLOBAL(t80)] ; p1 offset to convert to signed values
+ pxor mm7, [GLOBAL(t80)] ; q1 offset to convert to signed values
+
+ psubsb mm2, mm7 ; p1 - q1
+ pand mm2, mm4 ; high var mask (hvm)(p1 - q1)
+
+ pxor mm6, [GLOBAL(t80)] ; offset to convert to signed values
+ pxor mm0, [GLOBAL(t80)] ; offset to convert to signed values
+
+ movq mm3, mm0 ; q0
+ psubsb mm0, mm6 ; q0 - p0
+
+ paddsb mm2, mm0 ; 1 * (q0 - p0) + hvm(p1 - q1)
+ paddsb mm2, mm0 ; 2 * (q0 - p0) + hvm(p1 - q1)
+
+ paddsb mm2, mm0 ; 3 * (q0 - p0) + hvm(p1 - q1)
+ pand mm1, mm2 ; mask filter values we don't care about
+
+ movq mm2, mm1
+ paddsb mm1, [GLOBAL(t4)] ; 3* (q0 - p0) + hvm(p1 - q1) + 4
+
+ paddsb mm2, [GLOBAL(t3)] ; 3* (q0 - p0) + hvm(p1 - q1) + 3
+ pxor mm0, mm0 ;
+
+ pxor mm5, mm5
+ punpcklbw mm0, mm2 ;
+
+ punpckhbw mm5, mm2 ;
+ psraw mm0, 11 ;
+
+ psraw mm5, 11
+ packsswb mm0, mm5
+
+ movq mm2, mm0 ; (3* (q0 - p0) + hvm(p1 - q1) + 3) >> 3;
+
+ pxor mm0, mm0 ; 0
+ movq mm5, mm1 ; abcdefgh
+
+ punpcklbw mm0, mm1 ; e0f0g0h0
+ psraw mm0, 11 ; sign extended shift right by 3
+
+ pxor mm1, mm1 ; 0
+ punpckhbw mm1, mm5 ; a0b0c0d0
+
+ psraw mm1, 11 ; sign extended shift right by 3
+ movq mm5, mm0 ; save results
+
+ packsswb mm0, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>3
+ paddsw mm5, [GLOBAL(ones)]
+
+ paddsw mm1, [GLOBAL(ones)]
+ psraw mm5, 1 ; partial shifted one more time for 2nd tap
+
+ psraw mm1, 1 ; partial shifted one more time for 2nd tap
+ packsswb mm5, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>4
+
+ pandn mm4, mm5 ; high edge variance additive
+
+ paddsb mm6, mm2 ; p0+= p0 add
+ pxor mm6, [GLOBAL(t80)] ; unoffset
+
+ ; mm6=p0 ;
+ movq mm1, [rdx] ; p1
+ pxor mm1, [GLOBAL(t80)] ; reoffset
+
+ paddsb mm1, mm4 ; p1+= p1 add
+ pxor mm1, [GLOBAL(t80)] ; unoffset
+ ; mm6 = p0 mm1 = p1
+
+ psubsb mm3, mm0 ; q0-= q0 add
+ pxor mm3, [GLOBAL(t80)] ; unoffset
+
+ ; mm3 = q0
+ psubsb mm7, mm4 ; q1-= q1 add
+ pxor mm7, [GLOBAL(t80)] ; unoffset
+ ; mm7 = q1
+
+        ; transpose and write back
+ ; mm1 = 72 62 52 42 32 22 12 02
+ ; mm6 = 73 63 53 43 33 23 13 03
+ ; mm3 = 74 64 54 44 34 24 14 04
+ ; mm7 = 75 65 55 45 35 25 15 05
+
+ movq mm2, mm1 ; 72 62 52 42 32 22 12 02
+ punpcklbw mm2, mm6 ; 33 32 23 22 13 12 03 02
+
+ movq mm4, mm3 ; 74 64 54 44 34 24 14 04
+ punpckhbw mm1, mm6 ; 73 72 63 62 53 52 43 42
+
+ punpcklbw mm4, mm7 ; 35 34 25 24 15 14 05 04
+ punpckhbw mm3, mm7 ; 75 74 65 64 55 54 45 44
+
+ movq mm6, mm2 ; 33 32 23 22 13 12 03 02
+ punpcklwd mm2, mm4 ; 15 14 13 12 05 04 03 02
+
+ punpckhwd mm6, mm4 ; 35 34 33 32 25 24 23 22
+ movq mm5, mm1 ; 73 72 63 62 53 52 43 42
+
+ punpcklwd mm1, mm3 ; 55 54 53 52 45 44 43 42
+ punpckhwd mm5, mm3 ; 75 74 73 72 65 64 63 62
+
+
+ ; mm2 = 15 14 13 12 05 04 03 02
+ ; mm6 = 35 34 33 32 25 24 23 22
+ ; mm5 = 55 54 53 52 45 44 43 42
+ ; mm1 = 75 74 73 72 65 64 63 62
+
+
+
+ movd [rsi+rax*4+2], mm2
+ psrlq mm2, 32
+
+ movd [rdi+rax*4+2], mm2
+ movd [rsi+rax*2+2], mm6
+
+ psrlq mm6, 32
+ movd [rsi+rax+2],mm6
+
+ movd [rsi+2], mm1
+ psrlq mm1, 32
+
+ movd [rdi+2], mm1
+ neg rax
+
+ movd [rdi+rax+2],mm5
+ psrlq mm5, 32
+
+ movd [rdi+rax*2+2], mm5
+
+ lea rsi, [rsi+rax*8]
+ dec rcx
+ jnz .next8_v
+
+ add rsp, 64
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+tfe:
+ times 8 db 0xfe
+align 16
+t80:
+ times 8 db 0x80
+align 16
+t1s:
+ times 8 db 0x01
+align 16
+t3:
+ times 8 db 0x03
+align 16
+t4:
+ times 8 db 0x04
+align 16
+ones:
+ times 4 dw 0x0001
+align 16
+s27:
+ times 4 dw 0x1b00
+align 16
+s18:
+ times 4 dw 0x1200
+align 16
+s9:
+ times 4 dw 0x0900
+align 16
+s63:
+ times 4 dw 0x003f
diff --git a/libvpx/vp9/common/x86/vp9_postproc_mmx.asm b/libvpx/vp9/common/x86/vp9_postproc_mmx.asm
new file mode 100644
index 0000000..c2118db
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_postproc_mmx.asm
@@ -0,0 +1,534 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%define VP9_FILTER_WEIGHT 128
+%define VP9_FILTER_SHIFT 7
+
+;void vp9_post_proc_down_and_across_mmx
+;(
+; unsigned char *src_ptr,
+; unsigned char *dst_ptr,
+; int src_pixels_per_line,
+; int dst_pixels_per_line,
+; int rows,
+; int cols,
+; int flimit
+;)
+global sym(vp9_post_proc_down_and_across_mmx) PRIVATE
+sym(vp9_post_proc_down_and_across_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ ; move the global rd onto the stack, since we don't have enough registers
+ ; to do PIC addressing
+ movq mm0, [GLOBAL(rd)]
+ sub rsp, 8
+ movq [rsp], mm0
+%define RD [rsp]
+%else
+%define RD [GLOBAL(rd)]
+%endif
+
+ push rbx
+ lea rbx, [GLOBAL(Blur)]
+ movd mm2, dword ptr arg(6) ;flimit
+ punpcklwd mm2, mm2
+ punpckldq mm2, mm2
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(1) ;dst_ptr
+
+ movsxd rcx, DWORD PTR arg(4) ;rows
+ movsxd rax, DWORD PTR arg(2) ;src_pixels_per_line ; destination pitch?
+ pxor mm0, mm0 ; mm0 = 00000000
+
+.nextrow:
+
+ xor rdx, rdx ; clear out rdx for use as loop counter
+.nextcol:
+
+ pxor mm7, mm7 ; mm7 = 00000000
+ movq mm6, [rbx + 32 ] ; mm6 = kernel 2 taps
+        movq        mm3, [rsi]              ; mm3 = r0 p0..p7
+ punpcklbw mm3, mm0 ; mm3 = p0..p3
+ movq mm1, mm3 ; mm1 = p0..p3
+ pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
+
+ movq mm6, [rbx + 48] ; mm6 = kernel 3 taps
+        movq        mm5, [rsi + rax]        ; mm5 = r1 p0..p7
+ punpcklbw mm5, mm0 ; mm5 = r1 p0..p3
+ pmullw mm6, mm5 ; mm6 *= p0..p3 * kernel 3 modifiers
+ paddusw mm3, mm6 ; mm3 += mm6
+
+ ; thresholding
+ movq mm7, mm1 ; mm7 = r0 p0..p3
+ psubusw mm7, mm5 ; mm7 = r0 p0..p3 - r1 p0..p3
+ psubusw mm5, mm1 ; mm5 = r1 p0..p3 - r0 p0..p3
+ paddusw mm7, mm5 ; mm7 = abs(r0 p0..p3 - r1 p0..p3)
+ pcmpgtw mm7, mm2
+
+ movq mm6, [rbx + 64 ] ; mm6 = kernel 4 modifiers
+        movq        mm5, [rsi + 2*rax]      ; mm5 = r2 p0..p7
+        punpcklbw   mm5, mm0                ; mm5 = r2 p0..p3
+        pmullw      mm6, mm5                ; mm6 *= kernel 4 modifiers
+        paddusw     mm3, mm6                ; mm3 += mm6
+
+        ; thresholding
+        movq        mm6, mm1                ; mm6 = r0 p0..p3
+        psubusw     mm6, mm5                ; mm6 = r0 p0..p3 - r2 p0..p3
+        psubusw     mm5, mm1                ; mm5 = r2 p0..p3 - r0 p0..p3
+        paddusw     mm6, mm5                ; mm6 = abs(r0 p0..p3 - r2 p0..p3)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+
+ neg rax
+ movq mm6, [rbx ] ; kernel 0 taps
+        movq        mm5, [rsi+2*rax]        ; mm5 = r-2 p0..p7
+        punpcklbw   mm5, mm0                ; mm5 = r-2 p0..p3
+        pmullw      mm6, mm5                ; mm6 *= kernel 0 modifiers
+        paddusw     mm3, mm6                ; mm3 += mm6
+
+ ; thresholding
+ movq mm6, mm1 ; mm6 = r0 p0..p3
+ psubusw mm6, mm5 ; mm6 = p0..p3 - r-2 p0..p3
+ psubusw mm5, mm1 ; mm5 = r-2 p0..p3 - p0..p3
+ paddusw mm6, mm5 ; mm6 = abs(r0 p0..p3 - r-2 p0..p3)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+ movq mm6, [rbx + 16] ; kernel 1 taps
+ movq mm4, [rsi+rax] ; mm4 = r-1 p0..p7
+ punpcklbw mm4, mm0 ; mm4 = r-1 p0..p3
+        pmullw      mm6, mm4                ; mm6 *= kernel 1 modifiers
+        paddusw     mm3, mm6                ; mm3 += mm6
+
+ ; thresholding
+ movq mm6, mm1 ; mm6 = r0 p0..p3
+        psubusw     mm6, mm4                ; mm6 = p0..p3 - r-1 p0..p3
+        psubusw     mm4, mm1                ; mm4 = r-1 p0..p3 - p0..p3
+ paddusw mm6, mm4 ; mm6 = abs(r0 p0..p3 - r-1 p0..p3)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+
+ paddusw mm3, RD ; mm3 += round value
+ psraw mm3, VP9_FILTER_SHIFT ; mm3 /= 128
+
+ pand mm1, mm7 ; mm1 select vals > thresh from source
+ pandn mm7, mm3 ; mm7 select vals < thresh from blurred result
+ paddusw mm1, mm7 ; combination
+
+ packuswb mm1, mm0 ; pack to bytes
+
+ movd [rdi], mm1 ;
+ neg rax ; pitch is positive
+
+
+ add rsi, 4
+ add rdi, 4
+ add rdx, 4
+
+ cmp edx, dword ptr arg(5) ;cols
+ jl .nextcol
+        ; done with all the cols, start the across filtering in place
+ sub rsi, rdx
+ sub rdi, rdx
+
+
+ push rax
+ xor rdx, rdx
+ mov rax, [rdi-4];
+
+.acrossnextcol:
+ pxor mm7, mm7 ; mm7 = 00000000
+ movq mm6, [rbx + 32 ] ;
+ movq mm4, [rdi+rdx] ; mm4 = p0..p7
+ movq mm3, mm4 ; mm3 = p0..p7
+ punpcklbw mm3, mm0 ; mm3 = p0..p3
+ movq mm1, mm3 ; mm1 = p0..p3
+ pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
+
+ movq mm6, [rbx + 48]
+ psrlq mm4, 8 ; mm4 = p1..p7
+ movq mm5, mm4 ; mm5 = p1..p7
+ punpcklbw mm5, mm0 ; mm5 = p1..p4
+ pmullw mm6, mm5 ; mm6 *= p1..p4 * kernel 3 modifiers
+ paddusw mm3, mm6 ; mm3 += mm6
+
+ ; thresholding
+ movq mm7, mm1 ; mm7 = p0..p3
+ psubusw mm7, mm5 ; mm7 = p0..p3 - p1..p4
+ psubusw mm5, mm1 ; mm5 = p1..p4 - p0..p3
+ paddusw mm7, mm5 ; mm7 = abs(p0..p3 - p1..p4)
+ pcmpgtw mm7, mm2
+
+ movq mm6, [rbx + 64 ]
+ psrlq mm4, 8 ; mm4 = p2..p7
+ movq mm5, mm4 ; mm5 = p2..p7
+ punpcklbw mm5, mm0 ; mm5 = p2..p5
+        pmullw      mm6, mm5                ; mm6 *= kernel 4 modifiers
+        paddusw     mm3, mm6                ; mm3 += mm6
+
+        ; thresholding
+        movq        mm6, mm1                ; mm6 = p0..p3
+        psubusw     mm6, mm5                ; mm6 = p0..p3 - p2..p5
+        psubusw     mm5, mm1                ; mm5 = p2..p5 - p0..p3
+        paddusw     mm6, mm5                ; mm6 = abs(p0..p3 - p2..p5)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+
+ movq mm6, [rbx ]
+ movq mm4, [rdi+rdx-2] ; mm4 = p-2..p5
+ movq mm5, mm4 ; mm5 = p-2..p5
+ punpcklbw mm5, mm0 ; mm5 = p-2..p1
+        pmullw      mm6, mm5                ; mm6 *= kernel 0 modifiers
+        paddusw     mm3, mm6                ; mm3 += mm6
+
+        ; thresholding
+        movq        mm6, mm1                ; mm6 = p0..p3
+        psubusw     mm6, mm5                ; mm6 = p0..p3 - p-2..p1
+        psubusw     mm5, mm1                ; mm5 = p-2..p1 - p0..p3
+        paddusw     mm6, mm5                ; mm6 = abs(p0..p3 - p-2..p1)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+ movq mm6, [rbx + 16]
+ psrlq mm4, 8 ; mm4 = p-1..p5
+ punpcklbw mm4, mm0 ; mm4 = p-1..p2
+        pmullw      mm6, mm4                ; mm6 *= kernel 1 modifiers
+        paddusw     mm3, mm6                ; mm3 += mm6
+
+        ; thresholding
+        movq        mm6, mm1                ; mm6 = p0..p3
+        psubusw     mm6, mm4                ; mm6 = p0..p3 - p-1..p2
+        psubusw     mm4, mm1                ; mm4 = p-1..p2 - p0..p3
+        paddusw     mm6, mm4                ; mm6 = abs(p0..p3 - p-1..p2)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+ paddusw mm3, RD ; mm3 += round value
+ psraw mm3, VP9_FILTER_SHIFT ; mm3 /= 128
+
+ pand mm1, mm7 ; mm1 select vals > thresh from source
+ pandn mm7, mm3 ; mm7 select vals < thresh from blurred result
+ paddusw mm1, mm7 ; combination
+
+ packuswb mm1, mm0 ; pack to bytes
+ mov DWORD PTR [rdi+rdx-4], eax ; store previous four bytes
+ movd eax, mm1
+
+ add rdx, 4
+ cmp edx, dword ptr arg(5) ;cols
+ jl .acrossnextcol;
+
+ mov DWORD PTR [rdi+rdx-4], eax
+ pop rax
+
+        ; done with this row
+ add rsi,rax ; next line
+ movsxd rax, dword ptr arg(3) ;dst_pixels_per_line ; destination pitch?
+ add rdi,rax ; next destination
+ movsxd rax, dword ptr arg(2) ;src_pixels_per_line ; destination pitch?
+
+ dec rcx ; decrement count
+ jnz .nextrow ; next row
+ pop rbx
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef RD
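
In scalar terms each output of the pass above is a 5-tap vertical blur with the Blur table's 16/16/64/16/16 weights (summing to 128, hence VP9_FILTER_SHIFT), where the source pixel survives whenever any tap differs from the center by more than flimit. A rough C equivalent of one pixel of the down pass (illustrative, not part of the patch):

    #include <stdlib.h>

    static unsigned char down_tap_sketch(const unsigned char *s, int pitch,
                                         int flimit) {
      static const int w[5] = { 16, 16, 64, 16, 16 };  /* Blur kernel taps */
      const int center = s[0];
      int sum = 0, i;

      for (i = -2; i <= 2; i++) {
        if (abs(s[i * pitch] - center) > flimit)
          return (unsigned char)center;        /* threshold: keep the source */
        sum += w[i + 2] * s[i * pitch];
      }
      return (unsigned char)((sum + 64) >> 7); /* RD rounding, divide by 128 */
    }
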
+
+
+;void vp9_mbpost_proc_down_mmx(unsigned char *dst,
+; int pitch, int rows, int cols,int flimit)
+extern sym(vp9_rv)
+global sym(vp9_mbpost_proc_down_mmx) PRIVATE
+sym(vp9_mbpost_proc_down_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 136
+
+ ; unsigned char d[16][8] at [rsp]
+ ; create flimit2 at [rsp+128]
+ mov eax, dword ptr arg(4) ;flimit
+ mov [rsp+128], eax
+ mov [rsp+128+4], eax
+%define flimit2 [rsp+128]
+
+%if ABI_IS_32BIT=0
+ lea r8, [GLOBAL(sym(vp9_rv))]
+%endif
+
+ ;rows +=8;
+ add dword ptr arg(2), 8
+
+ ;for(c=0; c<cols; c+=4)
+.loop_col:
+ mov rsi, arg(0) ;s
+ pxor mm0, mm0 ;
+
+ movsxd rax, dword ptr arg(1) ;pitch ;
+ neg rax ; rax = -pitch
+
+        lea         rsi,       [rsi + rax*8]  ; rsi = s[-pitch*8]
+ neg rax
+
+
+ pxor mm5, mm5
+ pxor mm6, mm6 ;
+
+ pxor mm7, mm7 ;
+ mov rdi, rsi
+
+ mov rcx, 15 ;
+
+.loop_initvar:
+ movd mm1, DWORD PTR [rdi];
+ punpcklbw mm1, mm0 ;
+
+ paddw mm5, mm1 ;
+ pmullw mm1, mm1 ;
+
+ movq mm2, mm1 ;
+ punpcklwd mm1, mm0 ;
+
+ punpckhwd mm2, mm0 ;
+ paddd mm6, mm1 ;
+
+ paddd mm7, mm2 ;
+ lea rdi, [rdi+rax] ;
+
+ dec rcx
+ jne .loop_initvar
+ ;save the var and sum
+ xor rdx, rdx
+.loop_row:
+ movd mm1, DWORD PTR [rsi] ; [s-pitch*8]
+ movd mm2, DWORD PTR [rdi] ; [s+pitch*7]
+
+ punpcklbw mm1, mm0
+ punpcklbw mm2, mm0
+
+ paddw mm5, mm2
+ psubw mm5, mm1
+
+ pmullw mm2, mm2
+ movq mm4, mm2
+
+ punpcklwd mm2, mm0
+ punpckhwd mm4, mm0
+
+ paddd mm6, mm2
+ paddd mm7, mm4
+
+ pmullw mm1, mm1
+ movq mm2, mm1
+
+ punpcklwd mm1, mm0
+ psubd mm6, mm1
+
+ punpckhwd mm2, mm0
+ psubd mm7, mm2
+
+
+ movq mm3, mm6
+ pslld mm3, 4
+
+ psubd mm3, mm6
+ movq mm1, mm5
+
+ movq mm4, mm5
+ pmullw mm1, mm1
+
+ pmulhw mm4, mm4
+ movq mm2, mm1
+
+ punpcklwd mm1, mm4
+ punpckhwd mm2, mm4
+
+ movq mm4, mm7
+ pslld mm4, 4
+
+ psubd mm4, mm7
+
+ psubd mm3, mm1
+ psubd mm4, mm2
+
+ psubd mm3, flimit2
+ psubd mm4, flimit2
+
+ psrad mm3, 31
+ psrad mm4, 31
+
+ packssdw mm3, mm4
+ packsswb mm3, mm0
+
+ movd mm1, DWORD PTR [rsi+rax*8]
+
+ movq mm2, mm1
+ punpcklbw mm1, mm0
+
+ paddw mm1, mm5
+ mov rcx, rdx
+
+ and rcx, 127
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ push rax
+ lea rax, [GLOBAL(sym(vp9_rv))]
+ movq mm4, [rax + rcx*2] ;vp9_rv[rcx*2]
+ pop rax
+%elif ABI_IS_32BIT=0
+ movq mm4, [r8 + rcx*2] ;vp9_rv[rcx*2]
+%else
+ movq mm4, [sym(vp9_rv) + rcx*2]
+%endif
+ paddw mm1, mm4
+ ;paddw xmm1, eight8s
+ psraw mm1, 4
+
+ packuswb mm1, mm0
+ pand mm1, mm3
+
+ pandn mm3, mm2
+ por mm1, mm3
+
+ and rcx, 15
+ movd DWORD PTR [rsp+rcx*4], mm1 ;d[rcx*4]
+
+ mov rcx, rdx
+ sub rcx, 8
+
+ and rcx, 15
+ movd mm1, DWORD PTR [rsp+rcx*4] ;d[rcx*4]
+
+ movd [rsi], mm1
+ lea rsi, [rsi+rax]
+
+ lea rdi, [rdi+rax]
+ add rdx, 1
+
+ cmp edx, dword arg(2) ;rows
+ jl .loop_row
+
+
+ add dword arg(0), 4 ; s += 4
+ sub dword arg(3), 4 ; cols -= 4
+ cmp dword arg(3), 0
+ jg .loop_col
+
+ add rsp, 136
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef flimit2
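
The routine keeps a 15-row sliding sum and sum of squares per column and only smooths where 15*sumsq - sum*sum stays below flimit; outputs are staged in the d[16] ring on the stack and committed eight rows behind the read position. Roughly, per column (a hedged sketch mirroring the scalar reference; the rv dither table and the boundary handling are simplified):

    void mbpost_down_sketch(unsigned char *s, int pitch, int rows, int flimit,
                            const short *rv /* 128-entry dither table */) {
      unsigned char d[16] = { 0 };
      int sum = 0, sumsq = 0, r, i;

      for (i = -8; i <= 6; i++) {               /* prime the 15-row window */
        sum   += s[i * pitch];
        sumsq += s[i * pitch] * s[i * pitch];
      }
      for (r = 0; r < rows + 8; r++) {
        sum   += s[7 * pitch] - s[-8 * pitch];  /* slide the window */
        sumsq += s[7 * pitch] * s[7 * pitch]
               - s[-8 * pitch] * s[-8 * pitch];
        d[r & 15] = s[0];
        if (sumsq * 15 - sum * sum < flimit)
          d[r & 15] = (unsigned char)((rv[r & 127] + sum + s[0]) >> 4);
        if (r >= 8)
          s[-8 * pitch] = d[(r - 8) & 15];      /* commit 8 rows behind */
        s += pitch;
      }
    }
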
+
+
+;void vp9_plane_add_noise_mmx (unsigned char *start, unsigned char *noise,
+; unsigned char blackclamp[16],
+; unsigned char whiteclamp[16],
+; unsigned char bothclamp[16],
+; unsigned int width, unsigned int height, int pitch)
+extern sym(rand)
+global sym(vp9_plane_add_noise_mmx) PRIVATE
+sym(vp9_plane_add_noise_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+.addnoise_loop:
+ call sym(rand) WRT_PLT
+ mov rcx, arg(1) ;noise
+ and rax, 0xff
+ add rcx, rax
+
+ ; we rely on the fact that the clamping vectors are stored contiguously
+ ; in black/white/both order. Note that we have to reload this here because
+ ; rdx could be trashed by rand()
+ mov rdx, arg(2) ; blackclamp
+
+
+ mov rdi, rcx
+ movsxd rcx, dword arg(5) ;[Width]
+ mov rsi, arg(0) ;Pos
+ xor rax,rax
+
+.addnoise_nextset:
+ movq mm1,[rsi+rax] ; get the source
+
+ psubusb mm1, [rdx] ;blackclamp ; clamp both sides so we don't outrange adding noise
+ paddusb mm1, [rdx+32] ;bothclamp
+ psubusb mm1, [rdx+16] ;whiteclamp
+
+ movq mm2,[rdi+rax] ; get the noise for this line
+ paddb mm1,mm2 ; add it in
+ movq [rsi+rax],mm1 ; store the result
+
+ add rax,8 ; move to the next line
+
+ cmp rax, rcx
+ jl .addnoise_nextset
+
+ movsxd rax, dword arg(7) ; Pitch
+ add arg(0), rax ; Start += Pitch
+ sub dword arg(6), 1 ; Height -= 1
+ jg .addnoise_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
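
The three saturating byte operations in .addnoise_nextset implement a range clamp: subtracting blackclamp, adding bothclamp and subtracting whiteclamp pins each pixel to [black, 255 - white], so the wrapping paddb of the signed noise byte cannot overflow. Per pixel, in scalar form (illustrative only):

    static unsigned char add_noise_sketch(unsigned char p, signed char noise,
                                          unsigned char black,
                                          unsigned char white) {
      int v = p;
      if (v < black)       v = black;        /* psubusb blackclamp, then...   */
      if (v > 255 - white) v = 255 - white;  /* paddusb both, psubusb white   */
      return (unsigned char)(v + noise);     /* paddb; clamp keeps it in range */
    }
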
+
+SECTION_RODATA
+align 16
+Blur:
+ times 16 dw 16
+ times 8 dw 64
+ times 16 dw 16
+ times 8 dw 0
+
+rd:
+ times 4 dw 0x40
diff --git a/libvpx/vp9/common/x86/vp9_postproc_sse2.asm b/libvpx/vp9/common/x86/vp9_postproc_sse2.asm
new file mode 100644
index 0000000..858fc99
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_postproc_sse2.asm
@@ -0,0 +1,695 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp9_post_proc_down_and_across_xmm
+;(
+; unsigned char *src_ptr,
+; unsigned char *dst_ptr,
+; int src_pixels_per_line,
+; int dst_pixels_per_line,
+; int rows,
+; int cols,
+; int flimit
+;)
+global sym(vp9_post_proc_down_and_across_xmm) PRIVATE
+sym(vp9_post_proc_down_and_across_xmm):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ ALIGN_STACK 16, rax
+ ; move the global rd onto the stack, since we don't have enough registers
+ ; to do PIC addressing
+ movdqa xmm0, [GLOBAL(rd42)]
+ sub rsp, 16
+ movdqa [rsp], xmm0
+%define RD42 [rsp]
+%else
+%define RD42 [GLOBAL(rd42)]
+%endif
+
+
+ movd xmm2, dword ptr arg(6) ;flimit
+ punpcklwd xmm2, xmm2
+ punpckldq xmm2, xmm2
+ punpcklqdq xmm2, xmm2
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(1) ;dst_ptr
+
+ movsxd rcx, DWORD PTR arg(4) ;rows
+ movsxd rax, DWORD PTR arg(2) ;src_pixels_per_line ; destination pitch?
+ pxor xmm0, xmm0 ; mm0 = 00000000
+
+.nextrow:
+
+ xor rdx, rdx ; clear out rdx for use as loop counter
+.nextcol:
+        movq        xmm3,       QWORD PTR [rsi]         ; mm3 = r0 p0..p7
+ punpcklbw xmm3, xmm0 ; mm3 = p0..p3
+ movdqa xmm1, xmm3 ; mm1 = p0..p3
+ psllw xmm3, 2 ;
+
+        movq        xmm5,       QWORD PTR [rsi + rax]   ; mm5 = r1 p0..p7
+ punpcklbw xmm5, xmm0 ; mm5 = r1 p0..p3
+        paddusw     xmm3,       xmm5        ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm7, xmm1 ; mm7 = r0 p0..p3
+ psubusw xmm7, xmm5 ; mm7 = r0 p0..p3 - r1 p0..p3
+ psubusw xmm5, xmm1 ; mm5 = r1 p0..p3 - r0 p0..p3
+ paddusw xmm7, xmm5 ; mm7 = abs(r0 p0..p3 - r1 p0..p3)
+ pcmpgtw xmm7, xmm2
+
+        movq        xmm5,       QWORD PTR [rsi + 2*rax] ; mm5 = r2 p0..p7
+ punpcklbw xmm5, xmm0 ; mm5 = r2 p0..p3
+ paddusw xmm3, xmm5 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = r0 p0..p3
+ psubusw xmm6, xmm5 ; mm6 = r0 p0..p3 - r2 p0..p3
+        psubusw     xmm5,       xmm1        ; mm5 = r2 p0..p3 - r0 p0..p3
+ paddusw xmm6, xmm5 ; mm6 = abs(r0 p0..p3 - r2 p0..p3)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+
+ neg rax
+        movq        xmm5,       QWORD PTR [rsi+2*rax]   ; mm5 = r-2 p0..p7
+ punpcklbw xmm5, xmm0 ; mm5 = r-2 p0..p3
+ paddusw xmm3, xmm5 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = r0 p0..p3
+ psubusw xmm6, xmm5 ; mm6 = p0..p3 - r-2 p0..p3
+ psubusw xmm5, xmm1 ; mm5 = r-2 p0..p3 - p0..p3
+ paddusw xmm6, xmm5 ; mm6 = abs(r0 p0..p3 - r-2 p0..p3)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+ movq xmm4, QWORD PTR [rsi+rax] ; mm4 = r-1 p0..p7
+ punpcklbw xmm4, xmm0 ; mm4 = r-1 p0..p3
+        paddusw     xmm3,       xmm4        ; mm3 += mm4
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = r0 p0..p3
+        psubusw     xmm6,       xmm4        ; mm6 = p0..p3 - r-1 p0..p3
+        psubusw     xmm4,       xmm1        ; mm4 = r-1 p0..p3 - p0..p3
+ paddusw xmm6, xmm4 ; mm6 = abs(r0 p0..p3 - r-1 p0..p3)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+
+ paddusw xmm3, RD42 ; mm3 += round value
+ psraw xmm3, 3 ; mm3 /= 8
+
+ pand xmm1, xmm7 ; mm1 select vals > thresh from source
+ pandn xmm7, xmm3 ; mm7 select vals < thresh from blurred result
+ paddusw xmm1, xmm7 ; combination
+
+ packuswb xmm1, xmm0 ; pack to bytes
+ movq QWORD PTR [rdi], xmm1 ;
+
+ neg rax ; pitch is positive
+ add rsi, 8
+ add rdi, 8
+
+ add rdx, 8
+ cmp edx, dword arg(5) ;cols
+
+ jl .nextcol
+
+        ; done with all the cols, start the across filtering in place
+ sub rsi, rdx
+ sub rdi, rdx
+
+ xor rdx, rdx
+ movq mm0, QWORD PTR [rdi-8];
+
+.acrossnextcol:
+ movq xmm7, QWORD PTR [rdi +rdx -2]
+ movd xmm4, DWORD PTR [rdi +rdx +6]
+
+ pslldq xmm4, 8
+ por xmm4, xmm7
+
+ movdqa xmm3, xmm4
+ psrldq xmm3, 2
+ punpcklbw xmm3, xmm0 ; mm3 = p0..p3
+ movdqa xmm1, xmm3 ; mm1 = p0..p3
+ psllw xmm3, 2
+
+
+ movdqa xmm5, xmm4
+ psrldq xmm5, 3
+ punpcklbw xmm5, xmm0 ; mm5 = p1..p4
+        paddusw     xmm3,       xmm5        ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm7, xmm1 ; mm7 = p0..p3
+ psubusw xmm7, xmm5 ; mm7 = p0..p3 - p1..p4
+ psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3
+ paddusw xmm7, xmm5 ; mm7 = abs(p0..p3 - p1..p4)
+ pcmpgtw xmm7, xmm2
+
+ movdqa xmm5, xmm4
+ psrldq xmm5, 4
+ punpcklbw xmm5, xmm0 ; mm5 = p2..p5
+ paddusw xmm3, xmm5 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = p0..p3
+        psubusw     xmm6,       xmm5        ; mm6 = p0..p3 - p2..p5
+        psubusw     xmm5,       xmm1        ; mm5 = p2..p5 - p0..p3
+        paddusw     xmm6,       xmm5        ; mm6 = abs(p0..p3 - p2..p5)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+
+ movdqa xmm5, xmm4 ; mm5 = p-2..p5
+ punpcklbw xmm5, xmm0 ; mm5 = p-2..p1
+ paddusw xmm3, xmm5 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = p0..p3
+        psubusw     xmm6,       xmm5        ; mm6 = p0..p3 - p-2..p1
+        psubusw     xmm5,       xmm1        ; mm5 = p-2..p1 - p0..p3
+        paddusw     xmm6,       xmm5        ; mm6 = abs(p0..p3 - p-2..p1)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+ psrldq xmm4, 1 ; mm4 = p-1..p5
+ punpcklbw xmm4, xmm0 ; mm4 = p-1..p2
+        paddusw     xmm3,       xmm4        ; mm3 += mm4
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = p0..p3
+        psubusw     xmm6,       xmm4        ; mm6 = p0..p3 - p-1..p2
+        psubusw     xmm4,       xmm1        ; mm4 = p-1..p2 - p0..p3
+        paddusw     xmm6,       xmm4        ; mm6 = abs(p0..p3 - p-1..p2)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+ paddusw xmm3, RD42 ; mm3 += round value
+ psraw xmm3, 3 ; mm3 /= 8
+
+ pand xmm1, xmm7 ; mm1 select vals > thresh from source
+ pandn xmm7, xmm3 ; mm7 select vals < thresh from blurred result
+ paddusw xmm1, xmm7 ; combination
+
+ packuswb xmm1, xmm0 ; pack to bytes
+        movq        QWORD PTR [rdi+rdx-8],  mm0 ; store previous eight bytes
+ movdq2q mm0, xmm1
+
+ add rdx, 8
+ cmp edx, dword arg(5) ;cols
+ jl .acrossnextcol;
+
+ ; last 8 pixels
+ movq QWORD PTR [rdi+rdx-8], mm0
+
+    ; done with this row
+ add rsi,rax ; next line
+ mov eax, dword arg(3) ;dst_pixels_per_line ; destination pitch?
+ add rdi,rax ; next destination
+ mov eax, dword arg(2) ;src_pixels_per_line ; destination pitch?
+
+ dec rcx ; decrement count
+ jnz .nextrow ; next row
+
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ add rsp,16
+ pop rsp
+%endif
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef RD42
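
Note the SSE2 pass drops the Blur table: psllw by 2 plus four unit taps gives the same 1:1:4:1:1 weighting as the MMX path's 16/16/64/16/16 kernel, with the rounding constant scaled down to rd42's 4 and the shift to 3. Scalar shape of one output pixel (illustrative):

    #include <stdlib.h>

    static unsigned char down_tap_sse2_sketch(const unsigned char *s, int pitch,
                                              int flimit) {
      const int center = s[0];
      int i;

      for (i = -2; i <= 2; i++)
        if (abs(s[i * pitch] - center) > flimit)
          return (unsigned char)center;             /* keep the source pixel */

      return (unsigned char)((s[-2 * pitch] + s[-pitch] + 4 * center +
                              s[pitch] + s[2 * pitch] + 4) >> 3);
    }
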
+
+
+;void vp9_mbpost_proc_down_xmm(unsigned char *dst,
+; int pitch, int rows, int cols,int flimit)
+extern sym(vp9_rv)
+global sym(vp9_mbpost_proc_down_xmm) PRIVATE
+sym(vp9_mbpost_proc_down_xmm):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 128+16
+
+ ; unsigned char d[16][8] at [rsp]
+ ; create flimit2 at [rsp+128]
+ mov eax, dword ptr arg(4) ;flimit
+ mov [rsp+128], eax
+ mov [rsp+128+4], eax
+ mov [rsp+128+8], eax
+ mov [rsp+128+12], eax
+%define flimit4 [rsp+128]
+
+%if ABI_IS_32BIT=0
+ lea r8, [GLOBAL(sym(vp9_rv))]
+%endif
+
+ ;rows +=8;
+ add dword arg(2), 8
+
+ ;for(c=0; c<cols; c+=8)
+.loop_col:
+ mov rsi, arg(0) ; s
+ pxor xmm0, xmm0 ;
+
+ movsxd rax, dword ptr arg(1) ;pitch ;
+ neg rax ; rax = -pitch
+
+        lea         rsi,        [rsi + rax*8]       ; rsi = s[-pitch*8]
+ neg rax
+
+
+ pxor xmm5, xmm5
+ pxor xmm6, xmm6 ;
+
+ pxor xmm7, xmm7 ;
+ mov rdi, rsi
+
+ mov rcx, 15 ;
+
+.loop_initvar:
+ movq xmm1, QWORD PTR [rdi];
+ punpcklbw xmm1, xmm0 ;
+
+ paddw xmm5, xmm1 ;
+ pmullw xmm1, xmm1 ;
+
+ movdqa xmm2, xmm1 ;
+ punpcklwd xmm1, xmm0 ;
+
+ punpckhwd xmm2, xmm0 ;
+ paddd xmm6, xmm1 ;
+
+ paddd xmm7, xmm2 ;
+ lea rdi, [rdi+rax] ;
+
+ dec rcx
+ jne .loop_initvar
+ ;save the var and sum
+ xor rdx, rdx
+.loop_row:
+ movq xmm1, QWORD PTR [rsi] ; [s-pitch*8]
+ movq xmm2, QWORD PTR [rdi] ; [s+pitch*7]
+
+ punpcklbw xmm1, xmm0
+ punpcklbw xmm2, xmm0
+
+ paddw xmm5, xmm2
+ psubw xmm5, xmm1
+
+ pmullw xmm2, xmm2
+ movdqa xmm4, xmm2
+
+ punpcklwd xmm2, xmm0
+ punpckhwd xmm4, xmm0
+
+ paddd xmm6, xmm2
+ paddd xmm7, xmm4
+
+ pmullw xmm1, xmm1
+ movdqa xmm2, xmm1
+
+ punpcklwd xmm1, xmm0
+ psubd xmm6, xmm1
+
+ punpckhwd xmm2, xmm0
+ psubd xmm7, xmm2
+
+
+ movdqa xmm3, xmm6
+ pslld xmm3, 4
+
+ psubd xmm3, xmm6
+ movdqa xmm1, xmm5
+
+ movdqa xmm4, xmm5
+ pmullw xmm1, xmm1
+
+ pmulhw xmm4, xmm4
+ movdqa xmm2, xmm1
+
+ punpcklwd xmm1, xmm4
+ punpckhwd xmm2, xmm4
+
+ movdqa xmm4, xmm7
+ pslld xmm4, 4
+
+ psubd xmm4, xmm7
+
+ psubd xmm3, xmm1
+ psubd xmm4, xmm2
+
+ psubd xmm3, flimit4
+ psubd xmm4, flimit4
+
+ psrad xmm3, 31
+ psrad xmm4, 31
+
+ packssdw xmm3, xmm4
+ packsswb xmm3, xmm0
+
+ movq xmm1, QWORD PTR [rsi+rax*8]
+
+ movq xmm2, xmm1
+ punpcklbw xmm1, xmm0
+
+ paddw xmm1, xmm5
+ mov rcx, rdx
+
+ and rcx, 127
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ push rax
+ lea rax, [GLOBAL(sym(vp9_rv))]
+ movdqu xmm4, [rax + rcx*2] ;vp9_rv[rcx*2]
+ pop rax
+%elif ABI_IS_32BIT=0
+ movdqu xmm4, [r8 + rcx*2] ;vp9_rv[rcx*2]
+%else
+ movdqu xmm4, [sym(vp9_rv) + rcx*2]
+%endif
+
+ paddw xmm1, xmm4
+ ;paddw xmm1, eight8s
+ psraw xmm1, 4
+
+ packuswb xmm1, xmm0
+ pand xmm1, xmm3
+
+ pandn xmm3, xmm2
+ por xmm1, xmm3
+
+ and rcx, 15
+ movq QWORD PTR [rsp + rcx*8], xmm1 ;d[rcx*8]
+
+ mov rcx, rdx
+ sub rcx, 8
+
+ and rcx, 15
+ movq mm0, [rsp + rcx*8] ;d[rcx*8]
+
+ movq [rsi], mm0
+ lea rsi, [rsi+rax]
+
+ lea rdi, [rdi+rax]
+ add rdx, 1
+
+ cmp edx, dword arg(2) ;rows
+ jl .loop_row
+
+ add dword arg(0), 8 ; s += 8
+ sub dword arg(3), 8 ; cols -= 8
+ cmp dword arg(3), 0
+ jg .loop_col
+
+ add rsp, 128+16
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef flimit4
+
+
+;void vp9_mbpost_proc_across_ip_xmm(unsigned char *src,
+; int pitch, int rows, int cols,int flimit)
+global sym(vp9_mbpost_proc_across_ip_xmm) PRIVATE
+sym(vp9_mbpost_proc_across_ip_xmm):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16
+
+ ; create flimit4 at [rsp]
+ mov eax, dword ptr arg(4) ;flimit
+ mov [rsp], eax
+ mov [rsp+4], eax
+ mov [rsp+8], eax
+ mov [rsp+12], eax
+%define flimit4 [rsp]
+
+
+ ;for(r=0;r<rows;r++)
+.ip_row_loop:
+
+ xor rdx, rdx ;sumsq=0;
+ xor rcx, rcx ;sum=0;
+ mov rsi, arg(0); s
+ mov rdi, -8
+.ip_var_loop:
+ ;for(i=-8;i<=6;i++)
+ ;{
+ ; sumsq += s[i]*s[i];
+ ; sum += s[i];
+ ;}
+ movzx eax, byte [rsi+rdi]
+ add ecx, eax
+ mul al
+ add edx, eax
+ add rdi, 1
+ cmp rdi, 6
+ jle .ip_var_loop
+
+
+ ;mov rax, sumsq
+ ;movd xmm7, rax
+ movd xmm7, edx
+
+ ;mov rax, sum
+ ;movd xmm6, rax
+ movd xmm6, ecx
+
+ mov rsi, arg(0) ;s
+ xor rcx, rcx
+
+ movsxd rdx, dword arg(3) ;cols
+ add rdx, 8
+ pxor mm0, mm0
+ pxor mm1, mm1
+
+ pxor xmm0, xmm0
+.nextcol4:
+
+ movd xmm1, DWORD PTR [rsi+rcx-8] ; -8 -7 -6 -5
+ movd xmm2, DWORD PTR [rsi+rcx+7] ; +7 +8 +9 +10
+
+ punpcklbw xmm1, xmm0 ; expanding
+ punpcklbw xmm2, xmm0 ; expanding
+
+ punpcklwd xmm1, xmm0 ; expanding to dwords
+ punpcklwd xmm2, xmm0 ; expanding to dwords
+
+ psubd xmm2, xmm1 ; 7--8 8--7 9--6 10--5
+ paddd xmm1, xmm1 ; -8*2 -7*2 -6*2 -5*2
+
+ paddd xmm1, xmm2 ; 7+-8 8+-7 9+-6 10+-5
+ pmaddwd xmm1, xmm2 ; squared of 7+-8 8+-7 9+-6 10+-5
+
+ paddd xmm6, xmm2
+ paddd xmm7, xmm1
+
+ pshufd xmm6, xmm6, 0 ; duplicate the last ones
+ pshufd xmm7, xmm7, 0 ; duplicate the last ones
+
+ psrldq xmm1, 4 ; 8--7 9--6 10--5 0000
+ psrldq xmm2, 4 ; 8--7 9--6 10--5 0000
+
+ pshufd xmm3, xmm1, 3 ; 0000 8--7 8--7 8--7 squared
+ pshufd xmm4, xmm2, 3 ; 0000 8--7 8--7 8--7 squared
+
+ paddd xmm6, xmm4
+ paddd xmm7, xmm3
+
+ pshufd xmm3, xmm1, 01011111b ; 0000 0000 9--6 9--6 squared
+ pshufd xmm4, xmm2, 01011111b ; 0000 0000 9--6 9--6 squared
+
+ paddd xmm7, xmm3
+ paddd xmm6, xmm4
+
+ pshufd xmm3, xmm1, 10111111b ; 0000 0000 8--7 8--7 squared
+ pshufd xmm4, xmm2, 10111111b ; 0000 0000 8--7 8--7 squared
+
+ paddd xmm7, xmm3
+ paddd xmm6, xmm4
+
+ movdqa xmm3, xmm6
+ pmaddwd xmm3, xmm3
+
+ movdqa xmm5, xmm7
+ pslld xmm5, 4
+
+ psubd xmm5, xmm7
+ psubd xmm5, xmm3
+
+ psubd xmm5, flimit4
+ psrad xmm5, 31
+
+ packssdw xmm5, xmm0
+ packsswb xmm5, xmm0
+
+ movd xmm1, DWORD PTR [rsi+rcx]
+ movq xmm2, xmm1
+
+ punpcklbw xmm1, xmm0
+ punpcklwd xmm1, xmm0
+
+ paddd xmm1, xmm6
+ paddd xmm1, [GLOBAL(four8s)]
+
+ psrad xmm1, 4
+ packssdw xmm1, xmm0
+
+ packuswb xmm1, xmm0
+ pand xmm1, xmm5
+
+ pandn xmm5, xmm2
+ por xmm5, xmm1
+
+ movd [rsi+rcx-8], mm0
+ movq mm0, mm1
+
+ movdq2q mm1, xmm5
+ psrldq xmm7, 12
+
+ psrldq xmm6, 12
+ add rcx, 4
+
+ cmp rcx, rdx
+ jl .nextcol4
+
+ ;s+=pitch;
+ movsxd rax, dword arg(1)
+ add arg(0), rax
+
+ sub dword arg(2), 1 ;rows-=1
+ cmp dword arg(2), 0
+ jg .ip_row_loop
+
+ add rsp, 16
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef flimit4
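
The across pass applies the same 15-sample variance gate horizontally and in place: d[16] buffers results and each pixel is committed eight columns later, with four8s providing the rounding term. Per row, roughly (a hedged sketch; boundary handling simplified):

    void mbpost_across_sketch(unsigned char *s, int cols, int flimit) {
      unsigned char d[16] = { 0 };
      int sum = 0, sumsq = 0, c, i;

      for (i = -8; i <= 6; i++) {        /* prime the 15-pixel window */
        sum   += s[i];
        sumsq += s[i] * s[i];
      }
      for (c = 0; c < cols + 8; c++) {
        sum   += s[c + 7] - s[c - 8];
        sumsq += s[c + 7] * s[c + 7] - s[c - 8] * s[c - 8];
        d[c & 15] = s[c];
        if (sumsq * 15 - sum * sum < flimit)
          d[c & 15] = (unsigned char)((8 + sum + s[c]) >> 4);  /* four8s */
        if (c >= 8)
          s[c - 8] = d[(c - 8) & 15];    /* commit 8 columns behind */
      }
    }
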
+
+
+;void vp9_plane_add_noise_wmt (unsigned char *start, unsigned char *noise,
+; unsigned char blackclamp[16],
+; unsigned char whiteclamp[16],
+; unsigned char bothclamp[16],
+; unsigned int width, unsigned int height, int pitch)
+extern sym(rand)
+global sym(vp9_plane_add_noise_wmt) PRIVATE
+sym(vp9_plane_add_noise_wmt):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+.addnoise_loop:
+ call sym(rand) WRT_PLT
+ mov rcx, arg(1) ;noise
+ and rax, 0xff
+ add rcx, rax
+
+ ; we rely on the fact that the clamping vectors are stored contiguously
+ ; in black/white/both order. Note that we have to reload this here because
+ ; rdx could be trashed by rand()
+ mov rdx, arg(2) ; blackclamp
+
+
+ mov rdi, rcx
+ movsxd rcx, dword arg(5) ;[Width]
+ mov rsi, arg(0) ;Pos
+ xor rax,rax
+
+.addnoise_nextset:
+ movdqu xmm1,[rsi+rax] ; get the source
+
+ psubusb xmm1, [rdx] ;blackclamp ; clamp both sides so we don't outrange adding noise
+ paddusb xmm1, [rdx+32] ;bothclamp
+ psubusb xmm1, [rdx+16] ;whiteclamp
+
+ movdqu xmm2,[rdi+rax] ; get the noise for this line
+ paddb xmm1,xmm2 ; add it in
+ movdqu [rsi+rax],xmm1 ; store the result
+
+ add rax,16 ; move to the next line
+
+ cmp rax, rcx
+ jl .addnoise_nextset
+
+ movsxd rax, dword arg(7) ; Pitch
+ add arg(0), rax ; Start += Pitch
+ sub dword arg(6), 1 ; Height -= 1
+ jg .addnoise_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+SECTION_RODATA
+align 16
+rd42:
+ times 8 dw 0x04
+four8s:
+ times 4 dd 8
diff --git a/libvpx/vp9/common/x86/vp9_postproc_x86.h b/libvpx/vp9/common/x86/vp9_postproc_x86.h
new file mode 100644
index 0000000..b0e8b18
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_postproc_x86.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_X86_VP9_POSTPROC_X86_H_
+#define VP9_COMMON_X86_VP9_POSTPROC_X86_H_
+
+/* Note:
+ *
+ * This platform is commonly built for runtime CPU detection. If you modify
+ * any of the function mappings present in this file, be sure to also update
+ * them in the function pointer initialization code
+ */
+
+#if HAVE_MMX
+extern prototype_postproc_inplace(vp9_mbpost_proc_down_mmx);
+extern prototype_postproc(vp9_post_proc_down_and_across_mmx);
+extern prototype_postproc_addnoise(vp9_plane_add_noise_mmx);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef vp9_postproc_down
+#define vp9_postproc_down vp9_mbpost_proc_down_mmx
+
+#undef vp9_postproc_downacross
+#define vp9_postproc_downacross vp9_post_proc_down_and_across_mmx
+
+#undef vp9_postproc_addnoise
+#define vp9_postproc_addnoise vp9_plane_add_noise_mmx
+
+#endif
+#endif
+
+
+#if HAVE_SSE2
+extern prototype_postproc_inplace(vp9_mbpost_proc_down_xmm);
+extern prototype_postproc_inplace(vp9_mbpost_proc_across_ip_xmm);
+extern prototype_postproc(vp9_post_proc_down_and_across_xmm);
+extern prototype_postproc_addnoise(vp9_plane_add_noise_wmt);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef vp9_postproc_down
+#define vp9_postproc_down vp9_mbpost_proc_down_xmm
+
+#undef vp9_postproc_across
+#define vp9_postproc_across vp9_mbpost_proc_across_ip_xmm
+
+#undef vp9_postproc_downacross
+#define vp9_postproc_downacross vp9_post_proc_down_and_across_xmm
+
+#undef vp9_postproc_addnoise
+#define vp9_postproc_addnoise vp9_plane_add_noise_wmt
+
+
+#endif
+#endif
+
+#endif
diff --git a/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm b/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
new file mode 100644
index 0000000..bbf9888
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
@@ -0,0 +1,1011 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;/************************************************************************************
+; Notes: the filter_block1d*_v8/_h8 routines below apply an 8 tap filter to
+; the input pixels. The input pixel array has output_height rows. Each routine
+; handles 4, 8 or 16 pixels at a time, calculating one row per iteration to
+; take advantage of the 128 bit operations.
+;
+; This is an implementation of some of the SSE optimizations first seen in ffvp8
+;
+;*************************************************************************************/
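
In scalar terms each output pixel of the macros below is an 8-tap convolution rounded by 64 (the two 0x0040 words of the 0x0400040 constant, broadcast into krd) and shifted by 7, then saturated to 8 bits. One caveat: the SIMD path accumulates with saturating paddsw, which this full-precision sketch ignores (illustrative only):

    static unsigned char filter8_sketch(const unsigned char *src, int stride,
                                        const signed char k[8]) {
      int i, sum = 64;                 /* krd rounding term */

      for (i = 0; i < 8; i++)          /* stride: 1 horizontal, pitch vertical */
        sum += k[i] * src[i * stride];
      sum >>= 7;                       /* psraw xmm0, 7 */
      if (sum < 0)   sum = 0;
      if (sum > 255) sum = 255;        /* packuswb saturation */
      return (unsigned char)sum;
    }
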
+
+
+%macro VERTx4 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movd xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ;out_pitch
+%endif
+ mov rax, rsi
+ movsxd rcx, DWORD PTR arg(4) ;output_height
+ add rax, rdx
+
+ lea rbx, [rdx + rdx*4]
+ add rbx, rdx ;pitch * 6
+
+.loop:
+ movd xmm0, [rsi] ;A
+ movd xmm1, [rsi + rdx] ;B
+ movd xmm2, [rsi + rdx * 2] ;C
+ movd xmm3, [rax + rdx * 2] ;D
+ movd xmm4, [rsi + rdx * 4] ;E
+ movd xmm5, [rax + rdx * 4] ;F
+
+ punpcklbw xmm0, xmm1 ;A B
+ punpcklbw xmm2, xmm3 ;C D
+ punpcklbw xmm4, xmm5 ;E F
+
+ movd xmm6, [rsi + rbx] ;G
+ movd xmm7, [rax + rbx] ;H
+
+ pmaddubsw xmm0, k0k1
+ pmaddubsw xmm2, k2k3
+ punpcklbw xmm6, xmm7 ;G H
+ pmaddubsw xmm4, k4k5
+ pmaddubsw xmm6, k6k7
+
+ paddsw xmm0, xmm6
+ paddsw xmm0, xmm2
+ paddsw xmm0, xmm4
+ paddsw xmm0, krd
+
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+
+ add rsi, rdx
+ add rax, rdx
+%if %1
+ movd xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+ movd [rdi], xmm0
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;out_pitch
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .loop
+%endm
+
+%macro VERTx8 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movq xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ;out_pitch
+%endif
+ mov rax, rsi
+ movsxd rcx, DWORD PTR arg(4) ;output_height
+ add rax, rdx
+
+ lea rbx, [rdx + rdx*4]
+ add rbx, rdx ;pitch * 6
+
+.loop:
+ movq xmm0, [rsi] ;A
+ movq xmm1, [rsi + rdx] ;B
+ movq xmm2, [rsi + rdx * 2] ;C
+ movq xmm3, [rax + rdx * 2] ;D
+ movq xmm4, [rsi + rdx * 4] ;E
+ movq xmm5, [rax + rdx * 4] ;F
+
+ punpcklbw xmm0, xmm1 ;A B
+ punpcklbw xmm2, xmm3 ;C D
+ punpcklbw xmm4, xmm5 ;E F
+
+ movq xmm6, [rsi + rbx] ;G
+ movq xmm7, [rax + rbx] ;H
+
+ pmaddubsw xmm0, k0k1
+ pmaddubsw xmm2, k2k3
+ punpcklbw xmm6, xmm7 ;G H
+ pmaddubsw xmm4, k4k5
+ pmaddubsw xmm6, k6k7
+
+ paddsw xmm0, xmm6
+ paddsw xmm0, xmm2
+ paddsw xmm0, xmm4
+ paddsw xmm0, krd
+
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+
+ add rsi, rdx
+ add rax, rdx
+%if %1
+ movq xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+ movq [rdi], xmm0
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;out_pitch
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .loop
+%endm
+
+
+%macro VERTx16 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movq xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ;out_pitch
+%endif
+ mov rax, rsi
+ movsxd rcx, DWORD PTR arg(4) ;output_height
+ add rax, rdx
+
+ lea rbx, [rdx + rdx*4]
+ add rbx, rdx ;pitch * 6
+
+.loop:
+ movq xmm0, [rsi] ;A
+ movq xmm1, [rsi + rdx] ;B
+ movq xmm2, [rsi + rdx * 2] ;C
+ movq xmm3, [rax + rdx * 2] ;D
+ movq xmm4, [rsi + rdx * 4] ;E
+ movq xmm5, [rax + rdx * 4] ;F
+
+ punpcklbw xmm0, xmm1 ;A B
+ punpcklbw xmm2, xmm3 ;C D
+ punpcklbw xmm4, xmm5 ;E F
+
+ movq xmm6, [rsi + rbx] ;G
+ movq xmm7, [rax + rbx] ;H
+
+ pmaddubsw xmm0, k0k1
+ pmaddubsw xmm2, k2k3
+ punpcklbw xmm6, xmm7 ;G H
+ pmaddubsw xmm4, k4k5
+ pmaddubsw xmm6, k6k7
+
+ paddsw xmm0, xmm6
+ paddsw xmm0, xmm2
+ paddsw xmm0, xmm4
+ paddsw xmm0, krd
+
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+%if %1
+ movq xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+ movq [rdi], xmm0
+
+ movq xmm0, [rsi + 8] ;A
+ movq xmm1, [rsi + rdx + 8] ;B
+ movq xmm2, [rsi + rdx * 2 + 8] ;C
+ movq xmm3, [rax + rdx * 2 + 8] ;D
+ movq xmm4, [rsi + rdx * 4 + 8] ;E
+ movq xmm5, [rax + rdx * 4 + 8] ;F
+
+ punpcklbw xmm0, xmm1 ;A B
+ punpcklbw xmm2, xmm3 ;C D
+ punpcklbw xmm4, xmm5 ;E F
+
+
+ movq xmm6, [rsi + rbx + 8] ;G
+ movq xmm7, [rax + rbx + 8] ;H
+ punpcklbw xmm6, xmm7 ;G H
+
+
+ pmaddubsw xmm0, k0k1
+ pmaddubsw xmm2, k2k3
+ pmaddubsw xmm4, k4k5
+ pmaddubsw xmm6, k6k7
+
+ paddsw xmm0, xmm6
+ paddsw xmm0, xmm2
+ paddsw xmm0, xmm4
+ paddsw xmm0, krd
+
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+
+ add rsi, rdx
+ add rax, rdx
+%if %1
+ movq xmm1, [rdi+8]
+ pavgb xmm0, xmm1
+%endif
+
+ movq [rdi+8], xmm0
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;out_pitch
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .loop
+%endm
+
+;void vp9_filter_block1d4_v8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pitch,
+; unsigned char *output_ptr,
+; unsigned int out_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d4_v8_ssse3) PRIVATE
+sym(vp9_filter_block1d4_v8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx4 0
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_filter_block1d8_v8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pitch,
+; unsigned char *output_ptr,
+; unsigned int out_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d8_v8_ssse3) PRIVATE
+sym(vp9_filter_block1d8_v8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx8 0
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_filter_block1d16_v8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pitch,
+; unsigned char *output_ptr,
+; unsigned int out_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d16_v8_ssse3) PRIVATE
+sym(vp9_filter_block1d16_v8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx16 0
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+global sym(vp9_filter_block1d4_v8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d4_v8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx4 1
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d8_v8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d8_v8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx8 1
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d16_v8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d16_v8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx16 1
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+%macro HORIZx4 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movq xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+.loop:
+ movq xmm0, [rsi - 3] ; -3 -2 -1 0 1 2 3 4
+
+ movq xmm3, [rsi + 5] ; 5 6 7 8 9 10 11 12
+ punpcklqdq xmm0, xmm3
+
+ movdqa xmm1, xmm0
+ pshufb xmm0, [GLOBAL(shuf_t0t1)]
+ pmaddubsw xmm0, k0k1
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf_t2t3)]
+ pmaddubsw xmm1, k2k3
+
+ movdqa xmm4, xmm2
+ pshufb xmm2, [GLOBAL(shuf_t4t5)]
+ pmaddubsw xmm2, k4k5
+
+ pshufb xmm4, [GLOBAL(shuf_t6t7)]
+ pmaddubsw xmm4, k6k7
+
+ paddsw xmm0, xmm1
+ paddsw xmm0, xmm4
+ paddsw xmm0, xmm2
+ paddsw xmm0, krd
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+%if %1
+ movd xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+ lea rsi, [rsi + rax]
+ movd [rdi], xmm0
+
+ lea rdi, [rdi + rdx]
+ dec rcx
+ jnz .loop
+%endm
+
+%macro HORIZx8 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movd xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+.loop:
+ movq xmm0, [rsi - 3] ; -3 -2 -1 0 1 2 3 4
+
+ movq xmm3, [rsi + 5] ; 5 6 7 8 9 10 11 12
+ punpcklqdq xmm0, xmm3
+
+ movdqa xmm1, xmm0
+ pshufb xmm0, [GLOBAL(shuf_t0t1)]
+ pmaddubsw xmm0, k0k1
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf_t2t3)]
+ pmaddubsw xmm1, k2k3
+
+ movdqa xmm4, xmm2
+ pshufb xmm2, [GLOBAL(shuf_t4t5)]
+ pmaddubsw xmm2, k4k5
+
+ pshufb xmm4, [GLOBAL(shuf_t6t7)]
+ pmaddubsw xmm4, k6k7
+
+ paddsw xmm0, xmm1
+ paddsw xmm0, xmm4
+ paddsw xmm0, xmm2
+ paddsw xmm0, krd
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+%if %1
+ movq xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+
+ lea rsi, [rsi + rax]
+ movq [rdi], xmm0
+
+ lea rdi, [rdi + rdx]
+ dec rcx
+ jnz .loop
+%endm
+
+%macro HORIZx16 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movq xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+.loop:
+ movq xmm0, [rsi - 3] ; -3 -2 -1 0 1 2 3 4
+
+ movq xmm3, [rsi + 5] ; 5 6 7 8 9 10 11 12
+ punpcklqdq xmm0, xmm3
+
+ movdqa xmm1, xmm0
+ pshufb xmm0, [GLOBAL(shuf_t0t1)]
+ pmaddubsw xmm0, k0k1
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf_t2t3)]
+ pmaddubsw xmm1, k2k3
+
+ movdqa xmm4, xmm2
+ pshufb xmm2, [GLOBAL(shuf_t4t5)]
+ pmaddubsw xmm2, k4k5
+
+ pshufb xmm4, [GLOBAL(shuf_t6t7)]
+ pmaddubsw xmm4, k6k7
+
+ paddsw xmm0, xmm1
+ paddsw xmm0, xmm4
+ paddsw xmm0, xmm2
+ paddsw xmm0, krd
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+
+
+ movq xmm3, [rsi + 5]
+ movq xmm7, [rsi + 13]
+ punpcklqdq xmm3, xmm7
+
+ movdqa xmm1, xmm3
+ pshufb xmm3, [GLOBAL(shuf_t0t1)]
+ pmaddubsw xmm3, k0k1
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf_t2t3)]
+ pmaddubsw xmm1, k2k3
+
+ movdqa xmm4, xmm2
+ pshufb xmm2, [GLOBAL(shuf_t4t5)]
+ pmaddubsw xmm2, k4k5
+
+ pshufb xmm4, [GLOBAL(shuf_t6t7)]
+ pmaddubsw xmm4, k6k7
+
+ paddsw xmm3, xmm1
+ paddsw xmm3, xmm4
+ paddsw xmm3, xmm2
+ paddsw xmm3, krd
+ psraw xmm3, 7
+ packuswb xmm3, xmm3
+ punpcklqdq xmm0, xmm3
+%if %1
+ movdqa xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+
+ lea rsi, [rsi + rax]
+ movdqa [rdi], xmm0
+
+ lea rdi, [rdi + rdx]
+ dec rcx
+ jnz .loop
+%endm
+
+;void vp9_filter_block1d4_h8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; unsigned int output_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d4_h8_ssse3) PRIVATE
+sym(vp9_filter_block1d4_h8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx4 0
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_filter_block1d8_h8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; unsigned int output_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d8_h8_ssse3) PRIVATE
+sym(vp9_filter_block1d8_h8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx8 0
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_filter_block1d16_h8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; unsigned int output_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d16_h8_ssse3) PRIVATE
+sym(vp9_filter_block1d16_h8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx16 0
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d4_h8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d4_h8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx4 1
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d8_h8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d8_h8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx8 1
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d16_h8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d16_h8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx16 1
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+SECTION_RODATA
+align 16
+shuf_t0t1:
+ db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
+align 16
+shuf_t2t3:
+ db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
+align 16
+shuf_t4t5:
+ db 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
+align 16
+shuf_t6t7:
+ db 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14
diff --git a/libvpx/vp9/decoder/arm/neon/vp9_add_constant_residual_neon.asm b/libvpx/vp9/decoder/arm/neon/vp9_add_constant_residual_neon.asm
new file mode 100644
index 0000000..174e747
--- /dev/null
+++ b/libvpx/vp9/decoder/arm/neon/vp9_add_constant_residual_neon.asm
@@ -0,0 +1,230 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_add_constant_residual_8x8_neon|
+ EXPORT |vp9_add_constant_residual_16x16_neon|
+ EXPORT |vp9_add_constant_residual_32x32_neon|
+ ARM
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+ MACRO
+ LD_16x8 $src, $stride
+ vld1.8 {q8}, [$src], $stride
+ vld1.8 {q9}, [$src], $stride
+ vld1.8 {q10}, [$src], $stride
+ vld1.8 {q11}, [$src], $stride
+ vld1.8 {q12}, [$src], $stride
+ vld1.8 {q13}, [$src], $stride
+ vld1.8 {q14}, [$src], $stride
+ vld1.8 {q15}, [$src], $stride
+ MEND
+
+ MACRO
+ ADD_DIFF_16x8 $diff
+ vqadd.u8 q8, q8, $diff
+ vqadd.u8 q9, q9, $diff
+ vqadd.u8 q10, q10, $diff
+ vqadd.u8 q11, q11, $diff
+ vqadd.u8 q12, q12, $diff
+ vqadd.u8 q13, q13, $diff
+ vqadd.u8 q14, q14, $diff
+ vqadd.u8 q15, q15, $diff
+ MEND
+
+ MACRO
+ SUB_DIFF_16x8 $diff
+ vqsub.u8 q8, q8, $diff
+ vqsub.u8 q9, q9, $diff
+ vqsub.u8 q10, q10, $diff
+ vqsub.u8 q11, q11, $diff
+ vqsub.u8 q12, q12, $diff
+ vqsub.u8 q13, q13, $diff
+ vqsub.u8 q14, q14, $diff
+ vqsub.u8 q15, q15, $diff
+ MEND
+
+ MACRO
+ ST_16x8 $dst, $stride
+ vst1.8 {q8}, [$dst], $stride
+ vst1.8 {q9}, [$dst], $stride
+ vst1.8 {q10}, [$dst], $stride
+ vst1.8 {q11}, [$dst], $stride
+ vst1.8 {q12}, [$dst], $stride
+ vst1.8 {q13}, [$dst], $stride
+ vst1.8 {q14}, [$dst], $stride
+ vst1.8 {q15}, [$dst], $stride
+ MEND
+
+; void add_constant_residual(const int16_t diff, uint8_t *dest, int stride,
+; int width, int height) {
+; int r, c;
+;
+; for (r = 0; r < height; r++) {
+; for (c = 0; c < width; c++)
+; dest[c] = clip_pixel(diff + dest[c]);
+;
+; dest += stride;
+; }
+;}
+;void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest,
+; int stride) {
+; add_constant_residual(diff, dest, stride, 8, 8);
+;}
+; r0 : const int16_t diff
+; r1 : const uint8_t *dest
+; r2 : int stride
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+|vp9_add_constant_residual_8x8_neon| PROC
+ mov r3, r1 ; r3: save dest to r3
+ vld1.8 {d0}, [r1], r2
+ vld1.8 {d1}, [r1], r2
+ vld1.8 {d2}, [r1], r2
+ vld1.8 {d3}, [r1], r2
+ vld1.8 {d4}, [r1], r2
+ vld1.8 {d5}, [r1], r2
+ vld1.8 {d6}, [r1], r2
+ vld1.8 {d7}, [r1], r2
+ cmp r0, #0
+ bge DIFF_POSITIVE_8x8
+
+DIFF_NEGATIVE_8x8 ; diff < 0
+ neg r0, r0
+ usat r0, #8, r0
+ vdup.u8 q8, r0
+
+ vqsub.u8 q0, q0, q8
+ vqsub.u8 q1, q1, q8
+ vqsub.u8 q2, q2, q8
+ vqsub.u8 q3, q3, q8
+ b DIFF_SAVE_8x8
+
+DIFF_POSITIVE_8x8 ; diff >= 0
+ usat r0, #8, r0
+ vdup.u8 q8, r0
+
+ vqadd.u8 q0, q0, q8
+ vqadd.u8 q1, q1, q8
+ vqadd.u8 q2, q2, q8
+ vqadd.u8 q3, q3, q8
+
+DIFF_SAVE_8x8
+ vst1.8 {d0}, [r3], r2
+ vst1.8 {d1}, [r3], r2
+ vst1.8 {d2}, [r3], r2
+ vst1.8 {d3}, [r3], r2
+ vst1.8 {d4}, [r3], r2
+ vst1.8 {d5}, [r3], r2
+ vst1.8 {d6}, [r3], r2
+ vst1.8 {d7}, [r3], r2
+
+ bx lr
+ ENDP
+
+;void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest,
+; int stride) {
+; add_constant_residual(diff, dest, stride, 16, 16);
+;}
+; r0 : const int16_t diff
+; r1 : const uint8_t *dest
+; r2 : int stride
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+|vp9_add_constant_residual_16x16_neon| PROC
+ mov r3, r1
+ LD_16x8 r1, r2
+ cmp r0, #0
+ bge DIFF_POSITIVE_16x16
+
+|DIFF_NEGATIVE_16x16|
+ neg r0, r0
+ usat r0, #8, r0
+ vdup.u8 q0, r0
+
+ SUB_DIFF_16x8 q0
+ ST_16x8 r3, r2
+ LD_16x8 r1, r2
+ SUB_DIFF_16x8 q0
+ b DIFF_SAVE_16x16
+
+|DIFF_POSITIVE_16x16|
+ usat r0, #8, r0
+ vdup.u8 q0, r0
+
+ ADD_DIFF_16x8 q0
+ ST_16x8 r3, r2
+ LD_16x8 r1, r2
+ ADD_DIFF_16x8 q0
+
+|DIFF_SAVE_16x16|
+ ST_16x8 r3, r2
+ bx lr
+ ENDP
+
+;void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest,
+; int stride) {
+; add_constant_residual(diff, dest, stride, 32, 32);
+;}
+; r0 : const int16_t diff
+; r1 : const uint8_t *dest
+; r2 : int stride
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+|vp9_add_constant_residual_32x32_neon| PROC
+ push {r4,lr}
+ pld [r1]
+ mov r3, r1
+ add r4, r1, #16 ; r4 dest + 16 for second loop
+ cmp r0, #0
+ bge DIFF_POSITIVE_32x32
+
+|DIFF_NEGATIVE_32x32|
+ neg r0, r0
+ usat r0, #8, r0
+ vdup.u8 q0, r0
+ mov r0, #4
+
+|DIFF_NEGATIVE_32x32_LOOP|
+ sub r0, #1
+ LD_16x8 r1, r2
+ SUB_DIFF_16x8 q0
+ ST_16x8 r3, r2
+
+ LD_16x8 r1, r2
+ SUB_DIFF_16x8 q0
+ ST_16x8 r3, r2
+ cmp r0, #2
+ moveq r1, r4
+ moveq r3, r4
+ cmp r0, #0
+ bne DIFF_NEGATIVE_32x32_LOOP
+ pop {r4,pc}
+
+|DIFF_POSITIVE_32x32|
+ usat r0, #8, r0
+ vdup.u8 q0, r0
+ mov r0, #4
+
+|DIFF_POSITIVE_32x32_LOOP|
+ sub r0, #1
+ LD_16x8 r1, r2
+ ADD_DIFF_16x8 q0
+ ST_16x8 r3, r2
+
+ LD_16x8 r1, r2
+ ADD_DIFF_16x8 q0
+ ST_16x8 r3, r2
+ cmp r0, #2
+ moveq r1, r4
+ moveq r3, r4
+ cmp r0, #0
+ bne DIFF_POSITIVE_32x32_LOOP
+ pop {r4,pc}
+ ENDP
+
+ END
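
The 32x32 case runs the 16-byte-wide loop body four times: two iterations of 16 rows cover the left half, then the r4 pointer (dest + 16) swaps in when the counter reaches 2 and the right half repeats the pattern. The same control flow in C shape (illustrative only):

    #include <stdint.h>

    static void add_const_32x32_sketch(uint8_t *dest, int stride, int diff) {
      const uint8_t d = (uint8_t)(diff < 0 ? -diff : diff);  /* usat r0, #8 */
      int half, r, c;

      for (half = 0; half < 2; half++) {     /* columns 0-15, then 16-31 */
        uint8_t *p = dest + 16 * half;
        for (r = 0; r < 32; r++, p += stride)
          for (c = 0; c < 16; c++) {
            int v = diff < 0 ? p[c] - d : p[c] + d;  /* vqsub.u8 / vqadd.u8 */
            p[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
          }
      }
    }
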
diff --git a/libvpx/vp9/decoder/vp9_dboolhuff.c b/libvpx/vp9/decoder/vp9_dboolhuff.c
new file mode 100644
index 0000000..06acec4
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_dboolhuff.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_ports/mem.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/decoder/vp9_dboolhuff.h"
+
+// This is meant to be a large, positive constant that can still be efficiently
+// loaded as an immediate (on platforms like ARM, for example).
+// Even relatively modest values like 100 would work fine.
+#define LOTS_OF_BITS 0x40000000
+
+
+int vp9_reader_init(vp9_reader *r, const uint8_t *buffer, size_t size) {
+ int marker_bit;
+
+ r->buffer_end = buffer + size;
+ r->buffer = buffer;
+ r->value = 0;
+ r->count = -8;
+ r->range = 255;
+
+ if (size && !buffer)
+ return 1;
+
+ vp9_reader_fill(r);
+ marker_bit = vp9_read_bit(r);
+ return marker_bit != 0;
+}
+
+void vp9_reader_fill(vp9_reader *r) {
+ const uint8_t *const buffer_end = r->buffer_end;
+ const uint8_t *buffer = r->buffer;
+ VP9_BD_VALUE value = r->value;
+ int count = r->count;
+ int shift = BD_VALUE_SIZE - 8 - (count + 8);
+ int loop_end = 0;
+ const int bits_left = (int)((buffer_end - buffer)*CHAR_BIT);
+ const int x = shift + CHAR_BIT - bits_left;
+
+ if (x >= 0) {
+ count += LOTS_OF_BITS;
+ loop_end = x;
+ }
+
+ if (x < 0 || bits_left) {
+ while (shift >= loop_end) {
+ count += CHAR_BIT;
+ value |= (VP9_BD_VALUE)*buffer++ << shift;
+ shift -= CHAR_BIT;
+ }
+ }
+
+ r->buffer = buffer;
+ r->value = value;
+ r->count = count;
+}
+
+const uint8_t *vp9_reader_find_end(vp9_reader *r) {
+ // Find the end of the coded buffer
+ while (r->count > CHAR_BIT && r->count < BD_VALUE_SIZE) {
+ r->count -= CHAR_BIT;
+ r->buffer--;
+ }
+ return r->buffer;
+}
+
+int vp9_reader_has_error(vp9_reader *r) {
+ // Check if we have reached the end of the buffer.
+ //
+ // Variable 'count' stores the number of bits in the 'value' buffer, minus
+ // 8. The top byte is part of the algorithm, and the remainder is buffered
+ // to be shifted into it. So if count == 8, the top 16 bits of 'value' are
+ // occupied, 8 for the algorithm and 8 in the buffer.
+ //
+ // When reading a byte from the user's buffer, count is filled with 8 and
+ // one byte is filled into the value buffer. When we reach the end of the
+ // data, count is additionally filled with LOTS_OF_BITS. So when
+ // count == LOTS_OF_BITS - 1, the user's data has been exhausted.
+ //
+ // 1 if we have tried to decode bits after the end of stream was encountered.
+ // 0 No error.
+ return r->count > BD_VALUE_SIZE && r->count < LOTS_OF_BITS;
+}
diff --git a/libvpx/vp9/decoder/vp9_dboolhuff.h b/libvpx/vp9/decoder/vp9_dboolhuff.h
new file mode 100644
index 0000000..c864516
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_dboolhuff.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_DECODER_VP9_DBOOLHUFF_H_
+#define VP9_DECODER_VP9_DBOOLHUFF_H_
+
+#include <stddef.h>
+#include <limits.h>
+
+#include "./vpx_config.h"
+#include "vpx_ports/mem.h"
+#include "vpx/vpx_integer.h"
+
+typedef size_t VP9_BD_VALUE;
+
+#define BD_VALUE_SIZE ((int)sizeof(VP9_BD_VALUE)*CHAR_BIT)
+
+typedef struct {
+ const uint8_t *buffer_end;
+ const uint8_t *buffer;
+ VP9_BD_VALUE value;
+ int count;
+ unsigned int range;
+} vp9_reader;
+
+DECLARE_ALIGNED(16, extern const uint8_t, vp9_norm[256]);
+
+int vp9_reader_init(vp9_reader *r, const uint8_t *buffer, size_t size);
+
+void vp9_reader_fill(vp9_reader *r);
+
+const uint8_t *vp9_reader_find_end(vp9_reader *r);
+
+static int vp9_read(vp9_reader *br, int probability) {
+ unsigned int bit = 0;
+ VP9_BD_VALUE value;
+ VP9_BD_VALUE bigsplit;
+ int count;
+ unsigned int range;
+ unsigned int split = 1 + (((br->range - 1) * probability) >> 8);
+
+ if (br->count < 0)
+ vp9_reader_fill(br);
+
+ value = br->value;
+ count = br->count;
+
+ bigsplit = (VP9_BD_VALUE)split << (BD_VALUE_SIZE - 8);
+
+ range = split;
+
+ if (value >= bigsplit) {
+ range = br->range - split;
+ value = value - bigsplit;
+ bit = 1;
+ }
+
+ {
+ register unsigned int shift = vp9_norm[range];
+ range <<= shift;
+ value <<= shift;
+ count -= shift;
+ }
+ br->value = value;
+ br->count = count;
+ br->range = range;
+
+ return bit;
+}
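+
+// Worked example (illustrative): with the initial range of 255 and
+// probability 128, split == 1 + ((254 * 128) >> 8) == 128, an even split of
+// the range; a zero bit keeps range == 128, while a one bit leaves
+// range == 127, which vp9_norm renormalizes back into [128, 255].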
+
+static int vp9_read_bit(vp9_reader *r) {
+ return vp9_read(r, 128); // vp9_prob_half
+}
+
+static int vp9_read_literal(vp9_reader *br, int bits) {
+ int z = 0, bit;
+
+ for (bit = bits - 1; bit >= 0; bit--)
+ z |= vp9_read_bit(br) << bit;
+
+ return z;
+}
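+
+// Illustrative: vp9_read_literal() is MSB-first, so reading the bit
+// sequence 1, 0, 1 with bits == 3 yields z == (1 << 2) | (0 << 1) | 1 == 5.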
+
+int vp9_reader_has_error(vp9_reader *r);
+
+#endif // VP9_DECODER_VP9_DBOOLHUFF_H_
diff --git a/libvpx/vp9/decoder/vp9_decodemv.c b/libvpx/vp9/decoder/vp9_decodemv.c
new file mode 100644
index 0000000..84a29b1
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_decodemv.c
@@ -0,0 +1,690 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_seg_common.h"
+
+#include "vp9/decoder/vp9_decodemv.h"
+#include "vp9/decoder/vp9_decodframe.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_dsubexp.h"
+#include "vp9/decoder/vp9_treereader.h"
+
+static MB_PREDICTION_MODE read_intra_mode(vp9_reader *r, const vp9_prob *p) {
+ return (MB_PREDICTION_MODE)treed_read(r, vp9_intra_mode_tree, p);
+}
+
+static MB_PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, vp9_reader *r,
+ uint8_t context) {
+ MB_PREDICTION_MODE mode = treed_read(r, vp9_inter_mode_tree,
+ cm->fc.inter_mode_probs[context]);
+ ++cm->counts.inter_mode[context][inter_mode_offset(mode)];
+ return mode;
+}
+
+static int read_segment_id(vp9_reader *r, const struct segmentation *seg) {
+ return treed_read(r, vp9_segment_tree, seg->tree_probs);
+}
+
+static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
+ BLOCK_SIZE bsize, vp9_reader *r) {
+ const uint8_t context = vp9_get_pred_context_tx_size(xd);
+ const vp9_prob *tx_probs = get_tx_probs(bsize, context, &cm->fc.tx_probs);
+ TX_SIZE tx_size = vp9_read(r, tx_probs[0]);
+ if (tx_size != TX_4X4 && bsize >= BLOCK_16X16) {
+ tx_size += vp9_read(r, tx_probs[1]);
+ if (tx_size != TX_8X8 && bsize >= BLOCK_32X32)
+ tx_size += vp9_read(r, tx_probs[2]);
+ }
+
+ update_tx_counts(bsize, context, tx_size, &cm->counts.tx);
+ return tx_size;
+}
+
+static TX_SIZE read_tx_size(VP9D_COMP *pbi, TX_MODE tx_mode,
+ BLOCK_SIZE bsize, int allow_select,
+ vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8)
+ return read_selected_tx_size(cm, xd, bsize, r);
+ else if (tx_mode >= ALLOW_32X32 && bsize >= BLOCK_32X32)
+ return TX_32X32;
+ else if (tx_mode >= ALLOW_16X16 && bsize >= BLOCK_16X16)
+ return TX_16X16;
+ else if (tx_mode >= ALLOW_8X8 && bsize >= BLOCK_8X8)
+ return TX_8X8;
+ else
+ return TX_4X4;
+}
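+
+// Illustrative (assuming the usual ordering ONLY_4X4 < ALLOW_8X8 <
+// ALLOW_16X16 < ALLOW_32X32 < TX_MODE_SELECT): a BLOCK_16X16 with
+// tx_mode == ALLOW_32X32 and allow_select == 0 falls through to TX_16X16,
+// the largest transform permitted by both the mode and the block size.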
+
+static void set_segment_id(VP9_COMMON *cm, BLOCK_SIZE bsize,
+ int mi_row, int mi_col, int segment_id) {
+ const int mi_offset = mi_row * cm->mi_cols + mi_col;
+ const int bw = 1 << mi_width_log2(bsize);
+ const int bh = 1 << mi_height_log2(bsize);
+ const int xmis = MIN(cm->mi_cols - mi_col, bw);
+ const int ymis = MIN(cm->mi_rows - mi_row, bh);
+ int x, y;
+
+ assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
+
+ for (y = 0; y < ymis; y++)
+ for (x = 0; x < xmis; x++)
+ cm->last_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
+}
+
+static int read_intra_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
+ vp9_reader *r) {
+ MACROBLOCKD *const xd = &pbi->mb;
+ struct segmentation *const seg = &pbi->common.seg;
+ const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type;
+ int segment_id;
+
+ if (!seg->enabled)
+ return 0; // Default for disabled segmentation
+
+ if (!seg->update_map)
+ return 0;
+
+ segment_id = read_segment_id(r, seg);
+ set_segment_id(&pbi->common, bsize, mi_row, mi_col, segment_id);
+ return segment_id;
+}
+
+static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
+ vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ struct segmentation *const seg = &cm->seg;
+ const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type;
+ int pred_segment_id, segment_id;
+
+ if (!seg->enabled)
+ return 0; // Default for disabled segmentation
+
+ pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
+ bsize, mi_row, mi_col);
+ if (!seg->update_map)
+ return pred_segment_id;
+
+ if (seg->temporal_update) {
+ const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
+ const int pred_flag = vp9_read(r, pred_prob);
+ vp9_set_pred_flag_seg_id(xd, pred_flag);
+ segment_id = pred_flag ? pred_segment_id
+ : read_segment_id(r, seg);
+ } else {
+ segment_id = read_segment_id(r, seg);
+ }
+ set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
+ return segment_id;
+}
+
+static uint8_t read_skip_coeff(VP9D_COMP *pbi, int segment_id, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ int skip_coeff = vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP);
+ if (!skip_coeff) {
+ const int ctx = vp9_get_pred_context_mbskip(xd);
+ skip_coeff = vp9_read(r, vp9_get_pred_prob_mbskip(cm, xd));
+ cm->counts.mbskip[ctx][skip_coeff]++;
+ }
+ return skip_coeff;
+}
+
+static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m,
+ int mi_row, int mi_col, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ MB_MODE_INFO *const mbmi = &m->mbmi;
+ const BLOCK_SIZE bsize = mbmi->sb_type;
+ const MODE_INFO *above_mi = xd->mi_8x8[-cm->mode_info_stride];
+ const MODE_INFO *left_mi = xd->mi_8x8[-1];
+
+ mbmi->segment_id = read_intra_segment_id(pbi, mi_row, mi_col, r);
+ mbmi->skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r);
+ mbmi->tx_size = read_tx_size(pbi, cm->tx_mode, bsize, 1, r);
+ mbmi->ref_frame[0] = INTRA_FRAME;
+ mbmi->ref_frame[1] = NONE;
+
+ if (bsize >= BLOCK_8X8) {
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
+ const MB_PREDICTION_MODE L = xd->left_available ?
+ left_block_mode(m, left_mi, 0) : DC_PRED;
+ mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]);
+ } else {
+ // Only 4x4, 4x8, 8x4 blocks
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2
+ const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; // 1 or 2
+ int idx, idy;
+
+ for (idy = 0; idy < 2; idy += num_4x4_h) {
+ for (idx = 0; idx < 2; idx += num_4x4_w) {
+ const int ib = idy * 2 + idx;
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, ib);
+ const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
+ left_block_mode(m, left_mi, ib) : DC_PRED;
+ const MB_PREDICTION_MODE b_mode = read_intra_mode(r,
+ vp9_kf_y_mode_prob[A][L]);
+ m->bmi[ib].as_mode = b_mode;
+ if (num_4x4_h == 2)
+ m->bmi[ib + 2].as_mode = b_mode;
+ if (num_4x4_w == 2)
+ m->bmi[ib + 1].as_mode = b_mode;
+ }
+ }
+
+ mbmi->mode = m->bmi[3].as_mode;
+ }
+
+ mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]);
+}
+
+static int read_mv_component(vp9_reader *r,
+ const nmv_component *mvcomp, int usehp) {
+ int mag, d, fr, hp;
+ const int sign = vp9_read(r, mvcomp->sign);
+ const int mv_class = treed_read(r, vp9_mv_class_tree, mvcomp->classes);
+ const int class0 = mv_class == MV_CLASS_0;
+
+ // Integer part
+ if (class0) {
+ d = treed_read(r, vp9_mv_class0_tree, mvcomp->class0);
+ } else {
+ int i;
+ const int n = mv_class + CLASS0_BITS - 1; // number of bits
+
+ d = 0;
+ for (i = 0; i < n; ++i)
+ d |= vp9_read(r, mvcomp->bits[i]) << i;
+ }
+
+ // Fractional part
+ fr = treed_read(r, vp9_mv_fp_tree,
+ class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
+
+  // High precision part (when high precision is unused, hp defaults to 1)
+ hp = usehp ? vp9_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
+ : 1;
+
+ // Result
+ mag = vp9_get_mv_mag(mv_class, (d << 3) | (fr << 1) | hp) + 1;
+ return sign ? -mag : mag;
+}
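+
+// Note (illustrative): the packed offset (d << 3) | (fr << 1) | hp is in
+// 1/8-pel units: d counts full pels, fr counts quarter-pel steps, and hp
+// selects the final eighth-pel, so the returned magnitude is in eighth pels.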
+
+static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref,
+ const nmv_context *ctx,
+ nmv_context_counts *counts, int allow_hp) {
+ const MV_JOINT_TYPE j = treed_read(r, vp9_mv_joint_tree, ctx->joints);
+ const int use_hp = allow_hp && vp9_use_mv_hp(ref);
+ MV diff = {0, 0};
+
+ if (mv_joint_vertical(j))
+ diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
+
+ if (mv_joint_horizontal(j))
+ diff.col = read_mv_component(r, &ctx->comps[1], use_hp);
+
+ vp9_inc_mv(&diff, counts);
+
+ mv->row = ref->row + diff.row;
+ mv->col = ref->col + diff.col;
+}
+
+static void update_mv(vp9_reader *r, vp9_prob *p) {
+ if (vp9_read(r, NMV_UPDATE_PROB))
+ *p = (vp9_read_literal(r, 7) << 1) | 1;
+}
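+
+// Note (illustrative): the 7-bit literal covers 0..127, so the updated
+// probability (x << 1) | 1 is always an odd value in [1, 255]; this keeps
+// the probability nonzero while saving one bit per update.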
+
+static void read_mv_probs(vp9_reader *r, nmv_context *mvc, int allow_hp) {
+ int i, j, k;
+
+ for (j = 0; j < MV_JOINTS - 1; ++j)
+ update_mv(r, &mvc->joints[j]);
+
+ for (i = 0; i < 2; ++i) {
+ nmv_component *const comp = &mvc->comps[i];
+
+ update_mv(r, &comp->sign);
+
+ for (j = 0; j < MV_CLASSES - 1; ++j)
+ update_mv(r, &comp->classes[j]);
+
+ for (j = 0; j < CLASS0_SIZE - 1; ++j)
+ update_mv(r, &comp->class0[j]);
+
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ update_mv(r, &comp->bits[j]);
+ }
+
+ for (i = 0; i < 2; ++i) {
+ nmv_component *const comp = &mvc->comps[i];
+
+ for (j = 0; j < CLASS0_SIZE; ++j)
+ for (k = 0; k < 3; ++k)
+ update_mv(r, &comp->class0_fp[j][k]);
+
+ for (j = 0; j < 3; ++j)
+ update_mv(r, &comp->fp[j]);
+ }
+
+ if (allow_hp) {
+ for (i = 0; i < 2; ++i) {
+ update_mv(r, &mvc->comps[i].class0_hp);
+ update_mv(r, &mvc->comps[i].hp);
+ }
+ }
+}
+
+// Read the reference frames
+static void read_ref_frames(VP9D_COMP *pbi, vp9_reader *r,
+ int segment_id, MV_REFERENCE_FRAME ref_frame[2]) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ FRAME_CONTEXT *const fc = &cm->fc;
+ FRAME_COUNTS *const counts = &cm->counts;
+
+ if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
+ ref_frame[0] = vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
+ ref_frame[1] = NONE;
+ } else {
+ const int comp_ctx = vp9_get_pred_context_comp_inter_inter(cm, xd);
+ int is_comp;
+
+ if (cm->comp_pred_mode == HYBRID_PREDICTION) {
+ is_comp = vp9_read(r, fc->comp_inter_prob[comp_ctx]);
+ counts->comp_inter[comp_ctx][is_comp]++;
+ } else {
+ is_comp = cm->comp_pred_mode == COMP_PREDICTION_ONLY;
+ }
+
+ // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
+ if (is_comp) {
+ const int fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
+ const int ref_ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
+ const int b = vp9_read(r, fc->comp_ref_prob[ref_ctx]);
+ counts->comp_ref[ref_ctx][b]++;
+ ref_frame[fix_ref_idx] = cm->comp_fixed_ref;
+ ref_frame[!fix_ref_idx] = cm->comp_var_ref[b];
+ } else {
+ const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
+ const int bit0 = vp9_read(r, fc->single_ref_prob[ctx0][0]);
+ ++counts->single_ref[ctx0][0][bit0];
+ if (bit0) {
+ const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
+ const int bit1 = vp9_read(r, fc->single_ref_prob[ctx1][1]);
+ ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
+ ++counts->single_ref[ctx1][1][bit1];
+ } else {
+ ref_frame[0] = LAST_FRAME;
+ }
+
+ ref_frame[1] = NONE;
+ }
+ }
+}
+
+static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
+ int i, j;
+ for (j = 0; j < SWITCHABLE_FILTERS + 1; ++j)
+ for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
+}
+
+static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
+ int i, j;
+ for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
+ for (j = 0; j < INTER_MODES - 1; ++j)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
+}
+
+static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) {
+ COMPPREDMODE_TYPE mode = vp9_read_bit(r);
+ if (mode)
+ mode += vp9_read_bit(r);
+ return mode;
+}
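+
+// Illustrative (assuming the enum order SINGLE_PREDICTION_ONLY <
+// COMP_PREDICTION_ONLY < HYBRID_PREDICTION): the bit patterns 0, 10 and 11
+// select the three modes, so the common single-prediction case costs one bit.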
+
+static INLINE INTERPOLATIONFILTERTYPE read_switchable_filter_type(
+ VP9D_COMP *pbi, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const int ctx = vp9_get_pred_context_switchable_interp(xd);
+ const int type = treed_read(r, vp9_switchable_interp_tree,
+ cm->fc.switchable_interp_prob[ctx]);
+ ++cm->counts.switchable_interp[ctx][type];
+ return type;
+}
+
+static void read_intra_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
+ vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ const BLOCK_SIZE bsize = mi->mbmi.sb_type;
+
+ mbmi->ref_frame[0] = INTRA_FRAME;
+ mbmi->ref_frame[1] = NONE;
+
+ if (bsize >= BLOCK_8X8) {
+ const int size_group = size_group_lookup[bsize];
+ mbmi->mode = read_intra_mode(r, cm->fc.y_mode_prob[size_group]);
+ cm->counts.y_mode[size_group][mbmi->mode]++;
+ } else {
+ // Only 4x4, 4x8, 8x4 blocks
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2
+ const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; // 1 or 2
+ int idx, idy;
+
+ for (idy = 0; idy < 2; idy += num_4x4_h) {
+ for (idx = 0; idx < 2; idx += num_4x4_w) {
+ const int ib = idy * 2 + idx;
+ const int b_mode = read_intra_mode(r, cm->fc.y_mode_prob[0]);
+ mi->bmi[ib].as_mode = b_mode;
+ cm->counts.y_mode[0][b_mode]++;
+
+ if (num_4x4_h == 2)
+ mi->bmi[ib + 2].as_mode = b_mode;
+ if (num_4x4_w == 2)
+ mi->bmi[ib + 1].as_mode = b_mode;
+ }
+ }
+ mbmi->mode = mi->bmi[3].as_mode;
+ }
+
+ mbmi->uv_mode = read_intra_mode(r, cm->fc.uv_mode_prob[mbmi->mode]);
+ cm->counts.uv_mode[mbmi->mode][mbmi->uv_mode]++;
+}
+
+static int read_is_inter_block(VP9D_COMP *pbi, int segment_id, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
+ return vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) !=
+ INTRA_FRAME;
+ } else {
+ const int ctx = vp9_get_pred_context_intra_inter(xd);
+ const int is_inter = vp9_read(r, vp9_get_pred_prob_intra_inter(cm, xd));
+ ++cm->counts.intra_inter[ctx][is_inter];
+ return is_inter;
+ }
+}
+
+static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
+ int mi_row, int mi_col, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ nmv_context *const nmvc = &cm->fc.nmvc;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ int_mv *const mv0 = &mbmi->mv[0];
+ int_mv *const mv1 = &mbmi->mv[1];
+ const BLOCK_SIZE bsize = mbmi->sb_type;
+ const int allow_hp = xd->allow_high_precision_mv;
+
+ int_mv nearest, nearby, best_mv;
+ int_mv nearest_second, nearby_second, best_mv_second;
+ uint8_t inter_mode_ctx;
+ MV_REFERENCE_FRAME ref0;
+ int is_compound;
+
+ mbmi->uv_mode = DC_PRED;
+ read_ref_frames(pbi, r, mbmi->segment_id, mbmi->ref_frame);
+ ref0 = mbmi->ref_frame[0];
+ is_compound = has_second_ref(mbmi);
+
+ vp9_find_mv_refs(cm, xd, mi, xd->last_mi, ref0, mbmi->ref_mvs[ref0],
+ mi_row, mi_col);
+
+ inter_mode_ctx = mbmi->mode_context[ref0];
+
+ if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+ mbmi->mode = ZEROMV;
+ assert(bsize >= BLOCK_8X8);
+ } else {
+ if (bsize >= BLOCK_8X8)
+ mbmi->mode = read_inter_mode(cm, r, inter_mode_ctx);
+ }
+
+ // nearest, nearby
+ if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
+ vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref0], &nearest, &nearby);
+ best_mv.as_int = mbmi->ref_mvs[ref0][0].as_int;
+ }
+
+ if (is_compound) {
+ const MV_REFERENCE_FRAME ref1 = mbmi->ref_frame[1];
+ vp9_find_mv_refs(cm, xd, mi, xd->last_mi,
+ ref1, mbmi->ref_mvs[ref1], mi_row, mi_col);
+
+ if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
+ vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref1],
+ &nearest_second, &nearby_second);
+ best_mv_second.as_int = mbmi->ref_mvs[ref1][0].as_int;
+ }
+ }
+
+ mbmi->interp_filter = cm->mcomp_filter_type == SWITCHABLE
+ ? read_switchable_filter_type(pbi, r)
+ : cm->mcomp_filter_type;
+
+ if (bsize < BLOCK_8X8) {
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2
+ const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; // 1 or 2
+ int idx, idy;
+ for (idy = 0; idy < 2; idy += num_4x4_h) {
+ for (idx = 0; idx < 2; idx += num_4x4_w) {
+ int_mv blockmv, secondmv;
+ const int j = idy * 2 + idx;
+ const int b_mode = read_inter_mode(cm, r, inter_mode_ctx);
+
+ if (b_mode == NEARESTMV || b_mode == NEARMV) {
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0,
+ mi_row, mi_col);
+
+ if (is_compound)
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second,
+ &nearby_second, j, 1,
+ mi_row, mi_col);
+ }
+
+ switch (b_mode) {
+ case NEWMV:
+ read_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc,
+ &cm->counts.mv, allow_hp);
+
+ if (is_compound)
+ read_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
+ &cm->counts.mv, allow_hp);
+ break;
+ case NEARESTMV:
+ blockmv.as_int = nearest.as_int;
+ if (is_compound)
+ secondmv.as_int = nearest_second.as_int;
+ break;
+ case NEARMV:
+ blockmv.as_int = nearby.as_int;
+ if (is_compound)
+ secondmv.as_int = nearby_second.as_int;
+ break;
+ case ZEROMV:
+ blockmv.as_int = 0;
+ if (is_compound)
+ secondmv.as_int = 0;
+ break;
+ default:
+ assert(!"Invalid inter mode value");
+ }
+ mi->bmi[j].as_mv[0].as_int = blockmv.as_int;
+ if (is_compound)
+ mi->bmi[j].as_mv[1].as_int = secondmv.as_int;
+
+ if (num_4x4_h == 2)
+ mi->bmi[j + 2] = mi->bmi[j];
+ if (num_4x4_w == 2)
+ mi->bmi[j + 1] = mi->bmi[j];
+ mi->mbmi.mode = b_mode;
+ }
+ }
+
+ mv0->as_int = mi->bmi[3].as_mv[0].as_int;
+ mv1->as_int = mi->bmi[3].as_mv[1].as_int;
+ } else {
+ switch (mbmi->mode) {
+ case NEARMV:
+ mv0->as_int = nearby.as_int;
+ if (is_compound)
+ mv1->as_int = nearby_second.as_int;
+ break;
+
+ case NEARESTMV:
+ mv0->as_int = nearest.as_int;
+ if (is_compound)
+ mv1->as_int = nearest_second.as_int;
+ break;
+
+ case ZEROMV:
+ mv0->as_int = 0;
+ if (is_compound)
+ mv1->as_int = 0;
+ break;
+
+ case NEWMV:
+ read_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->counts.mv, allow_hp);
+ if (is_compound)
+ read_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc, &cm->counts.mv,
+ allow_hp);
+ break;
+ default:
+ assert(!"Invalid inter mode value");
+ }
+ }
+}
+
+static void read_inter_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
+ int mi_row, int mi_col, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ int inter_block;
+
+ mbmi->mv[0].as_int = 0;
+ mbmi->mv[1].as_int = 0;
+ mbmi->segment_id = read_inter_segment_id(pbi, mi_row, mi_col, r);
+ mbmi->skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r);
+ inter_block = read_is_inter_block(pbi, mbmi->segment_id, r);
+ mbmi->tx_size = read_tx_size(pbi, cm->tx_mode, mbmi->sb_type,
+ !mbmi->skip_coeff || !inter_block, r);
+
+ if (inter_block)
+ read_inter_block_mode_info(pbi, mi, mi_row, mi_col, r);
+ else
+ read_intra_block_mode_info(pbi, mi, r);
+}
+
+static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) {
+ int i;
+
+ cm->comp_pred_mode = cm->allow_comp_inter_inter ? read_comp_pred_mode(r)
+ : SINGLE_PREDICTION_ONLY;
+
+ if (cm->comp_pred_mode == HYBRID_PREDICTION)
+ for (i = 0; i < COMP_INTER_CONTEXTS; i++)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]);
+
+ if (cm->comp_pred_mode != COMP_PREDICTION_ONLY)
+ for (i = 0; i < REF_CONTEXTS; i++) {
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]);
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]);
+ }
+
+ if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
+ for (i = 0; i < REF_CONTEXTS; i++)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]);
+}
+
+void vp9_prepare_read_mode_info(VP9D_COMP* pbi, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ int k;
+
+ // TODO(jkoleszar): does this clear more than MBSKIP_CONTEXTS? Maybe remove.
+ // vpx_memset(cm->fc.mbskip_probs, 0, sizeof(cm->fc.mbskip_probs));
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &cm->fc.mbskip_probs[k]);
+
+ if (cm->frame_type != KEY_FRAME && !cm->intra_only) {
+ nmv_context *const nmvc = &pbi->common.fc.nmvc;
+ MACROBLOCKD *const xd = &pbi->mb;
+ int i, j;
+
+ read_inter_mode_probs(&cm->fc, r);
+
+ if (cm->mcomp_filter_type == SWITCHABLE)
+ read_switchable_interp_probs(&cm->fc, r);
+
+ for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &cm->fc.intra_inter_prob[i]);
+
+ read_comp_pred(cm, r);
+
+ for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
+ for (i = 0; i < INTRA_MODES - 1; ++i)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &cm->fc.y_mode_prob[j][i]);
+
+ for (j = 0; j < NUM_PARTITION_CONTEXTS; ++j)
+ for (i = 0; i < PARTITION_TYPES - 1; ++i)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &cm->fc.partition_prob[INTER_FRAME][j][i]);
+
+ read_mv_probs(r, nmvc, xd->allow_high_precision_mv);
+ }
+}
+
+void vp9_read_mode_info(VP9D_COMP* pbi, int mi_row, int mi_col, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ MODE_INFO *mi = xd->this_mi;
+ const BLOCK_SIZE bsize = mi->mbmi.sb_type;
+ const int bw = 1 << mi_width_log2(bsize);
+ const int bh = 1 << mi_height_log2(bsize);
+ const int y_mis = MIN(bh, cm->mi_rows - mi_row);
+ const int x_mis = MIN(bw, cm->mi_cols - mi_col);
+ int x, y, z;
+
+ if (cm->frame_type == KEY_FRAME || cm->intra_only)
+ read_intra_frame_mode_info(pbi, mi, mi_row, mi_col, r);
+ else
+ read_inter_frame_mode_info(pbi, mi, mi_row, mi_col, r);
+
+ for (y = 0, z = 0; y < y_mis; y++, z += cm->mode_info_stride)
+ for (x = !y; x < x_mis; x++) {
+ xd->mi_8x8[z + x] = mi;
+ }
+}
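+
+// Note (illustrative): the inner loop starts at x = !y because set_offsets()
+// already pointed xd->mi_8x8[0] at this mi; only the remaining grid cells
+// covered by the block still need the back-pointer filled in.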
diff --git a/libvpx/vp9/decoder/vp9_decodemv.h b/libvpx/vp9/decoder/vp9_decodemv.h
new file mode 100644
index 0000000..462d2e3
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_decodemv.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_DECODER_VP9_DECODEMV_H_
+#define VP9_DECODER_VP9_DECODEMV_H_
+
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_dboolhuff.h"
+
+void vp9_prepare_read_mode_info(VP9D_COMP* pbi, vp9_reader *r);
+
+void vp9_read_mode_info(VP9D_COMP* pbi, int mi_row, int mi_col, vp9_reader *r);
+
+#endif // VP9_DECODER_VP9_DECODEMV_H_
diff --git a/libvpx/vp9/decoder/vp9_decodframe.c b/libvpx/vp9/decoder/vp9_decodframe.c
new file mode 100644
index 0000000..dbba28e
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_decodframe.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "./vp9_rtcd.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_scale/vpx_scale.h"
+
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_extend.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_tile_common.h"
+
+#include "vp9/decoder/vp9_dboolhuff.h"
+#include "vp9/decoder/vp9_decodframe.h"
+#include "vp9/decoder/vp9_detokenize.h"
+#include "vp9/decoder/vp9_decodemv.h"
+#include "vp9/decoder/vp9_dsubexp.h"
+#include "vp9/decoder/vp9_idct_blk.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_read_bit_buffer.h"
+#include "vp9/decoder/vp9_thread.h"
+#include "vp9/decoder/vp9_treereader.h"
+
+static int read_be32(const uint8_t *p) {
+ return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+}
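+
+// Illustrative: the bytes { 0x00, 0x00, 0x01, 0x2a } decode to 298; tile
+// sizes below are stored as 4-byte big-endian values.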
+
+// len == 0 is not allowed
+static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
+ return start + len > start && start + len <= end;
+}
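+
+// Note (illustrative): the first clause rejects len == 0 (start + len would
+// equal start) and, on typical flat memory models, a start + len that wraps
+// around the end of the address space.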
+
+static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
+ const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
+ return data > max ? max : data;
+}
+
+static TX_MODE read_tx_mode(vp9_reader *r) {
+ TX_MODE tx_mode = vp9_read_literal(r, 2);
+ if (tx_mode == ALLOW_32X32)
+ tx_mode += vp9_read_bit(r);
+ return tx_mode;
+}
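+
+// Illustrative (assuming ALLOW_32X32 == 3 and TX_MODE_SELECT == 4): the
+// 2-bit literal covers ONLY_4X4 through ALLOW_32X32, and one extra bit
+// distinguishes ALLOW_32X32 from TX_MODE_SELECT.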
+
+static void read_tx_probs(struct tx_probs *tx_probs, vp9_reader *r) {
+ int i, j;
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
+ for (j = 0; j < TX_SIZES - 3; ++j)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
+ for (j = 0; j < TX_SIZES - 2; ++j)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
+ for (j = 0; j < TX_SIZES - 1; ++j)
+ if (vp9_read(r, MODE_UPDATE_PROB))
+ vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
+}
+
+static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) {
+ int i;
+ xd->plane[0].dequant = cm->y_dequant[q_index];
+
+ for (i = 1; i < MAX_MB_PLANE; i++)
+ xd->plane[i].dequant = cm->uv_dequant[q_index];
+}
+
+static void decode_block(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg) {
+ MACROBLOCKD* const xd = arg;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ int16_t* const qcoeff = BLOCK_OFFSET(pd->qcoeff, block);
+ const int stride = pd->dst.stride;
+ const int eob = pd->eobs[block];
+ const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size,
+ block);
+ uint8_t* const dst = raster_block_offset_uint8(plane_bsize, raster_block,
+ pd->dst.buf, stride);
+ switch (tx_size) {
+ case TX_4X4: {
+ const TX_TYPE tx_type = get_tx_type_4x4(pd->plane_type, xd, raster_block);
+ if (tx_type == DCT_DCT)
+ xd->itxm_add(qcoeff, dst, stride, eob);
+ else
+ vp9_iht_add_c(tx_type, qcoeff, dst, stride, eob);
+ break;
+ }
+ case TX_8X8:
+ vp9_iht_add_8x8_c(get_tx_type_8x8(pd->plane_type, xd), qcoeff, dst,
+ stride, eob);
+ break;
+ case TX_16X16:
+ vp9_iht_add_16x16_c(get_tx_type_16x16(pd->plane_type, xd), qcoeff, dst,
+ stride, eob);
+ break;
+ case TX_32X32:
+ vp9_idct_add_32x32(qcoeff, dst, stride, eob);
+ break;
+ default:
+ assert(!"Invalid transform size");
+ }
+}
+
+static void decode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg) {
+ MACROBLOCKD* const xd = arg;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ MODE_INFO *const mi = xd->this_mi;
+ const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size,
+ block);
+ uint8_t* const dst = raster_block_offset_uint8(plane_bsize, raster_block,
+ pd->dst.buf, pd->dst.stride);
+ const MB_PREDICTION_MODE mode = (plane == 0)
+ ? ((mi->mbmi.sb_type < BLOCK_8X8) ? mi->bmi[raster_block].as_mode
+ : mi->mbmi.mode)
+ : mi->mbmi.uv_mode;
+
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
+ extend_for_intra(xd, plane_bsize, plane, block, tx_size);
+
+ vp9_predict_intra_block(xd, raster_block >> tx_size,
+ b_width_log2(plane_bsize), tx_size, mode,
+ dst, pd->dst.stride, dst, pd->dst.stride);
+
+ if (!mi->mbmi.skip_coeff)
+ decode_block(plane, block, plane_bsize, tx_size, arg);
+}
+
+static int decode_tokens(VP9D_COMP *pbi, BLOCK_SIZE bsize, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+
+ if (mbmi->skip_coeff) {
+ reset_skip_context(xd, bsize);
+ return -1;
+ } else {
+ if (cm->seg.enabled)
+ setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id,
+ cm->base_qindex));
+
+ // TODO(dkovalev) if (!vp9_reader_has_error(r))
+ return vp9_decode_tokens(pbi, r, bsize);
+ }
+}
+
+static void set_offsets(VP9D_COMP *pbi, BLOCK_SIZE bsize,
+ int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const int bh = num_8x8_blocks_high_lookup[bsize];
+ const int bw = num_8x8_blocks_wide_lookup[bsize];
+ const int offset = mi_row * cm->mode_info_stride + mi_col;
+
+ xd->mode_info_stride = cm->mode_info_stride;
+
+ xd->mi_8x8 = cm->mi_grid_visible + offset;
+ xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset;
+
+ // we are using the mode info context stream here
+ xd->this_mi =
+ xd->mi_8x8[0] = xd->mic_stream_ptr;
+ xd->this_mi->mbmi.sb_type = bsize;
+ xd->mic_stream_ptr++;
+
+ // Special case: if prev_mi is NULL, the previous mode info context
+ // cannot be used.
+ xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;
+
+ set_skip_context(cm, xd, mi_row, mi_col);
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+
+  // Distance of MB to the various image edges. These are specified to 8th
+  // pel as they are always compared to values that are in 1/8th pel units.
+ set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);
+
+ setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], mi_row, mi_col);
+}
+
+static void set_ref(VP9D_COMP *pbi, int i, int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ const int ref = mbmi->ref_frame[i] - LAST_FRAME;
+ const YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->active_ref_idx[ref]];
+ const struct scale_factors *sf = &cm->active_ref_scale[ref];
+ if (!vp9_is_valid_scale(sf))
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid scale factors");
+
+ xd->scale_factor[i] = *sf;
+ setup_pre_planes(xd, i, cfg, mi_row, mi_col, sf);
+ xd->corrupted |= cfg->corrupted;
+}
+
+static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
+ vp9_reader *r, BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const int less8x8 = bsize < BLOCK_8X8;
+ MB_MODE_INFO *mbmi;
+
+ if (less8x8)
+ if (xd->ab_index > 0)
+ return;
+
+ set_offsets(pbi, bsize, mi_row, mi_col);
+ vp9_read_mode_info(pbi, mi_row, mi_col, r);
+
+ if (less8x8)
+ bsize = BLOCK_8X8;
+
+ // Has to be called after set_offsets
+ mbmi = &xd->this_mi->mbmi;
+
+ if (!is_inter_block(mbmi)) {
+ // Intra reconstruction
+ decode_tokens(pbi, bsize, r);
+ foreach_transformed_block(xd, bsize, decode_block_intra, xd);
+ } else {
+ // Inter reconstruction
+ int eobtotal;
+
+ set_ref(pbi, 0, mi_row, mi_col);
+ if (has_second_ref(mbmi))
+ set_ref(pbi, 1, mi_row, mi_col);
+
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ eobtotal = decode_tokens(pbi, bsize, r);
+ if (less8x8) {
+ if (eobtotal >= 0)
+ foreach_transformed_block(xd, bsize, decode_block, xd);
+ } else {
+ assert(mbmi->sb_type == bsize);
+ if (eobtotal == 0)
+ // skip loopfilter
+ vp9_set_pred_flag_mbskip(xd, bsize, 1);
+ else if (eobtotal > 0)
+ foreach_transformed_block(xd, bsize, decode_block, xd);
+ }
+ }
+ xd->corrupted |= vp9_reader_has_error(r);
+}
+
+static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
+ vp9_reader* r, BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
+ PARTITION_TYPE partition = PARTITION_NONE;
+ BLOCK_SIZE subsize;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ if (bsize < BLOCK_8X8) {
+ if (xd->ab_index != 0)
+ return;
+ } else {
+ int pl;
+ const int idx = check_bsize_coverage(hbs, cm->mi_rows, cm->mi_cols,
+ mi_row, mi_col);
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+
+ if (idx == 0)
+ partition = treed_read(r, vp9_partition_tree,
+ cm->fc.partition_prob[cm->frame_type][pl]);
+ else if (idx > 0 &&
+ !vp9_read(r, cm->fc.partition_prob[cm->frame_type][pl][idx]))
+ partition = (idx == 1) ? PARTITION_HORZ : PARTITION_VERT;
+ else
+ partition = PARTITION_SPLIT;
+
+ cm->counts.partition[pl][partition]++;
+ }
+
+ subsize = get_subsize(bsize, partition);
+ *get_sb_index(xd, subsize) = 0;
+
+ switch (partition) {
+ case PARTITION_NONE:
+ decode_modes_b(pbi, mi_row, mi_col, r, subsize);
+ break;
+ case PARTITION_HORZ:
+ decode_modes_b(pbi, mi_row, mi_col, r, subsize);
+ *get_sb_index(xd, subsize) = 1;
+ if (mi_row + hbs < cm->mi_rows)
+ decode_modes_b(pbi, mi_row + hbs, mi_col, r, subsize);
+ break;
+ case PARTITION_VERT:
+ decode_modes_b(pbi, mi_row, mi_col, r, subsize);
+ *get_sb_index(xd, subsize) = 1;
+ if (mi_col + hbs < cm->mi_cols)
+ decode_modes_b(pbi, mi_row, mi_col + hbs, r, subsize);
+ break;
+ case PARTITION_SPLIT: {
+ int n;
+ for (n = 0; n < 4; n++) {
+ const int j = n >> 1, i = n & 1;
+ *get_sb_index(xd, subsize) = n;
+ decode_modes_sb(pbi, mi_row + j * hbs, mi_col + i * hbs, r, subsize);
+ }
+ } break;
+ default:
+ assert(!"Invalid partition type");
+ }
+
+ // update partition context
+ if (bsize >= BLOCK_8X8 &&
+ (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ update_partition_context(xd, subsize, bsize);
+ }
+}
+
+static void setup_token_decoder(VP9D_COMP *pbi,
+ const uint8_t *data, size_t read_size,
+ vp9_reader *r) {
+ VP9_COMMON *cm = &pbi->common;
+ const uint8_t *data_end = pbi->source + pbi->source_sz;
+
+  // Validate the calculated partition length. If the buffer described by
+  // the partition can't be fully read, throw an error (no error-concealment
+  // fallback is implemented here).
+ if (!read_is_valid(data, read_size, data_end))
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt tile length");
+
+ if (vp9_reader_init(r, data, read_size))
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate bool decoder %d", 1);
+}
+
+static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
+ vp9_reader *r) {
+ int i, j, k, l, m;
+
+ if (vp9_read_bit(r))
+ for (i = 0; i < BLOCK_TYPES; i++)
+ for (j = 0; j < REF_TYPES; j++)
+ for (k = 0; k < COEF_BANDS; k++)
+ for (l = 0; l < PREV_COEF_CONTEXTS; l++)
+ if (k > 0 || l < 3)
+ for (m = 0; m < UNCONSTRAINED_NODES; m++)
+ if (vp9_read(r, VP9_COEF_UPDATE_PROB))
+ vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
+}
+
+static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
+ vp9_reader *r) {
+ read_coef_probs_common(fc->coef_probs[TX_4X4], r);
+
+ if (tx_mode > ONLY_4X4)
+ read_coef_probs_common(fc->coef_probs[TX_8X8], r);
+
+ if (tx_mode > ALLOW_8X8)
+ read_coef_probs_common(fc->coef_probs[TX_16X16], r);
+
+ if (tx_mode > ALLOW_16X16)
+ read_coef_probs_common(fc->coef_probs[TX_32X32], r);
+}
+
+static void setup_segmentation(struct segmentation *seg,
+ struct vp9_read_bit_buffer *rb) {
+ int i, j;
+
+ seg->update_map = 0;
+ seg->update_data = 0;
+
+ seg->enabled = vp9_rb_read_bit(rb);
+ if (!seg->enabled)
+ return;
+
+ // Segmentation map update
+ seg->update_map = vp9_rb_read_bit(rb);
+ if (seg->update_map) {
+ for (i = 0; i < SEG_TREE_PROBS; i++)
+ seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
+ : MAX_PROB;
+
+ seg->temporal_update = vp9_rb_read_bit(rb);
+ if (seg->temporal_update) {
+ for (i = 0; i < PREDICTION_PROBS; i++)
+ seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
+ : MAX_PROB;
+ } else {
+ for (i = 0; i < PREDICTION_PROBS; i++)
+ seg->pred_probs[i] = MAX_PROB;
+ }
+ }
+
+ // Segmentation data update
+ seg->update_data = vp9_rb_read_bit(rb);
+ if (seg->update_data) {
+ seg->abs_delta = vp9_rb_read_bit(rb);
+
+ vp9_clearall_segfeatures(seg);
+
+ for (i = 0; i < MAX_SEGMENTS; i++) {
+ for (j = 0; j < SEG_LVL_MAX; j++) {
+ int data = 0;
+ const int feature_enabled = vp9_rb_read_bit(rb);
+ if (feature_enabled) {
+ vp9_enable_segfeature(seg, i, j);
+ data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
+ if (vp9_is_segfeature_signed(j))
+ data = vp9_rb_read_bit(rb) ? -data : data;
+ }
+ vp9_set_segdata(seg, i, j, data);
+ }
+ }
+ }
+}
+
+static void setup_loopfilter(struct loopfilter *lf,
+ struct vp9_read_bit_buffer *rb) {
+ lf->filter_level = vp9_rb_read_literal(rb, 6);
+ lf->sharpness_level = vp9_rb_read_literal(rb, 3);
+
+ // Read in loop filter deltas applied at the MB level based on mode or ref
+ // frame.
+ lf->mode_ref_delta_update = 0;
+
+ lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
+ if (lf->mode_ref_delta_enabled) {
+ lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
+ if (lf->mode_ref_delta_update) {
+ int i;
+
+ for (i = 0; i < MAX_REF_LF_DELTAS; i++)
+ if (vp9_rb_read_bit(rb))
+ lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
+
+ for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
+ if (vp9_rb_read_bit(rb))
+ lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
+ }
+ }
+}
+
+static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) {
+ const int old = *delta_q;
+ *delta_q = vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
+ return old != *delta_q;
+}
+
+static void setup_quantization(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
+ MACROBLOCKD *const xd = &pbi->mb;
+ VP9_COMMON *const cm = &pbi->common;
+ int update = 0;
+
+ cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
+ update |= read_delta_q(rb, &cm->y_dc_delta_q);
+ update |= read_delta_q(rb, &cm->uv_dc_delta_q);
+ update |= read_delta_q(rb, &cm->uv_ac_delta_q);
+ if (update)
+ vp9_init_dequantizer(cm);
+
+ xd->lossless = cm->base_qindex == 0 &&
+ cm->y_dc_delta_q == 0 &&
+ cm->uv_dc_delta_q == 0 &&
+ cm->uv_ac_delta_q == 0;
+
+ xd->itxm_add = xd->lossless ? vp9_idct_add_lossless_c
+ : vp9_idct_add;
+}
+
+static INTERPOLATIONFILTERTYPE read_interp_filter_type(
+ struct vp9_read_bit_buffer *rb) {
+ const INTERPOLATIONFILTERTYPE literal_to_type[] = { EIGHTTAP_SMOOTH,
+ EIGHTTAP,
+ EIGHTTAP_SHARP,
+ BILINEAR };
+ return vp9_rb_read_bit(rb) ? SWITCHABLE
+ : literal_to_type[vp9_rb_read_literal(rb, 2)];
+}
+
+static void read_frame_size(struct vp9_read_bit_buffer *rb,
+ int *width, int *height) {
+ const int w = vp9_rb_read_literal(rb, 16) + 1;
+ const int h = vp9_rb_read_literal(rb, 16) + 1;
+ *width = w;
+ *height = h;
+}
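+
+// Note (illustrative): sizes are coded minus one, so the 16-bit literals
+// cover 1..65536 rather than 0..65535 and a zero-sized frame cannot be
+// signaled.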
+
+static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
+ cm->display_width = cm->width;
+ cm->display_height = cm->height;
+ if (vp9_rb_read_bit(rb))
+ read_frame_size(rb, &cm->display_width, &cm->display_height);
+}
+
+static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
+ VP9_COMMON *cm = &pbi->common;
+
+ if (cm->width != width || cm->height != height) {
+ if (!pbi->initial_width || !pbi->initial_height) {
+ if (vp9_alloc_frame_buffers(cm, width, height))
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
+ pbi->initial_width = width;
+ pbi->initial_height = height;
+ } else {
+ if (width > pbi->initial_width)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Frame width too large");
+
+ if (height > pbi->initial_height)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Frame height too large");
+ }
+
+ cm->width = width;
+ cm->height = height;
+
+ vp9_update_frame_size(cm);
+ }
+
+ vp9_realloc_frame_buffer(&cm->yv12_fb[cm->new_fb_idx], cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS);
+}
+
+static void setup_frame_size(VP9D_COMP *pbi,
+ struct vp9_read_bit_buffer *rb) {
+ int width, height;
+ read_frame_size(rb, &width, &height);
+ apply_frame_size(pbi, width, height);
+ setup_display_size(&pbi->common, rb);
+}
+
+static void setup_frame_size_with_refs(VP9D_COMP *pbi,
+ struct vp9_read_bit_buffer *rb) {
+ VP9_COMMON *const cm = &pbi->common;
+
+ int width, height;
+ int found = 0, i;
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
+ if (vp9_rb_read_bit(rb)) {
+ YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->active_ref_idx[i]];
+ width = cfg->y_crop_width;
+ height = cfg->y_crop_height;
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ read_frame_size(rb, &width, &height);
+
+ if (!width || !height)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Referenced frame with invalid size");
+
+ apply_frame_size(pbi, width, height);
+ setup_display_size(cm, rb);
+}
+
+static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
+ const int num_threads = pbi->oxcf.max_threads;
+ VP9_COMMON *const cm = &pbi->common;
+ int mi_row, mi_col;
+ YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[cm->new_fb_idx];
+
+ if (pbi->do_loopfilter_inline) {
+ if (num_threads > 1) {
+ LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+ lf_data->frame_buffer = fb;
+ lf_data->cm = cm;
+ lf_data->xd = pbi->mb;
+ lf_data->stop = 0;
+ lf_data->y_only = 0;
+ }
+ vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
+ }
+
+ for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end;
+ mi_row += MI_BLOCK_SIZE) {
+    // For a SB there are 2 left contexts, each pertaining to a MB row
+    // within the SB.
+ vp9_zero(cm->left_context);
+ vp9_zero(cm->left_seg_context);
+ for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
+ mi_col += MI_BLOCK_SIZE)
+ decode_modes_sb(pbi, mi_row, mi_col, r, BLOCK_64X64);
+
+ if (pbi->do_loopfilter_inline) {
+ // delay the loopfilter by 1 macroblock row.
+ const int lf_start = mi_row - MI_BLOCK_SIZE;
+ if (lf_start < 0) continue;
+
+ if (num_threads > 1) {
+ LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+
+ // decoding has completed: finish up the loop filter in this thread.
+ if (mi_row + MI_BLOCK_SIZE >= cm->cur_tile_mi_row_end) continue;
+
+ vp9_worker_sync(&pbi->lf_worker);
+ lf_data->start = lf_start;
+ lf_data->stop = mi_row;
+ pbi->lf_worker.hook = vp9_loop_filter_worker;
+ vp9_worker_launch(&pbi->lf_worker);
+ } else {
+ vp9_loop_filter_rows(fb, cm, &pbi->mb, lf_start, mi_row, 0);
+ }
+ }
+ }
+
+ if (pbi->do_loopfilter_inline) {
+ int lf_start;
+ if (num_threads > 1) {
+ LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+
+ vp9_worker_sync(&pbi->lf_worker);
+ lf_start = lf_data->stop;
+ } else {
+ lf_start = mi_row - MI_BLOCK_SIZE;
+ }
+ vp9_loop_filter_rows(fb, cm, &pbi->mb,
+ lf_start, cm->mi_rows, 0);
+ }
+}
+
+static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
+ int min_log2_tile_cols, max_log2_tile_cols, max_ones;
+ vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+
+ // columns
+ max_ones = max_log2_tile_cols - min_log2_tile_cols;
+ cm->log2_tile_cols = min_log2_tile_cols;
+ while (max_ones-- && vp9_rb_read_bit(rb))
+ cm->log2_tile_cols++;
+
+ // rows
+ cm->log2_tile_rows = vp9_rb_read_bit(rb);
+ if (cm->log2_tile_rows)
+ cm->log2_tile_rows += vp9_rb_read_bit(rb);
+}
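+
+// Illustrative: the column count is coded as a unary run of ones on top of
+// min_log2_tile_cols, capped at max_log2_tile_cols; rows use at most two
+// bits, mapping 0, 10 and 11 to log2_tile_rows == 0, 1 and 2.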
+
+static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
+ vp9_reader residual_bc;
+
+ VP9_COMMON *const cm = &pbi->common;
+
+ const uint8_t *const data_end = pbi->source + pbi->source_sz;
+ const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int tile_rows = 1 << cm->log2_tile_rows;
+ int tile_row, tile_col;
+
+ // Note: this memset assumes above_context[0], [1] and [2]
+ // are allocated as part of the same buffer.
+ vpx_memset(cm->above_context[0], 0,
+ sizeof(ENTROPY_CONTEXT) * MAX_MB_PLANE * (2 * aligned_mi_cols));
+
+ vpx_memset(cm->above_seg_context, 0,
+ sizeof(PARTITION_CONTEXT) * aligned_mi_cols);
+
+ if (pbi->oxcf.inv_tile_order) {
+ const uint8_t *data_ptr2[4][1 << 6];
+ vp9_reader bc_bak = {0};
+
+    // Pre-initialize the offsets; we're going to read in inverse order.
+ data_ptr2[0][0] = data;
+ for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+ if (tile_row) {
+ const int size = read_be32(data_ptr2[tile_row - 1][tile_cols - 1]);
+ data_ptr2[tile_row - 1][tile_cols - 1] += 4;
+ data_ptr2[tile_row][0] = data_ptr2[tile_row - 1][tile_cols - 1] + size;
+ }
+
+ for (tile_col = 1; tile_col < tile_cols; tile_col++) {
+ const int size = read_be32(data_ptr2[tile_row][tile_col - 1]);
+ data_ptr2[tile_row][tile_col - 1] += 4;
+ data_ptr2[tile_row][tile_col] =
+ data_ptr2[tile_row][tile_col - 1] + size;
+ }
+ }
+
+ for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(cm, tile_row);
+ for (tile_col = tile_cols - 1; tile_col >= 0; tile_col--) {
+ vp9_get_tile_col_offsets(cm, tile_col);
+ setup_token_decoder(pbi, data_ptr2[tile_row][tile_col],
+ data_end - data_ptr2[tile_row][tile_col],
+ &residual_bc);
+ decode_tile(pbi, &residual_bc);
+ if (tile_row == tile_rows - 1 && tile_col == tile_cols - 1)
+ bc_bak = residual_bc;
+ }
+ }
+ residual_bc = bc_bak;
+ } else {
+ int has_more;
+
+ for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(cm, tile_row);
+ for (tile_col = 0; tile_col < tile_cols; tile_col++) {
+ size_t size;
+
+ vp9_get_tile_col_offsets(cm, tile_col);
+
+ has_more = tile_col < tile_cols - 1 || tile_row < tile_rows - 1;
+ if (has_more) {
+ if (!read_is_valid(data, 4, data_end))
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt tile length");
+
+ size = read_be32(data);
+ data += 4;
+ } else {
+ size = data_end - data;
+ }
+
+ setup_token_decoder(pbi, data, size, &residual_bc);
+ decode_tile(pbi, &residual_bc);
+ data += size;
+ }
+ }
+ }
+
+ return vp9_reader_find_end(&residual_bc);
+}
+
+static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
+ if (vp9_rb_read_literal(rb, 8) != SYNC_CODE_0 ||
+ vp9_rb_read_literal(rb, 8) != SYNC_CODE_1 ||
+ vp9_rb_read_literal(rb, 8) != SYNC_CODE_2) {
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame sync code");
+ }
+}
+
+static void error_handler(void *data, size_t bit_offset) {
+ VP9_COMMON *const cm = (VP9_COMMON *)data;
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
+}
+
+static void setup_inter_inter(VP9_COMMON *cm) {
+ int i;
+
+ cm->allow_comp_inter_inter = 0;
+ for (i = 1; i < ALLOWED_REFS_PER_FRAME; ++i)
+ cm->allow_comp_inter_inter |=
+ cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1];
+
+ if (cm->allow_comp_inter_inter) {
+    // Determine which reference is always used (fixed) in comp inter-inter.
+ if (cm->ref_frame_sign_bias[LAST_FRAME] ==
+ cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
+ cm->comp_fixed_ref = ALTREF_FRAME;
+ cm->comp_var_ref[0] = LAST_FRAME;
+ cm->comp_var_ref[1] = GOLDEN_FRAME;
+ } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
+ cm->ref_frame_sign_bias[ALTREF_FRAME]) {
+ cm->comp_fixed_ref = GOLDEN_FRAME;
+ cm->comp_var_ref[0] = LAST_FRAME;
+ cm->comp_var_ref[1] = ALTREF_FRAME;
+ } else {
+ cm->comp_fixed_ref = LAST_FRAME;
+ cm->comp_var_ref[0] = GOLDEN_FRAME;
+ cm->comp_var_ref[1] = ALTREF_FRAME;
+ }
+ }
+}
+
+#define RESERVED \
+ if (vp9_rb_read_bit(rb)) \
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, \
+ "Reserved bit must be unset")
+
+static size_t read_uncompressed_header(VP9D_COMP *pbi,
+ struct vp9_read_bit_buffer *rb) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ int i;
+
+ cm->last_frame_type = cm->frame_type;
+
+ if (vp9_rb_read_literal(rb, 2) != 0x2)
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame marker");
+
+ cm->version = vp9_rb_read_bit(rb);
+ RESERVED;
+
+ if (vp9_rb_read_bit(rb)) {
+ // show an existing frame directly
+ int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
+ ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->new_fb_idx, frame_to_show);
+ pbi->refresh_frame_flags = 0;
+ cm->lf.filter_level = 0;
+ return 0;
+ }
+
+ cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
+ cm->show_frame = vp9_rb_read_bit(rb);
+ cm->error_resilient_mode = vp9_rb_read_bit(rb);
+
+ if (cm->frame_type == KEY_FRAME) {
+ int csp;
+
+ check_sync_code(cm, rb);
+
+ csp = vp9_rb_read_literal(rb, 3); // colorspace
+ if (csp != 7) { // != sRGB
+ vp9_rb_read_bit(rb); // [16,235] (including xvycc) vs [0,255] range
+ if (cm->version == 1) {
+ cm->subsampling_x = vp9_rb_read_bit(rb);
+ cm->subsampling_y = vp9_rb_read_bit(rb);
+ vp9_rb_read_bit(rb); // has extra plane
+ } else {
+ cm->subsampling_y = cm->subsampling_x = 1;
+ }
+ } else {
+ if (cm->version == 1) {
+ cm->subsampling_y = cm->subsampling_x = 0;
+ vp9_rb_read_bit(rb); // has extra plane
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "RGB not supported in profile 0");
+ }
+ }
+
+ pbi->refresh_frame_flags = (1 << NUM_REF_FRAMES) - 1;
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
+ cm->active_ref_idx[i] = cm->new_fb_idx;
+
+ setup_frame_size(pbi, rb);
+ } else {
+ cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);
+
+ cm->reset_frame_context = cm->error_resilient_mode ?
+ 0 : vp9_rb_read_literal(rb, 2);
+
+ if (cm->intra_only) {
+ check_sync_code(cm, rb);
+
+ pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);
+ setup_frame_size(pbi, rb);
+ } else {
+ pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
+ const int ref = vp9_rb_read_literal(rb, NUM_REF_FRAMES_LOG2);
+ cm->active_ref_idx[i] = cm->ref_frame_map[ref];
+ cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
+ }
+
+ setup_frame_size_with_refs(pbi, rb);
+
+ xd->allow_high_precision_mv = vp9_rb_read_bit(rb);
+ cm->mcomp_filter_type = read_interp_filter_type(rb);
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
+ vp9_setup_scale_factors(cm, i);
+
+ setup_inter_inter(cm);
+ }
+ }
+
+ if (!cm->error_resilient_mode) {
+ cm->refresh_frame_context = vp9_rb_read_bit(rb);
+ cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
+ } else {
+ cm->refresh_frame_context = 0;
+ cm->frame_parallel_decoding_mode = 1;
+ }
+
+ cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LOG2);
+
+ if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode || cm->intra_only)
+ vp9_setup_past_independence(cm);
+
+ setup_loopfilter(&cm->lf, rb);
+ setup_quantization(pbi, rb);
+ setup_segmentation(&cm->seg, rb);
+
+ setup_tile_info(cm, rb);
+
+ return vp9_rb_read_literal(rb, 16);
+}
+
+static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data,
+ size_t partition_size) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ vp9_reader r;
+
+ if (vp9_reader_init(&r, data, partition_size))
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate bool decoder 0");
+
+ cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
+ if (cm->tx_mode == TX_MODE_SELECT)
+ read_tx_probs(&cm->fc.tx_probs, &r);
+ read_coef_probs(&cm->fc, cm->tx_mode, &r);
+
+ vp9_prepare_read_mode_info(pbi, &r);
+
+ return vp9_reader_has_error(&r);
+}
+
+void vp9_init_dequantizer(VP9_COMMON *cm) {
+ int q;
+
+ for (q = 0; q < QINDEX_RANGE; q++) {
+ cm->y_dequant[q][0] = vp9_dc_quant(q, cm->y_dc_delta_q);
+ cm->y_dequant[q][1] = vp9_ac_quant(q, 0);
+
+ cm->uv_dequant[q][0] = vp9_dc_quant(q, cm->uv_dc_delta_q);
+ cm->uv_dequant[q][1] = vp9_ac_quant(q, cm->uv_ac_delta_q);
+ }
+}
+
+int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
+ int i;
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ const uint8_t *data = pbi->source;
+ const uint8_t *data_end = pbi->source + pbi->source_sz;
+
+ struct vp9_read_bit_buffer rb = { data, data_end, 0,
+ cm, error_handler };
+ const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
+ const int keyframe = cm->frame_type == KEY_FRAME;
+ YV12_BUFFER_CONFIG *new_fb = &cm->yv12_fb[cm->new_fb_idx];
+
+ if (!first_partition_size) {
+ // showing a frame directly
+ *p_data_end = data + 1;
+ return 0;
+ }
+ data += vp9_rb_bytes_read(&rb);
+ xd->corrupted = 0;
+ new_fb->corrupted = 0;
+ pbi->do_loopfilter_inline =
+ (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level;
+
+ if (!pbi->decoded_key_frame && !keyframe)
+ return -1;
+
+ if (!read_is_valid(data, first_partition_size, data_end))
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt header length");
+
+ setup_plane_dequants(cm, &pbi->mb, cm->base_qindex);
+
+ xd->mi_8x8 = cm->mi_grid_visible;
+ xd->mic_stream_ptr = cm->mi;
+ xd->mode_info_stride = cm->mode_info_stride;
+
+ cm->fc = cm->frame_contexts[cm->frame_context_idx];
+
+ vp9_zero(cm->counts);
+
+ new_fb->corrupted |= read_compressed_header(pbi, data, first_partition_size);
+
+ setup_block_dptrs(xd, cm->subsampling_x, cm->subsampling_y);
+
+ // clear out the coeff buffer
+ for (i = 0; i < MAX_MB_PLANE; ++i)
+ vp9_zero(xd->plane[i].qcoeff);
+
+ set_prev_mi(cm);
+
+ *p_data_end = decode_tiles(pbi, data + first_partition_size);
+
+ cm->last_width = cm->width;
+ cm->last_height = cm->height;
+
+ new_fb->corrupted |= xd->corrupted;
+
+ if (!pbi->decoded_key_frame) {
+ if (keyframe && !new_fb->corrupted)
+ pbi->decoded_key_frame = 1;
+ else
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "A stream must start with a complete key frame");
+ }
+
+ if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
+ vp9_adapt_coef_probs(cm);
+
+ if (!keyframe && !cm->intra_only) {
+ vp9_adapt_mode_probs(cm);
+ vp9_adapt_mv_probs(cm, xd->allow_high_precision_mv);
+ }
+ }
+
+ if (cm->refresh_frame_context)
+ cm->frame_contexts[cm->frame_context_idx] = cm->fc;
+
+ return 0;
+}
diff --git a/libvpx/vp9/decoder/vp9_decodframe.h b/libvpx/vp9/decoder/vp9_decodframe.h
new file mode 100644
index 0000000..c665f6f
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_decodframe.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_DECODER_VP9_DECODFRAME_H_
+#define VP9_DECODER_VP9_DECODFRAME_H_
+
+struct VP9Common;
+struct VP9Decompressor;
+
+void vp9_init_dequantizer(struct VP9Common *cm);
+int vp9_decode_frame(struct VP9Decompressor *cpi, const uint8_t **p_data_end);
+
+#endif // VP9_DECODER_VP9_DECODFRAME_H_
diff --git a/libvpx/vp9/decoder/vp9_detokenize.c b/libvpx/vp9/decoder/vp9_detokenize.c
new file mode 100644
index 0000000..cd74a0b
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_detokenize.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_seg_common.h"
+
+#include "vp9/decoder/vp9_dboolhuff.h"
+#include "vp9/decoder/vp9_detokenize.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_treereader.h"
+
+#define EOB_CONTEXT_NODE 0
+#define ZERO_CONTEXT_NODE 1
+#define ONE_CONTEXT_NODE 2
+#define LOW_VAL_CONTEXT_NODE 3
+#define TWO_CONTEXT_NODE 4
+#define THREE_CONTEXT_NODE 5
+#define HIGH_LOW_CONTEXT_NODE 6
+#define CAT_ONE_CONTEXT_NODE 7
+#define CAT_THREEFOUR_CONTEXT_NODE 8
+#define CAT_THREE_CONTEXT_NODE 9
+#define CAT_FIVE_CONTEXT_NODE 10
+
+#define CAT1_MIN_VAL 5
+#define CAT2_MIN_VAL 7
+#define CAT3_MIN_VAL 11
+#define CAT4_MIN_VAL 19
+#define CAT5_MIN_VAL 35
+#define CAT6_MIN_VAL 67
+#define CAT1_PROB0 159
+#define CAT2_PROB0 145
+#define CAT2_PROB1 165
+
+#define CAT3_PROB0 140
+#define CAT3_PROB1 148
+#define CAT3_PROB2 173
+
+#define CAT4_PROB0 135
+#define CAT4_PROB1 140
+#define CAT4_PROB2 155
+#define CAT4_PROB3 176
+
+#define CAT5_PROB0 130
+#define CAT5_PROB1 134
+#define CAT5_PROB2 141
+#define CAT5_PROB3 157
+#define CAT5_PROB4 180
+
+static const vp9_prob cat6_prob[15] = {
+ 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0
+};
+
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+#define INCREMENT_COUNT(token) \
+ do { \
+ coef_counts[type][ref][band][pt] \
+ [token >= TWO_TOKEN ? \
+ (token == DCT_EOB_TOKEN ? DCT_EOB_MODEL_TOKEN : TWO_TOKEN) : \
+ token]++; \
+ token_cache[scan[c]] = vp9_pt_energy_class[token]; \
+ } while (0)
+
+#define WRITE_COEF_CONTINUE(val, token) \
+ { \
+ qcoeff_ptr[scan[c]] = vp9_read_and_apply_sign(r, val) * \
+ dq[c > 0] / (1 + (tx_size == TX_32X32)); \
+ INCREMENT_COUNT(token); \
+ c++; \
+ continue; \
+ }
+
+#define ADJUST_COEF(prob, bits_count) \
+  do { \
+    if (vp9_read(r, prob)) \
+      val += 1 << bits_count; \
+  } while (0)
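+
+// Notes on the macros above: INCREMENT_COUNT updates the model counts and the
+// token_cache used for context derivation; WRITE_COEF_CONTINUE applies the
+// sign and dequantizes (dq[0] for the DC coefficient at c == 0, dq[1] for all
+// AC coefficients, halved for TX_32X32 to compensate for the extra scaling of
+// the 32x32 transform); ADJUST_COEF conditionally adds a refinement bit worth
+// 2^bits_count, e.g. for category 2:
+//
+//   val = CAT2_MIN_VAL;           // 7
+//   ADJUST_COEF(CAT2_PROB1, 1);   // val += 0 or 2
+//   ADJUST_COEF(CAT2_PROB0, 0);   // val += 0 or 1, so val ends in [7, 10]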
+
+static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd,
+ vp9_reader *r, int block_idx,
+ PLANE_TYPE type, int seg_eob, int16_t *qcoeff_ptr,
+ TX_SIZE tx_size, const int16_t *dq,
+ ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L) {
+ FRAME_CONTEXT *const fc = &cm->fc;
+ FRAME_COUNTS *const counts = &cm->counts;
+ const int ref = is_inter_block(&xd->this_mi->mbmi);
+ int band, c = 0;
+ vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES] =
+ fc->coef_probs[tx_size][type][ref];
+ vp9_prob coef_probs_full[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+ uint8_t load_map[COEF_BANDS][PREV_COEF_CONTEXTS] = { { 0 } };
+ vp9_prob *prob;
+ vp9_coeff_count_model *coef_counts = counts->coef[tx_size];
+ const int16_t *scan, *nb;
+ const uint8_t *band_translate;
+ uint8_t token_cache[1024];
+ int pt = get_entropy_context(xd, tx_size, type, block_idx, A, L,
+ &scan, &band_translate);
+ nb = vp9_get_coef_neighbors_handle(scan);
+
+ while (1) {
+ int val;
+ const uint8_t *cat6 = cat6_prob;
+ if (c >= seg_eob)
+ break;
+ if (c)
+ pt = get_coef_context(nb, token_cache, c);
+ band = get_coef_band(band_translate, c);
+ prob = coef_probs[band][pt];
+ counts->eob_branch[tx_size][type][ref][band][pt]++;
+ if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
+ break;
+
+SKIP_START:
+ if (c >= seg_eob)
+ break;
+ if (c)
+ pt = get_coef_context(nb, token_cache, c);
+ band = get_coef_band(band_translate, c);
+ prob = coef_probs[band][pt];
+
+ if (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
+ INCREMENT_COUNT(ZERO_TOKEN);
+ ++c;
+ goto SKIP_START;
+ }
+
+ // ONE_CONTEXT_NODE_0_
+ if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
+ WRITE_COEF_CONTINUE(1, ONE_TOKEN);
+ }
+ // Load full probabilities if not already loaded
+ if (!load_map[band][pt]) {
+ vp9_model_to_full_probs(coef_probs[band][pt],
+ coef_probs_full[band][pt]);
+ load_map[band][pt] = 1;
+ }
+ prob = coef_probs_full[band][pt];
+ // LOW_VAL_CONTEXT_NODE_0_
+ if (!vp9_read(r, prob[LOW_VAL_CONTEXT_NODE])) {
+ if (!vp9_read(r, prob[TWO_CONTEXT_NODE])) {
+ WRITE_COEF_CONTINUE(2, TWO_TOKEN);
+ }
+ if (!vp9_read(r, prob[THREE_CONTEXT_NODE])) {
+ WRITE_COEF_CONTINUE(3, THREE_TOKEN);
+ }
+ WRITE_COEF_CONTINUE(4, FOUR_TOKEN);
+ }
+ // HIGH_LOW_CONTEXT_NODE_0_
+ if (!vp9_read(r, prob[HIGH_LOW_CONTEXT_NODE])) {
+ if (!vp9_read(r, prob[CAT_ONE_CONTEXT_NODE])) {
+ val = CAT1_MIN_VAL;
+ ADJUST_COEF(CAT1_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY1);
+ }
+ val = CAT2_MIN_VAL;
+ ADJUST_COEF(CAT2_PROB1, 1);
+ ADJUST_COEF(CAT2_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY2);
+ }
+ // CAT_THREEFOUR_CONTEXT_NODE_0_
+ if (!vp9_read(r, prob[CAT_THREEFOUR_CONTEXT_NODE])) {
+ if (!vp9_read(r, prob[CAT_THREE_CONTEXT_NODE])) {
+ val = CAT3_MIN_VAL;
+ ADJUST_COEF(CAT3_PROB2, 2);
+ ADJUST_COEF(CAT3_PROB1, 1);
+ ADJUST_COEF(CAT3_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY3);
+ }
+ val = CAT4_MIN_VAL;
+ ADJUST_COEF(CAT4_PROB3, 3);
+ ADJUST_COEF(CAT4_PROB2, 2);
+ ADJUST_COEF(CAT4_PROB1, 1);
+ ADJUST_COEF(CAT4_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY4);
+ }
+ // CAT_FIVE_CONTEXT_NODE_0_:
+ if (!vp9_read(r, prob[CAT_FIVE_CONTEXT_NODE])) {
+ val = CAT5_MIN_VAL;
+ ADJUST_COEF(CAT5_PROB4, 4);
+ ADJUST_COEF(CAT5_PROB3, 3);
+ ADJUST_COEF(CAT5_PROB2, 2);
+ ADJUST_COEF(CAT5_PROB1, 1);
+ ADJUST_COEF(CAT5_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY5);
+ }
+ val = 0;
+ while (*cat6) {
+ val = (val << 1) | vp9_read(r, *cat6++);
+ }
+ val += CAT6_MIN_VAL;
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY6);
+ }
+
+ if (c < seg_eob)
+ coef_counts[type][ref][band][pt][DCT_EOB_MODEL_TOKEN]++;
+
+ return c;
+}
+
+struct decode_block_args {
+ VP9D_COMP *pbi;
+ vp9_reader *r;
+ int *eobtotal;
+};
+
+static void decode_block(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *argv) {
+ const struct decode_block_args* const arg = argv;
+
+ // find the maximum eob for this transform size, adjusted by segment
+ MACROBLOCKD *xd = &arg->pbi->mb;
+ struct segmentation *seg = &arg->pbi->common.seg;
+ struct macroblockd_plane* pd = &xd->plane[plane];
+ const int segment_id = xd->this_mi->mbmi.segment_id;
+ const int seg_eob = get_tx_eob(seg, segment_id, tx_size);
+ int aoff, loff, eob;
+
+ txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);
+
+ eob = decode_coefs(&arg->pbi->common, xd, arg->r, block,
+ pd->plane_type, seg_eob, BLOCK_OFFSET(pd->qcoeff, block),
+ tx_size, pd->dequant,
+ pd->above_context + aoff, pd->left_context + loff);
+
+ set_contexts(xd, pd, plane_bsize, tx_size, eob > 0, aoff, loff);
+
+ pd->eobs[block] = eob;
+ *arg->eobtotal += eob;
+}
+
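+// vp9_decode_tokens() walks every transform block of the bsize unit via
+// foreach_transformed_block(), with decode_block() as the visitor; eobtotal
+// accumulates the per-block eob counts so the caller can tell whether any
+// non-zero coefficients were coded at all.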
+int vp9_decode_tokens(VP9D_COMP *pbi, vp9_reader *r, BLOCK_SIZE bsize) {
+ int eobtotal = 0;
+ struct decode_block_args args = {pbi, r, &eobtotal};
+ foreach_transformed_block(&pbi->mb, bsize, decode_block, &args);
+ return eobtotal;
+}
diff --git a/libvpx/vp9/decoder/vp9_detokenize.h b/libvpx/vp9/decoder/vp9_detokenize.h
new file mode 100644
index 0000000..cf07c56
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_detokenize.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_DECODER_VP9_DETOKENIZE_H_
+#define VP9_DECODER_VP9_DETOKENIZE_H_
+
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_dboolhuff.h"
+
+int vp9_decode_tokens(VP9D_COMP* pbi, vp9_reader *r, BLOCK_SIZE bsize);
+
+#endif // VP9_DECODER_VP9_DETOKENIZE_H_
diff --git a/libvpx/vp9/decoder/vp9_dsubexp.c b/libvpx/vp9/decoder/vp9_dsubexp.c
new file mode 100644
index 0000000..8cc64f7
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_dsubexp.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_entropy.h"
+
+#include "vp9/decoder/vp9_dsubexp.h"
+
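+// For example, with m = 10, v = 0, 1, 2, 3, 4, ... maps to 10, 9, 11, 8,
+// 12, ...: values are recentered around m, alternating below and above it,
+// while any v > 2 * m maps to itself.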
+static int inv_recenter_nonneg(int v, int m) {
+ if (v > 2 * m)
+ return v;
+
+ return v % 2 ? m - (v + 1) / 2 : m + v / 2;
+}
+
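+// Reads a truncated binary code over n symbols. With n = 6, l = 3 and m = 2,
+// so a 2-bit value v < 2 decodes directly as 0 or 1, while v >= 2 consumes
+// one extra bit and decodes to 2..5; the shorter codes go to the smaller
+// symbol values.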
+static int decode_uniform(vp9_reader *r, int n) {
+ int v;
+ const int l = get_unsigned_bits(n);
+ const int m = (1 << l) - n;
+ if (!l)
+ return 0;
+
+ v = vp9_read_literal(r, l - 1);
+ return v < m ? v : (v << 1) - m + vp9_read_bit(r);
+}
+
+
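+// merge_index() orders the "coarse" values (multiples of the modulus, offset
+// by modulus / 2) ahead of all remaining values. With n = MAX_PROB - 1 and a
+// modulus of 13, v = 0..19 maps to 6, 19, 32, ..., 253, and the leftover
+// values then follow in ascending order, which is exactly the inv_map_table
+// below.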
+static int merge_index(int v, int n, int modulus) {
+ int max1 = (n - 1 - modulus / 2) / modulus + 1;
+ if (v < max1) {
+ v = v * modulus + modulus / 2;
+ } else {
+ int w;
+ v -= max1;
+ w = v;
+ v += (v + modulus - modulus / 2) / modulus;
+ while (v % modulus == modulus / 2 ||
+ w != v - (v + modulus - modulus / 2) / modulus) v++;
+ }
+ return v;
+}
+
+static int inv_remap_prob(int v, int m) {
+ static int inv_map_table[MAX_PROB - 1] = {
+ // generated by:
+ // inv_map_table[j] = merge_index(j, MAX_PROB - 1, MODULUS_PARAM);
+ 6, 19, 32, 45, 58, 71, 84, 97, 110, 123, 136, 149, 162, 175, 188,
+ 201, 214, 227, 240, 253, 0, 1, 2, 3, 4, 5, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83, 85, 86, 87, 88, 89, 90, 91,
+ 92, 93, 94, 95, 96, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 137, 138, 139, 140,
+ 141, 142, 143, 144, 145, 146, 147, 148, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 225, 226, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+  };
+  // v = merge_index(v, MAX_PROB - 1, MODULUS_PARAM);
+ v = inv_map_table[v];
+ m--;
+ if ((m << 1) <= MAX_PROB) {
+ return 1 + inv_recenter_nonneg(v + 1, m);
+ } else {
+ return MAX_PROB - inv_recenter_nonneg(v + 1, MAX_PROB - 1 - m);
+ }
+}
+
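+// Reads a terminated sub-exponential code: each set bit escapes to the next
+// interval before a raw literal is read. Assuming SUBEXP_PARAM == 4 and
+// num_syms == 255 (the arguments used by vp9_diff_update_prob() below), the
+// intervals are [0, 16), [16, 32) and [32, 64), coded with 4-, 4- and 5-bit
+// literals respectively, and the tail [64, 255) is coded with
+// decode_uniform().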
+static int decode_term_subexp(vp9_reader *r, int k, int num_syms) {
+ int i = 0, mk = 0, word;
+ while (1) {
+ const int b = i ? k + i - 1 : k;
+ const int a = 1 << b;
+ if (num_syms <= mk + 3 * a) {
+ word = decode_uniform(r, num_syms - mk) + mk;
+ break;
+ } else {
+ if (vp9_read_bit(r)) {
+ i++;
+ mk += a;
+ } else {
+ word = vp9_read_literal(r, b) + mk;
+ break;
+ }
+ }
+ }
+ return word;
+}
+
+void vp9_diff_update_prob(vp9_reader *r, vp9_prob* p) {
+ int delp = decode_term_subexp(r, SUBEXP_PARAM, 255);
+ *p = (vp9_prob)inv_remap_prob(delp, *p);
+}
diff --git a/libvpx/vp9/decoder/vp9_dsubexp.h b/libvpx/vp9/decoder/vp9_dsubexp.h
new file mode 100644
index 0000000..aeb9399
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_dsubexp.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_DECODER_VP9_DSUBEXP_H_
+#define VP9_DECODER_VP9_DSUBEXP_H_
+
+#include "vp9/decoder/vp9_dboolhuff.h"
+
+void vp9_diff_update_prob(vp9_reader *r, vp9_prob* p);
+
+#endif // VP9_DECODER_VP9_DSUBEXP_H_
diff --git a/libvpx/vp9/decoder/vp9_idct_blk.c b/libvpx/vp9/decoder/vp9_idct_blk.c
new file mode 100644
index 0000000..395e636
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_idct_blk.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/decoder/vp9_idct_blk.h"
+
+static void add_constant_residual(const int16_t diff, uint8_t *dest, int stride,
+ int width, int height) {
+ int r, c;
+
+ for (r = 0; r < height; r++) {
+ for (c = 0; c < width; c++)
+ dest[c] = clip_pixel(diff + dest[c]);
+
+ dest += stride;
+ }
+}
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest,
+ int stride) {
+ add_constant_residual(diff, dest, stride, 8, 8);
+}
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest,
+ int stride) {
+ add_constant_residual(diff, dest, stride, 16, 16);
+}
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest,
+ int stride) {
+ add_constant_residual(diff, dest, stride, 32, 32);
+}
+
+void vp9_iht_add_c(TX_TYPE tx_type, int16_t *input, uint8_t *dest, int stride,
+ int eob) {
+ if (tx_type == DCT_DCT) {
+ vp9_idct_add(input, dest, stride, eob);
+ } else {
+ vp9_short_iht4x4_add(input, dest, stride, tx_type);
+ vpx_memset(input, 0, 32);
+ }
+}
+
+void vp9_iht_add_8x8_c(TX_TYPE tx_type, int16_t *input, uint8_t *dest,
+ int stride, int eob) {
+ if (tx_type == DCT_DCT) {
+ vp9_idct_add_8x8(input, dest, stride, eob);
+ } else {
+ if (eob > 0) {
+ vp9_short_iht8x8_add(input, dest, stride, tx_type);
+ vpx_memset(input, 0, 128);
+ }
+ }
+}
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob) {
+ if (eob > 1) {
+ vp9_short_idct4x4_add(input, dest, stride);
+ vpx_memset(input, 0, 32);
+ } else {
+ vp9_short_idct4x4_1_add(input, dest, stride);
+ ((int *)input)[0] = 0;
+ }
+}
+
+void vp9_idct_add_lossless_c(int16_t *input, uint8_t *dest, int stride,
+ int eob) {
+ if (eob > 1) {
+ vp9_short_iwalsh4x4_add(input, dest, stride);
+ vpx_memset(input, 0, 32);
+ } else {
+ vp9_short_iwalsh4x4_1_add_c(input, dest, stride);
+ ((int *)input)[0] = 0;
+ }
+}
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob) {
+  // If dc is 1, then input[0] is the reconstructed value and does not need
+  // dequantization. Also, when dc is 1, dc is counted in eobs, namely
+  // eobs >= 1.
+
+ // The calculation can be simplified if there are not many non-zero dct
+ // coefficients. Use eobs to decide what to do.
+ // TODO(yunqingwang): "eobs = 1" case is also handled in vp9_short_idct8x8_c.
+ // Combine that with code here.
+ if (eob) {
+ if (eob == 1) {
+ // DC only DCT coefficient
+ vp9_short_idct8x8_1_add(input, dest, stride);
+ input[0] = 0;
+ } else if (eob <= 10) {
+ vp9_short_idct10_8x8_add(input, dest, stride);
+ vpx_memset(input, 0, 128);
+ } else {
+ vp9_short_idct8x8_add(input, dest, stride);
+ vpx_memset(input, 0, 128);
+ }
+ }
+}
+
+void vp9_iht_add_16x16_c(TX_TYPE tx_type, int16_t *input, uint8_t *dest,
+ int stride, int eob) {
+ if (tx_type == DCT_DCT) {
+ vp9_idct_add_16x16(input, dest, stride, eob);
+ } else {
+ if (eob > 0) {
+ vp9_short_iht16x16_add(input, dest, stride, tx_type);
+ vpx_memset(input, 0, 512);
+ }
+ }
+}
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob) {
+ /* The calculation can be simplified if there are not many non-zero dct
+ * coefficients. Use eobs to separate different cases. */
+ if (eob) {
+ if (eob == 1) {
+ /* DC only DCT coefficient. */
+ vp9_short_idct16x16_1_add(input, dest, stride);
+ input[0] = 0;
+ } else if (eob <= 10) {
+ vp9_short_idct10_16x16_add(input, dest, stride);
+ vpx_memset(input, 0, 512);
+ } else {
+ vp9_short_idct16x16_add(input, dest, stride);
+ vpx_memset(input, 0, 512);
+ }
+ }
+}
+
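+// For eob == 1 the inverse transform of a lone DC coefficient is a constant
+// plane, so vp9_short_idct1_32x32() computes that constant once into
+// output[0] and vp9_add_constant_residual_32x32() adds it to every pixel,
+// avoiding the full 32x32 inverse transform.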
+void vp9_idct_add_32x32_c(int16_t *input, uint8_t *dest, int stride, int eob) {
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output, 1024);
+
+ if (eob) {
+ if (eob == 1) {
+ vp9_short_idct1_32x32(input, output);
+ vp9_add_constant_residual_32x32(output[0], dest, stride);
+ input[0] = 0;
+ } else {
+ vp9_short_idct32x32_add(input, dest, stride);
+ vpx_memset(input, 0, 2048);
+ }
+ }
+}
+
diff --git a/libvpx/vp9/decoder/vp9_idct_blk.h b/libvpx/vp9/decoder/vp9_idct_blk.h
new file mode 100644
index 0000000..1810bd0
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_idct_blk.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_DECODER_VP9_IDCT_BLK_H_
+#define VP9_DECODER_VP9_IDCT_BLK_H_
+
+#include "vp9/common/vp9_blockd.h"
+
+
+void vp9_idct_add_lossless_c(int16_t *input, unsigned char *dest, int stride,
+ int eob);
+
+void vp9_iht_add_c(TX_TYPE tx_type, int16_t *input, unsigned char *dest,
+ int stride, int eob);
+
+void vp9_iht_add_8x8_c(TX_TYPE tx_type, int16_t *input, unsigned char *dest,
+ int stride, int eob);
+
+void vp9_iht_add_16x16_c(TX_TYPE tx_type, int16_t *input, unsigned char *dest,
+ int stride, int eob);
+
+#endif // VP9_DECODER_VP9_IDCT_BLK_H_
diff --git a/libvpx/vp9/decoder/vp9_onyxd.h b/libvpx/vp9/decoder/vp9_onyxd.h
new file mode 100644
index 0000000..cd5b750
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_onyxd.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ONYXD_H_
+#define VP9_COMMON_VP9_ONYXD_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "vpx_scale/yv12config.h"
+#include "vp9/common/vp9_ppflags.h"
+#include "vpx/vpx_codec.h"
+
+typedef void *VP9D_PTR;
+
+typedef struct {
+ int width;
+ int height;
+ int version;
+ int postprocess;
+ int max_threads;
+ int inv_tile_order;
+ int input_partition;
+} VP9D_CONFIG;
+
+typedef enum {
+ VP9_LAST_FLAG = 1,
+ VP9_GOLD_FLAG = 2,
+ VP9_ALT_FLAG = 4
+} VP9_REFFRAME;
+
+void vp9_initialize_dec();
+
+int vp9_receive_compressed_data(VP9D_PTR comp,
+ uint64_t size, const uint8_t **dest,
+ int64_t time_stamp);
+
+int vp9_get_raw_frame(VP9D_PTR comp, YV12_BUFFER_CONFIG *sd,
+ int64_t *time_stamp, int64_t *time_end_stamp,
+ vp9_ppflags_t *flags);
+
+vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR comp,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+vpx_codec_err_t vp9_set_reference_dec(VP9D_PTR comp,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+int vp9_get_reference_dec(VP9D_PTR ptr, int index, YV12_BUFFER_CONFIG **fb);
+
+
+VP9D_PTR vp9_create_decompressor(VP9D_CONFIG *oxcf);
+
+void vp9_remove_decompressor(VP9D_PTR comp);
+
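+// A minimal decode loop over this interface (a sketch; the frame buffer,
+// size and timestamp variables are illustrative):
+//
+//   VP9D_CONFIG oxcf = {0};
+//   VP9D_PTR dec = vp9_create_decompressor(&oxcf);
+//   const uint8_t *buf = frame_data;  // one compressed frame
+//   if (dec && !vp9_receive_compressed_data(dec, frame_size, &buf, pts)) {
+//     YV12_BUFFER_CONFIG sd;
+//     int64_t t0, t1;
+//     vp9_ppflags_t flags = {0};
+//     if (!vp9_get_raw_frame(dec, &sd, &t0, &t1, &flags)) {
+//       // sd now describes the decoded frame
+//     }
+//   }
+//   vp9_remove_decompressor(dec);
+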
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VP9_COMMON_VP9_ONYXD_H_
diff --git a/libvpx/vp9/decoder/vp9_onyxd_if.c b/libvpx/vp9/decoder/vp9_onyxd_if.c
new file mode 100644
index 0000000..17d5def
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_onyxd_if.c
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <stdio.h>
+
+#include "vp9/common/vp9_onyxc_int.h"
+#if CONFIG_VP9_POSTPROC
+#include "vp9/common/vp9_postproc.h"
+#endif
+#include "vp9/decoder/vp9_onyxd.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vpx_ports/vpx_timer.h"
+#include "vp9/decoder/vp9_decodframe.h"
+#include "vp9/decoder/vp9_detokenize.h"
+#include "./vpx_scale_rtcd.h"
+
+#define WRITE_RECON_BUFFER 0
+#if WRITE_RECON_BUFFER == 1
+static void recon_write_yuv_frame(const char *name,
+ const YV12_BUFFER_CONFIG *s,
+ int w, int _h) {
+ FILE *yuv_file = fopen(name, "ab");
+ const uint8_t *src = s->y_buffer;
+ int h = _h;
+
+ do {
+ fwrite(src, w, 1, yuv_file);
+ src += s->y_stride;
+ } while (--h);
+
+ src = s->u_buffer;
+ h = (_h + 1) >> 1;
+ w = (w + 1) >> 1;
+
+ do {
+ fwrite(src, w, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ src = s->v_buffer;
+ h = (_h + 1) >> 1;
+
+ do {
+ fwrite(src, w, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ fclose(yuv_file);
+}
+#endif
+#if WRITE_RECON_BUFFER == 2
+void write_dx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) {
+  // write the frame
+ FILE *yframe;
+ int i;
+ char filename[255];
+
+ sprintf(filename, "dx\\y%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->y_height; i++)
+ fwrite(frame->y_buffer + i * frame->y_stride,
+ frame->y_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "dx\\u%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->u_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "dx\\v%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->v_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+}
+#endif
+
+void vp9_initialize_dec() {
+ static int init_done = 0;
+
+ if (!init_done) {
+ vp9_initialize_common();
+ vp9_init_quant_tables();
+ init_done = 1;
+ }
+}
+
+VP9D_PTR vp9_create_decompressor(VP9D_CONFIG *oxcf) {
+ VP9D_COMP *const pbi = vpx_memalign(32, sizeof(VP9D_COMP));
+ VP9_COMMON *const cm = pbi ? &pbi->common : NULL;
+
+ if (!cm)
+ return NULL;
+
+ vp9_zero(*pbi);
+
+ if (setjmp(cm->error.jmp)) {
+ cm->error.setjmp = 0;
+ vp9_remove_decompressor(pbi);
+ return NULL;
+ }
+
+ cm->error.setjmp = 1;
+ vp9_initialize_dec();
+
+ vp9_create_common(cm);
+
+ pbi->oxcf = *oxcf;
+ pbi->ready_for_new_data = 1;
+ cm->current_video_frame = 0;
+
+ // vp9_init_dequantizer() is first called here. Add check in
+ // frame_init_dequantizer() to avoid unnecessary calling of
+ // vp9_init_dequantizer() for every frame.
+ vp9_init_dequantizer(cm);
+
+ vp9_loop_filter_init(cm);
+
+ cm->error.setjmp = 0;
+ pbi->decoded_key_frame = 0;
+
+ if (pbi->oxcf.max_threads > 1) {
+ vp9_worker_init(&pbi->lf_worker);
+ pbi->lf_worker.data1 = vpx_malloc(sizeof(LFWorkerData));
+ pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
+ if (pbi->lf_worker.data1 == NULL || !vp9_worker_reset(&pbi->lf_worker)) {
+ vp9_remove_decompressor(pbi);
+ return NULL;
+ }
+ }
+
+ return pbi;
+}
+
+void vp9_remove_decompressor(VP9D_PTR ptr) {
+ VP9D_COMP *const pbi = (VP9D_COMP *)ptr;
+
+ if (!pbi)
+ return;
+
+ vp9_remove_common(&pbi->common);
+ vp9_worker_end(&pbi->lf_worker);
+ vpx_free(pbi->lf_worker.data1);
+ vpx_free(pbi);
+}
+
+static int equal_dimensions(YV12_BUFFER_CONFIG *a, YV12_BUFFER_CONFIG *b) {
+ return a->y_height == b->y_height && a->y_width == b->y_width &&
+ a->uv_height == b->uv_height && a->uv_width == b->uv_width;
+}
+
+vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR ptr,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+ VP9_COMMON *cm = &pbi->common;
+ int ref_fb_idx;
+
+ /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
+ * encoder is using the frame buffers for. This is just a stub to keep the
+ * vpxenc --test-decode functionality working, and will be replaced in a
+ * later commit that adds VP9-specific controls for this functionality.
+ */
+ if (ref_frame_flag == VP9_LAST_FLAG) {
+ ref_fb_idx = cm->ref_frame_map[0];
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "Invalid reference frame");
+ return cm->error.error_code;
+ }
+
+ if (!equal_dimensions(&cm->yv12_fb[ref_fb_idx], sd)) {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "Incorrect buffer dimensions");
+ } else {
+ vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
+ }
+
+ return cm->error.error_code;
+}
+
+
+vpx_codec_err_t vp9_set_reference_dec(VP9D_PTR ptr, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+ VP9_COMMON *cm = &pbi->common;
+ int *ref_fb_ptr = NULL;
+
+ /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
+ * encoder is using the frame buffers for. This is just a stub to keep the
+ * vpxenc --test-decode functionality working, and will be replaced in a
+ * later commit that adds VP9-specific controls for this functionality.
+ */
+ if (ref_frame_flag == VP9_LAST_FLAG)
+ ref_fb_ptr = &pbi->common.active_ref_idx[0];
+ else if (ref_frame_flag == VP9_GOLD_FLAG)
+ ref_fb_ptr = &pbi->common.active_ref_idx[1];
+ else if (ref_frame_flag == VP9_ALT_FLAG)
+ ref_fb_ptr = &pbi->common.active_ref_idx[2];
+ else {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Invalid reference frame");
+ return pbi->common.error.error_code;
+ }
+
+ if (!equal_dimensions(&cm->yv12_fb[*ref_fb_ptr], sd)) {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Incorrect buffer dimensions");
+ } else {
+ // Find an empty frame buffer.
+ const int free_fb = get_free_fb(cm);
+ // Decrease fb_idx_ref_cnt since it will be increased again in
+ // ref_cnt_fb() below.
+ cm->fb_idx_ref_cnt[free_fb]--;
+
+ // Manage the reference counters and copy image.
+ ref_cnt_fb(cm->fb_idx_ref_cnt, ref_fb_ptr, free_fb);
+ vp8_yv12_copy_frame(sd, &cm->yv12_fb[*ref_fb_ptr]);
+ }
+
+ return pbi->common.error.error_code;
+}
+
+
+int vp9_get_reference_dec(VP9D_PTR ptr, int index, YV12_BUFFER_CONFIG **fb) {
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+ VP9_COMMON *cm = &pbi->common;
+
+ if (index < 0 || index >= NUM_REF_FRAMES)
+ return -1;
+
+ *fb = &cm->yv12_fb[cm->ref_frame_map[index]];
+ return 0;
+}
+
+/* If any buffer updating is signaled it should be done here. */
+static void swap_frame_buffers(VP9D_COMP *pbi) {
+ int ref_index = 0, mask;
+ VP9_COMMON *const cm = &pbi->common;
+
+ for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
+ if (mask & 1)
+ ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->ref_frame_map[ref_index],
+ cm->new_fb_idx);
+ ++ref_index;
+ }
+
+ cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+
+ // Invalidate these references until the next frame starts.
+ for (ref_index = 0; ref_index < 3; ref_index++)
+ cm->active_ref_idx[ref_index] = INT_MAX;
+}
+
+int vp9_receive_compressed_data(VP9D_PTR ptr,
+ uint64_t size, const uint8_t **psource,
+ int64_t time_stamp) {
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+ VP9_COMMON *cm = &pbi->common;
+ const uint8_t *source = *psource;
+ int retcode = 0;
+
+ /*if(pbi->ready_for_new_data == 0)
+ return -1;*/
+
+ if (ptr == 0)
+ return -1;
+
+ cm->error.error_code = VPX_CODEC_OK;
+
+ pbi->source = source;
+ pbi->source_sz = size;
+
+ if (pbi->source_sz == 0) {
+ /* This is used to signal that we are missing frames.
+     * We do not know if the missing frame(s) were supposed to update
+     * any of the reference buffers, but we act conservatively and
+ * mark only the last buffer as corrupted.
+ *
+ * TODO(jkoleszar): Error concealment is undefined and non-normative
+ * at this point, but if it becomes so, [0] may not always be the correct
+ * thing to do here.
+ */
+ if (cm->active_ref_idx[0] != INT_MAX)
+ cm->yv12_fb[cm->active_ref_idx[0]].corrupted = 1;
+ }
+
+ cm->new_fb_idx = get_free_fb(cm);
+
+ if (setjmp(cm->error.jmp)) {
+ cm->error.setjmp = 0;
+
+    /* We do not know if the missing frame(s) were supposed to update
+     * any of the reference buffers, but we act conservatively and
+ * mark only the last buffer as corrupted.
+ *
+ * TODO(jkoleszar): Error concealment is undefined and non-normative
+ * at this point, but if it becomes so, [0] may not always be the correct
+ * thing to do here.
+ */
+ if (cm->active_ref_idx[0] != INT_MAX)
+ cm->yv12_fb[cm->active_ref_idx[0]].corrupted = 1;
+
+ if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+
+ return -1;
+ }
+
+ cm->error.setjmp = 1;
+
+ retcode = vp9_decode_frame(pbi, psource);
+
+ if (retcode < 0) {
+ cm->error.error_code = VPX_CODEC_ERROR;
+ cm->error.setjmp = 0;
+ if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+ return retcode;
+ }
+
+ {
+ swap_frame_buffers(pbi);
+
+#if WRITE_RECON_BUFFER == 2
+ if (cm->show_frame)
+ write_dx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame);
+ else
+ write_dx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 1000);
+#endif
+
+ if (!pbi->do_loopfilter_inline) {
+ /* Apply the loop filter if appropriate. */
+ vp9_loop_filter_frame(cm, &pbi->mb, pbi->common.lf.filter_level, 0, 0);
+ }
+
+#if WRITE_RECON_BUFFER == 2
+ if (cm->show_frame)
+ write_dx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 2000);
+ else
+ write_dx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 3000);
+#endif
+
+ vp9_extend_frame_inner_borders(cm->frame_to_show,
+ cm->subsampling_x,
+ cm->subsampling_y);
+ }
+
+#if WRITE_RECON_BUFFER == 1
+ if (cm->show_frame)
+ recon_write_yuv_frame("recon.yuv", cm->frame_to_show,
+ cm->width, cm->height);
+#endif
+
+ vp9_clear_system_state();
+
+ cm->last_show_frame = cm->show_frame;
+ if (cm->show_frame) {
+ // current mip will be the prev_mip for the next frame
+ MODE_INFO *temp = cm->prev_mip;
+ MODE_INFO **temp2 = cm->prev_mi_grid_base;
+ cm->prev_mip = cm->mip;
+ cm->mip = temp;
+ cm->prev_mi_grid_base = cm->mi_grid_base;
+ cm->mi_grid_base = temp2;
+
+ // update the upper left visible macroblock ptrs
+ cm->mi = cm->mip + cm->mode_info_stride + 1;
+ cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1;
+ cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1;
+
+ cm->current_video_frame++;
+ }
+
+ pbi->ready_for_new_data = 0;
+ pbi->last_time_stamp = time_stamp;
+ pbi->source_sz = 0;
+
+ cm->error.setjmp = 0;
+ return retcode;
+}
+
+int vp9_get_raw_frame(VP9D_PTR ptr, YV12_BUFFER_CONFIG *sd,
+ int64_t *time_stamp, int64_t *time_end_stamp,
+ vp9_ppflags_t *flags) {
+ int ret = -1;
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+
+ if (pbi->ready_for_new_data == 1)
+ return ret;
+
+  /* i.e. no raw frame to show */
+ if (pbi->common.show_frame == 0)
+ return ret;
+
+ pbi->ready_for_new_data = 1;
+ *time_stamp = pbi->last_time_stamp;
+ *time_end_stamp = 0;
+
+#if CONFIG_VP9_POSTPROC
+ ret = vp9_post_proc_frame(&pbi->common, sd, flags);
+#else
+
+ if (pbi->common.frame_to_show) {
+ *sd = *pbi->common.frame_to_show;
+ sd->y_width = pbi->common.width;
+ sd->y_height = pbi->common.height;
+ sd->uv_width = sd->y_width >> pbi->common.subsampling_x;
+ sd->uv_height = sd->y_height >> pbi->common.subsampling_y;
+
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+#endif  /* !CONFIG_VP9_POSTPROC */
+ vp9_clear_system_state();
+ return ret;
+}
diff --git a/libvpx/vp9/decoder/vp9_onyxd_int.h b/libvpx/vp9/decoder/vp9_onyxd_int.h
new file mode 100644
index 0000000..a051971
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_onyxd_int.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_DECODER_VP9_ONYXD_INT_H_
+#define VP9_DECODER_VP9_ONYXD_INT_H_
+
+#include "./vpx_config.h"
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/decoder/vp9_onyxd.h"
+#include "vp9/decoder/vp9_thread.h"
+
+typedef struct VP9Decompressor {
+ DECLARE_ALIGNED(16, MACROBLOCKD, mb);
+
+ DECLARE_ALIGNED(16, VP9_COMMON, common);
+
+ VP9D_CONFIG oxcf;
+
+ const uint8_t *source;
+ uint32_t source_sz;
+
+ int64_t last_time_stamp;
+ int ready_for_new_data;
+
+ int refresh_frame_flags;
+
+ int decoded_key_frame;
+
+ int initial_width;
+ int initial_height;
+
+ int do_loopfilter_inline; // apply loopfilter to available rows immediately
+ VP9Worker lf_worker;
+} VP9D_COMP;
+
+#endif  // VP9_DECODER_VP9_ONYXD_INT_H_
diff --git a/libvpx/vp9/decoder/vp9_read_bit_buffer.h b/libvpx/vp9/decoder/vp9_read_bit_buffer.h
new file mode 100644
index 0000000..c7fa3aa
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_read_bit_buffer.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_READ_BIT_BUFFER_
+#define VP9_READ_BIT_BUFFER_
+
+#include <limits.h>
+
+#include "vpx/vpx_integer.h"
+
+typedef void (*vp9_rb_error_handler)(void *data, size_t bit_offset);
+
+struct vp9_read_bit_buffer {
+ const uint8_t *bit_buffer;
+ const uint8_t *bit_buffer_end;
+ size_t bit_offset;
+
+ void *error_handler_data;
+ vp9_rb_error_handler error_handler;
+};
+
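+// Number of whole or partial bytes consumed so far, i.e.
+// ceil(bit_offset / CHAR_BIT): a bit_offset of 0 gives 0, 1..8 gives 1
+// (with CHAR_BIT == 8), and 9 gives 2.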
+static size_t vp9_rb_bytes_read(struct vp9_read_bit_buffer *rb) {
+ return rb->bit_offset / CHAR_BIT + (rb->bit_offset % CHAR_BIT > 0);
+}
+
+static int vp9_rb_read_bit(struct vp9_read_bit_buffer *rb) {
+ const size_t off = rb->bit_offset;
+ const size_t p = off / CHAR_BIT;
+ const int q = CHAR_BIT - 1 - (int)off % CHAR_BIT;
+ if (rb->bit_buffer + p >= rb->bit_buffer_end) {
+ rb->error_handler(rb->error_handler_data, rb->bit_offset);
+ return 0;
+ } else {
+ const int bit = (rb->bit_buffer[p] & (1 << q)) >> q;
+ rb->bit_offset = off + 1;
+ return bit;
+ }
+}
+
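+// Reads `bits` bits MSB-first, e.g. vp9_rb_read_literal(rb, 3) over the bit
+// sequence 1, 0, 1 returns 5.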
+static int vp9_rb_read_literal(struct vp9_read_bit_buffer *rb, int bits) {
+ int value = 0, bit;
+ for (bit = bits - 1; bit >= 0; bit--)
+ value |= vp9_rb_read_bit(rb) << bit;
+ return value;
+}
+
+static int vp9_rb_read_signed_literal(struct vp9_read_bit_buffer *rb,
+ int bits) {
+ const int value = vp9_rb_read_literal(rb, bits);
+ return vp9_rb_read_bit(rb) ? -value : value;
+}
+
+#endif // VP9_READ_BIT_BUFFER_
diff --git a/libvpx/vp9/decoder/vp9_thread.c b/libvpx/vp9/decoder/vp9_thread.c
new file mode 100644
index 0000000..dc3b681
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_thread.c
@@ -0,0 +1,248 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Multi-threaded worker
+//
+// Original source:
+// http://git.chromium.org/webm/libwebp.git
+// 100644 blob eff8f2a8c20095aade3c292b0e9292dac6cb3587 src/utils/thread.c
+
+
+#include <assert.h>
+#include <string.h> // for memset()
+#include "./vp9_thread.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#if CONFIG_MULTITHREAD
+
+#if defined(_WIN32)
+
+//------------------------------------------------------------------------------
+// simplistic pthread emulation layer
+
+#include <process.h>
+
+// _beginthreadex requires __stdcall
+#define THREADFN unsigned int __stdcall
+#define THREAD_RETURN(val) (unsigned int)((DWORD_PTR)val)
+
+static int pthread_create(pthread_t* const thread, const void* attr,
+ unsigned int (__stdcall *start)(void*), void* arg) {
+ (void)attr;
+ *thread = (pthread_t)_beginthreadex(NULL, /* void *security */
+ 0, /* unsigned stack_size */
+ start,
+ arg,
+ 0, /* unsigned initflag */
+ NULL); /* unsigned *thrdaddr */
+ if (*thread == NULL) return 1;
+ SetThreadPriority(*thread, THREAD_PRIORITY_ABOVE_NORMAL);
+ return 0;
+}
+
+static int pthread_join(pthread_t thread, void** value_ptr) {
+ (void)value_ptr;
+ return (WaitForSingleObject(thread, INFINITE) != WAIT_OBJECT_0 ||
+ CloseHandle(thread) == 0);
+}
+
+// Mutex
+static int pthread_mutex_init(pthread_mutex_t* const mutex, void* mutexattr) {
+ (void)mutexattr;
+ InitializeCriticalSection(mutex);
+ return 0;
+}
+
+static int pthread_mutex_lock(pthread_mutex_t* const mutex) {
+ EnterCriticalSection(mutex);
+ return 0;
+}
+
+static int pthread_mutex_unlock(pthread_mutex_t* const mutex) {
+ LeaveCriticalSection(mutex);
+ return 0;
+}
+
+static int pthread_mutex_destroy(pthread_mutex_t* const mutex) {
+ DeleteCriticalSection(mutex);
+ return 0;
+}
+
+// Condition
+static int pthread_cond_destroy(pthread_cond_t* const condition) {
+ int ok = 1;
+ ok &= (CloseHandle(condition->waiting_sem_) != 0);
+ ok &= (CloseHandle(condition->received_sem_) != 0);
+ ok &= (CloseHandle(condition->signal_event_) != 0);
+ return !ok;
+}
+
+static int pthread_cond_init(pthread_cond_t* const condition, void* cond_attr) {
+ (void)cond_attr;
+ condition->waiting_sem_ = CreateSemaphore(NULL, 0, 1, NULL);
+ condition->received_sem_ = CreateSemaphore(NULL, 0, 1, NULL);
+ condition->signal_event_ = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (condition->waiting_sem_ == NULL ||
+ condition->received_sem_ == NULL ||
+ condition->signal_event_ == NULL) {
+ pthread_cond_destroy(condition);
+ return 1;
+ }
+ return 0;
+}
+
+static int pthread_cond_signal(pthread_cond_t* const condition) {
+ int ok = 1;
+ if (WaitForSingleObject(condition->waiting_sem_, 0) == WAIT_OBJECT_0) {
+ // a thread is waiting in pthread_cond_wait: allow it to be notified
+ ok = SetEvent(condition->signal_event_);
+ // wait until the event is consumed so the signaler cannot consume
+ // the event via its own pthread_cond_wait.
+ ok &= (WaitForSingleObject(condition->received_sem_, INFINITE) !=
+ WAIT_OBJECT_0);
+ }
+ return !ok;
+}
+
+static int pthread_cond_wait(pthread_cond_t* const condition,
+ pthread_mutex_t* const mutex) {
+ int ok;
+ // note that there is a consumer available so the signal isn't dropped in
+ // pthread_cond_signal
+ if (!ReleaseSemaphore(condition->waiting_sem_, 1, NULL))
+ return 1;
+ // now unlock the mutex so pthread_cond_signal may be issued
+ pthread_mutex_unlock(mutex);
+ ok = (WaitForSingleObject(condition->signal_event_, INFINITE) ==
+ WAIT_OBJECT_0);
+ ok &= ReleaseSemaphore(condition->received_sem_, 1, NULL);
+ pthread_mutex_lock(mutex);
+ return !ok;
+}
+
+#else // _WIN32
+# define THREADFN void*
+# define THREAD_RETURN(val) val
+#endif
+
+//------------------------------------------------------------------------------
+
+static THREADFN thread_loop(void *ptr) { // thread loop
+ VP9Worker* const worker = (VP9Worker*)ptr;
+ int done = 0;
+ while (!done) {
+ pthread_mutex_lock(&worker->mutex_);
+ while (worker->status_ == OK) { // wait in idling mode
+ pthread_cond_wait(&worker->condition_, &worker->mutex_);
+ }
+ if (worker->status_ == WORK) {
+ if (worker->hook) {
+ worker->had_error |= !worker->hook(worker->data1, worker->data2);
+ }
+ worker->status_ = OK;
+ } else if (worker->status_ == NOT_OK) { // finish the worker
+ done = 1;
+ }
+ // signal to the main thread that we're done (for Sync())
+ pthread_cond_signal(&worker->condition_);
+ pthread_mutex_unlock(&worker->mutex_);
+ }
+ return THREAD_RETURN(NULL); // Thread is finished
+}
+
+// main thread state control
+static void change_state(VP9Worker* const worker,
+ VP9WorkerStatus new_status) {
+ // no-op when attempting to change state on a thread that didn't come up
+ if (worker->status_ < OK) return;
+
+ pthread_mutex_lock(&worker->mutex_);
+ // wait for the worker to finish
+ while (worker->status_ != OK) {
+ pthread_cond_wait(&worker->condition_, &worker->mutex_);
+ }
+ // assign new status and release the working thread if needed
+ if (new_status != OK) {
+ worker->status_ = new_status;
+ pthread_cond_signal(&worker->condition_);
+ }
+ pthread_mutex_unlock(&worker->mutex_);
+}
+
+#endif
+
+//------------------------------------------------------------------------------
+
+void vp9_worker_init(VP9Worker* const worker) {
+ memset(worker, 0, sizeof(*worker));
+ worker->status_ = NOT_OK;
+}
+
+int vp9_worker_sync(VP9Worker* const worker) {
+#if CONFIG_MULTITHREAD
+ change_state(worker, OK);
+#endif
+ assert(worker->status_ <= OK);
+ return !worker->had_error;
+}
+
+int vp9_worker_reset(VP9Worker* const worker) {
+ int ok = 1;
+ worker->had_error = 0;
+ if (worker->status_ < OK) {
+#if CONFIG_MULTITHREAD
+ if (pthread_mutex_init(&worker->mutex_, NULL) ||
+ pthread_cond_init(&worker->condition_, NULL)) {
+ return 0;
+ }
+ pthread_mutex_lock(&worker->mutex_);
+ ok = !pthread_create(&worker->thread_, NULL, thread_loop, worker);
+ if (ok) worker->status_ = OK;
+ pthread_mutex_unlock(&worker->mutex_);
+#else
+ worker->status_ = OK;
+#endif
+ } else if (worker->status_ > OK) {
+ ok = vp9_worker_sync(worker);
+ }
+ assert(!ok || (worker->status_ == OK));
+ return ok;
+}
+
+void vp9_worker_launch(VP9Worker* const worker) {
+#if CONFIG_MULTITHREAD
+ change_state(worker, WORK);
+#else
+ if (worker->hook)
+ worker->had_error |= !worker->hook(worker->data1, worker->data2);
+#endif
+}
+
+void vp9_worker_end(VP9Worker* const worker) {
+ if (worker->status_ >= OK) {
+#if CONFIG_MULTITHREAD
+ change_state(worker, NOT_OK);
+ pthread_join(worker->thread_, NULL);
+ pthread_mutex_destroy(&worker->mutex_);
+ pthread_cond_destroy(&worker->condition_);
+#else
+ worker->status_ = NOT_OK;
+#endif
+ }
+ assert(worker->status_ == NOT_OK);
+}
+
+//------------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/libvpx/vp9/decoder/vp9_thread.h b/libvpx/vp9/decoder/vp9_thread.h
new file mode 100644
index 0000000..a8f7e04
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_thread.h
@@ -0,0 +1,93 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Multi-threaded worker
+//
+// Original source:
+// http://git.chromium.org/webm/libwebp.git
+// 100644 blob 13a61a4c84194c3374080cbf03d881d3cd6af40d src/utils/thread.h
+
+
+#ifndef VP9_DECODER_VP9_THREAD_H_
+#define VP9_DECODER_VP9_THREAD_H_
+
+#include "vpx_config.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#if CONFIG_MULTITHREAD
+
+#if defined(_WIN32)
+
+#include <windows.h>
+typedef HANDLE pthread_t;
+typedef CRITICAL_SECTION pthread_mutex_t;
+typedef struct {
+ HANDLE waiting_sem_;
+ HANDLE received_sem_;
+ HANDLE signal_event_;
+} pthread_cond_t;
+
+#else
+
+#include <pthread.h>
+
+#endif /* _WIN32 */
+#endif /* CONFIG_MULTITHREAD */
+
+// State of the worker thread object
+typedef enum {
+ NOT_OK = 0, // object is unusable
+ OK, // ready to work
+ WORK // busy finishing the current task
+} VP9WorkerStatus;
+
+// Function to be called by the worker thread. Takes two opaque pointers as
+// arguments (data1 and data2), and should return false in case of error.
+typedef int (*VP9WorkerHook)(void*, void*);
+
+// Synchronize object used to launch job in the worker thread
+typedef struct {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_t mutex_;
+ pthread_cond_t condition_;
+ pthread_t thread_;
+#endif
+ VP9WorkerStatus status_;
+ VP9WorkerHook hook; // hook to call
+ void* data1; // first argument passed to 'hook'
+ void* data2; // second argument passed to 'hook'
+ int had_error; // return value of the last call to 'hook'
+} VP9Worker;
+
+// Must be called first, before any other method.
+void vp9_worker_init(VP9Worker* const worker);
+// Must be called to initialize the object and spawn the thread. Re-entrant.
+// Will potentially launch the thread. Returns false in case of error.
+int vp9_worker_reset(VP9Worker* const worker);
+// Makes sure the previous work is finished. Returns true if worker->had_error
+// was not set and no error condition was triggered by the working thread.
+int vp9_worker_sync(VP9Worker* const worker);
+// Triggers the thread to call hook() with data1 and data2 argument. These
+// hook/data1/data2 can be changed at any time before calling this function,
+// but not be changed afterward until the next call to vp9_worker_sync().
+void vp9_worker_launch(VP9Worker* const worker);
+// Kill the thread and terminate the object. To use the object again, one
+// must call vp9_worker_reset() again.
+void vp9_worker_end(VP9Worker* const worker);
+
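+// A minimal usage sketch (the hook and job names are illustrative; compare
+// the lf_worker setup in vp9_create_decompressor()):
+//
+//   VP9Worker worker;
+//   vp9_worker_init(&worker);
+//   worker.hook = my_hook;            // int my_hook(void *data1, void *data2);
+//   worker.data1 = &job;
+//   worker.data2 = NULL;
+//   if (vp9_worker_reset(&worker)) {  // spawns the thread (if multithreaded)
+//     vp9_worker_launch(&worker);     // runs hook(data1, data2)
+//     // ... do other work ...
+//     vp9_worker_sync(&worker);       // blocks until the hook has returned
+//   }
+//   vp9_worker_end(&worker);
+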
+//------------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+#endif /* VP9_DECODER_VP9_THREAD_H_ */
diff --git a/libvpx/vp9/decoder/vp9_treereader.h b/libvpx/vp9/decoder/vp9_treereader.h
new file mode 100644
index 0000000..710cc4c
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_treereader.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_DECODER_VP9_TREEREADER_H_
+#define VP9_DECODER_VP9_TREEREADER_H_
+
+#include "vp9/common/vp9_treecoder.h"
+#include "vp9/decoder/vp9_dboolhuff.h"
+
+#define vp9_read_and_apply_sign(r, value) (vp9_read_bit(r) ? -(value) : (value))
+
+// The intent of the tree data structure is to make decoding trivial.
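+// For example, with vp9_tree t = { -A, 2, -B, -C } and probabilities p:
+// reading a 0 at p[0] yields token A; a 1 descends to index 2, where a 0 at
+// p[1] yields B and a 1 yields C.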
+static int treed_read(vp9_reader *const r, /* !!! must return a 0 or 1 !!! */
+ vp9_tree t,
+ const vp9_prob *const p) {
+ register vp9_tree_index i = 0;
+
+  while ((i = t[i + vp9_read(r, p[i >> 1])]) > 0);
+
+ return -i;
+}
+
+#endif // VP9_DECODER_VP9_TREEREADER_H_
diff --git a/libvpx/vp9/decoder/x86/vp9_dequantize_sse2.c b/libvpx/vp9/decoder/x86/vp9_dequantize_sse2.c
new file mode 100644
index 0000000..54ec67f
--- /dev/null
+++ b/libvpx/vp9/decoder/x86/vp9_dequantize_sse2.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_idct.h"
+
+void vp9_add_constant_residual_8x8_sse2(const int16_t diff, uint8_t *dest,
+ int stride) {
+ uint8_t abs_diff;
+ __m128i d;
+
+ // Prediction data.
+ __m128i p0 = _mm_loadl_epi64((const __m128i *)(dest + 0 * stride));
+ __m128i p1 = _mm_loadl_epi64((const __m128i *)(dest + 1 * stride));
+ __m128i p2 = _mm_loadl_epi64((const __m128i *)(dest + 2 * stride));
+ __m128i p3 = _mm_loadl_epi64((const __m128i *)(dest + 3 * stride));
+ __m128i p4 = _mm_loadl_epi64((const __m128i *)(dest + 4 * stride));
+ __m128i p5 = _mm_loadl_epi64((const __m128i *)(dest + 5 * stride));
+ __m128i p6 = _mm_loadl_epi64((const __m128i *)(dest + 6 * stride));
+ __m128i p7 = _mm_loadl_epi64((const __m128i *)(dest + 7 * stride));
+
+ p0 = _mm_unpacklo_epi64(p0, p1);
+ p2 = _mm_unpacklo_epi64(p2, p3);
+ p4 = _mm_unpacklo_epi64(p4, p5);
+ p6 = _mm_unpacklo_epi64(p6, p7);
+
+ // Clip diff value to [0, 255] range. Then, do addition or subtraction
+ // according to its sign.
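+  // (abs_diff * 0x01010101u replicates the byte into all four bytes of a
+  // 32-bit value; _mm_shuffle_epi32(..., 0) then broadcasts that lane so d
+  // holds abs_diff in all 16 bytes.)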
+ if (diff >= 0) {
+ abs_diff = (diff > 255) ? 255 : diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_adds_epu8(p0, d);
+ p2 = _mm_adds_epu8(p2, d);
+ p4 = _mm_adds_epu8(p4, d);
+ p6 = _mm_adds_epu8(p6, d);
+ } else {
+ abs_diff = (diff < -255) ? 255 : -diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_subs_epu8(p0, d);
+ p2 = _mm_subs_epu8(p2, d);
+ p4 = _mm_subs_epu8(p4, d);
+ p6 = _mm_subs_epu8(p6, d);
+ }
+
+ _mm_storel_epi64((__m128i *)(dest + 0 * stride), p0);
+ p0 = _mm_srli_si128(p0, 8);
+ _mm_storel_epi64((__m128i *)(dest + 1 * stride), p0);
+
+ _mm_storel_epi64((__m128i *)(dest + 2 * stride), p2);
+ p2 = _mm_srli_si128(p2, 8);
+ _mm_storel_epi64((__m128i *)(dest + 3 * stride), p2);
+
+ _mm_storel_epi64((__m128i *)(dest + 4 * stride), p4);
+ p4 = _mm_srli_si128(p4, 8);
+ _mm_storel_epi64((__m128i *)(dest + 5 * stride), p4);
+
+ _mm_storel_epi64((__m128i *)(dest + 6 * stride), p6);
+ p6 = _mm_srli_si128(p6, 8);
+ _mm_storel_epi64((__m128i *)(dest + 7 * stride), p6);
+}
+
+void vp9_add_constant_residual_16x16_sse2(const int16_t diff, uint8_t *dest,
+ int stride) {
+ uint8_t abs_diff;
+ __m128i d;
+
+ // Prediction data.
+ __m128i p0 = _mm_load_si128((const __m128i *)(dest + 0 * stride));
+ __m128i p1 = _mm_load_si128((const __m128i *)(dest + 1 * stride));
+ __m128i p2 = _mm_load_si128((const __m128i *)(dest + 2 * stride));
+ __m128i p3 = _mm_load_si128((const __m128i *)(dest + 3 * stride));
+ __m128i p4 = _mm_load_si128((const __m128i *)(dest + 4 * stride));
+ __m128i p5 = _mm_load_si128((const __m128i *)(dest + 5 * stride));
+ __m128i p6 = _mm_load_si128((const __m128i *)(dest + 6 * stride));
+ __m128i p7 = _mm_load_si128((const __m128i *)(dest + 7 * stride));
+ __m128i p8 = _mm_load_si128((const __m128i *)(dest + 8 * stride));
+ __m128i p9 = _mm_load_si128((const __m128i *)(dest + 9 * stride));
+ __m128i p10 = _mm_load_si128((const __m128i *)(dest + 10 * stride));
+ __m128i p11 = _mm_load_si128((const __m128i *)(dest + 11 * stride));
+ __m128i p12 = _mm_load_si128((const __m128i *)(dest + 12 * stride));
+ __m128i p13 = _mm_load_si128((const __m128i *)(dest + 13 * stride));
+ __m128i p14 = _mm_load_si128((const __m128i *)(dest + 14 * stride));
+ __m128i p15 = _mm_load_si128((const __m128i *)(dest + 15 * stride));
+
+ // Clip diff value to [0, 255] range. Then, do addition or subtraction
+ // according to its sign.
+ if (diff >= 0) {
+ abs_diff = (diff > 255) ? 255 : diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_adds_epu8(p0, d);
+ p1 = _mm_adds_epu8(p1, d);
+ p2 = _mm_adds_epu8(p2, d);
+ p3 = _mm_adds_epu8(p3, d);
+ p4 = _mm_adds_epu8(p4, d);
+ p5 = _mm_adds_epu8(p5, d);
+ p6 = _mm_adds_epu8(p6, d);
+ p7 = _mm_adds_epu8(p7, d);
+ p8 = _mm_adds_epu8(p8, d);
+ p9 = _mm_adds_epu8(p9, d);
+ p10 = _mm_adds_epu8(p10, d);
+ p11 = _mm_adds_epu8(p11, d);
+ p12 = _mm_adds_epu8(p12, d);
+ p13 = _mm_adds_epu8(p13, d);
+ p14 = _mm_adds_epu8(p14, d);
+ p15 = _mm_adds_epu8(p15, d);
+ } else {
+ abs_diff = (diff < -255) ? 255 : -diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_subs_epu8(p0, d);
+ p1 = _mm_subs_epu8(p1, d);
+ p2 = _mm_subs_epu8(p2, d);
+ p3 = _mm_subs_epu8(p3, d);
+ p4 = _mm_subs_epu8(p4, d);
+ p5 = _mm_subs_epu8(p5, d);
+ p6 = _mm_subs_epu8(p6, d);
+ p7 = _mm_subs_epu8(p7, d);
+ p8 = _mm_subs_epu8(p8, d);
+ p9 = _mm_subs_epu8(p9, d);
+ p10 = _mm_subs_epu8(p10, d);
+ p11 = _mm_subs_epu8(p11, d);
+ p12 = _mm_subs_epu8(p12, d);
+ p13 = _mm_subs_epu8(p13, d);
+ p14 = _mm_subs_epu8(p14, d);
+ p15 = _mm_subs_epu8(p15, d);
+ }
+
+ // Store results
+ _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
+ _mm_store_si128((__m128i *)(dest + 1 * stride), p1);
+ _mm_store_si128((__m128i *)(dest + 2 * stride), p2);
+ _mm_store_si128((__m128i *)(dest + 3 * stride), p3);
+ _mm_store_si128((__m128i *)(dest + 4 * stride), p4);
+ _mm_store_si128((__m128i *)(dest + 5 * stride), p5);
+ _mm_store_si128((__m128i *)(dest + 6 * stride), p6);
+ _mm_store_si128((__m128i *)(dest + 7 * stride), p7);
+ _mm_store_si128((__m128i *)(dest + 8 * stride), p8);
+ _mm_store_si128((__m128i *)(dest + 9 * stride), p9);
+ _mm_store_si128((__m128i *)(dest + 10 * stride), p10);
+ _mm_store_si128((__m128i *)(dest + 11 * stride), p11);
+ _mm_store_si128((__m128i *)(dest + 12 * stride), p12);
+ _mm_store_si128((__m128i *)(dest + 13 * stride), p13);
+ _mm_store_si128((__m128i *)(dest + 14 * stride), p14);
+ _mm_store_si128((__m128i *)(dest + 15 * stride), p15);
+}
+
+void vp9_add_constant_residual_32x32_sse2(const int16_t diff, uint8_t *dest,
+ int stride) {
+ uint8_t abs_diff;
+ __m128i d;
+ int i = 8;
+
+ if (diff >= 0) {
+ abs_diff = (diff > 255) ? 255 : diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+ } else {
+ abs_diff = (diff < -255) ? 255 : -diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+ }
+
+ do {
+ // Prediction data.
+ __m128i p0 = _mm_load_si128((const __m128i *)(dest + 0 * stride));
+ __m128i p1 = _mm_load_si128((const __m128i *)(dest + 0 * stride + 16));
+ __m128i p2 = _mm_load_si128((const __m128i *)(dest + 1 * stride));
+ __m128i p3 = _mm_load_si128((const __m128i *)(dest + 1 * stride + 16));
+ __m128i p4 = _mm_load_si128((const __m128i *)(dest + 2 * stride));
+ __m128i p5 = _mm_load_si128((const __m128i *)(dest + 2 * stride + 16));
+ __m128i p6 = _mm_load_si128((const __m128i *)(dest + 3 * stride));
+ __m128i p7 = _mm_load_si128((const __m128i *)(dest + 3 * stride + 16));
+
+ // Clip diff value to [0, 255] range. Then, do addition or subtraction
+ // according to its sign.
+ if (diff >= 0) {
+ p0 = _mm_adds_epu8(p0, d);
+ p1 = _mm_adds_epu8(p1, d);
+ p2 = _mm_adds_epu8(p2, d);
+ p3 = _mm_adds_epu8(p3, d);
+ p4 = _mm_adds_epu8(p4, d);
+ p5 = _mm_adds_epu8(p5, d);
+ p6 = _mm_adds_epu8(p6, d);
+ p7 = _mm_adds_epu8(p7, d);
+ } else {
+ p0 = _mm_subs_epu8(p0, d);
+ p1 = _mm_subs_epu8(p1, d);
+ p2 = _mm_subs_epu8(p2, d);
+ p3 = _mm_subs_epu8(p3, d);
+ p4 = _mm_subs_epu8(p4, d);
+ p5 = _mm_subs_epu8(p5, d);
+ p6 = _mm_subs_epu8(p6, d);
+ p7 = _mm_subs_epu8(p7, d);
+ }
+
+ // Store results
+ _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
+ _mm_store_si128((__m128i *)(dest + 0 * stride + 16), p1);
+ _mm_store_si128((__m128i *)(dest + 1 * stride), p2);
+ _mm_store_si128((__m128i *)(dest + 1 * stride + 16), p3);
+ _mm_store_si128((__m128i *)(dest + 2 * stride), p4);
+ _mm_store_si128((__m128i *)(dest + 2 * stride + 16), p5);
+ _mm_store_si128((__m128i *)(dest + 3 * stride), p6);
+ _mm_store_si128((__m128i *)(dest + 3 * stride + 16), p7);
+
+ dest += 4 * stride;
+ } while (--i);
+}
diff --git a/libvpx/vp9/encoder/vp9_bitstream.c b/libvpx/vp9/encoder/vp9_bitstream.c
new file mode 100644
index 0000000..957cfd2
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_bitstream.c
@@ -0,0 +1,1621 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <limits.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_tile_common.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/common/vp9_treecoder.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/common/vp9_pragmas.h"
+
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/encoder/vp9_bitstream.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "vp9/encoder/vp9_subexp.h"
+#include "vp9/encoder/vp9_write_bit_buffer.h"
+
+
+#if defined(SECTIONBITS_OUTPUT)
+unsigned __int64 Sectionbits[500];
+#endif
+
+#ifdef ENTROPY_STATS
+int intra_mode_stats[INTRA_MODES]
+ [INTRA_MODES]
+ [INTRA_MODES];
+vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES];
+
+extern unsigned int active_section;
+#endif
+
+
+#ifdef MODE_STATS
+int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES];
+int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1];
+int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2];
+int64_t switchable_interp_stats[SWITCHABLE_FILTERS+1]
+ [SWITCHABLE_FILTERS];
+
+void init_tx_count_stats() {
+ vp9_zero(tx_count_32x32p_stats);
+ vp9_zero(tx_count_16x16p_stats);
+ vp9_zero(tx_count_8x8p_stats);
+}
+
+void init_switchable_interp_stats() {
+ vp9_zero(switchable_interp_stats);
+}
+
+static void update_tx_count_stats(VP9_COMMON *cm) {
+ int i, j;
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ for (j = 0; j < TX_SIZES; j++) {
+ tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j];
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ for (j = 0; j < TX_SIZES - 1; j++) {
+ tx_count_16x16p_stats[i][j] += cm->fc.tx_count_16x16p[i][j];
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ for (j = 0; j < TX_SIZES - 2; j++) {
+ tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j];
+ }
+ }
+}
+
+static void update_switchable_interp_stats(VP9_COMMON *cm) {
+ int i, j;
+ for (i = 0; i < SWITCHABLE_FILTERS+1; ++i)
+ for (j = 0; j < SWITCHABLE_FILTERS; ++j) {
+ switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j];
+ }
+}
+
+void write_tx_count_stats() {
+ int i, j;
+ FILE *fp = fopen("tx_count.bin", "wb");
+ fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp);
+ fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp);
+ fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp);
+ fclose(fp);
+
+ printf(
+ "vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZES] = {\n");
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ printf(" { ");
+ for (j = 0; j < TX_SIZES; j++) {
+ printf("%"PRId64", ", tx_count_32x32p_stats[i][j]);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+ printf(
+ "vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZES-1] = {\n");
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ printf(" { ");
+ for (j = 0; j < TX_SIZES - 1; j++) {
+ printf("%"PRId64", ", tx_count_16x16p_stats[i][j]);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+ printf(
+ "vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZES-2] = {\n");
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ printf(" { ");
+ for (j = 0; j < TX_SIZES - 2; j++) {
+ printf("%"PRId64", ", tx_count_8x8p_stats[i][j]);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+}
+
+void write_switchable_interp_stats() {
+ int i, j;
+ FILE *fp = fopen("switchable_interp.bin", "wb");
+ fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp);
+ fclose(fp);
+
+ printf(
+ "vp9_default_switchable_filter_count[SWITCHABLE_FILTERS+1]"
+ "[SWITCHABLE_FILTERS] = {\n");
+ for (i = 0; i < SWITCHABLE_FILTERS+1; i++) {
+ printf(" { ");
+ for (j = 0; j < SWITCHABLE_FILTERS; j++) {
+ printf("%"PRId64", ", switchable_interp_stats[i][j]);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+}
+#endif
+
+static INLINE void write_be32(uint8_t *p, int value) {
+ p[0] = value >> 24;
+ p[1] = value >> 16;
+ p[2] = value >> 8;
+ p[3] = value;
+}
+
+void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb,
+ int data, int max) {
+ vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
+}
+
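+// Computes fresh tree probabilities from the per-symbol event counts, then
+// conditionally signals a probability diff update for each internal node
+// against the currently stored values.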
+static void update_mode(
+ vp9_writer *w,
+ int n,
+ vp9_tree tree,
+ vp9_prob Pnew[/* n-1 */],
+ vp9_prob Pcur[/* n-1 */],
+ unsigned int bct[/* n-1 */] [2],
+ const unsigned int num_events[/* n */]
+) {
+ int i = 0;
+
+ vp9_tree_probs_from_distribution(tree, Pnew, bct, num_events, 0);
+ n--;
+
+ for (i = 0; i < n; ++i) {
+ vp9_cond_prob_diff_update(w, &Pcur[i], MODE_UPDATE_PROB, bct[i]);
+ }
+}
+
+static void update_mbintra_mode_probs(VP9_COMP* const cpi,
+ vp9_writer* const bc) {
+ VP9_COMMON *const cm = &cpi->common;
+ int j;
+ vp9_prob pnew[INTRA_MODES - 1];
+ unsigned int bct[INTRA_MODES - 1][2];
+
+ for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
+ update_mode(bc, INTRA_MODES, vp9_intra_mode_tree, pnew,
+ cm->fc.y_mode_prob[j], bct,
+ (unsigned int *)cpi->y_mode_count[j]);
+}
+
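+// The selected transform size is coded as up to three conditional bits:
+// each bit signals "larger than TX_4X4 / TX_8X8 / TX_16X16", and later
+// bits are only sent when the block size is big enough to permit a larger
+// transform.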
+static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m,
+ TX_SIZE tx_size, BLOCK_SIZE bsize,
+ vp9_writer *w) {
+ const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs, m);
+ vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
+ if (bsize >= BLOCK_16X16 && tx_size != TX_4X4) {
+ vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
+ if (bsize >= BLOCK_32X32 && tx_size != TX_8X8)
+ vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
+ }
+}
+
+static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m,
+ vp9_writer *w) {
+ const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
+ return 1;
+ } else {
+ const int skip_coeff = m->mbmi.skip_coeff;
+ vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd));
+ return skip_coeff;
+ }
+}
+
+void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) {
+ VP9_COMMON *cm = &cpi->common;
+ int k;
+
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k],
+ MODE_UPDATE_PROB, cm->counts.mbskip[k]);
+}
+
+static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
+ write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m);
+}
+
+static void update_switchable_interp_probs(VP9_COMP *const cpi,
+ vp9_writer* const bc) {
+ VP9_COMMON *const cm = &cpi->common;
+ unsigned int branch_ct[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS - 1][2];
+ vp9_prob new_prob[SWITCHABLE_FILTERS + 1][SWITCHABLE_FILTERS - 1];
+ int i, j;
+ for (j = 0; j <= SWITCHABLE_FILTERS; ++j) {
+ vp9_tree_probs_from_distribution(
+ vp9_switchable_interp_tree,
+ new_prob[j], branch_ct[j],
+ cm->counts.switchable_interp[j], 0);
+ }
+ for (j = 0; j <= SWITCHABLE_FILTERS; ++j) {
+ for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) {
+ vp9_cond_prob_diff_update(bc, &cm->fc.switchable_interp_prob[j][i],
+ MODE_UPDATE_PROB, branch_ct[j][i]);
+ }
+ }
+#ifdef MODE_STATS
+ if (!cpi->dummy_packing)
+ update_switchable_interp_stats(cm);
+#endif
+}
+
+static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer* const bc) {
+ int i, j;
+
+ for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
+ unsigned int branch_ct[INTER_MODES - 1][2];
+ vp9_prob new_prob[INTER_MODES - 1];
+
+ vp9_tree_probs_from_distribution(vp9_inter_mode_tree,
+ new_prob, branch_ct,
+ cm->counts.inter_mode[i], NEARESTMV);
+
+ for (j = 0; j < INTER_MODES - 1; ++j)
+ vp9_cond_prob_diff_update(bc, &cm->fc.inter_mode_probs[i][j],
+ MODE_UPDATE_PROB, branch_ct[j]);
+ }
+}
+
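+// Packs one block's token list into the arithmetic coder: each token's
+// value is written bit by bit along vp9_coef_tree using the per-node
+// probabilities (a model-expanded set for TWO_TOKEN and above), optionally
+// skipping the leading EOB node, followed by any extra magnitude bits and
+// the sign. Stops after an EOSB_TOKEN.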
+static void pack_mb_tokens(vp9_writer* const bc,
+ TOKENEXTRA **tp,
+ const TOKENEXTRA *const stop) {
+ TOKENEXTRA *p = *tp;
+
+ while (p < stop) {
+ const int t = p->token;
+ const struct vp9_token *const a = vp9_coef_encodings + t;
+ const vp9_extra_bit *const b = vp9_extra_bits + t;
+ int i = 0;
+ const vp9_prob *pp;
+ int v = a->value;
+ int n = a->len;
+ vp9_prob probs[ENTROPY_NODES];
+
+ if (t == EOSB_TOKEN) {
+ ++p;
+ break;
+ }
+ if (t >= TWO_TOKEN) {
+ vp9_model_to_full_probs(p->context_tree, probs);
+ pp = probs;
+ } else {
+ pp = p->context_tree;
+ }
+ assert(pp != 0);
+
+ /* skip one or two nodes */
+ if (p->skip_eob_node) {
+ n -= p->skip_eob_node;
+ i = 2 * p->skip_eob_node;
+ }
+
+ do {
+ const int bb = (v >> --n) & 1;
+ vp9_write(bc, bb, pp[i >> 1]);
+ i = vp9_coef_tree[i + bb];
+ } while (n);
+
+ if (b->base_val) {
+ const int e = p->extra, l = b->len;
+
+ if (l) {
+ const unsigned char *pb = b->prob;
+ int v = e >> 1;
+ int n = l; /* number of bits in v, assumed nonzero */
+ int i = 0;
+
+ do {
+ const int bb = (v >> --n) & 1;
+ vp9_write(bc, bb, pb[i >> 1]);
+ i = b->tree[i + bb];
+ } while (n);
+ }
+
+ vp9_write_bit(bc, e & 1);
+ }
+ ++p;
+ }
+
+ *tp = p;
+}
+
+static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode,
+ const vp9_prob *p) {
+ assert(is_inter_mode(mode));
+ write_token(w, vp9_inter_mode_tree, p,
+ &vp9_inter_mode_encodings[mode - NEARESTMV]);
+}
+
+
+static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
+ int segment_id) {
+ if (seg->enabled && seg->update_map)
+ treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3);
+}
+
+// This function encodes the reference frame
+static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *mi = &xd->this_mi->mbmi;
+ const int segment_id = mi->segment_id;
+ int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
+ SEG_LVL_REF_FRAME);
+  // The reference frame is coded explicitly only when segment-level coding
+  // of this signal is disabled, i.e. the segment allows a choice of
+  // reference frame options.
+ if (!seg_ref_active) {
+    // Signal whether this block uses compound prediction when the frame
+    // level allows either choice (HYBRID_PREDICTION).
+ if (cm->comp_pred_mode == HYBRID_PREDICTION) {
+ vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
+ vp9_get_pred_prob_comp_inter_inter(cm, xd));
+ } else {
+ assert((mi->ref_frame[1] <= INTRA_FRAME) ==
+ (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY));
+ }
+
+ if (mi->ref_frame[1] > INTRA_FRAME) {
+ vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
+ vp9_get_pred_prob_comp_ref_p(cm, xd));
+ } else {
+ vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
+ vp9_get_pred_prob_single_ref_p1(cm, xd));
+ if (mi->ref_frame[0] != LAST_FRAME)
+ vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
+ vp9_get_pred_prob_single_ref_p2(cm, xd));
+ }
+ } else {
+ assert(mi->ref_frame[1] <= INTRA_FRAME);
+ assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) ==
+ mi->ref_frame[0]);
+ }
+
+  // If using the segment prediction model there is nothing further to do
+  // because the reference frame is fully determined by the segment.
+}
+
+static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
+ VP9_COMMON *const cm = &cpi->common;
+ const nmv_context *nmvc = &cm->fc.nmvc;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct segmentation *seg = &cm->seg;
+ MB_MODE_INFO *const mi = &m->mbmi;
+ const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
+ const MB_PREDICTION_MODE mode = mi->mode;
+ const int segment_id = mi->segment_id;
+ int skip_coeff;
+ const BLOCK_SIZE bsize = mi->sb_type;
+ const int allow_hp = xd->allow_high_precision_mv;
+
+ x->partition_info = x->pi + (m - cm->mi);
+
+#ifdef ENTROPY_STATS
+ active_section = 9;
+#endif
+
+ if (seg->update_map) {
+ if (seg->temporal_update) {
+ const int pred_flag = mi->seg_id_predicted;
+ vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
+ vp9_write(bc, pred_flag, pred_prob);
+ if (!pred_flag)
+ write_segment_id(bc, seg, segment_id);
+ } else {
+ write_segment_id(bc, seg, segment_id);
+ }
+ }
+
+ skip_coeff = write_skip_coeff(cpi, segment_id, m, bc);
+
+ if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
+ vp9_write(bc, rf != INTRA_FRAME,
+ vp9_get_pred_prob_intra_inter(cm, xd));
+
+ if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
+ !(rf != INTRA_FRAME &&
+ (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
+ write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc);
+ }
+
+ if (rf == INTRA_FRAME) {
+#ifdef ENTROPY_STATS
+ active_section = 6;
+#endif
+
+ if (bsize >= BLOCK_8X8) {
+ write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
+ } else {
+ int idx, idy;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode;
+ write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]);
+ }
+ }
+ }
+ write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]);
+ } else {
+ vp9_prob *mv_ref_p;
+ encode_ref_frame(cpi, bc);
+ mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mode_context[rf]];
+
+#ifdef ENTROPY_STATS
+ active_section = 3;
+#endif
+
+ // If segment skip is not enabled code the mode.
+ if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
+ if (bsize >= BLOCK_8X8) {
+ write_sb_mv_ref(bc, mode, mv_ref_p);
+ ++cm->counts.inter_mode[mi->mode_context[rf]]
+ [inter_mode_offset(mode)];
+ }
+ }
+
+ if (cm->mcomp_filter_type == SWITCHABLE) {
+ const int ctx = vp9_get_pred_context_switchable_interp(xd);
+ write_token(bc, vp9_switchable_interp_tree,
+ cm->fc.switchable_interp_prob[ctx],
+ &vp9_switchable_interp_encodings[mi->interp_filter]);
+ } else {
+ assert(mi->interp_filter == cm->mcomp_filter_type);
+ }
+
+ if (bsize < BLOCK_8X8) {
+ int j;
+ MB_PREDICTION_MODE blockmode;
+ int_mv blockmv;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ int idx, idy;
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ j = idy * 2 + idx;
+ blockmode = x->partition_info->bmi[j].mode;
+ blockmv = m->bmi[j].as_mv[0];
+ write_sb_mv_ref(bc, blockmode, mv_ref_p);
+ ++cm->counts.inter_mode[mi->mode_context[rf]]
+ [inter_mode_offset(blockmode)];
+
+ if (blockmode == NEWMV) {
+#ifdef ENTROPY_STATS
+ active_section = 11;
+#endif
+ vp9_encode_mv(cpi, bc, &blockmv.as_mv, &mi->best_mv.as_mv,
+ nmvc, allow_hp);
+
+ if (mi->ref_frame[1] > INTRA_FRAME)
+ vp9_encode_mv(cpi, bc,
+ &m->bmi[j].as_mv[1].as_mv,
+ &mi->best_second_mv.as_mv,
+ nmvc, allow_hp);
+ }
+ }
+ }
+ } else if (mode == NEWMV) {
+#ifdef ENTROPY_STATS
+ active_section = 5;
+#endif
+ vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv, &mi->best_mv.as_mv,
+ nmvc, allow_hp);
+
+ if (mi->ref_frame[1] > INTRA_FRAME)
+ vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
+ nmvc, allow_hp);
+ }
+ }
+}
+
+static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ vp9_writer *bc) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ const struct segmentation *const seg = &cm->seg;
+ MODE_INFO *m = mi_8x8[0];
+ const int ym = m->mbmi.mode;
+ const int segment_id = m->mbmi.segment_id;
+ MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride];
+ MODE_INFO *left_mi = mi_8x8[-1];
+
+ if (seg->update_map)
+ write_segment_id(bc, seg, m->mbmi.segment_id);
+
+ write_skip_coeff(cpi, segment_id, m, bc);
+
+ if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
+ write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc);
+
+ if (m->mbmi.sb_type >= BLOCK_8X8) {
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
+ const MB_PREDICTION_MODE L = xd->left_available ?
+ left_block_mode(m, left_mi, 0) : DC_PRED;
+ write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]);
+ } else {
+ int idx, idy;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type];
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ int i = idy * 2 + idx;
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i);
+ const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
+ left_block_mode(m, left_mi, i) : DC_PRED;
+ const int bm = m->bmi[i].as_mode;
+#ifdef ENTROPY_STATS
+ ++intra_mode_stats[A][L][bm];
+#endif
+ write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]);
+ }
+ }
+ }
+
+ write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]);
+}
+
+static void write_modes_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
+ TOKENEXTRA **tok, TOKENEXTRA *tok_end,
+ int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ MODE_INFO *m = mi_8x8[0];
+
+ if (m->mbmi.sb_type < BLOCK_8X8)
+ if (xd->ab_index > 0)
+ return;
+
+ xd->this_mi = mi_8x8[0];
+ xd->mi_8x8 = mi_8x8;
+
+ set_mi_row_col(&cpi->common, xd,
+ mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
+ mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type]);
+ if ((cm->frame_type == KEY_FRAME) || cm->intra_only) {
+ write_mb_modes_kf(cpi, mi_8x8, bc);
+#ifdef ENTROPY_STATS
+ active_section = 8;
+#endif
+ } else {
+ pack_inter_mode_mvs(cpi, m, bc);
+#ifdef ENTROPY_STATS
+ active_section = 1;
+#endif
+ }
+
+ assert(*tok < tok_end);
+ pack_mb_tokens(bc, tok, tok_end);
+}
+
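+// Recursively writes mode info for a superblock: the partition type is
+// coded first for blocks of at least 8x8 (with reduced signaling at frame
+// boundaries), then each resulting sub-block is either written directly or
+// recursed into for PARTITION_SPLIT.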
+static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
+ TOKENEXTRA **tok, TOKENEXTRA *tok_end,
+ int mi_row, int mi_col, BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ const int mis = cm->mode_info_stride;
+ int bsl = b_width_log2(bsize);
+ int bs = (1 << bsl) / 4; // mode_info step for subsize
+ int n;
+ PARTITION_TYPE partition = PARTITION_NONE;
+ BLOCK_SIZE subsize;
+ MODE_INFO *m = mi_8x8[0];
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ partition = partition_lookup[bsl][m->mbmi.sb_type];
+
+ if (bsize < BLOCK_8X8)
+ if (xd->ab_index > 0)
+ return;
+
+ if (bsize >= BLOCK_8X8) {
+ int pl;
+ const int idx = check_bsize_coverage(bs, cm->mi_rows, cm->mi_cols,
+ mi_row, mi_col);
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ // encode the partition information
+ if (idx == 0)
+ write_token(bc, vp9_partition_tree,
+ cm->fc.partition_prob[cm->frame_type][pl],
+ vp9_partition_encodings + partition);
+ else if (idx > 0)
+ vp9_write(bc, partition == PARTITION_SPLIT,
+ cm->fc.partition_prob[cm->frame_type][pl][idx]);
+ }
+
+ subsize = get_subsize(bsize, partition);
+ *(get_sb_index(xd, subsize)) = 0;
+
+ switch (partition) {
+ case PARTITION_NONE:
+ write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
+ break;
+ case PARTITION_HORZ:
+ write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
+ *(get_sb_index(xd, subsize)) = 1;
+ if ((mi_row + bs) < cm->mi_rows)
+ write_modes_b(cpi, mi_8x8 + bs * mis, bc, tok, tok_end, mi_row + bs,
+ mi_col);
+ break;
+ case PARTITION_VERT:
+ write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
+ *(get_sb_index(xd, subsize)) = 1;
+ if ((mi_col + bs) < cm->mi_cols)
+ write_modes_b(cpi, mi_8x8 + bs, bc, tok, tok_end, mi_row, mi_col + bs);
+ break;
+ case PARTITION_SPLIT:
+ for (n = 0; n < 4; n++) {
+ int j = n >> 1, i = n & 0x01;
+ *(get_sb_index(xd, subsize)) = n;
+ write_modes_sb(cpi, mi_8x8 + j * bs * mis + i * bs, bc, tok, tok_end,
+ mi_row + j * bs, mi_col + i * bs, subsize);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ // update partition context
+ if (bsize >= BLOCK_8X8 &&
+ (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ update_partition_context(xd, subsize, bsize);
+ }
+}
+
+static void write_modes(VP9_COMP *cpi, vp9_writer* const bc,
+ TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
+ VP9_COMMON *const cm = &cpi->common;
+ const int mis = cm->mode_info_stride;
+ int mi_row, mi_col;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible;
+ MODE_INFO **m_8x8;
+
+ mi_8x8 += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis;
+
+ for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end;
+ mi_row += 8, mi_8x8 += 8 * mis) {
+ m_8x8 = mi_8x8;
+ vp9_zero(cm->left_seg_context);
+ for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
+ mi_col += MI_BLOCK_SIZE, m_8x8 += MI_BLOCK_SIZE) {
+ write_modes_sb(cpi, m_8x8, bc, tok, tok_end, mi_row, mi_col,
+ BLOCK_64X64);
+ }
+ }
+}
+
+/* This function is used for debugging probability trees. */
+static void print_prob_tree(vp9_coeff_probs *coef_probs, int block_types) {
+ /* print coef probability tree */
+ int i, j, k, l, m;
+ FILE *f = fopen("enc_tree_probs.txt", "a");
+ fprintf(f, "{\n");
+ for (i = 0; i < block_types; i++) {
+ fprintf(f, " {\n");
+ for (j = 0; j < REF_TYPES; ++j) {
+ fprintf(f, " {\n");
+ for (k = 0; k < COEF_BANDS; k++) {
+ fprintf(f, " {\n");
+ for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
+ fprintf(f, " {");
+ for (m = 0; m < ENTROPY_NODES; m++) {
+ fprintf(f, "%3u, ",
+ (unsigned int)(coef_probs[i][j][k][l][m]));
+ }
+ }
+ fprintf(f, " }\n");
+ }
+ fprintf(f, " }\n");
+ }
+ fprintf(f, " }\n");
+ }
+ fprintf(f, "}\n");
+ fclose(f);
+}
+
+static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) {
+ vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size];
+ vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size];
+ unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
+ cpi->common.counts.eob_branch[tx_size];
+ vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size];
+ vp9_prob full_probs[ENTROPY_NODES];
+ int i, j, k, l;
+
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ if (l >= 3 && k == 0)
+ continue;
+ vp9_tree_probs_from_distribution(vp9_coef_tree,
+ full_probs,
+ coef_branch_ct[i][j][k][l],
+ coef_counts[i][j][k][l], 0);
+ vpx_memcpy(coef_probs[i][j][k][l], full_probs,
+ sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+ coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
+ coef_branch_ct[i][j][k][l][0][0];
+ coef_probs[i][j][k][l][0] =
+ get_binary_prob(coef_branch_ct[i][j][k][l][0][0],
+ coef_branch_ct[i][j][k][l][0][1]);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing) {
+ int t;
+ for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
+ context_counters[tx_size][i][j][k][l][t] +=
+ coef_counts[i][j][k][l][t];
+ context_counters[tx_size][i][j][k][l][MAX_ENTROPY_TOKENS] +=
+ eob_branch_ct[i][j][k][l];
+ }
+#endif
+ }
+ }
+ }
+ }
+}
+
+static void build_coeff_contexts(VP9_COMP *cpi) {
+ TX_SIZE t;
+ for (t = TX_4X4; t <= TX_32X32; t++)
+ build_tree_distribution(cpi, t);
+}
+
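+// Signals coefficient probability updates for one transform size. Mode 0
+// first does a full dry run to check whether the combined savings justify
+// any update at all; modes 1 and 2 skip the dry run, and mode 2 further
+// restricts the search to half of the coefficient bands and contexts.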
+static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
+ TX_SIZE tx_size) {
+ vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size];
+ vp9_coeff_probs_model *old_frame_coef_probs =
+ cpi->common.fc.coef_probs[tx_size];
+ vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size];
+ const vp9_prob upd = VP9_COEF_UPDATE_PROB;
+ const int entropy_nodes_update = UNCONSTRAINED_NODES;
+ int i, j, k, l, t;
+ switch (cpi->sf.use_fast_coef_updates) {
+ case 0: {
+      /* dry run to see if any update is needed at all */
+ int savings = 0;
+ int update[2] = {0, 0};
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ for (t = 0; t < entropy_nodes_update; ++t) {
+ vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
+ const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
+ int s;
+ int u = 0;
+
+ if (l >= 3 && k == 0)
+ continue;
+ if (t == PIVOT_NODE)
+ s = vp9_prob_diff_update_savings_search_model(
+ frame_branch_ct[i][j][k][l][0],
+ old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
+ else
+ s = vp9_prob_diff_update_savings_search(
+ frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
+ if (s > 0 && newp != oldp)
+ u = 1;
+ if (u)
+ savings += s - (int)(vp9_cost_zero(upd));
+ else
+ savings -= (int)(vp9_cost_zero(upd));
+ update[u]++;
+ }
+ }
+ }
+ }
+ }
+
+ // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
+ /* Is coef updated at all */
+ if (update[1] == 0 || savings < 0) {
+ vp9_write_bit(bc, 0);
+ return;
+ }
+ vp9_write_bit(bc, 1);
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ // calc probs and branch cts for this frame only
+ for (t = 0; t < entropy_nodes_update; ++t) {
+ vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
+ vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
+ const vp9_prob upd = VP9_COEF_UPDATE_PROB;
+ int s;
+ int u = 0;
+ if (l >= 3 && k == 0)
+ continue;
+ if (t == PIVOT_NODE)
+ s = vp9_prob_diff_update_savings_search_model(
+ frame_branch_ct[i][j][k][l][0],
+ old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
+ else
+ s = vp9_prob_diff_update_savings_search(
+ frame_branch_ct[i][j][k][l][t],
+ *oldp, &newp, upd);
+ if (s > 0 && newp != *oldp)
+ u = 1;
+ vp9_write(bc, u, upd);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ ++tree_update_hist[tx_size][i][j][k][l][t][u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ vp9_write_prob_diff_update(bc, newp, *oldp);
+ *oldp = newp;
+ }
+ }
+ }
+ }
+ }
+ }
+ return;
+ }
+
+ case 1:
+ case 2: {
+ const int prev_coef_contexts_to_update =
+ (cpi->sf.use_fast_coef_updates == 2 ?
+ PREV_COEF_CONTEXTS >> 1 : PREV_COEF_CONTEXTS);
+ const int coef_band_to_update =
+ (cpi->sf.use_fast_coef_updates == 2 ?
+ COEF_BANDS >> 1 : COEF_BANDS);
+ int updates = 0;
+ int noupdates_before_first = 0;
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ // calc probs and branch cts for this frame only
+ for (t = 0; t < entropy_nodes_update; ++t) {
+ vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
+ vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
+ int s;
+ int u = 0;
+ if (l >= 3 && k == 0)
+ continue;
+ if (l >= prev_coef_contexts_to_update ||
+ k >= coef_band_to_update) {
+ u = 0;
+ } else {
+ if (t == PIVOT_NODE)
+ s = vp9_prob_diff_update_savings_search_model(
+ frame_branch_ct[i][j][k][l][0],
+ old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
+ else
+ s = vp9_prob_diff_update_savings_search(
+ frame_branch_ct[i][j][k][l][t],
+ *oldp, &newp, upd);
+ if (s > 0 && newp != *oldp)
+ u = 1;
+ }
+ updates += u;
+ if (u == 0 && updates == 0) {
+ noupdates_before_first++;
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ ++tree_update_hist[tx_size][i][j][k][l][t][u];
+#endif
+ continue;
+ }
+ if (u == 1 && updates == 1) {
+ int v;
+ // first update
+ vp9_write_bit(bc, 1);
+ for (v = 0; v < noupdates_before_first; ++v)
+ vp9_write(bc, 0, upd);
+ }
+ vp9_write(bc, u, upd);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ ++tree_update_hist[tx_size][i][j][k][l][t][u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ vp9_write_prob_diff_update(bc, newp, *oldp);
+ *oldp = newp;
+ }
+ }
+ }
+ }
+ }
+ }
+ if (updates == 0) {
+ vp9_write_bit(bc, 0); // no updates
+ }
+ return;
+ }
+
+ default:
+ assert(0);
+ }
+}
+
+static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
+ const TX_MODE tx_mode = cpi->common.tx_mode;
+
+ vp9_clear_system_state();
+
+  // Build the coefficient contexts based on counts collected in the encode loop
+ build_coeff_contexts(cpi);
+
+ update_coef_probs_common(bc, cpi, TX_4X4);
+
+  // Only signal updates for transform sizes the frame's tx_mode allows.
+ if (tx_mode > ONLY_4X4)
+ update_coef_probs_common(bc, cpi, TX_8X8);
+
+ if (tx_mode > ALLOW_8X8)
+ update_coef_probs_common(bc, cpi, TX_16X16);
+
+ if (tx_mode > ALLOW_16X16)
+ update_coef_probs_common(bc, cpi, TX_32X32);
+}
+
+static void encode_loopfilter(struct loopfilter *lf,
+ struct vp9_write_bit_buffer *wb) {
+ int i;
+
+ // Encode the loop filter level and type
+ vp9_wb_write_literal(wb, lf->filter_level, 6);
+ vp9_wb_write_literal(wb, lf->sharpness_level, 3);
+
+ // Write out loop filter deltas applied at the MB level based on mode or
+ // ref frame (if they are enabled).
+ vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);
+
+ if (lf->mode_ref_delta_enabled) {
+    // Do the deltas need to be updated?
+ vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
+ if (lf->mode_ref_delta_update) {
+ // Send update
+ for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
+ const int delta = lf->ref_deltas[i];
+
+ // Frame level data
+ if (delta != lf->last_ref_deltas[i]) {
+ lf->last_ref_deltas[i] = delta;
+ vp9_wb_write_bit(wb, 1);
+
+ assert(delta != 0);
+ vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
+ vp9_wb_write_bit(wb, delta < 0);
+ } else {
+ vp9_wb_write_bit(wb, 0);
+ }
+ }
+
+ // Send update
+ for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
+ const int delta = lf->mode_deltas[i];
+ if (delta != lf->last_mode_deltas[i]) {
+ lf->last_mode_deltas[i] = delta;
+ vp9_wb_write_bit(wb, 1);
+
+ assert(delta != 0);
+ vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
+ vp9_wb_write_bit(wb, delta < 0);
+ } else {
+ vp9_wb_write_bit(wb, 0);
+ }
+ }
+ }
+ }
+}
+
+static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
+ if (delta_q != 0) {
+ vp9_wb_write_bit(wb, 1);
+ vp9_wb_write_literal(wb, abs(delta_q), 4);
+ vp9_wb_write_bit(wb, delta_q < 0);
+ } else {
+ vp9_wb_write_bit(wb, 0);
+ }
+}
+
+static void encode_quantization(VP9_COMMON *cm,
+ struct vp9_write_bit_buffer *wb) {
+ vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
+ write_delta_q(wb, cm->y_dc_delta_q);
+ write_delta_q(wb, cm->uv_dc_delta_q);
+ write_delta_q(wb, cm->uv_ac_delta_q);
+}
+
+
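+// Segment tree and prediction probabilities are only transmitted when they
+// differ from the implicit default MAX_PROB; each is preceded by an update
+// bit. Feature data is sent as a bounded magnitude, plus a sign bit for
+// signed features.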
+static void encode_segmentation(VP9_COMP *cpi,
+ struct vp9_write_bit_buffer *wb) {
+ int i, j;
+
+ struct segmentation *seg = &cpi->common.seg;
+
+ vp9_wb_write_bit(wb, seg->enabled);
+ if (!seg->enabled)
+ return;
+
+ // Segmentation map
+ vp9_wb_write_bit(wb, seg->update_map);
+ if (seg->update_map) {
+ // Select the coding strategy (temporal or spatial)
+ vp9_choose_segmap_coding_method(cpi);
+ // Write out probabilities used to decode unpredicted macro-block segments
+ for (i = 0; i < SEG_TREE_PROBS; i++) {
+ const int prob = seg->tree_probs[i];
+ const int update = prob != MAX_PROB;
+ vp9_wb_write_bit(wb, update);
+ if (update)
+ vp9_wb_write_literal(wb, prob, 8);
+ }
+
+ // Write out the chosen coding method.
+ vp9_wb_write_bit(wb, seg->temporal_update);
+ if (seg->temporal_update) {
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ const int prob = seg->pred_probs[i];
+ const int update = prob != MAX_PROB;
+ vp9_wb_write_bit(wb, update);
+ if (update)
+ vp9_wb_write_literal(wb, prob, 8);
+ }
+ }
+ }
+
+ // Segmentation data
+ vp9_wb_write_bit(wb, seg->update_data);
+ if (seg->update_data) {
+ vp9_wb_write_bit(wb, seg->abs_delta);
+
+ for (i = 0; i < MAX_SEGMENTS; i++) {
+ for (j = 0; j < SEG_LVL_MAX; j++) {
+ const int active = vp9_segfeature_active(seg, i, j);
+ vp9_wb_write_bit(wb, active);
+ if (active) {
+ const int data = vp9_get_segdata(seg, i, j);
+ const int data_max = vp9_seg_feature_data_max(j);
+
+ if (vp9_is_segfeature_signed(j)) {
+ vp9_encode_unsigned_max(wb, abs(data), data_max);
+ vp9_wb_write_bit(wb, data < 0);
+ } else {
+ vp9_encode_unsigned_max(wb, data, data_max);
+ }
+ }
+ }
+ }
+ }
+}
+
+
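+// The transform mode is coded as a two-bit literal for
+// MIN(tx_mode, ALLOW_32X32), plus one extra bit distinguishing ALLOW_32X32
+// from TX_MODE_SELECT; per-context probability updates follow only in the
+// TX_MODE_SELECT case.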
+static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ // Mode
+ vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
+ if (cm->tx_mode >= ALLOW_32X32)
+ vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);
+
+ // Probabilities
+ if (cm->tx_mode == TX_MODE_SELECT) {
+ int i, j;
+ unsigned int ct_8x8p[TX_SIZES - 3][2];
+ unsigned int ct_16x16p[TX_SIZES - 2][2];
+ unsigned int ct_32x32p[TX_SIZES - 1][2];
+
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i],
+ ct_8x8p);
+ for (j = 0; j < TX_SIZES - 3; j++)
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j],
+ MODE_UPDATE_PROB, ct_8x8p[j]);
+ }
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i],
+ ct_16x16p);
+ for (j = 0; j < TX_SIZES - 2; j++)
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
+ MODE_UPDATE_PROB, ct_16x16p[j]);
+ }
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
+ for (j = 0; j < TX_SIZES - 1; j++)
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
+ MODE_UPDATE_PROB, ct_32x32p[j]);
+ }
+#ifdef MODE_STATS
+ if (!cpi->dummy_packing)
+ update_tx_count_stats(cm);
+#endif
+ }
+}
+
+static void write_interp_filter_type(INTERPOLATIONFILTERTYPE type,
+ struct vp9_write_bit_buffer *wb) {
+ const int type_to_literal[] = { 1, 0, 2 };
+
+ vp9_wb_write_bit(wb, type == SWITCHABLE);
+ if (type != SWITCHABLE)
+ vp9_wb_write_literal(wb, type_to_literal[type], 2);
+}
+
+static void fix_mcomp_filter_type(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ if (cm->mcomp_filter_type == SWITCHABLE) {
+ // Check to see if only one of the filters is actually used
+ int count[SWITCHABLE_FILTERS];
+ int i, j, c = 0;
+ for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
+ count[i] = 0;
+ for (j = 0; j <= SWITCHABLE_FILTERS; ++j)
+ count[i] += cm->counts.switchable_interp[j][i];
+ c += (count[i] > 0);
+ }
+ if (c == 1) {
+ // Only one filter is used. So set the filter at frame level
+ for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
+ if (count[i]) {
+ cm->mcomp_filter_type = i;
+ break;
+ }
+ }
+ }
+ }
+}
+
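+// Tile columns are coded as a unary-style sequence: one '1' bit for each
+// doubling above the minimum, terminated by a '0' unless the maximum has
+// been reached. Rows use at most two bits for the values 0, 1 and 2.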
+static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) {
+ int min_log2_tile_cols, max_log2_tile_cols, ones;
+ vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+
+ // columns
+ ones = cm->log2_tile_cols - min_log2_tile_cols;
+ while (ones--)
+ vp9_wb_write_bit(wb, 1);
+
+ if (cm->log2_tile_cols < max_log2_tile_cols)
+ vp9_wb_write_bit(wb, 0);
+
+ // rows
+ vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
+ if (cm->log2_tile_rows != 0)
+ vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
+}
+
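+// Returns a bitmask with one bit per reference buffer slot indicating
+// which buffers the current frame refreshes. For example, with the
+// (hypothetical) assignment lst_fb_idx=0, gld_fb_idx=1, alt_fb_idx=2 and
+// all three refresh flags set, the mask would be 0b111.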
+static int get_refresh_mask(VP9_COMP *cpi) {
+  // Should the GF or ARF be updated with the transmitted frame?
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
+ !cpi->refresh_alt_ref_frame) {
+#else
+ if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame &&
+ !cpi->use_svc) {
+#endif
+ // Preserve the previously existing golden frame and update the frame in
+ // the alt ref slot instead. This is highly specific to the use of
+ // alt-ref as a forward reference, and this needs to be generalized as
+ // other uses are implemented (like RTC/temporal scaling)
+ //
+ // gld_fb_idx and alt_fb_idx need to be swapped for future frames, but
+ // that happens in vp9_onyx_if.c:update_reference_frames() so that it can
+ // be done outside of the recode loop.
+ return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
+ (cpi->refresh_golden_frame << cpi->alt_fb_idx);
+ } else {
+ int arf_idx = cpi->alt_fb_idx;
+#if CONFIG_MULTIPLE_ARF
+ // Determine which ARF buffer to use to encode this ARF frame.
+ if (cpi->multi_arf_enabled) {
+ int sn = cpi->sequence_number;
+ arf_idx = (cpi->frame_coding_order[sn] < 0) ?
+ cpi->arf_buffer_idx[sn + 1] :
+ cpi->arf_buffer_idx[sn];
+ }
+#endif
+ return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
+ (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
+ (cpi->refresh_alt_ref_frame << arf_idx);
+ }
+}
+
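+// Writes each tile's token data. Every tile except the last is prefixed
+// with its size as a 4-byte big-endian value (via write_be32) so the
+// decoder can locate tile boundaries without parsing the tile contents.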
+static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
+ VP9_COMMON *const cm = &cpi->common;
+ vp9_writer residual_bc;
+
+ int tile_row, tile_col;
+ TOKENEXTRA *tok[4][1 << 6], *tok_end;
+ size_t total_size = 0;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int tile_rows = 1 << cm->log2_tile_rows;
+
+ vpx_memset(cm->above_seg_context, 0, sizeof(PARTITION_CONTEXT) *
+ mi_cols_aligned_to_sb(cm->mi_cols));
+
+ tok[0][0] = cpi->tok;
+ for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+ if (tile_row)
+ tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] +
+ cpi->tok_count[tile_row - 1][tile_cols - 1];
+
+ for (tile_col = 1; tile_col < tile_cols; tile_col++)
+ tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
+ cpi->tok_count[tile_row][tile_col - 1];
+ }
+
+ for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(cm, tile_row);
+ for (tile_col = 0; tile_col < tile_cols; tile_col++) {
+ vp9_get_tile_col_offsets(cm, tile_col);
+ tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];
+
+ if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
+ vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
+ else
+ vp9_start_encode(&residual_bc, data_ptr + total_size);
+
+ write_modes(cpi, &residual_bc, &tok[tile_row][tile_col], tok_end);
+ assert(tok[tile_row][tile_col] == tok_end);
+ vp9_stop_encode(&residual_bc);
+ if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
+ // size of this tile
+ write_be32(data_ptr + total_size, residual_bc.pos);
+ total_size += 4;
+ }
+
+ total_size += residual_bc.pos;
+ }
+ }
+
+ return total_size;
+}
+
+static void write_display_size(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ const int scaling_active = cm->width != cm->display_width ||
+ cm->height != cm->display_height;
+ vp9_wb_write_bit(wb, scaling_active);
+ if (scaling_active) {
+ vp9_wb_write_literal(wb, cm->display_width - 1, 16);
+ vp9_wb_write_literal(wb, cm->display_height - 1, 16);
+ }
+}
+
+static void write_frame_size(VP9_COMP *cpi,
+ struct vp9_write_bit_buffer *wb) {
+ VP9_COMMON *const cm = &cpi->common;
+ vp9_wb_write_literal(wb, cm->width - 1, 16);
+ vp9_wb_write_literal(wb, cm->height - 1, 16);
+
+ write_display_size(cpi, wb);
+}
+
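+// If the frame has the same dimensions as one of its reference buffers,
+// only a per-reference "found" bit is coded; otherwise the explicit
+// 16-bit width-1/height-1 pair is written.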
+static void write_frame_size_with_refs(VP9_COMP *cpi,
+ struct vp9_write_bit_buffer *wb) {
+ VP9_COMMON *const cm = &cpi->common;
+ int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
+ cpi->alt_fb_idx};
+ int i, found = 0;
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
+ YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]];
+ found = cm->width == cfg->y_crop_width &&
+ cm->height == cfg->y_crop_height;
+
+    // TODO(ivan): This prevents a bug when more than 3 buffers are used. Do
+    // it in a better way.
+ if (cpi->use_svc) {
+ found = 0;
+ }
+ vp9_wb_write_bit(wb, found);
+ if (found) {
+ break;
+ }
+ }
+
+ if (!found) {
+ vp9_wb_write_literal(wb, cm->width - 1, 16);
+ vp9_wb_write_literal(wb, cm->height - 1, 16);
+ }
+
+ write_display_size(cpi, wb);
+}
+
+static void write_sync_code(struct vp9_write_bit_buffer *wb) {
+ vp9_wb_write_literal(wb, SYNC_CODE_0, 8);
+ vp9_wb_write_literal(wb, SYNC_CODE_1, 8);
+ vp9_wb_write_literal(wb, SYNC_CODE_2, 8);
+}
+
+static void write_uncompressed_header(VP9_COMP *cpi,
+ struct vp9_write_bit_buffer *wb) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+
+ // frame marker bits
+ vp9_wb_write_literal(wb, 0x2, 2);
+
+ // bitstream version.
+ // 00 - profile 0. 4:2:0 only
+ // 10 - profile 1. adds 4:4:4, 4:2:2, alpha
+ vp9_wb_write_bit(wb, cm->version);
+ vp9_wb_write_bit(wb, 0);
+
+ vp9_wb_write_bit(wb, 0);
+ vp9_wb_write_bit(wb, cm->frame_type);
+ vp9_wb_write_bit(wb, cm->show_frame);
+ vp9_wb_write_bit(wb, cm->error_resilient_mode);
+
+ if (cm->frame_type == KEY_FRAME) {
+ write_sync_code(wb);
+ // colorspaces
+ // 000 - Unknown
+ // 001 - BT.601
+ // 010 - BT.709
+ // 011 - SMPTE-170
+ // 100 - SMPTE-240
+ // 101 - Reserved
+ // 110 - Reserved
+ // 111 - sRGB (RGB)
+ vp9_wb_write_literal(wb, 0, 3);
+ if (1 /* colorspace != sRGB */) {
+ vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
+ if (cm->version == 1) {
+ vp9_wb_write_bit(wb, cm->subsampling_x);
+ vp9_wb_write_bit(wb, cm->subsampling_y);
+ vp9_wb_write_bit(wb, 0); // has extra plane
+ }
+ } else {
+ assert(cm->version == 1);
+ vp9_wb_write_bit(wb, 0); // has extra plane
+ }
+
+ write_frame_size(cpi, wb);
+ } else {
+ const int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
+ cpi->alt_fb_idx};
+ if (!cm->show_frame)
+ vp9_wb_write_bit(wb, cm->intra_only);
+
+ if (!cm->error_resilient_mode)
+ vp9_wb_write_literal(wb, cm->reset_frame_context, 2);
+
+ if (cm->intra_only) {
+ write_sync_code(wb);
+
+ vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
+ write_frame_size(cpi, wb);
+ } else {
+ int i;
+ vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
+ vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2);
+ vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]);
+ }
+
+ write_frame_size_with_refs(cpi, wb);
+
+ vp9_wb_write_bit(wb, xd->allow_high_precision_mv);
+
+ fix_mcomp_filter_type(cpi);
+ write_interp_filter_type(cm->mcomp_filter_type, wb);
+ }
+ }
+
+ if (!cm->error_resilient_mode) {
+ vp9_wb_write_bit(wb, cm->refresh_frame_context);
+ vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
+ }
+
+ vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2);
+
+ encode_loopfilter(&cm->lf, wb);
+ encode_quantization(cm, wb);
+ encode_segmentation(cpi, wb);
+
+ write_tile_info(cm, wb);
+}
+
+static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ FRAME_CONTEXT *const fc = &cm->fc;
+ vp9_writer header_bc;
+
+ vp9_start_encode(&header_bc, data);
+
+ if (xd->lossless)
+ cm->tx_mode = ONLY_4X4;
+ else
+ encode_txfm_probs(cpi, &header_bc);
+
+ update_coef_probs(cpi, &header_bc);
+
+#ifdef ENTROPY_STATS
+ active_section = 2;
+#endif
+
+ vp9_update_skip_probs(cpi, &header_bc);
+
+ if (cm->frame_type != KEY_FRAME) {
+ int i;
+#ifdef ENTROPY_STATS
+ active_section = 1;
+#endif
+
+ update_inter_mode_probs(cm, &header_bc);
+ vp9_zero(cm->counts.inter_mode);
+
+ if (cm->mcomp_filter_type == SWITCHABLE)
+ update_switchable_interp_probs(cpi, &header_bc);
+
+ for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
+ vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
+ MODE_UPDATE_PROB,
+ cpi->intra_inter_count[i]);
+
+ if (cm->allow_comp_inter_inter) {
+ const int comp_pred_mode = cpi->common.comp_pred_mode;
+ const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY;
+ const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION;
+
+ vp9_write_bit(&header_bc, use_compound_pred);
+ if (use_compound_pred) {
+ vp9_write_bit(&header_bc, use_hybrid_pred);
+ if (use_hybrid_pred)
+ for (i = 0; i < COMP_INTER_CONTEXTS; i++)
+ vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
+ MODE_UPDATE_PROB,
+ cpi->comp_inter_count[i]);
+ }
+ }
+
+ if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) {
+ for (i = 0; i < REF_CONTEXTS; i++) {
+ vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
+ MODE_UPDATE_PROB,
+ cpi->single_ref_count[i][0]);
+ vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
+ MODE_UPDATE_PROB,
+ cpi->single_ref_count[i][1]);
+ }
+ }
+
+ if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
+ for (i = 0; i < REF_CONTEXTS; i++)
+ vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
+ MODE_UPDATE_PROB,
+ cpi->comp_ref_count[i]);
+
+ update_mbintra_mode_probs(cpi, &header_bc);
+
+ for (i = 0; i < NUM_PARTITION_CONTEXTS; ++i) {
+ vp9_prob pnew[PARTITION_TYPES - 1];
+ unsigned int bct[PARTITION_TYPES - 1][2];
+ update_mode(&header_bc, PARTITION_TYPES,
+ vp9_partition_tree, pnew,
+ fc->partition_prob[cm->frame_type][i], bct,
+ (unsigned int *)cpi->partition_count[i]);
+ }
+
+ vp9_write_nmv_probs(cpi, xd->allow_high_precision_mv, &header_bc);
+ }
+
+ vp9_stop_encode(&header_bc);
+ assert(header_bc.pos <= 0xffff);
+
+ return header_bc.pos;
+}
+
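+// Packed frame layout: the uncompressed header, then a 16-bit field for the
+// compressed header size (written as a zero placeholder and patched through
+// saved_wb once the size is known), then the compressed header itself,
+// followed by the tile data.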
+void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
+ uint8_t *data = dest;
+ size_t first_part_size;
+ struct vp9_write_bit_buffer wb = {data, 0};
+ struct vp9_write_bit_buffer saved_wb;
+
+ write_uncompressed_header(cpi, &wb);
+ saved_wb = wb;
+  vp9_wb_write_literal(&wb, 0, 16);  // first part size not known yet; patched below
+
+ data += vp9_rb_bytes_written(&wb);
+
+ vp9_compute_update_table();
+
+#ifdef ENTROPY_STATS
+ if (cm->frame_type == INTER_FRAME)
+ active_section = 0;
+ else
+ active_section = 7;
+#endif
+
+ vp9_clear_system_state(); // __asm emms;
+
+ first_part_size = write_compressed_header(cpi, data);
+ data += first_part_size;
+ vp9_wb_write_literal(&saved_wb, first_part_size, 16);
+
+ data += encode_tiles(cpi, data);
+
+ *size = data - dest;
+}
+
+#ifdef ENTROPY_STATS
+static void print_tree_update_for_type(FILE *f,
+ vp9_coeff_stats *tree_update_hist,
+ int block_types, const char *header) {
+ int i, j, k, l, m;
+
+ fprintf(f, "const vp9_coeff_prob %s = {\n", header);
+ for (i = 0; i < block_types; i++) {
+ fprintf(f, " { \n");
+ for (j = 0; j < REF_TYPES; j++) {
+ fprintf(f, " { \n");
+ for (k = 0; k < COEF_BANDS; k++) {
+ fprintf(f, " {\n");
+ for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
+ fprintf(f, " {");
+ for (m = 0; m < ENTROPY_NODES; m++) {
+ fprintf(f, "%3d, ",
+ get_binary_prob(tree_update_hist[i][j][k][l][m][0],
+ tree_update_hist[i][j][k][l][m][1]));
+ }
+ fprintf(f, "},\n");
+ }
+ fprintf(f, "},\n");
+ }
+ fprintf(f, " },\n");
+ }
+ fprintf(f, " },\n");
+ }
+ fprintf(f, "};\n");
+}
+
+void print_tree_update_probs() {
+ FILE *f = fopen("coefupdprob.h", "w");
+ fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");
+
+ print_tree_update_for_type(f, tree_update_hist[TX_4X4], BLOCK_TYPES,
+ "vp9_coef_update_probs_4x4[BLOCK_TYPES]");
+ print_tree_update_for_type(f, tree_update_hist[TX_8X8], BLOCK_TYPES,
+ "vp9_coef_update_probs_8x8[BLOCK_TYPES]");
+ print_tree_update_for_type(f, tree_update_hist[TX_16X16], BLOCK_TYPES,
+ "vp9_coef_update_probs_16x16[BLOCK_TYPES]");
+ print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES,
+ "vp9_coef_update_probs_32x32[BLOCK_TYPES]");
+
+ fclose(f);
+ f = fopen("treeupdate.bin", "wb");
+ fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f);
+ fclose(f);
+}
+#endif
diff --git a/libvpx/vp9/encoder/vp9_bitstream.h b/libvpx/vp9/encoder/vp9_bitstream.h
new file mode 100644
index 0000000..b3dbee1
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_bitstream.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_BITSTREAM_H_
+#define VP9_ENCODER_VP9_BITSTREAM_H_
+
+void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *bc);
+
+#endif // VP9_ENCODER_VP9_BITSTREAM_H_
diff --git a/libvpx/vp9/encoder/vp9_block.h b/libvpx/vp9/encoder/vp9_block.h
new file mode 100644
index 0000000..013047e
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_block.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_BLOCK_H_
+#define VP9_ENCODER_VP9_BLOCK_H_
+
+#include "vp9/common/vp9_onyx.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vpx_ports/mem.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+// motion search site
+typedef struct {
+ MV mv;
+ int offset;
+} search_site;
+
+typedef struct {
+ struct {
+ MB_PREDICTION_MODE mode;
+ } bmi[4];
+} PARTITION_INFO;
+
+// Structure to hold snapshot of coding context during the mode picking process
+// TODO Do we need all of these?
+typedef struct {
+ MODE_INFO mic;
+ PARTITION_INFO partition_info;
+ int skip;
+ int_mv best_ref_mv;
+ int_mv second_best_ref_mv;
+ int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
+ int rate;
+ int distortion;
+ int64_t intra_error;
+ int best_mode_index;
+ int rddiv;
+ int rdmult;
+ int hybrid_pred_diff;
+ int comp_pred_diff;
+ int single_pred_diff;
+ int64_t tx_rd_diff[TX_MODES];
+ int64_t best_filter_diff[SWITCHABLE_FILTERS + 1];
+
+ // motion vector cache for adaptive motion search control in partition
+ // search loop
+ int_mv pred_mv[MAX_REF_FRAMES];
+
+ // Bit flag for each mode whether it has high error in comparison to others.
+ unsigned int modes_with_high_error;
+
+ // Bit flag for each ref frame whether it has high error compared to others.
+ unsigned int frames_with_high_error;
+} PICK_MODE_CONTEXT;
+
+struct macroblock_plane {
+ DECLARE_ALIGNED(16, int16_t, src_diff[64*64]);
+ DECLARE_ALIGNED(16, int16_t, coeff[64*64]);
+ struct buf_2d src;
+
+  // Quantizer settings
+ int16_t *quant;
+ int16_t *quant_shift;
+ int16_t *zbin;
+ int16_t *round;
+
+ // Zbin Over Quant value
+ int16_t zbin_extra;
+};
+
+/* The [2] dimension is for whether we skip the EOB node (i.e. if previous
+ * coefficient in this block was zero) or not. */
+typedef unsigned int vp9_coeff_cost[BLOCK_TYPES][REF_TYPES][COEF_BANDS][2]
+ [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
+
+typedef struct macroblock MACROBLOCK;
+struct macroblock {
+ struct macroblock_plane plane[MAX_MB_PLANE];
+
+ MACROBLOCKD e_mbd;
+ int skip_block;
+ PARTITION_INFO *partition_info; /* work pointer */
+ PARTITION_INFO *pi; /* Corresponds to upper left visible macroblock */
+ PARTITION_INFO *pip; /* Base of allocated array */
+
+ search_site *ss;
+ int ss_count;
+ int searches_per_step;
+
+ int errorperbit;
+ int sadperbit16;
+ int sadperbit4;
+ int rddiv;
+ int rdmult;
+ unsigned int *mb_activity_ptr;
+ int *mb_norm_activity_ptr;
+ signed int act_zbin_adj;
+
+ int mv_best_ref_index[MAX_REF_FRAMES];
+ unsigned int max_mv_context[MAX_REF_FRAMES];
+ unsigned int source_variance;
+
+ int nmvjointcost[MV_JOINTS];
+ int nmvcosts[2][MV_VALS];
+ int *nmvcost[2];
+ int nmvcosts_hp[2][MV_VALS];
+ int *nmvcost_hp[2];
+ int **mvcost;
+
+ int nmvjointsadcost[MV_JOINTS];
+ int nmvsadcosts[2][MV_VALS];
+ int *nmvsadcost[2];
+ int nmvsadcosts_hp[2][MV_VALS];
+ int *nmvsadcost_hp[2];
+ int **mvsadcost;
+
+ int mbmode_cost[MB_MODE_COUNT];
+ unsigned inter_mode_cost[INTER_MODE_CONTEXTS][MB_MODE_COUNT - NEARESTMV];
+ int intra_uv_mode_cost[2][MB_MODE_COUNT];
+ int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES];
+ int switchable_interp_costs[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS];
+
+ // These define limits to motion vector components to prevent them
+ // from extending outside the UMV borders
+ int mv_col_min;
+ int mv_col_max;
+ int mv_row_min;
+ int mv_row_max;
+
+ int skip;
+
+ int encode_breakout;
+
+ unsigned char *active_ptr;
+
+ // note that token_costs is the cost when eob node is skipped
+ vp9_coeff_cost token_costs[TX_SIZES];
+
+ int optimize;
+
+  // Indicates whether we are in the RD search loop or the final encode pass.
+ int use_lp32x32fdct;
+ int skip_encode;
+
+ // Used to store sub partition's choices.
+ int fast_ms;
+ int_mv pred_mv[MAX_REF_FRAMES];
+ int subblock_ref;
+
+ // TODO(jingning): Need to refactor the structure arrays that buffers the
+ // coding mode decisions of each partition type.
+ PICK_MODE_CONTEXT ab4x4_context[4][4][4];
+ PICK_MODE_CONTEXT sb8x4_context[4][4][4];
+ PICK_MODE_CONTEXT sb4x8_context[4][4][4];
+ PICK_MODE_CONTEXT sb8x8_context[4][4][4];
+ PICK_MODE_CONTEXT sb8x16_context[4][4][2];
+ PICK_MODE_CONTEXT sb16x8_context[4][4][2];
+ PICK_MODE_CONTEXT mb_context[4][4];
+ PICK_MODE_CONTEXT sb32x16_context[4][2];
+ PICK_MODE_CONTEXT sb16x32_context[4][2];
+ // when 4 MBs share coding parameters:
+ PICK_MODE_CONTEXT sb32_context[4];
+ PICK_MODE_CONTEXT sb32x64_context[2];
+ PICK_MODE_CONTEXT sb64x32_context[2];
+ PICK_MODE_CONTEXT sb64_context;
+ int partition_cost[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
+
+ BLOCK_SIZE b_partitioning[4][4][4];
+ BLOCK_SIZE mb_partitioning[4][4];
+ BLOCK_SIZE sb_partitioning[4];
+ BLOCK_SIZE sb64_partitioning;
+
+ void (*fwd_txm4x4)(int16_t *input, int16_t *output, int pitch);
+ void (*fwd_txm8x4)(int16_t *input, int16_t *output, int pitch);
+ void (*fwd_txm8x8)(int16_t *input, int16_t *output, int pitch);
+ void (*fwd_txm16x16)(int16_t *input, int16_t *output, int pitch);
+ void (*quantize_b_4x4)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
+};
+
+#endif // VP9_ENCODER_VP9_BLOCK_H_
diff --git a/libvpx/vp9/encoder/vp9_boolhuff.c b/libvpx/vp9/encoder/vp9_boolhuff.c
new file mode 100644
index 0000000..0f1aa59
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_boolhuff.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include "vp9/encoder/vp9_boolhuff.h"
+#include "vp9/common/vp9_entropy.h"
+
+#if defined(SECTIONBITS_OUTPUT)
+unsigned __int64 Sectionbits[500];
+
+#endif
+
+#ifdef ENTROPY_STATS
+unsigned int active_section = 0;
+#endif
+
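+// Approximate cost, in 1/256th-bit units, of coding a zero with the given
+// 8-bit probability: roughly -256 * log2(p / 256). Entry 128 (an even
+// chance) costs 255, i.e. essentially one bit.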
+const unsigned int vp9_prob_cost[256] = {
+ 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046,
+ 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778,
+ 767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625,
+ 617, 609, 602, 594, 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516,
+ 511, 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442, 437, 433,
+ 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385, 381, 377, 373, 369, 365,
+ 361, 357, 353, 349, 346, 342, 338, 335, 331, 328, 324, 321, 317, 314, 311, 307,
+ 304, 301, 297, 294, 291, 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257,
+ 255, 252, 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, 214,
+ 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184, 181, 179, 177, 174,
+ 172, 170, 168, 165, 163, 161, 159, 156, 154, 152, 150, 148, 145, 143, 141, 139,
+ 137, 135, 133, 131, 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107,
+ 105, 103, 101, 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77,
+ 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55, 53, 51, 50,
+ 48, 46, 45, 43, 41, 40, 38, 37, 35, 33, 32, 30, 29, 27, 25, 24,
+ 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 1
+};
+
+void vp9_start_encode(vp9_writer *br, uint8_t *source) {
+ br->lowvalue = 0;
+ br->range = 255;
+ br->value = 0;
+ br->count = -24;
+ br->buffer = source;
+ br->pos = 0;
+ vp9_write_bit(br, 0);
+}
+
+void vp9_stop_encode(vp9_writer *br) {
+ int i;
+
+ for (i = 0; i < 32; i++)
+ vp9_write_bit(br, 0);
+
+  // Ensure there's no ambiguous collision with any index marker bytes
+ if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0)
+ br->buffer[br->pos++] = 0;
+}
+
diff --git a/libvpx/vp9/encoder/vp9_boolhuff.h b/libvpx/vp9/encoder/vp9_boolhuff.h
new file mode 100644
index 0000000..c3f340d
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_boolhuff.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/****************************************************************************
+*
+* Module Title : vp9_boolhuff.h
+*
+* Description : Bool Coder header file.
+*
+****************************************************************************/
+#ifndef VP9_ENCODER_VP9_BOOLHUFF_H_
+#define VP9_ENCODER_VP9_BOOLHUFF_H_
+
+#include "vpx_ports/mem.h"
+
+typedef struct {
+ unsigned int lowvalue;
+ unsigned int range;
+ unsigned int value;
+ int count;
+ unsigned int pos;
+ uint8_t *buffer;
+
+  // Variables used to track bit costs without outputting to the bitstream
+ unsigned int measure_cost;
+ unsigned long bit_counter;
+} vp9_writer;
+
+extern const unsigned int vp9_prob_cost[256];
+
+void vp9_start_encode(vp9_writer *bc, uint8_t *buffer);
+void vp9_stop_encode(vp9_writer *bc);
+
+DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]);
+
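+// One binary arithmetic coding step: the current range is split in
+// proportion to an 8-bit probability, the low value and range are updated
+// for the coded bit, and the range is renormalized using the vp9_norm
+// shift table, emitting whole bytes (with carry propagation through any
+// pending 0xff bytes) as they become available.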
+static void vp9_write(vp9_writer *br, int bit, int probability) {
+ unsigned int split;
+ int count = br->count;
+ unsigned int range = br->range;
+ unsigned int lowvalue = br->lowvalue;
+ register unsigned int shift;
+
+#ifdef ENTROPY_STATS
+#if defined(SECTIONBITS_OUTPUT)
+
+ if (bit)
+ Sectionbits[active_section] += vp9_prob_cost[255 - probability];
+ else
+ Sectionbits[active_section] += vp9_prob_cost[probability];
+
+#endif
+#endif
+
+ split = 1 + (((range - 1) * probability) >> 8);
+
+ range = split;
+
+ if (bit) {
+ lowvalue += split;
+ range = br->range - split;
+ }
+
+ shift = vp9_norm[range];
+
+ range <<= shift;
+ count += shift;
+
+ if (count >= 0) {
+ int offset = shift - count;
+
+ if ((lowvalue << (offset - 1)) & 0x80000000) {
+ int x = br->pos - 1;
+
+ while (x >= 0 && br->buffer[x] == 0xff) {
+ br->buffer[x] = 0;
+ x--;
+ }
+
+ br->buffer[x] += 1;
+ }
+
+ br->buffer[br->pos++] = (lowvalue >> (24 - offset));
+ lowvalue <<= offset;
+ shift = count;
+ lowvalue &= 0xffffff;
+ count -= 8;
+ }
+
+ lowvalue <<= shift;
+ br->count = count;
+ br->lowvalue = lowvalue;
+ br->range = range;
+}
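+
+// A rough worked example: with range == 255 and probability == 128,
+// split == 1 + ((254 * 128) >> 8) == 128. Coding a 0 keeps the lower
+// subinterval (range becomes 128); coding a 1 adds 128 to lowvalue and keeps
+// the upper 127 values. vp9_norm[range] then gives the shift that brings
+// range back above 128, and completed bytes are emitted with carry
+// propagation through any preceding 0xff bytes.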
+
+static void vp9_write_bit(vp9_writer *w, int bit) {
+ vp9_write(w, bit, 128); // vp9_prob_half
+}
+
+static void vp9_write_literal(vp9_writer *w, int data, int bits) {
+ int bit;
+
+ for (bit = bits - 1; bit >= 0; bit--)
+ vp9_write_bit(w, 1 & (data >> bit));
+}
+
+
+#endif // VP9_ENCODER_VP9_BOOLHUFF_H_
diff --git a/libvpx/vp9/encoder/vp9_dct.c b/libvpx/vp9/encoder/vp9_dct.c
new file mode 100644
index 0000000..4f4ad04
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_dct.c
@@ -0,0 +1,1385 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <assert.h>
+#include <math.h>
+#include "./vpx_config.h"
+#include "vp9/common/vp9_systemdependent.h"
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_idct.h"
+
+static void fdct4_1d(int16_t *input, int16_t *output) {
+ int16_t step[4];
+ int temp1, temp2;
+
+ step[0] = input[0] + input[3];
+ step[1] = input[1] + input[2];
+ step[2] = input[1] - input[2];
+ step[3] = input[0] - input[3];
+
+ temp1 = (step[0] + step[1]) * cospi_16_64;
+ temp2 = (step[0] - step[1]) * cospi_16_64;
+ output[0] = dct_const_round_shift(temp1);
+ output[2] = dct_const_round_shift(temp2);
+ temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
+ temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
+ output[1] = dct_const_round_shift(temp1);
+ output[3] = dct_const_round_shift(temp2);
+}
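+
+// Numerically, cospi_16_64 == round(16384 * cos(16 * pi / 64)) == 11585 and
+// dct_const_round_shift() rounds away the 14 fractional bits, so e.g.
+// output[0] ~= 0.7071 * (input[0] + input[1] + input[2] + input[3]): an
+// orthonormal 4-point DCT with a per-pass gain of sqrt(2).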
+
+void vp9_short_fdct4x4_c(int16_t *input, int16_t *output, int pitch) {
+ // The 2D transform is done with two passes which are actually pretty
+ // similar. In the first one, we transform the columns and transpose
+ // the results. In the second one, we transform the rows. To achieve that,
+  // as the first pass results are transposed, we transpose the columns (that
+  // is, the transposed rows) and transpose the results (so that they go back
+  // to normal/row positions).
+ const int stride = pitch >> 1;
+ int pass;
+ // We need an intermediate buffer between passes.
+ int16_t intermediate[4 * 4];
+ int16_t *in = input;
+ int16_t *out = intermediate;
+ // Do the two transform/transpose passes
+ for (pass = 0; pass < 2; ++pass) {
+ /*canbe16*/ int input[4];
+ /*canbe16*/ int step[4];
+ /*needs32*/ int temp1, temp2;
+ int i;
+ for (i = 0; i < 4; ++i) {
+ // Load inputs.
+ if (0 == pass) {
+ input[0] = in[0 * stride] << 4;
+ input[1] = in[1 * stride] << 4;
+ input[2] = in[2 * stride] << 4;
+ input[3] = in[3 * stride] << 4;
+ if (i == 0 && input[0]) {
+ input[0] += 1;
+ }
+ } else {
+ input[0] = in[0 * 4];
+ input[1] = in[1 * 4];
+ input[2] = in[2 * 4];
+ input[3] = in[3 * 4];
+ }
+ // Transform.
+ step[0] = input[0] + input[3];
+ step[1] = input[1] + input[2];
+ step[2] = input[1] - input[2];
+ step[3] = input[0] - input[3];
+ temp1 = (step[0] + step[1]) * cospi_16_64;
+ temp2 = (step[0] - step[1]) * cospi_16_64;
+ out[0] = dct_const_round_shift(temp1);
+ out[2] = dct_const_round_shift(temp2);
+ temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
+ temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
+ out[1] = dct_const_round_shift(temp1);
+ out[3] = dct_const_round_shift(temp2);
+ // Do next column (which is a transposed row in second/horizontal pass)
+ in++;
+ out += 4;
+ }
+    // Set up in/out for the next pass.
+ in = intermediate;
+ out = output;
+ }
+
+ {
+ int i, j;
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ output[j + i * 4] = (output[j + i * 4] + 1) >> 2;
+ }
+ }
+}
+
+static void fadst4_1d(int16_t *input, int16_t *output) {
+ int x0, x1, x2, x3;
+ int s0, s1, s2, s3, s4, s5, s6, s7;
+
+ x0 = input[0];
+ x1 = input[1];
+ x2 = input[2];
+ x3 = input[3];
+
+ if (!(x0 | x1 | x2 | x3)) {
+ output[0] = output[1] = output[2] = output[3] = 0;
+ return;
+ }
+
+ s0 = sinpi_1_9 * x0;
+ s1 = sinpi_4_9 * x0;
+ s2 = sinpi_2_9 * x1;
+ s3 = sinpi_1_9 * x1;
+ s4 = sinpi_3_9 * x2;
+ s5 = sinpi_4_9 * x3;
+ s6 = sinpi_2_9 * x3;
+ s7 = x0 + x1 - x3;
+
+ x0 = s0 + s2 + s5;
+ x1 = sinpi_3_9 * s7;
+ x2 = s1 - s3 + s6;
+ x3 = s4;
+
+ s0 = x0 + x3;
+ s1 = x1;
+ s2 = x2 - x3;
+ s3 = x2 - x0 + x3;
+
+ // 1-D transform scaling factor is sqrt(2).
+ output[0] = dct_const_round_shift(s0);
+ output[1] = dct_const_round_shift(s1);
+ output[2] = dct_const_round_shift(s2);
+ output[3] = dct_const_round_shift(s3);
+}
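+
+// The sinpi_k_9 constants appear to be round(16384 * (2 * sqrt(2) / 3) *
+// sin(k * pi / 9)), so after the rounding shift this is a 4-point ADST
+// carrying the same sqrt(2) per-pass scaling noted above.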
+
+static const transform_2d FHT_4[] = {
+ { fdct4_1d, fdct4_1d }, // DCT_DCT = 0
+ { fadst4_1d, fdct4_1d }, // ADST_DCT = 1
+ { fdct4_1d, fadst4_1d }, // DCT_ADST = 2
+ { fadst4_1d, fadst4_1d } // ADST_ADST = 3
+};
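+
+// Each entry is { column transform, row transform }; the encoder typically
+// selects ADST along the intra prediction direction, where residual energy
+// tends to grow away from the predicted edge, and DCT otherwise.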
+
+void vp9_short_fht4x4_c(int16_t *input, int16_t *output,
+ int pitch, TX_TYPE tx_type) {
+ int16_t out[4 * 4];
+ int16_t *outptr = &out[0];
+ int i, j;
+ int16_t temp_in[4], temp_out[4];
+ const transform_2d ht = FHT_4[tx_type];
+
+ // Columns
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ temp_in[j] = input[j * pitch + i] << 4;
+ if (i == 0 && temp_in[0])
+ temp_in[0] += 1;
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 4; ++j)
+ outptr[j * 4 + i] = temp_out[j];
+ }
+
+ // Rows
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ temp_in[j] = out[j + i * 4];
+ ht.rows(temp_in, temp_out);
+ for (j = 0; j < 4; ++j)
+ output[j + i * 4] = (temp_out[j] + 1) >> 2;
+ }
+}
+
+void vp9_short_fdct8x4_c(int16_t *input, int16_t *output, int pitch) {
+ vp9_short_fdct4x4_c(input, output, pitch);
+ vp9_short_fdct4x4_c(input + 4, output + 16, pitch);
+}
+
+static void fdct8_1d(int16_t *input, int16_t *output) {
+ /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7;
+ /*needs32*/ int t0, t1, t2, t3;
+ /*canbe16*/ int x0, x1, x2, x3;
+
+ // stage 1
+ s0 = input[0] + input[7];
+ s1 = input[1] + input[6];
+ s2 = input[2] + input[5];
+ s3 = input[3] + input[4];
+ s4 = input[3] - input[4];
+ s5 = input[2] - input[5];
+ s6 = input[1] - input[6];
+ s7 = input[0] - input[7];
+
+ // fdct4_1d(step, step);
+ x0 = s0 + s3;
+ x1 = s1 + s2;
+ x2 = s1 - s2;
+ x3 = s0 - s3;
+ t0 = (x0 + x1) * cospi_16_64;
+ t1 = (x0 - x1) * cospi_16_64;
+ t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+ t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
+ output[0] = dct_const_round_shift(t0);
+ output[2] = dct_const_round_shift(t2);
+ output[4] = dct_const_round_shift(t1);
+ output[6] = dct_const_round_shift(t3);
+
+ // Stage 2
+ t0 = (s6 - s5) * cospi_16_64;
+ t1 = (s6 + s5) * cospi_16_64;
+ t2 = dct_const_round_shift(t0);
+ t3 = dct_const_round_shift(t1);
+
+ // Stage 3
+ x0 = s4 + t2;
+ x1 = s4 - t2;
+ x2 = s7 - t3;
+ x3 = s7 + t3;
+
+ // Stage 4
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ output[1] = dct_const_round_shift(t0);
+ output[3] = dct_const_round_shift(t2);
+ output[5] = dct_const_round_shift(t1);
+ output[7] = dct_const_round_shift(t3);
+}
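+
+// As in fdct4_1d, the cospi_k_64 constants are round(16384 * cos(k * pi /
+// 64)). A 1-D N-point pass here has a gain of sqrt(N / 2) relative to the
+// orthonormal transform, i.e. a gain of 2 for this 8-point case.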
+
+void vp9_short_fdct8x8_c(int16_t *input, int16_t *final_output, int pitch) {
+ const int stride = pitch >> 1;
+ int i, j;
+ int16_t intermediate[64];
+
+ // Transform columns
+ {
+ int16_t *output = intermediate;
+ /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7;
+ /*needs32*/ int t0, t1, t2, t3;
+ /*canbe16*/ int x0, x1, x2, x3;
+
+ int i;
+ for (i = 0; i < 8; i++) {
+ // stage 1
+ s0 = (input[0 * stride] + input[7 * stride]) << 2;
+ s1 = (input[1 * stride] + input[6 * stride]) << 2;
+ s2 = (input[2 * stride] + input[5 * stride]) << 2;
+ s3 = (input[3 * stride] + input[4 * stride]) << 2;
+ s4 = (input[3 * stride] - input[4 * stride]) << 2;
+ s5 = (input[2 * stride] - input[5 * stride]) << 2;
+ s6 = (input[1 * stride] - input[6 * stride]) << 2;
+ s7 = (input[0 * stride] - input[7 * stride]) << 2;
+
+ // fdct4_1d(step, step);
+ x0 = s0 + s3;
+ x1 = s1 + s2;
+ x2 = s1 - s2;
+ x3 = s0 - s3;
+ t0 = (x0 + x1) * cospi_16_64;
+ t1 = (x0 - x1) * cospi_16_64;
+ t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+ t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
+ output[0 * 8] = dct_const_round_shift(t0);
+ output[2 * 8] = dct_const_round_shift(t2);
+ output[4 * 8] = dct_const_round_shift(t1);
+ output[6 * 8] = dct_const_round_shift(t3);
+
+ // Stage 2
+ t0 = (s6 - s5) * cospi_16_64;
+ t1 = (s6 + s5) * cospi_16_64;
+ t2 = dct_const_round_shift(t0);
+ t3 = dct_const_round_shift(t1);
+
+ // Stage 3
+ x0 = s4 + t2;
+ x1 = s4 - t2;
+ x2 = s7 - t3;
+ x3 = s7 + t3;
+
+ // Stage 4
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ output[1 * 8] = dct_const_round_shift(t0);
+ output[3 * 8] = dct_const_round_shift(t2);
+ output[5 * 8] = dct_const_round_shift(t1);
+ output[7 * 8] = dct_const_round_shift(t3);
+ input++;
+ output++;
+ }
+ }
+
+ // Rows
+ for (i = 0; i < 8; ++i) {
+ fdct8_1d(&intermediate[i * 8], &final_output[i * 8]);
+ for (j = 0; j < 8; ++j)
+ final_output[j + i * 8] /= 2;
+ }
+}
+
+void vp9_short_fdct16x16_c(int16_t *input, int16_t *output, int pitch) {
+ // The 2D transform is done with two passes which are actually pretty
+ // similar. In the first one, we transform the columns and transpose
+ // the results. In the second one, we transform the rows. To achieve that,
+  // as the first pass results are transposed, we transpose the columns (that
+  // is, the transposed rows) and transpose the results (so that they go back
+  // to normal/row positions).
+ const int stride = pitch >> 1;
+ int pass;
+ // We need an intermediate buffer between passes.
+ int16_t intermediate[256];
+ int16_t *in = input;
+ int16_t *out = intermediate;
+ // Do the two transform/transpose passes
+ for (pass = 0; pass < 2; ++pass) {
+ /*canbe16*/ int step1[8];
+ /*canbe16*/ int step2[8];
+ /*canbe16*/ int step3[8];
+ /*canbe16*/ int input[8];
+ /*needs32*/ int temp1, temp2;
+ int i;
+ for (i = 0; i < 16; i++) {
+ if (0 == pass) {
+ // Calculate input for the first 8 results.
+ input[0] = (in[0 * stride] + in[15 * stride]) << 2;
+ input[1] = (in[1 * stride] + in[14 * stride]) << 2;
+ input[2] = (in[2 * stride] + in[13 * stride]) << 2;
+ input[3] = (in[3 * stride] + in[12 * stride]) << 2;
+ input[4] = (in[4 * stride] + in[11 * stride]) << 2;
+ input[5] = (in[5 * stride] + in[10 * stride]) << 2;
+ input[6] = (in[6 * stride] + in[ 9 * stride]) << 2;
+ input[7] = (in[7 * stride] + in[ 8 * stride]) << 2;
+ // Calculate input for the next 8 results.
+ step1[0] = (in[7 * stride] - in[ 8 * stride]) << 2;
+ step1[1] = (in[6 * stride] - in[ 9 * stride]) << 2;
+ step1[2] = (in[5 * stride] - in[10 * stride]) << 2;
+ step1[3] = (in[4 * stride] - in[11 * stride]) << 2;
+ step1[4] = (in[3 * stride] - in[12 * stride]) << 2;
+ step1[5] = (in[2 * stride] - in[13 * stride]) << 2;
+ step1[6] = (in[1 * stride] - in[14 * stride]) << 2;
+ step1[7] = (in[0 * stride] - in[15 * stride]) << 2;
+ } else {
+ // Calculate input for the first 8 results.
+ input[0] = ((in[0 * 16] + 1) >> 2) + ((in[15 * 16] + 1) >> 2);
+ input[1] = ((in[1 * 16] + 1) >> 2) + ((in[14 * 16] + 1) >> 2);
+ input[2] = ((in[2 * 16] + 1) >> 2) + ((in[13 * 16] + 1) >> 2);
+ input[3] = ((in[3 * 16] + 1) >> 2) + ((in[12 * 16] + 1) >> 2);
+ input[4] = ((in[4 * 16] + 1) >> 2) + ((in[11 * 16] + 1) >> 2);
+ input[5] = ((in[5 * 16] + 1) >> 2) + ((in[10 * 16] + 1) >> 2);
+ input[6] = ((in[6 * 16] + 1) >> 2) + ((in[ 9 * 16] + 1) >> 2);
+ input[7] = ((in[7 * 16] + 1) >> 2) + ((in[ 8 * 16] + 1) >> 2);
+ // Calculate input for the next 8 results.
+ step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[ 8 * 16] + 1) >> 2);
+ step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[ 9 * 16] + 1) >> 2);
+ step1[2] = ((in[5 * 16] + 1) >> 2) - ((in[10 * 16] + 1) >> 2);
+ step1[3] = ((in[4 * 16] + 1) >> 2) - ((in[11 * 16] + 1) >> 2);
+ step1[4] = ((in[3 * 16] + 1) >> 2) - ((in[12 * 16] + 1) >> 2);
+ step1[5] = ((in[2 * 16] + 1) >> 2) - ((in[13 * 16] + 1) >> 2);
+ step1[6] = ((in[1 * 16] + 1) >> 2) - ((in[14 * 16] + 1) >> 2);
+ step1[7] = ((in[0 * 16] + 1) >> 2) - ((in[15 * 16] + 1) >> 2);
+ }
+ // Work on the first eight values; fdct8_1d(input, even_results);
+ {
+ /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7;
+ /*needs32*/ int t0, t1, t2, t3;
+ /*canbe16*/ int x0, x1, x2, x3;
+
+ // stage 1
+ s0 = input[0] + input[7];
+ s1 = input[1] + input[6];
+ s2 = input[2] + input[5];
+ s3 = input[3] + input[4];
+ s4 = input[3] - input[4];
+ s5 = input[2] - input[5];
+ s6 = input[1] - input[6];
+ s7 = input[0] - input[7];
+
+ // fdct4_1d(step, step);
+ x0 = s0 + s3;
+ x1 = s1 + s2;
+ x2 = s1 - s2;
+ x3 = s0 - s3;
+ t0 = (x0 + x1) * cospi_16_64;
+ t1 = (x0 - x1) * cospi_16_64;
+ t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
+ t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
+ out[0] = dct_const_round_shift(t0);
+ out[4] = dct_const_round_shift(t2);
+ out[8] = dct_const_round_shift(t1);
+ out[12] = dct_const_round_shift(t3);
+
+ // Stage 2
+ t0 = (s6 - s5) * cospi_16_64;
+ t1 = (s6 + s5) * cospi_16_64;
+ t2 = dct_const_round_shift(t0);
+ t3 = dct_const_round_shift(t1);
+
+ // Stage 3
+ x0 = s4 + t2;
+ x1 = s4 - t2;
+ x2 = s7 - t3;
+ x3 = s7 + t3;
+
+ // Stage 4
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ out[2] = dct_const_round_shift(t0);
+ out[6] = dct_const_round_shift(t2);
+ out[10] = dct_const_round_shift(t1);
+ out[14] = dct_const_round_shift(t3);
+ }
+ // Work on the next eight values; step1 -> odd_results
+ {
+ // step 2
+ temp1 = (step1[5] - step1[2]) * cospi_16_64;
+ temp2 = (step1[4] - step1[3]) * cospi_16_64;
+ step2[2] = dct_const_round_shift(temp1);
+ step2[3] = dct_const_round_shift(temp2);
+ temp1 = (step1[4] + step1[3]) * cospi_16_64;
+ temp2 = (step1[5] + step1[2]) * cospi_16_64;
+ step2[4] = dct_const_round_shift(temp1);
+ step2[5] = dct_const_round_shift(temp2);
+ // step 3
+ step3[0] = step1[0] + step2[3];
+ step3[1] = step1[1] + step2[2];
+ step3[2] = step1[1] - step2[2];
+ step3[3] = step1[0] - step2[3];
+ step3[4] = step1[7] - step2[4];
+ step3[5] = step1[6] - step2[5];
+ step3[6] = step1[6] + step2[5];
+ step3[7] = step1[7] + step2[4];
+ // step 4
+ temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
+ temp2 = step3[2] * -cospi_24_64 - step3[5] * cospi_8_64;
+ step2[1] = dct_const_round_shift(temp1);
+ step2[2] = dct_const_round_shift(temp2);
+ temp1 = step3[2] * -cospi_8_64 + step3[5] * cospi_24_64;
+ temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
+ step2[5] = dct_const_round_shift(temp1);
+ step2[6] = dct_const_round_shift(temp2);
+ // step 5
+ step1[0] = step3[0] + step2[1];
+ step1[1] = step3[0] - step2[1];
+ step1[2] = step3[3] - step2[2];
+ step1[3] = step3[3] + step2[2];
+ step1[4] = step3[4] + step2[5];
+ step1[5] = step3[4] - step2[5];
+ step1[6] = step3[7] - step2[6];
+ step1[7] = step3[7] + step2[6];
+ // step 6
+ temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
+ temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
+ out[1] = dct_const_round_shift(temp1);
+ out[9] = dct_const_round_shift(temp2);
+ temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
+ temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
+ out[5] = dct_const_round_shift(temp1);
+ out[13] = dct_const_round_shift(temp2);
+ temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
+ temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
+ out[3] = dct_const_round_shift(temp1);
+ out[11] = dct_const_round_shift(temp2);
+ temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
+ temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
+ out[7] = dct_const_round_shift(temp1);
+ out[15] = dct_const_round_shift(temp2);
+ }
+ // Do next column (which is a transposed row in second/horizontal pass)
+ in++;
+ out += 16;
+ }
+    // Set up in/out for the next pass.
+ in = intermediate;
+ out = output;
+ }
+}
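+
+// Note the asymmetric scaling between passes: the first pass amplifies inputs
+// by 4 (<< 2) for precision, while the second pre-rounds each intermediate
+// with (x + 1) >> 2, which appears intended to keep every intermediate within
+// int16_t range despite the sqrt(8) per-pass gain.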
+
+static void fadst8_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7;
+
+ int x0 = input[7];
+ int x1 = input[0];
+ int x2 = input[5];
+ int x3 = input[2];
+ int x4 = input[3];
+ int x5 = input[4];
+ int x6 = input[1];
+ int x7 = input[6];
+
+ // stage 1
+ s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+ s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+ s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+ s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+ s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+
+ x0 = dct_const_round_shift(s0 + s4);
+ x1 = dct_const_round_shift(s1 + s5);
+ x2 = dct_const_round_shift(s2 + s6);
+ x3 = dct_const_round_shift(s3 + s7);
+ x4 = dct_const_round_shift(s0 - s4);
+ x5 = dct_const_round_shift(s1 - s5);
+ x6 = dct_const_round_shift(s2 - s6);
+ x7 = dct_const_round_shift(s3 - s7);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ s6 = - cospi_24_64 * x6 + cospi_8_64 * x7;
+ s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+
+ x0 = s0 + s2;
+ x1 = s1 + s3;
+ x2 = s0 - s2;
+ x3 = s1 - s3;
+ x4 = dct_const_round_shift(s4 + s6);
+ x5 = dct_const_round_shift(s5 + s7);
+ x6 = dct_const_round_shift(s4 - s6);
+ x7 = dct_const_round_shift(s5 - s7);
+
+ // stage 3
+ s2 = cospi_16_64 * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (x6 - x7);
+
+ x2 = dct_const_round_shift(s2);
+ x3 = dct_const_round_shift(s3);
+ x6 = dct_const_round_shift(s6);
+ x7 = dct_const_round_shift(s7);
+
+ output[0] = x0;
+ output[1] = - x4;
+ output[2] = x6;
+ output[3] = - x2;
+ output[4] = x3;
+ output[5] = - x7;
+ output[6] = x5;
+ output[7] = - x1;
+}
+
+static const transform_2d FHT_8[] = {
+ { fdct8_1d, fdct8_1d }, // DCT_DCT = 0
+ { fadst8_1d, fdct8_1d }, // ADST_DCT = 1
+ { fdct8_1d, fadst8_1d }, // DCT_ADST = 2
+ { fadst8_1d, fadst8_1d } // ADST_ADST = 3
+};
+
+void vp9_short_fht8x8_c(int16_t *input, int16_t *output,
+ int pitch, TX_TYPE tx_type) {
+ int16_t out[64];
+ int16_t *outptr = &out[0];
+ int i, j;
+ int16_t temp_in[8], temp_out[8];
+ const transform_2d ht = FHT_8[tx_type];
+
+ // Columns
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = input[j * pitch + i] << 2;
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+ outptr[j * 8 + i] = temp_out[j];
+ }
+
+ // Rows
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = out[j + i * 8];
+ ht.rows(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+ output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
+ }
+}
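+
+// (x + (x < 0)) >> 1 above is a divide-by-two that rounds toward zero,
+// matching the final_output[j] /= 2 used by vp9_short_fdct8x8_c.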
+
+/* 4-point reversible, orthonormal Walsh-Hadamard in 3.5 adds, 0.5 shifts per
+ pixel. */
+void vp9_short_walsh4x4_c(short *input, short *output, int pitch) {
+ int i;
+ int a1, b1, c1, d1, e1;
+ short *ip = input;
+ short *op = output;
+ int pitch_short = pitch >> 1;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0 * pitch_short];
+ b1 = ip[1 * pitch_short];
+ c1 = ip[2 * pitch_short];
+ d1 = ip[3 * pitch_short];
+
+ a1 += b1;
+ d1 = d1 - c1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= c1;
+ d1 += b1;
+ op[0] = a1;
+ op[4] = c1;
+ op[8] = d1;
+ op[12] = b1;
+
+ ip++;
+ op++;
+ }
+ ip = output;
+ op = output;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0];
+ b1 = ip[1];
+ c1 = ip[2];
+ d1 = ip[3];
+
+ a1 += b1;
+ d1 -= c1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= c1;
+ d1 += b1;
+ op[0] = a1 << WHT_UPSCALE_FACTOR;
+ op[1] = c1 << WHT_UPSCALE_FACTOR;
+ op[2] = d1 << WHT_UPSCALE_FACTOR;
+ op[3] = b1 << WHT_UPSCALE_FACTOR;
+
+ ip += 4;
+ op += 4;
+ }
+}
+
+void vp9_short_walsh8x4_c(short *input, short *output, int pitch) {
+ vp9_short_walsh4x4_c(input, output, pitch);
+ vp9_short_walsh4x4_c(input + 4, output + 16, pitch);
+}
+
+
+// Rewritten to use the same algorithm as the others.
+static void fdct16_1d(int16_t in[16], int16_t out[16]) {
+ /*canbe16*/ int step1[8];
+ /*canbe16*/ int step2[8];
+ /*canbe16*/ int step3[8];
+ /*canbe16*/ int input[8];
+ /*needs32*/ int temp1, temp2;
+
+ // step 1
+ input[0] = in[0] + in[15];
+ input[1] = in[1] + in[14];
+ input[2] = in[2] + in[13];
+ input[3] = in[3] + in[12];
+ input[4] = in[4] + in[11];
+ input[5] = in[5] + in[10];
+ input[6] = in[6] + in[ 9];
+ input[7] = in[7] + in[ 8];
+
+ step1[0] = in[7] - in[ 8];
+ step1[1] = in[6] - in[ 9];
+ step1[2] = in[5] - in[10];
+ step1[3] = in[4] - in[11];
+ step1[4] = in[3] - in[12];
+ step1[5] = in[2] - in[13];
+ step1[6] = in[1] - in[14];
+ step1[7] = in[0] - in[15];
+
+ // fdct8_1d(step, step);
+ {
+ /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7;
+ /*needs32*/ int t0, t1, t2, t3;
+ /*canbe16*/ int x0, x1, x2, x3;
+
+ // stage 1
+ s0 = input[0] + input[7];
+ s1 = input[1] + input[6];
+ s2 = input[2] + input[5];
+ s3 = input[3] + input[4];
+ s4 = input[3] - input[4];
+ s5 = input[2] - input[5];
+ s6 = input[1] - input[6];
+ s7 = input[0] - input[7];
+
+ // fdct4_1d(step, step);
+ x0 = s0 + s3;
+ x1 = s1 + s2;
+ x2 = s1 - s2;
+ x3 = s0 - s3;
+ t0 = (x0 + x1) * cospi_16_64;
+ t1 = (x0 - x1) * cospi_16_64;
+ t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
+ t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
+ out[0] = dct_const_round_shift(t0);
+ out[4] = dct_const_round_shift(t2);
+ out[8] = dct_const_round_shift(t1);
+ out[12] = dct_const_round_shift(t3);
+
+ // Stage 2
+ t0 = (s6 - s5) * cospi_16_64;
+ t1 = (s6 + s5) * cospi_16_64;
+ t2 = dct_const_round_shift(t0);
+ t3 = dct_const_round_shift(t1);
+
+ // Stage 3
+ x0 = s4 + t2;
+ x1 = s4 - t2;
+ x2 = s7 - t3;
+ x3 = s7 + t3;
+
+ // Stage 4
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ out[2] = dct_const_round_shift(t0);
+ out[6] = dct_const_round_shift(t2);
+ out[10] = dct_const_round_shift(t1);
+ out[14] = dct_const_round_shift(t3);
+ }
+
+ // step 2
+ temp1 = (step1[5] - step1[2]) * cospi_16_64;
+ temp2 = (step1[4] - step1[3]) * cospi_16_64;
+ step2[2] = dct_const_round_shift(temp1);
+ step2[3] = dct_const_round_shift(temp2);
+ temp1 = (step1[4] + step1[3]) * cospi_16_64;
+ temp2 = (step1[5] + step1[2]) * cospi_16_64;
+ step2[4] = dct_const_round_shift(temp1);
+ step2[5] = dct_const_round_shift(temp2);
+
+ // step 3
+ step3[0] = step1[0] + step2[3];
+ step3[1] = step1[1] + step2[2];
+ step3[2] = step1[1] - step2[2];
+ step3[3] = step1[0] - step2[3];
+ step3[4] = step1[7] - step2[4];
+ step3[5] = step1[6] - step2[5];
+ step3[6] = step1[6] + step2[5];
+ step3[7] = step1[7] + step2[4];
+
+ // step 4
+ temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
+ temp2 = step3[2] * -cospi_24_64 - step3[5] * cospi_8_64;
+ step2[1] = dct_const_round_shift(temp1);
+ step2[2] = dct_const_round_shift(temp2);
+ temp1 = step3[2] * -cospi_8_64 + step3[5] * cospi_24_64;
+ temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
+ step2[5] = dct_const_round_shift(temp1);
+ step2[6] = dct_const_round_shift(temp2);
+
+ // step 5
+ step1[0] = step3[0] + step2[1];
+ step1[1] = step3[0] - step2[1];
+ step1[2] = step3[3] - step2[2];
+ step1[3] = step3[3] + step2[2];
+ step1[4] = step3[4] + step2[5];
+ step1[5] = step3[4] - step2[5];
+ step1[6] = step3[7] - step2[6];
+ step1[7] = step3[7] + step2[6];
+
+ // step 6
+ temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
+ temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
+ out[1] = dct_const_round_shift(temp1);
+ out[9] = dct_const_round_shift(temp2);
+
+ temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
+ temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
+ out[5] = dct_const_round_shift(temp1);
+ out[13] = dct_const_round_shift(temp2);
+
+ temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
+ temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
+ out[3] = dct_const_round_shift(temp1);
+ out[11] = dct_const_round_shift(temp2);
+
+ temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
+ temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
+ out[7] = dct_const_round_shift(temp1);
+ out[15] = dct_const_round_shift(temp2);
+}
+
+void fadst16_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
+
+ int x0 = input[15];
+ int x1 = input[0];
+ int x2 = input[13];
+ int x3 = input[2];
+ int x4 = input[11];
+ int x5 = input[4];
+ int x6 = input[9];
+ int x7 = input[6];
+ int x8 = input[7];
+ int x9 = input[8];
+ int x10 = input[5];
+ int x11 = input[10];
+ int x12 = input[3];
+ int x13 = input[12];
+ int x14 = input[1];
+ int x15 = input[14];
+
+ // stage 1
+ s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+ s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
+ s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+ s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
+ s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+ s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
+ s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
+ s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
+ s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
+ s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
+ s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
+ s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
+ s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
+ s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+ s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
+ s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+
+ x0 = dct_const_round_shift(s0 + s8);
+ x1 = dct_const_round_shift(s1 + s9);
+ x2 = dct_const_round_shift(s2 + s10);
+ x3 = dct_const_round_shift(s3 + s11);
+ x4 = dct_const_round_shift(s4 + s12);
+ x5 = dct_const_round_shift(s5 + s13);
+ x6 = dct_const_round_shift(s6 + s14);
+ x7 = dct_const_round_shift(s7 + s15);
+ x8 = dct_const_round_shift(s0 - s8);
+ x9 = dct_const_round_shift(s1 - s9);
+ x10 = dct_const_round_shift(s2 - s10);
+ x11 = dct_const_round_shift(s3 - s11);
+ x12 = dct_const_round_shift(s4 - s12);
+ x13 = dct_const_round_shift(s5 - s13);
+ x14 = dct_const_round_shift(s6 - s14);
+ x15 = dct_const_round_shift(s7 - s15);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4;
+ s5 = x5;
+ s6 = x6;
+ s7 = x7;
+ s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
+ s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
+ s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+
+ x0 = s0 + s4;
+ x1 = s1 + s5;
+ x2 = s2 + s6;
+ x3 = s3 + s7;
+ x4 = s0 - s4;
+ x5 = s1 - s5;
+ x6 = s2 - s6;
+ x7 = s3 - s7;
+ x8 = dct_const_round_shift(s8 + s12);
+ x9 = dct_const_round_shift(s9 + s13);
+ x10 = dct_const_round_shift(s10 + s14);
+ x11 = dct_const_round_shift(s11 + s15);
+ x12 = dct_const_round_shift(s8 - s12);
+ x13 = dct_const_round_shift(s9 - s13);
+ x14 = dct_const_round_shift(s10 - s14);
+ x15 = dct_const_round_shift(s11 - s15);
+
+ // stage 3
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+ s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
+ s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ s8 = x8;
+ s9 = x9;
+ s10 = x10;
+ s11 = x11;
+ s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+ s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
+ s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+
+ x0 = s0 + s2;
+ x1 = s1 + s3;
+ x2 = s0 - s2;
+ x3 = s1 - s3;
+ x4 = dct_const_round_shift(s4 + s6);
+ x5 = dct_const_round_shift(s5 + s7);
+ x6 = dct_const_round_shift(s4 - s6);
+ x7 = dct_const_round_shift(s5 - s7);
+ x8 = s8 + s10;
+ x9 = s9 + s11;
+ x10 = s8 - s10;
+ x11 = s9 - s11;
+ x12 = dct_const_round_shift(s12 + s14);
+ x13 = dct_const_round_shift(s13 + s15);
+ x14 = dct_const_round_shift(s12 - s14);
+ x15 = dct_const_round_shift(s13 - s15);
+
+ // stage 4
+ s2 = (- cospi_16_64) * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (- x6 + x7);
+ s10 = cospi_16_64 * (x10 + x11);
+ s11 = cospi_16_64 * (- x10 + x11);
+ s14 = (- cospi_16_64) * (x14 + x15);
+ s15 = cospi_16_64 * (x14 - x15);
+
+ x2 = dct_const_round_shift(s2);
+ x3 = dct_const_round_shift(s3);
+ x6 = dct_const_round_shift(s6);
+ x7 = dct_const_round_shift(s7);
+ x10 = dct_const_round_shift(s10);
+ x11 = dct_const_round_shift(s11);
+ x14 = dct_const_round_shift(s14);
+ x15 = dct_const_round_shift(s15);
+
+ output[0] = x0;
+ output[1] = - x8;
+ output[2] = x12;
+ output[3] = - x4;
+ output[4] = x6;
+ output[5] = x14;
+ output[6] = x10;
+ output[7] = x2;
+ output[8] = x3;
+ output[9] = x11;
+ output[10] = x15;
+ output[11] = x7;
+ output[12] = x5;
+ output[13] = - x13;
+ output[14] = x9;
+ output[15] = - x1;
+}
+
+static const transform_2d FHT_16[] = {
+ { fdct16_1d, fdct16_1d }, // DCT_DCT = 0
+ { fadst16_1d, fdct16_1d }, // ADST_DCT = 1
+ { fdct16_1d, fadst16_1d }, // DCT_ADST = 2
+ { fadst16_1d, fadst16_1d } // ADST_ADST = 3
+};
+
+void vp9_short_fht16x16_c(int16_t *input, int16_t *output,
+ int pitch, TX_TYPE tx_type) {
+ int16_t out[256];
+ int16_t *outptr = &out[0];
+ int i, j;
+ int16_t temp_in[16], temp_out[16];
+ const transform_2d ht = FHT_16[tx_type];
+
+ // Columns
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = input[j * pitch + i] << 2;
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+ outptr[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
+// outptr[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+ }
+
+ // Rows
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = out[j + i * 16];
+ ht.rows(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+ output[j + i * 16] = temp_out[j];
+ }
+}
+
+static INLINE int dct_32_round(int input) {
+ int rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
+ assert(-131072 <= rv && rv <= 131071);
+ return rv;
+}
+
+static INLINE int half_round_shift(int input) {
+ int rv = (input + 1 + (input < 0)) >> 2;
+ return rv;
+}
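+
+// dct_32_round() asserts that its result fits in a signed 18-bit range.
+// half_round_shift(), despite its name, divides by 4, rounding halves toward
+// zero symmetrically for positive and negative inputs.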
+
+static void dct32_1d(int *input, int *output, int round) {
+ int step[32];
+ // Stage 1
+ step[0] = input[0] + input[(32 - 1)];
+ step[1] = input[1] + input[(32 - 2)];
+ step[2] = input[2] + input[(32 - 3)];
+ step[3] = input[3] + input[(32 - 4)];
+ step[4] = input[4] + input[(32 - 5)];
+ step[5] = input[5] + input[(32 - 6)];
+ step[6] = input[6] + input[(32 - 7)];
+ step[7] = input[7] + input[(32 - 8)];
+ step[8] = input[8] + input[(32 - 9)];
+ step[9] = input[9] + input[(32 - 10)];
+ step[10] = input[10] + input[(32 - 11)];
+ step[11] = input[11] + input[(32 - 12)];
+ step[12] = input[12] + input[(32 - 13)];
+ step[13] = input[13] + input[(32 - 14)];
+ step[14] = input[14] + input[(32 - 15)];
+ step[15] = input[15] + input[(32 - 16)];
+ step[16] = -input[16] + input[(32 - 17)];
+ step[17] = -input[17] + input[(32 - 18)];
+ step[18] = -input[18] + input[(32 - 19)];
+ step[19] = -input[19] + input[(32 - 20)];
+ step[20] = -input[20] + input[(32 - 21)];
+ step[21] = -input[21] + input[(32 - 22)];
+ step[22] = -input[22] + input[(32 - 23)];
+ step[23] = -input[23] + input[(32 - 24)];
+ step[24] = -input[24] + input[(32 - 25)];
+ step[25] = -input[25] + input[(32 - 26)];
+ step[26] = -input[26] + input[(32 - 27)];
+ step[27] = -input[27] + input[(32 - 28)];
+ step[28] = -input[28] + input[(32 - 29)];
+ step[29] = -input[29] + input[(32 - 30)];
+ step[30] = -input[30] + input[(32 - 31)];
+ step[31] = -input[31] + input[(32 - 32)];
+
+ // Stage 2
+ output[0] = step[0] + step[16 - 1];
+ output[1] = step[1] + step[16 - 2];
+ output[2] = step[2] + step[16 - 3];
+ output[3] = step[3] + step[16 - 4];
+ output[4] = step[4] + step[16 - 5];
+ output[5] = step[5] + step[16 - 6];
+ output[6] = step[6] + step[16 - 7];
+ output[7] = step[7] + step[16 - 8];
+ output[8] = -step[8] + step[16 - 9];
+ output[9] = -step[9] + step[16 - 10];
+ output[10] = -step[10] + step[16 - 11];
+ output[11] = -step[11] + step[16 - 12];
+ output[12] = -step[12] + step[16 - 13];
+ output[13] = -step[13] + step[16 - 14];
+ output[14] = -step[14] + step[16 - 15];
+ output[15] = -step[15] + step[16 - 16];
+
+ output[16] = step[16];
+ output[17] = step[17];
+ output[18] = step[18];
+ output[19] = step[19];
+
+ output[20] = dct_32_round((-step[20] + step[27]) * cospi_16_64);
+ output[21] = dct_32_round((-step[21] + step[26]) * cospi_16_64);
+ output[22] = dct_32_round((-step[22] + step[25]) * cospi_16_64);
+ output[23] = dct_32_round((-step[23] + step[24]) * cospi_16_64);
+
+ output[24] = dct_32_round((step[24] + step[23]) * cospi_16_64);
+ output[25] = dct_32_round((step[25] + step[22]) * cospi_16_64);
+ output[26] = dct_32_round((step[26] + step[21]) * cospi_16_64);
+ output[27] = dct_32_round((step[27] + step[20]) * cospi_16_64);
+
+ output[28] = step[28];
+ output[29] = step[29];
+ output[30] = step[30];
+ output[31] = step[31];
+
+  // Damp the magnitude by 4 so that the intermediate values stay within
+  // the range of 16 bits.
+ if (round) {
+ output[0] = half_round_shift(output[0]);
+ output[1] = half_round_shift(output[1]);
+ output[2] = half_round_shift(output[2]);
+ output[3] = half_round_shift(output[3]);
+ output[4] = half_round_shift(output[4]);
+ output[5] = half_round_shift(output[5]);
+ output[6] = half_round_shift(output[6]);
+ output[7] = half_round_shift(output[7]);
+ output[8] = half_round_shift(output[8]);
+ output[9] = half_round_shift(output[9]);
+ output[10] = half_round_shift(output[10]);
+ output[11] = half_round_shift(output[11]);
+ output[12] = half_round_shift(output[12]);
+ output[13] = half_round_shift(output[13]);
+ output[14] = half_round_shift(output[14]);
+ output[15] = half_round_shift(output[15]);
+
+ output[16] = half_round_shift(output[16]);
+ output[17] = half_round_shift(output[17]);
+ output[18] = half_round_shift(output[18]);
+ output[19] = half_round_shift(output[19]);
+ output[20] = half_round_shift(output[20]);
+ output[21] = half_round_shift(output[21]);
+ output[22] = half_round_shift(output[22]);
+ output[23] = half_round_shift(output[23]);
+ output[24] = half_round_shift(output[24]);
+ output[25] = half_round_shift(output[25]);
+ output[26] = half_round_shift(output[26]);
+ output[27] = half_round_shift(output[27]);
+ output[28] = half_round_shift(output[28]);
+ output[29] = half_round_shift(output[29]);
+ output[30] = half_round_shift(output[30]);
+ output[31] = half_round_shift(output[31]);
+ }
+
+ // Stage 3
+ step[0] = output[0] + output[(8 - 1)];
+ step[1] = output[1] + output[(8 - 2)];
+ step[2] = output[2] + output[(8 - 3)];
+ step[3] = output[3] + output[(8 - 4)];
+ step[4] = -output[4] + output[(8 - 5)];
+ step[5] = -output[5] + output[(8 - 6)];
+ step[6] = -output[6] + output[(8 - 7)];
+ step[7] = -output[7] + output[(8 - 8)];
+ step[8] = output[8];
+ step[9] = output[9];
+ step[10] = dct_32_round((-output[10] + output[13]) * cospi_16_64);
+ step[11] = dct_32_round((-output[11] + output[12]) * cospi_16_64);
+ step[12] = dct_32_round((output[12] + output[11]) * cospi_16_64);
+ step[13] = dct_32_round((output[13] + output[10]) * cospi_16_64);
+ step[14] = output[14];
+ step[15] = output[15];
+
+ step[16] = output[16] + output[23];
+ step[17] = output[17] + output[22];
+ step[18] = output[18] + output[21];
+ step[19] = output[19] + output[20];
+ step[20] = -output[20] + output[19];
+ step[21] = -output[21] + output[18];
+ step[22] = -output[22] + output[17];
+ step[23] = -output[23] + output[16];
+ step[24] = -output[24] + output[31];
+ step[25] = -output[25] + output[30];
+ step[26] = -output[26] + output[29];
+ step[27] = -output[27] + output[28];
+ step[28] = output[28] + output[27];
+ step[29] = output[29] + output[26];
+ step[30] = output[30] + output[25];
+ step[31] = output[31] + output[24];
+
+ // Stage 4
+ output[0] = step[0] + step[3];
+ output[1] = step[1] + step[2];
+ output[2] = -step[2] + step[1];
+ output[3] = -step[3] + step[0];
+ output[4] = step[4];
+ output[5] = dct_32_round((-step[5] + step[6]) * cospi_16_64);
+ output[6] = dct_32_round((step[6] + step[5]) * cospi_16_64);
+ output[7] = step[7];
+ output[8] = step[8] + step[11];
+ output[9] = step[9] + step[10];
+ output[10] = -step[10] + step[9];
+ output[11] = -step[11] + step[8];
+ output[12] = -step[12] + step[15];
+ output[13] = -step[13] + step[14];
+ output[14] = step[14] + step[13];
+ output[15] = step[15] + step[12];
+
+ output[16] = step[16];
+ output[17] = step[17];
+ output[18] = dct_32_round(step[18] * -cospi_8_64 + step[29] * cospi_24_64);
+ output[19] = dct_32_round(step[19] * -cospi_8_64 + step[28] * cospi_24_64);
+ output[20] = dct_32_round(step[20] * -cospi_24_64 + step[27] * -cospi_8_64);
+ output[21] = dct_32_round(step[21] * -cospi_24_64 + step[26] * -cospi_8_64);
+ output[22] = step[22];
+ output[23] = step[23];
+ output[24] = step[24];
+ output[25] = step[25];
+ output[26] = dct_32_round(step[26] * cospi_24_64 + step[21] * -cospi_8_64);
+ output[27] = dct_32_round(step[27] * cospi_24_64 + step[20] * -cospi_8_64);
+ output[28] = dct_32_round(step[28] * cospi_8_64 + step[19] * cospi_24_64);
+ output[29] = dct_32_round(step[29] * cospi_8_64 + step[18] * cospi_24_64);
+ output[30] = step[30];
+ output[31] = step[31];
+
+ // Stage 5
+ step[0] = dct_32_round((output[0] + output[1]) * cospi_16_64);
+ step[1] = dct_32_round((-output[1] + output[0]) * cospi_16_64);
+ step[2] = dct_32_round(output[2] * cospi_24_64 + output[3] * cospi_8_64);
+ step[3] = dct_32_round(output[3] * cospi_24_64 - output[2] * cospi_8_64);
+ step[4] = output[4] + output[5];
+ step[5] = -output[5] + output[4];
+ step[6] = -output[6] + output[7];
+ step[7] = output[7] + output[6];
+ step[8] = output[8];
+ step[9] = dct_32_round(output[9] * -cospi_8_64 + output[14] * cospi_24_64);
+ step[10] = dct_32_round(output[10] * -cospi_24_64 + output[13] * -cospi_8_64);
+ step[11] = output[11];
+ step[12] = output[12];
+ step[13] = dct_32_round(output[13] * cospi_24_64 + output[10] * -cospi_8_64);
+ step[14] = dct_32_round(output[14] * cospi_8_64 + output[9] * cospi_24_64);
+ step[15] = output[15];
+
+ step[16] = output[16] + output[19];
+ step[17] = output[17] + output[18];
+ step[18] = -output[18] + output[17];
+ step[19] = -output[19] + output[16];
+ step[20] = -output[20] + output[23];
+ step[21] = -output[21] + output[22];
+ step[22] = output[22] + output[21];
+ step[23] = output[23] + output[20];
+ step[24] = output[24] + output[27];
+ step[25] = output[25] + output[26];
+ step[26] = -output[26] + output[25];
+ step[27] = -output[27] + output[24];
+ step[28] = -output[28] + output[31];
+ step[29] = -output[29] + output[30];
+ step[30] = output[30] + output[29];
+ step[31] = output[31] + output[28];
+
+ // Stage 6
+ output[0] = step[0];
+ output[1] = step[1];
+ output[2] = step[2];
+ output[3] = step[3];
+ output[4] = dct_32_round(step[4] * cospi_28_64 + step[7] * cospi_4_64);
+ output[5] = dct_32_round(step[5] * cospi_12_64 + step[6] * cospi_20_64);
+ output[6] = dct_32_round(step[6] * cospi_12_64 + step[5] * -cospi_20_64);
+ output[7] = dct_32_round(step[7] * cospi_28_64 + step[4] * -cospi_4_64);
+ output[8] = step[8] + step[9];
+ output[9] = -step[9] + step[8];
+ output[10] = -step[10] + step[11];
+ output[11] = step[11] + step[10];
+ output[12] = step[12] + step[13];
+ output[13] = -step[13] + step[12];
+ output[14] = -step[14] + step[15];
+ output[15] = step[15] + step[14];
+
+ output[16] = step[16];
+ output[17] = dct_32_round(step[17] * -cospi_4_64 + step[30] * cospi_28_64);
+ output[18] = dct_32_round(step[18] * -cospi_28_64 + step[29] * -cospi_4_64);
+ output[19] = step[19];
+ output[20] = step[20];
+ output[21] = dct_32_round(step[21] * -cospi_20_64 + step[26] * cospi_12_64);
+ output[22] = dct_32_round(step[22] * -cospi_12_64 + step[25] * -cospi_20_64);
+ output[23] = step[23];
+ output[24] = step[24];
+ output[25] = dct_32_round(step[25] * cospi_12_64 + step[22] * -cospi_20_64);
+ output[26] = dct_32_round(step[26] * cospi_20_64 + step[21] * cospi_12_64);
+ output[27] = step[27];
+ output[28] = step[28];
+ output[29] = dct_32_round(step[29] * cospi_28_64 + step[18] * -cospi_4_64);
+ output[30] = dct_32_round(step[30] * cospi_4_64 + step[17] * cospi_28_64);
+ output[31] = step[31];
+
+ // Stage 7
+ step[0] = output[0];
+ step[1] = output[1];
+ step[2] = output[2];
+ step[3] = output[3];
+ step[4] = output[4];
+ step[5] = output[5];
+ step[6] = output[6];
+ step[7] = output[7];
+ step[8] = dct_32_round(output[8] * cospi_30_64 + output[15] * cospi_2_64);
+ step[9] = dct_32_round(output[9] * cospi_14_64 + output[14] * cospi_18_64);
+ step[10] = dct_32_round(output[10] * cospi_22_64 + output[13] * cospi_10_64);
+ step[11] = dct_32_round(output[11] * cospi_6_64 + output[12] * cospi_26_64);
+ step[12] = dct_32_round(output[12] * cospi_6_64 + output[11] * -cospi_26_64);
+ step[13] = dct_32_round(output[13] * cospi_22_64 + output[10] * -cospi_10_64);
+ step[14] = dct_32_round(output[14] * cospi_14_64 + output[9] * -cospi_18_64);
+ step[15] = dct_32_round(output[15] * cospi_30_64 + output[8] * -cospi_2_64);
+
+ step[16] = output[16] + output[17];
+ step[17] = -output[17] + output[16];
+ step[18] = -output[18] + output[19];
+ step[19] = output[19] + output[18];
+ step[20] = output[20] + output[21];
+ step[21] = -output[21] + output[20];
+ step[22] = -output[22] + output[23];
+ step[23] = output[23] + output[22];
+ step[24] = output[24] + output[25];
+ step[25] = -output[25] + output[24];
+ step[26] = -output[26] + output[27];
+ step[27] = output[27] + output[26];
+ step[28] = output[28] + output[29];
+ step[29] = -output[29] + output[28];
+ step[30] = -output[30] + output[31];
+ step[31] = output[31] + output[30];
+
+  // Final stage --- output indices are bit-reversed.
+ output[0] = step[0];
+ output[16] = step[1];
+ output[8] = step[2];
+ output[24] = step[3];
+ output[4] = step[4];
+ output[20] = step[5];
+ output[12] = step[6];
+ output[28] = step[7];
+ output[2] = step[8];
+ output[18] = step[9];
+ output[10] = step[10];
+ output[26] = step[11];
+ output[6] = step[12];
+ output[22] = step[13];
+ output[14] = step[14];
+ output[30] = step[15];
+
+ output[1] = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64);
+ output[17] = dct_32_round(step[17] * cospi_15_64 + step[30] * cospi_17_64);
+ output[9] = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64);
+ output[25] = dct_32_round(step[19] * cospi_7_64 + step[28] * cospi_25_64);
+ output[5] = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64);
+ output[21] = dct_32_round(step[21] * cospi_11_64 + step[26] * cospi_21_64);
+ output[13] = dct_32_round(step[22] * cospi_19_64 + step[25] * cospi_13_64);
+ output[29] = dct_32_round(step[23] * cospi_3_64 + step[24] * cospi_29_64);
+ output[3] = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64);
+ output[19] = dct_32_round(step[25] * cospi_19_64 + step[22] * -cospi_13_64);
+ output[11] = dct_32_round(step[26] * cospi_11_64 + step[21] * -cospi_21_64);
+ output[27] = dct_32_round(step[27] * cospi_27_64 + step[20] * -cospi_5_64);
+ output[7] = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64);
+ output[23] = dct_32_round(step[29] * cospi_23_64 + step[18] * -cospi_9_64);
+ output[15] = dct_32_round(step[30] * cospi_15_64 + step[17] * -cospi_17_64);
+ output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
+}
+
+void vp9_short_fdct32x32_c(int16_t *input, int16_t *out, int pitch) {
+ int shortpitch = pitch >> 1;
+ int i, j;
+ int output[32 * 32];
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ int temp_in[32], temp_out[32];
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = input[j * shortpitch + i] << 2;
+ dct32_1d(temp_in, temp_out, 0);
+ for (j = 0; j < 32; ++j)
+ output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+ }
+
+ // Rows
+ for (i = 0; i < 32; ++i) {
+ int temp_in[32], temp_out[32];
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = output[j + i * 32];
+ dct32_1d(temp_in, temp_out, 0);
+ for (j = 0; j < 32; ++j)
+ out[j + i * 32] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
+ }
+}
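+
+// The two passes above use opposite rounding biases ((x > 0) in the column
+// pass, (x < 0) in the row pass), presumably so the biases cancel on average
+// across the full 2-D transform.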
+
+// Note that although dct_32_round is used in the dct32_1d computation flow,
+// this 2D fdct32x32 for the rate-distortion optimization loop operates
+// within 16-bit precision.
+void vp9_short_fdct32x32_rd_c(int16_t *input, int16_t *out, int pitch) {
+ int shortpitch = pitch >> 1;
+ int i, j;
+ int output[32 * 32];
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ int temp_in[32], temp_out[32];
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = input[j * shortpitch + i] << 2;
+ dct32_1d(temp_in, temp_out, 0);
+ for (j = 0; j < 32; ++j)
+ // TODO(cd): see quality impact of only doing
+ // output[j * 32 + i] = (temp_out[j] + 1) >> 2;
+ // PS: also change code in vp9/encoder/x86/vp9_dct_sse2.c
+ output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+ }
+
+ // Rows
+ for (i = 0; i < 32; ++i) {
+ int temp_in[32], temp_out[32];
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = output[j + i * 32];
+ dct32_1d(temp_in, temp_out, 1);
+ for (j = 0; j < 32; ++j)
+ out[j + i * 32] = temp_out[j];
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_encodeframe.c b/libvpx/vp9/encoder/vp9_encodeframe.c
new file mode 100644
index 0000000..44ab02d
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodeframe.c
@@ -0,0 +1,2799 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+
+#include "./vp9_rtcd.h"
+#include "./vpx_config.h"
+
+#include "vpx_ports/vpx_timer.h"
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_extend.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_tile_common.h"
+
+#include "vp9/encoder/vp9_encodeframe.h"
+#include "vp9/encoder/vp9_encodeintra.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "vp9/encoder/vp9_tokenize.h"
+
+#define DBG_PRNT_SEGMAP 0
+
+
+static const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES] = {
+ TX_4X4, // ONLY_4X4
+ TX_8X8, // ONLY_8X8
+ TX_16X16, // ONLY_16X16
+ TX_32X32, // ONLY_32X32
+ TX_32X32, // TX_MODE_SELECT
+};
+
+// #define ENC_DEBUG
+#ifdef ENC_DEBUG
+int enc_debug = 0;
+#endif
+
+static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
+ int mi_row, int mi_col, BLOCK_SIZE bsize);
+
+static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
+
+/* activity_avg must be positive, or flat regions could get a zero weight
+ * (infinite lambda), which confounds analysis.
+ * This also avoids the need for divide by zero checks in
+ * vp9_activity_masking().
+ */
+#define ACTIVITY_AVG_MIN (64)
+
+/* Motion vector component magnitude threshold for defining fast motion. */
+#define FAST_MOTION_MV_THRESH (24)
+
+/* This is used as a reference when computing the source variance for the
+ * purposes of activity masking.
+ * Eventually this should be replaced by custom no-reference routines,
+ * which will be faster.
+ */
+static const uint8_t VP9_VAR_OFFS[64] = {
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128
+};
+
+static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bs) {
+ unsigned int var, sse;
+ var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
+ x->plane[0].src.stride,
+ VP9_VAR_OFFS, 0, &sse);
+ return (var + (1 << (num_pels_log2_lookup[bs] - 1))) >>
+ num_pels_log2_lookup[bs];
+}
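+
+// Because the reference block is the constant 128, the variance function
+// measures the source against mid-grey and var reduces to the source block's
+// variance times the pixel count; the rounding shift by
+// num_pels_log2_lookup[bs] converts that to a per-pixel value.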
+
+// Original activity measure from Tim T's code.
+static unsigned int tt_activity_measure(MACROBLOCK *x) {
+ unsigned int act;
+ unsigned int sse;
+ /* TODO: This could also be done over smaller areas (8x8), but that would
+ * require extensive changes elsewhere, as lambda is assumed to be fixed
+ * over an entire MB in most of the code.
+ * Another option is to compute four 8x8 variances, and pick a single
+ * lambda using a non-linear combination (e.g., the smallest, or second
+ * smallest, etc.).
+ */
+ act = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ VP9_VAR_OFFS, 0, &sse);
+ act <<= 4;
+
+ /* If the region is flat, lower the activity some more. */
+ if (act < 8 << 12)
+ act = act < 5 << 12 ? act : 5 << 12;
+
+ return act;
+}
+
+// Stub for alternative experimental activity measures.
+static unsigned int alt_activity_measure(MACROBLOCK *x, int use_dc_pred) {
+ return vp9_encode_intra(x, use_dc_pred);
+}
+DECLARE_ALIGNED(16, static const uint8_t, vp9_64x64_zeros[64*64]) = {0};
+
+// Measure the activity of the current macroblock.
+// What we measure here is TBD, so it is abstracted into this function.
+#define ALT_ACT_MEASURE 1
+static unsigned int mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) {
+ unsigned int mb_activity;
+
+ if (ALT_ACT_MEASURE) {
+ int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
+
+    // Or use an alternative.
+ mb_activity = alt_activity_measure(x, use_dc_pred);
+ } else {
+ // Original activity measure from Tim T's code.
+ mb_activity = tt_activity_measure(x);
+ }
+
+ if (mb_activity < ACTIVITY_AVG_MIN)
+ mb_activity = ACTIVITY_AVG_MIN;
+
+ return mb_activity;
+}
+
+// Calculate an "average" mb activity value for the frame
+#define ACT_MEDIAN 0
+static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
+#if ACT_MEDIAN
+ // Find median: Simple n^2 algorithm for experimentation
+ {
+ unsigned int median;
+ unsigned int i, j;
+ unsigned int *sortlist;
+ unsigned int tmp;
+
+      // Create a list to sort into
+ CHECK_MEM_ERROR(&cpi->common, sortlist, vpx_calloc(sizeof(unsigned int),
+ cpi->common.MBs));
+
+ // Copy map to sort list
+ vpx_memcpy(sortlist, cpi->mb_activity_map,
+ sizeof(unsigned int) * cpi->common.MBs);
+
+ // Ripple each value down to its correct position
+ for (i = 1; i < cpi->common.MBs; i ++) {
+ for (j = i; j > 0; j --) {
+ if (sortlist[j] < sortlist[j - 1]) {
+ // Swap values
+ tmp = sortlist[j - 1];
+ sortlist[j - 1] = sortlist[j];
+ sortlist[j] = tmp;
+ } else
+ break;
+ }
+ }
+
+    // Even number of MBs, so estimate the median as the mean of the two middle values.
+ median = (1 + sortlist[cpi->common.MBs >> 1] +
+ sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;
+
+ cpi->activity_avg = median;
+
+ vpx_free(sortlist);
+ }
+#else
+ // Simple mean for now
+ cpi->activity_avg = (unsigned int) (activity_sum / cpi->common.MBs);
+#endif // ACT_MEDIAN
+
+ if (cpi->activity_avg < ACTIVITY_AVG_MIN)
+ cpi->activity_avg = ACTIVITY_AVG_MIN;
+
+ // Experimental code: return fixed value normalized for several clips
+ if (ALT_ACT_MEASURE)
+ cpi->activity_avg = 100000;
+}
+
+#define USE_ACT_INDEX 0
+#define OUTPUT_NORM_ACT_STATS 0
+
+#if USE_ACT_INDEX
+// Calculate an activity index for each mb
+static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
+ VP9_COMMON *const cm = &cpi->common;
+ int mb_row, mb_col;
+
+ int64_t act;
+ int64_t a;
+ int64_t b;
+
+#if OUTPUT_NORM_ACT_STATS
+ FILE *f = fopen("norm_act.stt", "a");
+ fprintf(f, "\n%12d\n", cpi->activity_avg);
+#endif
+
+ // Reset pointers to start of activity map
+ x->mb_activity_ptr = cpi->mb_activity_map;
+
+ // Calculate normalized mb activity number.
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ // Read activity from the map
+ act = *(x->mb_activity_ptr);
+
+ // Calculate a normalized activity number
+ a = act + 4 * cpi->activity_avg;
+ b = 4 * act + cpi->activity_avg;
+
+ if (b >= a)
+ *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
+ else
+ *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
+
+#if OUTPUT_NORM_ACT_STATS
+ fprintf(f, " %6d", *(x->mb_activity_ptr));
+#endif
+ // Increment activity map pointers
+ x->mb_activity_ptr++;
+ }
+
+#if OUTPUT_NORM_ACT_STATS
+ fprintf(f, "\n");
+#endif
+
+ }
+
+#if OUTPUT_NORM_ACT_STATS
+ fclose(f);
+#endif
+
+}
+#endif // USE_ACT_INDEX
+
+// Loop through all MBs, noting the activity of each, averaging the activity,
+// and calculating a normalized activity for each.
+static void build_activity_map(VP9_COMP *cpi) {
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD *xd = &x->e_mbd;
+ VP9_COMMON * const cm = &cpi->common;
+
+#if ALT_ACT_MEASURE
+ YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+ int recon_yoffset;
+ int recon_y_stride = new_yv12->y_stride;
+#endif
+
+ int mb_row, mb_col;
+ unsigned int mb_activity;
+ int64_t activity_sum = 0;
+
+ x->mb_activity_ptr = cpi->mb_activity_map;
+
+ // for each macroblock row in image
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+#if ALT_ACT_MEASURE
+ // reset above block coeffs
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
+#endif
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+#if ALT_ACT_MEASURE
+ xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
+ xd->left_available = (mb_col != 0);
+ recon_yoffset += 16;
+#endif
+
+ // measure activity
+ mb_activity = mb_activity_measure(x, mb_row, mb_col);
+
+ // Keep frame sum
+ activity_sum += mb_activity;
+
+ // Store MB level activity details.
+ *x->mb_activity_ptr = mb_activity;
+
+ // Increment activity map pointer
+ x->mb_activity_ptr++;
+
+ // adjust to the next column of source macroblocks
+ x->plane[0].src.buf += 16;
+ }
+
+ // adjust to the next row of mbs
+ x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
+ }
+
+ // Calculate an "average" MB activity
+ calc_av_activity(cpi, activity_sum);
+
+#if USE_ACT_INDEX
+  // Calculate an activity index number for each mb
+ calc_activity_index(cpi, x);
+#endif
+
+}
+
+// Macroblock activity masking
+void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
+#if USE_ACT_INDEX
+ x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
+ x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
+ x->errorperbit += (x->errorperbit == 0);
+#else
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
+
+ // Apply the masking to the RD multiplier.
+ a = act + (2 * cpi->activity_avg);
+ b = (2 * act) + cpi->activity_avg;
+
+ x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a);
+ x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
+ x->errorperbit += (x->errorperbit == 0);
+#endif
+
+ // Activity based Zbin adjustment
+ adjust_act_zbin(cpi, x);
+}
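+
+// In the non-index path the multiplier is scaled by b / a ==
+// (2 * act + avg) / (act + 2 * avg): 1.0 when act == avg, approaching 2.0 for
+// very active blocks and 0.5 for very flat ones, making rate relatively more
+// expensive in busy areas and so biasing bits toward flat regions.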
+
+static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
+ BLOCK_SIZE bsize, int output_enabled) {
+ int i, x_idx, y;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *mi = &ctx->mic;
+ MB_MODE_INFO * const mbmi = &xd->this_mi->mbmi;
+ MODE_INFO *mi_addr = xd->this_mi;
+
+ int mb_mode_index = ctx->best_mode_index;
+ const int mis = cm->mode_info_stride;
+ const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ const int mi_height = num_8x8_blocks_high_lookup[bsize];
+
+ assert(mi->mbmi.mode < MB_MODE_COUNT);
+ assert(mb_mode_index < MAX_MODES);
+ assert(mi->mbmi.ref_frame[0] < MAX_REF_FRAMES);
+ assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES);
+ assert(mi->mbmi.sb_type == bsize);
+
+ *mi_addr = *mi;
+
+  // Restore the coding context of the MB to the one that was in place
+  // when the mode was picked for it.
+ for (y = 0; y < mi_height; y++)
+ for (x_idx = 0; x_idx < mi_width; x_idx++)
+ if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
+ && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y)
+ xd->mi_8x8[x_idx + y * mis] = mi_addr;
+
+ // FIXME(rbultje) I'm pretty sure this should go to the end of this block
+ // (i.e. after the output_enabled)
+ if (bsize < BLOCK_32X32) {
+ if (bsize < BLOCK_16X16)
+ ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
+ ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
+ }
+
+ if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
+ *x->partition_info = ctx->partition_info;
+ mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
+ mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
+ }
+
+ x->skip = ctx->skip;
+ if (!output_enabled)
+ return;
+
+ if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+ for (i = 0; i < TX_MODES; i++)
+ cpi->rd_tx_select_diff[i] += ctx->tx_rd_diff[i];
+ }
+
+ if (cm->frame_type == KEY_FRAME) {
+ // Restore the coding modes to that held in the coding context
+ // if (mb_mode == I4X4_PRED)
+ // for (i = 0; i < 16; i++)
+ // {
+ // xd->block[i].bmi.as_mode =
+ // xd->mode_info_context->bmi[i].as_mode;
+ // assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
+ // }
+#if CONFIG_INTERNAL_STATS
+ static const int kf_mode_index[] = {
+ THR_DC /*DC_PRED*/,
+ THR_V_PRED /*V_PRED*/,
+ THR_H_PRED /*H_PRED*/,
+ THR_D45_PRED /*D45_PRED*/,
+ THR_D135_PRED /*D135_PRED*/,
+ THR_D117_PRED /*D117_PRED*/,
+ THR_D153_PRED /*D153_PRED*/,
+ THR_D207_PRED /*D207_PRED*/,
+ THR_D63_PRED /*D63_PRED*/,
+ THR_TM /*TM_PRED*/,
+ THR_B_PRED /*I4X4_PRED*/,
+ };
+ cpi->mode_chosen_counts[kf_mode_index[mi->mbmi.mode]]++;
+#endif
+ } else {
+ // Note how often each mode chosen as best
+ cpi->mode_chosen_counts[mb_mode_index]++;
+ if (is_inter_block(mbmi)
+ && (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV)) {
+ int_mv best_mv, best_second_mv;
+ const MV_REFERENCE_FRAME rf1 = mbmi->ref_frame[0];
+ const MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1];
+ best_mv.as_int = ctx->best_ref_mv.as_int;
+ best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
+ if (mbmi->mode == NEWMV) {
+ best_mv.as_int = mbmi->ref_mvs[rf1][0].as_int;
+ best_second_mv.as_int = mbmi->ref_mvs[rf2][0].as_int;
+ }
+ mbmi->best_mv.as_int = best_mv.as_int;
+ mbmi->best_second_mv.as_int = best_second_mv.as_int;
+ vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
+ }
+
+ if (cm->mcomp_filter_type == SWITCHABLE && is_inter_mode(mbmi->mode)) {
+ const int ctx = vp9_get_pred_context_switchable_interp(xd);
+ ++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
+ }
+
+ cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff;
+ cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
+ cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;
+
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++)
+ cpi->rd_filter_diff[i] += ctx->best_filter_diff[i];
+ }
+}
+
+void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
+ int mb_row, int mb_col) {
+ uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+                        src->alpha_buffer};
+ int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+                   src->alpha_stride};
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mb_row, mb_col,
+ NULL, x->e_mbd.plane[i].subsampling_x,
+ x->e_mbd.plane[i].subsampling_y);
+ }
+}
+
+static void set_offsets(VP9_COMP *cpi, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ MACROBLOCK *const x = &cpi->mb;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi;
+ const int dst_fb_idx = cm->new_fb_idx;
+ const int idx_str = xd->mode_info_stride * mi_row + mi_col;
+ const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ const int mi_height = num_8x8_blocks_high_lookup[bsize];
+ const int mb_row = mi_row >> 1;
+ const int mb_col = mi_col >> 1;
+ const int idx_map = mb_row * cm->mb_cols + mb_col;
+ const struct segmentation *const seg = &cm->seg;
+
+ set_skip_context(cm, xd, mi_row, mi_col);
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+
+ // Activity map pointer
+ x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
+ x->active_ptr = cpi->active_map + idx_map;
+
+ /* pointers to mode info contexts */
+ x->partition_info = x->pi + idx_str;
+
+ xd->mi_8x8 = cm->mi_grid_visible + idx_str;
+ xd->prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
+
+ // Special case: if prev_mi is NULL, the previous mode info context
+ // cannot be used.
+ xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;
+
+ xd->this_mi =
+ xd->mi_8x8[0] = cm->mi + idx_str;
+
+ mbmi = &xd->this_mi->mbmi;
+
+ // Set up destination pointers
+ setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);
+
+ // Set up limit values for MV components
+ // MVs beyond this range do not produce a new/different prediction block
+ x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
+ x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
+ x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
+ x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
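+ // These limits are in integer pixels (MI_SIZE is 8); VP9_INTERP_EXTEND
+ // pads them so the subpel interpolation filter taps can reach beyond the
+ // block while staying inside the extended reference border.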
+
+ // Set up distance of MB to edge of frame in 1/8th pel units
+ assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
+ set_mi_row_col(cm, xd, mi_row, mi_height, mi_col, mi_width);
+
+ /* set up source buffers */
+ vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
+
+ /* R/D setup */
+ x->rddiv = cpi->RDDIV;
+ x->rdmult = cpi->RDMULT;
+
+ /* segment ID */
+ if (seg->enabled) {
+ uint8_t *map = seg->update_map ? cpi->segmentation_map
+ : cm->last_frame_seg_map;
+ mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
+
+ vp9_mb_init_quantizer(cpi, x);
+
+ if (seg->enabled && cpi->seg0_cnt > 0
+ && !vp9_segfeature_active(seg, 0, SEG_LVL_REF_FRAME)
+ && vp9_segfeature_active(seg, 1, SEG_LVL_REF_FRAME)) {
+ cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
+ } else {
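+ // Estimate how far encoding has progressed through the frame, in Q16:
+ // combine this MB's position inside its 64x64 SB (p16/p32) with the
+ // SB-aligned row/column terms and the MBs of preceding tile columns,
+ // then normalise by the frame's total MB count.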
+ const int y = mb_row & ~3;
+ const int x = mb_col & ~3;
+ const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
+ const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
+ const int tile_progress = cm->cur_tile_mi_col_start * cm->mb_rows >> 1;
+ const int mb_cols = (cm->cur_tile_mi_col_end - cm->cur_tile_mi_col_start)
+ >> 1;
+
+ cpi->seg0_progress = ((y * mb_cols + x * 4 + p32 + p16 + tile_progress)
+ << 16) / cm->MBs;
+ }
+
+ x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
+ } else {
+ mbmi->segment_id = 0;
+ x->encode_breakout = cpi->oxcf.encode_breakout;
+ }
+}
+
+static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
+ int *totalrate, int64_t *totaldist,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ // Use the lower precision, but faster, 32x32 fdct for mode selection.
+ x->use_lp32x32fdct = 1;
+
+ if (bsize < BLOCK_8X8) {
+ // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
+ // there is nothing to be done.
+ if (xd->ab_index != 0) {
+ *totalrate = 0;
+ *totaldist = 0;
+ return;
+ }
+ }
+
+ set_offsets(cpi, mi_row, mi_col, bsize);
+ xd->this_mi->mbmi.sb_type = bsize;
+
+ // Set to zero to make sure we do not use the previous encoded frame stats
+ xd->this_mi->mbmi.skip_coeff = 0;
+
+ x->source_variance = get_sby_perpixel_variance(cpi, x, bsize);
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
+ vp9_activity_masking(cpi, x);
+
+ // Find best coding mode & reconstruct the MB so it is available
+ // as a predictor for MBs that follow in the SB
+ if (cm->frame_type == KEY_FRAME)
+ vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx,
+ best_rd);
+ else
+ vp9_rd_pick_inter_mode_sb(cpi, x, mi_row, mi_col, totalrate, totaldist,
+ bsize, ctx, best_rd);
+}
+
+static void update_stats(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *mi = xd->this_mi;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+
+ if (cm->frame_type != KEY_FRAME) {
+ const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
+ SEG_LVL_REF_FRAME);
+
+ if (!seg_ref_active)
+ cpi->intra_inter_count[vp9_get_pred_context_intra_inter(xd)]
+ [is_inter_block(mbmi)]++;
+
+ // If the segment reference feature is enabled we have only a single
+ // reference frame allowed for the segment so exclude it from
+ // the reference frame counts used to work out probabilities.
+ if (is_inter_block(mbmi) && !seg_ref_active) {
+ if (cm->comp_pred_mode == HYBRID_PREDICTION)
+ cpi->comp_inter_count[vp9_get_pred_context_comp_inter_inter(cm, xd)]
+ [has_second_ref(mbmi)]++;
+
+ if (has_second_ref(mbmi)) {
+ cpi->comp_ref_count[vp9_get_pred_context_comp_ref_p(cm, xd)]
+ [mbmi->ref_frame[0] == GOLDEN_FRAME]++;
+ } else {
+ cpi->single_ref_count[vp9_get_pred_context_single_ref_p1(xd)][0]
+ [mbmi->ref_frame[0] != LAST_FRAME]++;
+ if (mbmi->ref_frame[0] != LAST_FRAME)
+ cpi->single_ref_count[vp9_get_pred_context_single_ref_p2(xd)][1]
+ [mbmi->ref_frame[0] != GOLDEN_FRAME]++;
+ }
+ }
+
+ // Count of last ref frame 0,0 usage
+ if (mbmi->mode == ZEROMV && mbmi->ref_frame[0] == LAST_FRAME)
+ cpi->inter_zz_count++;
+ }
+}
+
+// TODO(jingning): the variables used here are a little complicated. The
+// temporary buffers need further refactoring when recursive partitioning
+// down to 4x4 block size is enabled.
+static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ switch (bsize) {
+ case BLOCK_64X64:
+ return &x->sb64_context;
+ case BLOCK_64X32:
+ return &x->sb64x32_context[xd->sb_index];
+ case BLOCK_32X64:
+ return &x->sb32x64_context[xd->sb_index];
+ case BLOCK_32X32:
+ return &x->sb32_context[xd->sb_index];
+ case BLOCK_32X16:
+ return &x->sb32x16_context[xd->sb_index][xd->mb_index];
+ case BLOCK_16X32:
+ return &x->sb16x32_context[xd->sb_index][xd->mb_index];
+ case BLOCK_16X16:
+ return &x->mb_context[xd->sb_index][xd->mb_index];
+ case BLOCK_16X8:
+ return &x->sb16x8_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_8X16:
+ return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_8X8:
+ return &x->sb8x8_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_8X4:
+ return &x->sb8x4_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_4X8:
+ return &x->sb4x8_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_4X4:
+ return &x->ab4x4_context[xd->sb_index][xd->mb_index][xd->b_index];
+ default:
+ assert(0);
+ return NULL;
+ }
+}
+
+static BLOCK_SIZE *get_sb_partitioning(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ switch (bsize) {
+ case BLOCK_64X64:
+ return &x->sb64_partitioning;
+ case BLOCK_32X32:
+ return &x->sb_partitioning[xd->sb_index];
+ case BLOCK_16X16:
+ return &x->mb_partitioning[xd->sb_index][xd->mb_index];
+ case BLOCK_8X8:
+ return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
+ default:
+ assert(0);
+ return NULL;
+ }
+}
+
+static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
+ ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
+ PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
+ BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int p;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ int mi_height = num_8x8_blocks_high_lookup[bsize];
+ for (p = 0; p < MAX_MB_PLANE; p++) {
+ vpx_memcpy(
+ cm->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
+ a + num_4x4_blocks_wide * p,
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+ xd->plane[p].subsampling_x);
+ vpx_memcpy(
+ cm->left_context[p]
+ + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+ l + num_4x4_blocks_high * p,
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+ xd->plane[p].subsampling_y);
+ }
+ vpx_memcpy(cm->above_seg_context + mi_col, sa,
+ sizeof(PARTITION_CONTEXT) * mi_width);
+ vpx_memcpy(cm->left_seg_context + (mi_row & MI_MASK), sl,
+ sizeof(PARTITION_CONTEXT) * mi_height);
+}
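+
+// save_context()/restore_context() snapshot the above/left entropy and
+// partition contexts around a trial encode, so alternative partitionings
+// can each be evaluated from an identical starting state.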
+static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
+ ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
+ PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
+ BLOCK_SIZE bsize) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const MACROBLOCK *const x = &cpi->mb;
+ const MACROBLOCKD *const xd = &x->e_mbd;
+ int p;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ int mi_height = num_8x8_blocks_high_lookup[bsize];
+
+ // buffer the above/left context information of the block in search.
+ for (p = 0; p < MAX_MB_PLANE; ++p) {
+ vpx_memcpy(
+ a + num_4x4_blocks_wide * p,
+ cm->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+ xd->plane[p].subsampling_x);
+ vpx_memcpy(
+ l + num_4x4_blocks_high * p,
+ cm->left_context[p]
+ + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+ xd->plane[p].subsampling_y);
+ }
+ vpx_memcpy(sa, cm->above_seg_context + mi_col,
+ sizeof(PARTITION_CONTEXT) * mi_width);
+ vpx_memcpy(sl, cm->left_seg_context + (mi_row & MI_MASK),
+ sizeof(PARTITION_CONTEXT) * mi_height);
+}
+
+static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
+ int output_enabled, BLOCK_SIZE bsize, int sub_index) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD * const xd = &x->e_mbd;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ if (sub_index != -1)
+ *get_sb_index(xd, bsize) = sub_index;
+
+ if (bsize < BLOCK_8X8) {
+ // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
+ // there is nothing to be done.
+ if (xd->ab_index > 0)
+ return;
+ }
+ set_offsets(cpi, mi_row, mi_col, bsize);
+ update_state(cpi, get_block_context(x, bsize), bsize, output_enabled);
+ encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);
+
+ if (output_enabled) {
+ update_stats(cpi);
+
+ (*tp)->token = EOSB_TOKEN;
+ (*tp)++;
+ }
+}
+
+static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
+ int output_enabled, BLOCK_SIZE bsize) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD * const xd = &x->e_mbd;
+ BLOCK_SIZE c1 = BLOCK_4X4;
+ const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
+ int pl = 0;
+ PARTITION_TYPE partition;
+ BLOCK_SIZE subsize;
+ int i;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ if (bsize >= BLOCK_8X8) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ c1 = *(get_sb_partitioning(x, bsize));
+ }
+ partition = partition_lookup[bsl][c1];
+
+ switch (partition) {
+ case PARTITION_NONE:
+ if (output_enabled && bsize >= BLOCK_8X8)
+ cpi->partition_count[pl][PARTITION_NONE]++;
+ encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
+ break;
+ case PARTITION_VERT:
+ if (output_enabled)
+ cpi->partition_count[pl][PARTITION_VERT]++;
+ encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
+ encode_b(cpi, tp, mi_row, mi_col + bs, output_enabled, c1, 1);
+ break;
+ case PARTITION_HORZ:
+ if (output_enabled)
+ cpi->partition_count[pl][PARTITION_HORZ]++;
+ encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
+ encode_b(cpi, tp, mi_row + bs, mi_col, output_enabled, c1, 1);
+ break;
+ case PARTITION_SPLIT:
+ subsize = get_subsize(bsize, PARTITION_SPLIT);
+
+ if (output_enabled)
+ cpi->partition_count[pl][PARTITION_SPLIT]++;
+
+ for (i = 0; i < 4; i++) {
+ const int x_idx = i & 1, y_idx = i >> 1;
+
+ *get_sb_index(xd, subsize) = i;
+ encode_sb(cpi, tp, mi_row + y_idx * bs, mi_col + x_idx * bs,
+ output_enabled, subsize);
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ update_partition_context(xd, c1, bsize);
+ }
+}
+
+// Check to see if the given partition size is allowed for a specified
+// number of 8x8 block rows and columns remaining in the image.
+// If not, return the largest allowed partition size.
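+// (For example, a requested BLOCK_64X64 near the bottom-right corner steps
+// down through the BLOCK_SIZE enum until both its height and width, in
+// 8x8 blocks, fit within the remaining rows and columns.)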
+static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
+ int rows_left, int cols_left,
+ int *bh, int *bw) {
+ if ((rows_left <= 0) || (cols_left <= 0)) {
+ return MIN(bsize, BLOCK_8X8);
+ } else {
+ for (; bsize > 0; --bsize) {
+ *bh = num_8x8_blocks_high_lookup[bsize];
+ *bw = num_8x8_blocks_wide_lookup[bsize];
+ if ((*bh <= rows_left) && (*bw <= cols_left)) {
+ break;
+ }
+ }
+ }
+ return bsize;
+}
+
+// This function attempts to set all mode info entries in a given SB64
+// to the same block partition size.
+// However, at the bottom and right borders of the image the requested size
+// may not be allowed, in which case this code attempts to choose the
+// largest allowable partition size.
+static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &cpi->common;
+ BLOCK_SIZE bsize = cpi->sf.always_this_block_size;
+ const int mis = cm->mode_info_stride;
+ int row8x8_remaining = cm->cur_tile_mi_row_end - mi_row;
+ int col8x8_remaining = cm->cur_tile_mi_col_end - mi_col;
+ int block_row, block_col;
+ MODE_INFO * mi_upper_left = cm->mi + mi_row * mis + mi_col;
+ int bh = num_8x8_blocks_high_lookup[bsize];
+ int bw = num_8x8_blocks_wide_lookup[bsize];
+
+ assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
+
+ // Apply the requested partition size to the SB64 if it is all "in image"
+ if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
+ (row8x8_remaining >= MI_BLOCK_SIZE)) {
+ for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
+ for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
+ int index = block_row * mis + block_col;
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = bsize;
+ }
+ }
+ } else {
+ // Else this is a partial SB64.
+ for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
+ for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
+ int index = block_row * mis + block_col;
+ // Find a partition size that fits
+ bsize = find_partition_size(cpi->sf.always_this_block_size,
+ (row8x8_remaining - block_row),
+ (col8x8_remaining - block_col), &bh, &bw);
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = bsize;
+ }
+ }
+ }
+}
+
+static void copy_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ MODE_INFO **prev_mi_8x8) {
+ VP9_COMMON *const cm = &cpi->common;
+ const int mis = cm->mode_info_stride;
+ int block_row, block_col;
+
+ for (block_row = 0; block_row < 8; ++block_row) {
+ for (block_col = 0; block_col < 8; ++block_col) {
+ MODE_INFO * prev_mi = prev_mi_8x8[block_row * mis + block_col];
+ BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
+ int offset;
+
+ if (prev_mi) {
+ offset = prev_mi - cm->prev_mi;
+ mi_8x8[block_row * mis + block_col] = cm->mi + offset;
+ mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type;
+ }
+ }
+ }
+}
+
+static void set_block_size(VP9_COMMON * const cm, MODE_INFO **mi_8x8,
+ BLOCK_SIZE bsize, int mis, int mi_row,
+ int mi_col) {
+ int r, c;
+ const int bs = MAX(num_8x8_blocks_wide_lookup[bsize],
+ num_8x8_blocks_high_lookup[bsize]);
+ const int idx_str = mis * mi_row + mi_col;
+ MODE_INFO **const mi2 = &mi_8x8[idx_str];
+
+ mi2[0] = cm->mi + idx_str;
+ mi2[0]->mbmi.sb_type = bsize;
+
+ for (r = 0; r < bs; r++)
+ for (c = 0; c < bs; c++)
+ if (mi_row + r < cm->mi_rows && mi_col + c < cm->mi_cols)
+ mi2[r * mis + c] = mi2[0];
+}
+
+typedef struct {
+ int64_t sum_square_error;
+ int64_t sum_error;
+ int count;
+ int variance;
+} var;
+
+typedef struct {
+ var none;
+ var horz[2];
+ var vert[2];
+} partition_variance;
+
+#define VT(TYPE, BLOCKSIZE) \
+ typedef struct { \
+ partition_variance vt; \
+ BLOCKSIZE split[4]; } TYPE;
+
+VT(v8x8, var)
+VT(v16x16, v8x8)
+VT(v32x32, v16x16)
+VT(v64x64, v32x32)
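+// Each VT() above expands to a four-way tree node, e.g.
+//   typedef struct { partition_variance vt; v16x16 split[4]; } v32x32;
+// so a single v64x64 carries a variance record for every square sub-block
+// down to 8x8.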
+
+typedef struct {
+ partition_variance *vt;
+ var *split[4];
+} vt_node;
+
+typedef enum {
+ V16X16,
+ V32X32,
+ V64X64,
+} TREE_LEVEL;
+
+static void tree_to_node(void *data, BLOCK_SIZE bsize, vt_node *node) {
+ int i;
+ switch (bsize) {
+ case BLOCK_64X64: {
+ v64x64 *vt = (v64x64 *) data;
+ node->vt = &vt->vt;
+ for (i = 0; i < 4; i++)
+ node->split[i] = &vt->split[i].vt.none;
+ break;
+ }
+ case BLOCK_32X32: {
+ v32x32 *vt = (v32x32 *) data;
+ node->vt = &vt->vt;
+ for (i = 0; i < 4; i++)
+ node->split[i] = &vt->split[i].vt.none;
+ break;
+ }
+ case BLOCK_16X16: {
+ v16x16 *vt = (v16x16 *) data;
+ node->vt = &vt->vt;
+ for (i = 0; i < 4; i++)
+ node->split[i] = &vt->split[i].vt.none;
+ break;
+ }
+ case BLOCK_8X8: {
+ v8x8 *vt = (v8x8 *) data;
+ node->vt = &vt->vt;
+ for (i = 0; i < 4; i++)
+ node->split[i] = &vt->split[i];
+ break;
+ }
+ default:
+ node->vt = 0;
+ for (i = 0; i < 4; i++)
+ node->split[i] = 0;
+ assert(0);  // unreachable: unsupported block size
+ }
+}
+
+// Set variance values given sum square error, sum error, count.
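+// For c > 0 this computes the population variance
+//   E[x^2] - E[x]^2 = (s2 - s * s / c) / c
+// scaled by 256 to preserve integer precision (c is 64 for an 8x8 block).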
+static void fill_variance(var *v, int64_t s2, int64_t s, int c) {
+ v->sum_square_error = s2;
+ v->sum_error = s;
+ v->count = c;
+ if (c > 0)
+ v->variance = 256
+ * (v->sum_square_error - v->sum_error * v->sum_error / v->count)
+ / v->count;
+ else
+ v->variance = 0;
+}
+
+// Combine 2 variance structures by summing the sum_error, sum_square_error,
+// and counts and then calculating the new variance.
+static void sum_2_variances(var *r, var *a, var *b) {
+ fill_variance(r, a->sum_square_error + b->sum_square_error,
+ a->sum_error + b->sum_error, a->count + b->count);
+}
+
+static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
+ vt_node node;
+ tree_to_node(data, bsize, &node);
+ sum_2_variances(&node.vt->horz[0], node.split[0], node.split[1]);
+ sum_2_variances(&node.vt->horz[1], node.split[2], node.split[3]);
+ sum_2_variances(&node.vt->vert[0], node.split[0], node.split[2]);
+ sum_2_variances(&node.vt->vert[1], node.split[1], node.split[3]);
+ sum_2_variances(&node.vt->none, &node.vt->vert[0], &node.vt->vert[1]);
+}
+
+#if PERFORM_RANDOM_PARTITIONING
+static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
+ BLOCK_SIZE block_size, int mi_row,
+ int mi_col, int mi_size) {
+ VP9_COMMON * const cm = &cpi->common;
+ vt_node vt;
+ const int mis = cm->mode_info_stride;
+ int64_t threshold = 4 * cpi->common.base_qindex * cpi->common.base_qindex;
+
+ tree_to_node(data, block_size, &vt);
+
+ // PARTITION_NONE is available only if more than half the block, in both
+ // width and height, lies inside the visible image
+ if (mi_col + mi_size < cm->mi_cols && mi_row + mi_size < cm->mi_rows &&
+ (rand() & 3) < 1) {
+ set_block_size(cm, m, block_size, mis, mi_row, mi_col);
+ return 1;
+ }
+
+ // vertical split is available on all but the bottom border
+ if (mi_row + mi_size < cm->mi_rows && vt.vt->vert[0].variance < threshold
+ && (rand() & 3) < 1) {
+ set_block_size(cm, m, get_subsize(block_size, PARTITION_VERT), mis, mi_row,
+ mi_col);
+ return 1;
+ }
+
+ // horizontal split is available on all but the right border
+ if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
+ && (rand() & 3) < 1) {
+ set_block_size(cm, m, get_subsize(block_size, PARTITION_HORZ), mis, mi_row,
+ mi_col);
+ return 1;
+ }
+
+ return 0;
+}
+
+#else // !PERFORM_RANDOM_PARTITIONING
+
+static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO **m,
+ BLOCK_SIZE bsize, int mi_row,
+ int mi_col, int mi_size) {
+ VP9_COMMON * const cm = &cpi->common;
+ vt_node vt;
+ const int mis = cm->mode_info_stride;
+ int64_t threshold = 50 * cpi->common.base_qindex;
+
+ tree_to_node(data, bsize, &vt);
+
+ // PARTITION_NONE is available only if more than half the block, in both
+ // width and height, lies inside the visible image
+ if (mi_col + mi_size < cm->mi_cols && mi_row + mi_size < cm->mi_rows
+ && vt.vt->none.variance < threshold) {
+ set_block_size(cm, m, bsize, mis, mi_row, mi_col);
+ return 1;
+ }
+
+ // vertical split is available on all but the bottom border
+ if (mi_row + mi_size < cm->mi_rows && vt.vt->vert[0].variance < threshold
+ && vt.vt->vert[1].variance < threshold) {
+ set_block_size(cm, m, get_subsize(bsize, PARTITION_VERT), mis, mi_row,
+ mi_col);
+ return 1;
+ }
+
+ // horizontal split is available on all but the right border
+ if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
+ && vt.vt->horz[1].variance < threshold) {
+ set_block_size(cm, m, get_subsize(bsize, PARTITION_HORZ), mis, mi_row,
+ mi_col);
+ return 1;
+ }
+
+ return 0;
+}
+#endif // PERFORM_RANDOM_PARTITIONING
+
+static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ int mi_row, int mi_col) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK *x = &cpi->mb;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ const int mis = cm->mode_info_stride;
+ // TODO(JBB): More experimentation or testing of this threshold.
+ int64_t threshold = 4;
+ int i, j, k;
+ v64x64 vt;
+ unsigned char * s;
+ int sp;
+ const unsigned char * d;
+ int dp;
+ int pixels_wide = 64, pixels_high = 64;
+
+ vp9_zero(vt);
+ set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
+
+ if (xd->mb_to_right_edge < 0)
+ pixels_wide += (xd->mb_to_right_edge >> 3);
+
+ if (xd->mb_to_bottom_edge < 0)
+ pixels_high += (xd->mb_to_bottom_edge >> 3);
+
+ s = x->plane[0].src.buf;
+ sp = x->plane[0].src.stride;
+
+ // TODO(JBB): Clearly the higher the quantizer the fewer partitions we
+ // want, but this needs more experimentation.
+ threshold = threshold * cpi->common.base_qindex * cpi->common.base_qindex;
+
+ d = vp9_64x64_zeros;
+ dp = 64;
+ if (cm->frame_type != KEY_FRAME) {
+ int_mv nearest_mv, near_mv;
+ const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, LAST_FRAME)];
+ YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx];
+ YV12_BUFFER_CONFIG *second_ref_fb = NULL;
+
+ setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col,
+ &xd->scale_factor[0]);
+ setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
+ &xd->scale_factor[1]);
+
+ xd->this_mi->mbmi.ref_frame[0] = LAST_FRAME;
+ xd->this_mi->mbmi.sb_type = BLOCK_64X64;
+ vp9_find_best_ref_mvs(xd,
+ mi_8x8[0]->mbmi.ref_mvs[mi_8x8[0]->mbmi.ref_frame[0]],
+ &nearest_mv, &near_mv);
+
+ xd->this_mi->mbmi.mv[0] = nearest_mv;
+ vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);
+
+ d = xd->plane[0].dst.buf;
+ dp = xd->plane[0].dst.stride;
+ }
+
+ // Fill in the entire tree of 8x8 variances for splits.
+ for (i = 0; i < 4; i++) {
+ const int x32_idx = ((i & 1) << 5);
+ const int y32_idx = ((i >> 1) << 5);
+ for (j = 0; j < 4; j++) {
+ const int x16_idx = x32_idx + ((j & 1) << 4);
+ const int y16_idx = y32_idx + ((j >> 1) << 4);
+ v16x16 *vst = &vt.split[i].split[j];
+ for (k = 0; k < 4; k++) {
+ int x_idx = x16_idx + ((k & 1) << 3);
+ int y_idx = y16_idx + ((k >> 1) << 3);
+ unsigned int sse = 0;
+ int sum = 0;
+ if (x_idx < pixels_wide && y_idx < pixels_high)
+ vp9_get_sse_sum_8x8(s + y_idx * sp + x_idx, sp,
+ d + y_idx * dp + x_idx, dp, &sse, &sum);
+ fill_variance(&vst->split[k].vt.none, sse, sum, 64);
+ }
+ }
+ }
+ // Fill the rest of the variance tree by summing the split partition
+ // values.
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
+ }
+ fill_variance_tree(&vt.split[i], BLOCK_32X32);
+ }
+ fill_variance_tree(&vt, BLOCK_64X64);
+ // Now go through the entire structure, splitting every block size until
+ // we reach one whose variance is lower than our threshold, or until we
+ // hit 8x8.
+ if (!set_vt_partitioning(cpi, &vt, mi_8x8, BLOCK_64X64, mi_row, mi_col,
+ 4)) {
+ for (i = 0; i < 4; ++i) {
+ const int x32_idx = ((i & 1) << 2);
+ const int y32_idx = ((i >> 1) << 2);
+ if (!set_vt_partitioning(cpi, &vt.split[i], mi_8x8, BLOCK_32X32,
+ (mi_row + y32_idx), (mi_col + x32_idx), 2)) {
+ for (j = 0; j < 4; ++j) {
+ const int x16_idx = ((j & 1) << 1);
+ const int y16_idx = ((j >> 1) << 1);
+ if (!set_vt_partitioning(cpi, &vt.split[i].split[j], mi_8x8,
+ BLOCK_16X16,
+ (mi_row + y32_idx + y16_idx),
+ (mi_col + x32_idx + x16_idx), 1)) {
+ for (k = 0; k < 4; ++k) {
+ const int x8_idx = (k & 1);
+ const int y8_idx = (k >> 1);
+ set_block_size(cm, mi_8x8, BLOCK_8X8, mis,
+ (mi_row + y32_idx + y16_idx + y8_idx),
+ (mi_col + x32_idx + x16_idx + x8_idx));
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
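+// Rate-distortion check of an externally supplied partitioning (copied
+// from the previous frame or forced by the speed features): the stored
+// partition is costed as-is and, when allowed, compared against
+// PARTITION_NONE and a forced four-way split; the cheapest option wins.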
+static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ TOKENEXTRA **tp, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int *rate, int64_t *dist,
+ int do_recon) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ const int mis = cm->mode_info_stride;
+ int bsl = b_width_log2(bsize);
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ int ms = num_4x4_blocks_wide / 2;
+ int mh = num_4x4_blocks_high / 2;
+ int bss = (1 << bsl) / 4;
+ int i, pl;
+ PARTITION_TYPE partition = PARTITION_NONE;
+ BLOCK_SIZE subsize;
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
+ PARTITION_CONTEXT sl[8], sa[8];
+ int last_part_rate = INT_MAX;
+ int64_t last_part_dist = INT_MAX;
+ int split_rate = INT_MAX;
+ int64_t split_dist = INT_MAX;
+ int none_rate = INT_MAX;
+ int64_t none_dist = INT_MAX;
+ int chosen_rate = INT_MAX;
+ int64_t chosen_dist = INT_MAX;
+ BLOCK_SIZE sub_subsize = BLOCK_4X4;
+ int splits_below = 0;
+ BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ partition = partition_lookup[bsl][bs_type];
+
+ subsize = get_subsize(bsize, partition);
+
+ if (bsize < BLOCK_8X8) {
+ // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
+ // there is nothing to be done.
+ if (xd->ab_index != 0) {
+ *rate = 0;
+ *dist = 0;
+ return;
+ }
+ } else {
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ }
+ save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ x->fast_ms = 0;
+ x->subblock_ref = 0;
+
+ if (cpi->sf.adjust_partitioning_from_last_frame) {
+ // Check if any of the sub blocks are further split.
+ if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
+ sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
+ splits_below = 1;
+ for (i = 0; i < 4; i++) {
+ int jj = i >> 1, ii = i & 0x01;
+ MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss];
+ if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
+ splits_below = 0;
+ }
+ }
+ }
+
+ // If the partition is not PARTITION_NONE, also try PARTITION_NONE unless
+ // each of the 4 sub-blocks is itself split even further.
+ if (partition != PARTITION_NONE && !splits_below &&
+ mi_row + (ms >> 1) < cm->mi_rows &&
+ mi_col + (ms >> 1) < cm->mi_cols) {
+ *(get_sb_partitioning(x, bsize)) = bsize;
+ pick_sb_modes(cpi, mi_row, mi_col, &none_rate, &none_dist, bsize,
+ get_block_context(x, bsize), INT64_MAX);
+
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ none_rate += x->partition_cost[pl][PARTITION_NONE];
+
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ mi_8x8[0]->mbmi.sb_type = bs_type;
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ }
+ }
+
+ switch (partition) {
+ case PARTITION_NONE:
+ pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
+ bsize, get_block_context(x, bsize), INT64_MAX);
+ break;
+ case PARTITION_HORZ:
+ *get_sb_index(xd, subsize) = 0;
+ pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
+ subsize, get_block_context(x, subsize), INT64_MAX);
+ if (last_part_rate != INT_MAX &&
+ bsize >= BLOCK_8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
+ int rt = 0;
+ int64_t dt = 0;
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+ *get_sb_index(xd, subsize) = 1;
+ pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, &rt, &dt, subsize,
+ get_block_context(x, subsize), INT64_MAX);
+ if (rt == INT_MAX || dt == INT_MAX) {
+ last_part_rate = INT_MAX;
+ last_part_dist = INT_MAX;
+ break;
+ }
+
+ last_part_rate += rt;
+ last_part_dist += dt;
+ }
+ break;
+ case PARTITION_VERT:
+ *get_sb_index(xd, subsize) = 0;
+ pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
+ subsize, get_block_context(x, subsize), INT64_MAX);
+ if (last_part_rate != INT_MAX &&
+ bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
+ int rt = 0;
+ int64_t dt = 0;
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+ *get_sb_index(xd, subsize) = 1;
+ pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), &rt, &dt, subsize,
+ get_block_context(x, subsize), INT64_MAX);
+ if (rt == INT_MAX || dt == INT_MAX) {
+ last_part_rate = INT_MAX;
+ last_part_dist = INT_MAX;
+ break;
+ }
+ last_part_rate += rt;
+ last_part_dist += dt;
+ }
+ break;
+ case PARTITION_SPLIT:
+ // Split partition.
+ last_part_rate = 0;
+ last_part_dist = 0;
+ for (i = 0; i < 4; i++) {
+ int x_idx = (i & 1) * (ms >> 1);
+ int y_idx = (i >> 1) * (ms >> 1);
+ int jj = i >> 1, ii = i & 0x01;
+ int rt;
+ int64_t dt;
+
+ if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
+ continue;
+
+ *get_sb_index(xd, subsize) = i;
+
+ rd_use_partition(cpi, mi_8x8 + jj * bss * mis + ii * bss, tp,
+ mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
+ i != 3);
+ if (rt == INT_MAX || dt == INT_MAX) {
+ last_part_rate = INT_MAX;
+ last_part_dist = INT_MAX;
+ break;
+ }
+ last_part_rate += rt;
+ last_part_dist += dt;
+ }
+ break;
+ default:
+ assert(0);
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ if (last_part_rate < INT_MAX)
+ last_part_rate += x->partition_cost[pl][partition];
+
+ if (cpi->sf.adjust_partitioning_from_last_frame
+ && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
+ && (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows)
+ && (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) {
+ BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
+ split_rate = 0;
+ split_dist = 0;
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ // Split partition.
+ for (i = 0; i < 4; i++) {
+ int x_idx = (i & 1) * (num_4x4_blocks_wide >> 2);
+ int y_idx = (i >> 1) * (num_4x4_blocks_wide >> 2);
+ int rt = 0;
+ int64_t dt = 0;
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
+ PARTITION_CONTEXT sl[8], sa[8];
+
+ if ((mi_row + y_idx >= cm->mi_rows)
+ || (mi_col + x_idx >= cm->mi_cols))
+ continue;
+
+ *get_sb_index(xd, split_subsize) = i;
+ *get_sb_partitioning(x, bsize) = split_subsize;
+ *get_sb_partitioning(x, split_subsize) = split_subsize;
+
+ save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ pick_sb_modes(cpi, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
+ split_subsize, get_block_context(x, split_subsize),
+ INT64_MAX);
+
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ if (rt == INT_MAX || dt == INT_MAX) {
+ split_rate = INT_MAX;
+ split_dist = INT_MAX;
+ break;
+ }
+
+ if (i != 3)
+ encode_sb(cpi, tp, mi_row + y_idx, mi_col + x_idx, 0,
+ split_subsize);
+
+ split_rate += rt;
+ split_dist += dt;
+ set_partition_seg_context(cm, xd, mi_row + y_idx, mi_col + x_idx);
+ pl = partition_plane_context(xd, bsize);
+ split_rate += x->partition_cost[pl][PARTITION_NONE];
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ if (split_rate < INT_MAX) {
+ split_rate += x->partition_cost[pl][PARTITION_SPLIT];
+
+ chosen_rate = split_rate;
+ chosen_dist = split_dist;
+ }
+ }
+
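+ // RDCOST() folds rate and distortion into one Lagrangian-style cost,
+ // roughly J = D + lambda * R with x->rdmult acting as lambda and
+ // x->rddiv scaling the distortion, so the comparisons below simply pick
+ // the lower total cost.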
+ // If last_part is better set the partitioning to that...
+ if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
+ < RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
+ mi_8x8[0]->mbmi.sb_type = bsize;
+ if (bsize >= BLOCK_8X8)
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ chosen_rate = last_part_rate;
+ chosen_dist = last_part_dist;
+ }
+ // If none was better set the partitioning to that...
+ if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)
+ > RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) {
+ if (bsize >= BLOCK_8X8)
+ *(get_sb_partitioning(x, bsize)) = bsize;
+ chosen_rate = none_rate;
+ chosen_dist = none_dist;
+ }
+
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ // By this point we must have chosen a partitioning and an encoding or
+ // we'll fail later on; there are no further opportunities to recover.
+ if (bsize == BLOCK_64X64)
+ assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX);
+
+ if (do_recon)
+ encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);
+
+ *rate = chosen_rate;
+ *dist = chosen_dist;
+}
+
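+// Leeway tables for rd_auto_partition_range(): indexing with the smallest
+// (largest) sb_type observed in the neighbouring SB64s yields a search
+// bound one step wider in that direction.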
+static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
+ BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
+ BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8,
+ BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16
+};
+
+static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
+ BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
+ BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, BLOCK_64X64,
+ BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64
+};
+
+// Look at all the mode_info entries for blocks that are part of this
+// partition and find the min and max values for sb_type.
+// At the moment this is designed to work on a 64x64 SB but could be
+// adjusted to use a size parameter.
+//
+// The min and max are assumed to have been initialized prior to calling
+// this function, so repeated calls can accumulate a min and max over more
+// than one SB64.
+static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8,
+ BLOCK_SIZE * min_block_size,
+ BLOCK_SIZE * max_block_size ) {
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ int sb_width_in_blocks = MI_BLOCK_SIZE;
+ int sb_height_in_blocks = MI_BLOCK_SIZE;
+ int i, j;
+ int index = 0;
+
+ // Check the sb_type for each block that belongs to this region.
+ for (i = 0; i < sb_height_in_blocks; ++i) {
+ for (j = 0; j < sb_width_in_blocks; ++j) {
+ MODE_INFO * mi = mi_8x8[index+j];
+ BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
+ *min_block_size = MIN(*min_block_size, sb_type);
+ *max_block_size = MAX(*max_block_size, sb_type);
+ }
+ index += xd->mode_info_stride;
+ }
+}
+
+// Look at neighboring blocks and set a min and max partition size based on
+// what they chose.
+static void rd_auto_partition_range(VP9_COMP *cpi, int row, int col,
+ BLOCK_SIZE *min_block_size,
+ BLOCK_SIZE *max_block_size) {
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ MODE_INFO ** mi_8x8 = xd->mi_8x8;
+ const int left_in_image = xd->left_available && mi_8x8[-1];
+ const int above_in_image = xd->up_available &&
+ mi_8x8[-xd->mode_info_stride];
+ MODE_INFO ** above_sb64_mi_8x8;
+ MODE_INFO ** left_sb64_mi_8x8;
+
+ // Frequency check
+ if (cpi->sf.auto_min_max_partition_count <= 0) {
+ cpi->sf.auto_min_max_partition_count =
+ cpi->sf.auto_min_max_partition_interval;
+ *min_block_size = BLOCK_4X4;
+ *max_block_size = BLOCK_64X64;
+ } else {
+ --cpi->sf.auto_min_max_partition_count;
+
+ // Set default values if no left or above neighbour
+ if (!left_in_image && !above_in_image) {
+ *min_block_size = BLOCK_4X4;
+ *max_block_size = BLOCK_64X64;
+ } else {
+ VP9_COMMON *const cm = &cpi->common;
+ int row8x8_remaining = cm->cur_tile_mi_row_end - row;
+ int col8x8_remaining = cm->cur_tile_mi_col_end - col;
+ int bh, bw;
+
+ // Default "min to max" and "max to min"
+ *min_block_size = BLOCK_64X64;
+ *max_block_size = BLOCK_4X4;
+
+ // Find the min and max partition sizes used in the left SB64
+ if (left_in_image) {
+ left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE];
+ get_sb_partition_size_range(cpi, left_sb64_mi_8x8,
+ min_block_size, max_block_size);
+ }
+
+ // Find the min and max partition sizes used in the above SB64 taking
+ // the values found for left as a starting point.
+ if (above_in_image) {
+ above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE];
+ get_sb_partition_size_range(cpi, above_sb64_mi_8x8,
+ min_block_size, max_block_size);
+ }
+
+ // Give a bit of leeway on either side of the observed min and max
+ *min_block_size = min_partition_size[*min_block_size];
+ *max_block_size = max_partition_size[*max_block_size];
+
+ // Check border cases where max and min from neighbours may not be legal.
+ *max_block_size = find_partition_size(*max_block_size,
+ row8x8_remaining, col8x8_remaining,
+ &bh, &bw);
+ *min_block_size = MIN(*min_block_size, *max_block_size);
+ }
+ }
+}
+
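+// Inspects the four already-coded subblocks of a square block: if all four
+// are inter-coded and their motion vectors agree closely, x->fast_ms is
+// raised (1 = close, 2 = very close with a shared reference, 3 = identical
+// MVs) so the following motion searches can start from the shared result.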
+static void compute_fast_motion_search_level(VP9_COMP *cpi, BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ // Only use the 8x8 result for non-HD videos.
+ // int use_8x8 = (MIN(cpi->common.width, cpi->common.height) < 720) ? 1 : 0;
+ int use_8x8 = 1;
+
+ if (cm->frame_type && !cpi->is_src_frame_alt_ref &&
+ ((use_8x8 && bsize == BLOCK_16X16) ||
+ bsize == BLOCK_32X32 || bsize == BLOCK_64X64)) {
+ int ref0 = 0, ref1 = 0, ref2 = 0, ref3 = 0;
+ PICK_MODE_CONTEXT *block_context = NULL;
+
+ if (bsize == BLOCK_16X16) {
+ block_context = x->sb8x8_context[xd->sb_index][xd->mb_index];
+ } else if (bsize == BLOCK_32X32) {
+ block_context = x->mb_context[xd->sb_index];
+ } else if (bsize == BLOCK_64X64) {
+ block_context = x->sb32_context;
+ }
+
+ if (block_context) {
+ ref0 = block_context[0].mic.mbmi.ref_frame[0];
+ ref1 = block_context[1].mic.mbmi.ref_frame[0];
+ ref2 = block_context[2].mic.mbmi.ref_frame[0];
+ ref3 = block_context[3].mic.mbmi.ref_frame[0];
+ }
+
+ // Only proceed when all four subblocks use an inter reference frame.
+ if (ref0 && ref1 && ref2 && ref3) {
+ int d01, d23, d02, d13;
+
+ // Motion vectors for the four subblocks.
+ int16_t mvr0 = block_context[0].mic.mbmi.mv[0].as_mv.row;
+ int16_t mvc0 = block_context[0].mic.mbmi.mv[0].as_mv.col;
+ int16_t mvr1 = block_context[1].mic.mbmi.mv[0].as_mv.row;
+ int16_t mvc1 = block_context[1].mic.mbmi.mv[0].as_mv.col;
+ int16_t mvr2 = block_context[2].mic.mbmi.mv[0].as_mv.row;
+ int16_t mvc2 = block_context[2].mic.mbmi.mv[0].as_mv.col;
+ int16_t mvr3 = block_context[3].mic.mbmi.mv[0].as_mv.row;
+ int16_t mvc3 = block_context[3].mic.mbmi.mv[0].as_mv.col;
+
+ // Adjust sign if ref is alt_ref.
+ if (cm->ref_frame_sign_bias[ref0]) {
+ mvr0 *= -1;
+ mvc0 *= -1;
+ }
+
+ if (cm->ref_frame_sign_bias[ref1]) {
+ mvr1 *= -1;
+ mvc1 *= -1;
+ }
+
+ if (cm->ref_frame_sign_bias[ref2]) {
+ mvr2 *= -1;
+ mvc2 *= -1;
+ }
+
+ if (cm->ref_frame_sign_bias[ref3]) {
+ mvr3 *= -1;
+ mvc3 *= -1;
+ }
+
+ // Calculate mv distances.
+ d01 = MAX(abs(mvr0 - mvr1), abs(mvc0 - mvc1));
+ d23 = MAX(abs(mvr2 - mvr3), abs(mvc2 - mvc3));
+ d02 = MAX(abs(mvr0 - mvr2), abs(mvc0 - mvc2));
+ d13 = MAX(abs(mvr1 - mvr3), abs(mvc1 - mvc3));
+
+ if (d01 < FAST_MOTION_MV_THRESH && d23 < FAST_MOTION_MV_THRESH &&
+ d02 < FAST_MOTION_MV_THRESH && d13 < FAST_MOTION_MV_THRESH) {
+ // Set fast motion search level.
+ x->fast_ms = 1;
+
+ if (ref0 == ref1 && ref1 == ref2 && ref2 == ref3 &&
+ d01 < 2 && d23 < 2 && d02 < 2 && d13 < 2) {
+ // Set fast motion search level.
+ x->fast_ms = 2;
+
+ if (!d01 && !d23 && !d02 && !d13) {
+ x->fast_ms = 3;
+ x->subblock_ref = ref0;
+ }
+ }
+ }
+ }
+ }
+}
+
+static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
+ vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
+}
+
+static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
+ vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
+}
+
+// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
+// unlikely to be selected depending on previous rate-distortion optimization
+// results, for encoding speed-up.
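+// Recursive partition search: PARTITION_NONE is evaluated first, then the
+// four-way SPLIT (recursing towards 8x8), then the HORZ and VERT
+// rectangles; the lowest RD cost at each level decides the partitioning.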
+static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
+ int mi_col, BLOCK_SIZE bsize, int *rate,
+ int64_t *dist, int do_recon, int64_t best_rd) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD * const xd = &x->e_mbd;
+ const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
+ PARTITION_CONTEXT sl[8], sa[8];
+ TOKENEXTRA *tp_orig = *tp;
+ int i, pl;
+ BLOCK_SIZE subsize;
+ int this_rate, sum_rate = 0, best_rate = INT_MAX;
+ int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
+ int64_t sum_rd = 0;
+ int do_split = bsize >= BLOCK_8X8;
+ int do_rect = 1;
+ // Override skipping rectangular partition operations for edge blocks
+ const int force_horz_split = (mi_row + ms >= cm->mi_rows);
+ const int force_vert_split = (mi_col + ms >= cm->mi_cols);
+
+ int partition_none_allowed = !force_horz_split && !force_vert_split;
+ int partition_horz_allowed = !force_vert_split && bsize >= BLOCK_8X8;
+ int partition_vert_allowed = !force_horz_split && bsize >= BLOCK_8X8;
+
+ int partition_split_done = 0;
+ (void) tp_orig;  // silences unused warnings when asserts are compiled out
+
+ if (bsize < BLOCK_8X8) {
+ // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
+ // there is nothing to be done.
+ if (xd->ab_index != 0) {
+ *rate = 0;
+ *dist = 0;
+ return;
+ }
+ }
+ assert(mi_height_log2(bsize) == mi_width_log2(bsize));
+
+ // Determine which partition types to search according to the speed
+ // features. The thresholds set here have to be square block sizes.
+ if (cpi->sf.auto_min_max_partition_size) {
+ partition_none_allowed &= (bsize <= cpi->sf.max_partition_size &&
+ bsize >= cpi->sf.min_partition_size);
+ partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size &&
+ bsize > cpi->sf.min_partition_size) ||
+ force_horz_split);
+ partition_vert_allowed &= ((bsize <= cpi->sf.max_partition_size &&
+ bsize > cpi->sf.min_partition_size) ||
+ force_vert_split);
+ do_split &= bsize > cpi->sf.min_partition_size;
+ }
+ if (cpi->sf.use_square_partition_only) {
+ partition_horz_allowed &= force_horz_split;
+ partition_vert_allowed &= force_vert_split;
+ }
+
+ save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ if (cpi->sf.disable_split_var_thresh && partition_none_allowed) {
+ unsigned int source_variancey;
+ vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
+ source_variancey = get_sby_perpixel_variance(cpi, x, bsize);
+ if (source_variancey < cpi->sf.disable_split_var_thresh) {
+ do_split = 0;
+ if (source_variancey < cpi->sf.disable_split_var_thresh / 2)
+ do_rect = 0;
+ }
+ }
+
+ // PARTITION_NONE
+ if (partition_none_allowed) {
+ pick_sb_modes(cpi, mi_row, mi_col, &this_rate, &this_dist, bsize,
+ get_block_context(x, bsize), best_rd);
+ if (this_rate != INT_MAX) {
+ if (bsize >= BLOCK_8X8) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ this_rate += x->partition_cost[pl][PARTITION_NONE];
+ }
+ sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
+ if (sum_rd < best_rd) {
+ int64_t stop_thresh = 2048;
+
+ best_rate = this_rate;
+ best_dist = this_dist;
+ best_rd = sum_rd;
+ if (bsize >= BLOCK_8X8)
+ *(get_sb_partitioning(x, bsize)) = bsize;
+
+ // Adjust threshold according to partition size.
+ stop_thresh >>= 8 - (b_width_log2_lookup[bsize] +
+ b_height_log2_lookup[bsize]);
+
+ // If obtained distortion is very small, choose current partition
+ // and stop splitting.
+ if (this_dist < stop_thresh) {
+ do_split = 0;
+ do_rect = 0;
+ }
+ }
+ }
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ }
+
+ // store estimated motion vector
+ if (cpi->sf.adaptive_motion_search)
+ store_pred_mv(x, get_block_context(x, bsize));
+
+ // PARTITION_SPLIT
+ sum_rd = 0;
+ // TODO(jingning): use the motion vectors given by the above search as
+ // the starting point of motion search in the following partition type check.
+ if (do_split) {
+ subsize = get_subsize(bsize, PARTITION_SPLIT);
+ for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
+ const int x_idx = (i & 1) * ms;
+ const int y_idx = (i >> 1) * ms;
+
+ if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
+ continue;
+
+ *get_sb_index(xd, subsize) = i;
+ if (cpi->sf.adaptive_motion_search)
+ load_pred_mv(x, get_block_context(x, bsize));
+ rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize,
+ &this_rate, &this_dist, i != 3, best_rd - sum_rd);
+
+ if (this_rate == INT_MAX) {
+ sum_rd = INT64_MAX;
+ } else {
+ sum_rate += this_rate;
+ sum_dist += this_dist;
+ sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
+ }
+ }
+ if (sum_rd < best_rd && i == 4) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ sum_rate += x->partition_cost[pl][PARTITION_SPLIT];
+ sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
+ if (sum_rd < best_rd) {
+ best_rate = sum_rate;
+ best_dist = sum_dist;
+ best_rd = sum_rd;
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ } else {
+ // skip rectangular partition test when larger block size
+ // gives better rd cost
+ if (cpi->sf.less_rectangular_check)
+ do_rect &= !partition_none_allowed;
+ }
+ }
+ partition_split_done = 1;
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ }
+
+ x->fast_ms = 0;
+ x->subblock_ref = 0;
+
+ if (partition_split_done &&
+ cpi->sf.using_small_partition_info) {
+ compute_fast_motion_search_level(cpi, bsize);
+ }
+
+ // PARTITION_HORZ
+ if (partition_horz_allowed && do_rect) {
+ subsize = get_subsize(bsize, PARTITION_HORZ);
+ *get_sb_index(xd, subsize) = 0;
+ if (cpi->sf.adaptive_motion_search)
+ load_pred_mv(x, get_block_context(x, bsize));
+ pick_sb_modes(cpi, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
+ get_block_context(x, subsize), best_rd);
+ sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
+
+ if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) {
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+
+ *get_sb_index(xd, subsize) = 1;
+ if (cpi->sf.adaptive_motion_search)
+ load_pred_mv(x, get_block_context(x, bsize));
+ pick_sb_modes(cpi, mi_row + ms, mi_col, &this_rate,
+ &this_dist, subsize, get_block_context(x, subsize),
+ best_rd - sum_rd);
+ if (this_rate == INT_MAX) {
+ sum_rd = INT64_MAX;
+ } else {
+ sum_rate += this_rate;
+ sum_dist += this_dist;
+ sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
+ }
+ }
+ if (sum_rd < best_rd) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ sum_rate += x->partition_cost[pl][PARTITION_HORZ];
+ sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
+ if (sum_rd < best_rd) {
+ best_rd = sum_rd;
+ best_rate = sum_rate;
+ best_dist = sum_dist;
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ }
+ }
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ }
+
+ // PARTITION_VERT
+ if (partition_vert_allowed && do_rect) {
+ subsize = get_subsize(bsize, PARTITION_VERT);
+
+ *get_sb_index(xd, subsize) = 0;
+ if (cpi->sf.adaptive_motion_search)
+ load_pred_mv(x, get_block_context(x, bsize));
+ pick_sb_modes(cpi, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
+ get_block_context(x, subsize), best_rd);
+ sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
+ if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) {
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+
+ *get_sb_index(xd, subsize) = 1;
+ if (cpi->sf.adaptive_motion_search)
+ load_pred_mv(x, get_block_context(x, bsize));
+ pick_sb_modes(cpi, mi_row, mi_col + ms, &this_rate,
+ &this_dist, subsize, get_block_context(x, subsize),
+ best_rd - sum_rd);
+ if (this_rate == INT_MAX) {
+ sum_rd = INT64_MAX;
+ } else {
+ sum_rate += this_rate;
+ sum_dist += this_dist;
+ sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
+ }
+ }
+ if (sum_rd < best_rd) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ sum_rate += x->partition_cost[pl][PARTITION_VERT];
+ sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
+ if (sum_rd < best_rd) {
+ best_rate = sum_rate;
+ best_dist = sum_dist;
+ best_rd = sum_rd;
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ }
+ }
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ }
+
+ *rate = best_rate;
+ *dist = best_dist;
+
+ if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon)
+ encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);
+ if (bsize == BLOCK_64X64) {
+ assert(tp_orig < *tp);
+ assert(best_rate < INT_MAX);
+ assert(best_dist < INT64_MAX);
+ } else {
+ assert(tp_orig == *tp);
+ }
+}
+
+// Examines the 64x64 block and chooses the best reference frame
+static void rd_pick_reference_frame(VP9_COMP *cpi, int mi_row, int mi_col) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD * const xd = &x->e_mbd;
+ int bsl = b_width_log2(BLOCK_64X64), bs = 1 << bsl;
+ int ms = bs / 2;
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
+ PARTITION_CONTEXT sl[8], sa[8];
+ int pl;
+ int r;
+ int64_t d;
+
+ save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
+
+ // Default is no mask (all reference frames allowed).
+ cpi->ref_frame_mask = 0;
+
+ // Do RD search for 64x64.
+ if ((mi_row + (ms >> 1) < cm->mi_rows) &&
+ (mi_col + (ms >> 1) < cm->mi_cols)) {
+ cpi->set_ref_frame_mask = 1;
+ pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_64X64,
+ get_block_context(x, BLOCK_64X64), INT64_MAX);
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, BLOCK_64X64);
+ r += x->partition_cost[pl][PARTITION_NONE];
+
+ *(get_sb_partitioning(x, BLOCK_64X64)) = BLOCK_64X64;
+ cpi->set_ref_frame_mask = 0;
+ }
+
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
+}
+
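+// Encodes one row of 64x64 superblocks, choosing a partition strategy per
+// SB from the speed features: a single fixed size, variance-based
+// partitioning, reuse of the previous frame's partitioning, or the full
+// rd_pick_partition() search.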
+static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
+ int *totalrate) {
+ VP9_COMMON * const cm = &cpi->common;
+ int mi_col;
+
+ // Initialize the left context for the new SB row
+ vpx_memset(&cm->left_context, 0, sizeof(cm->left_context));
+ vpx_memset(cm->left_seg_context, 0, sizeof(cm->left_seg_context));
+
+ // Code each SB in the row
+ for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
+ mi_col += MI_BLOCK_SIZE) {
+ int dummy_rate;
+ int64_t dummy_dist;
+
+ vpx_memset(cpi->mb.pred_mv, 0, sizeof(cpi->mb.pred_mv));
+
+ if (cpi->sf.reference_masking)
+ rd_pick_reference_frame(cpi, mi_row, mi_col);
+
+ if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
+ cpi->sf.use_one_partition_size_always) {
+ const int idx_str = cm->mode_info_stride * mi_row + mi_col;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
+ MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
+
+ cpi->mb.source_variance = UINT_MAX;
+ if (cpi->sf.use_one_partition_size_always) {
+ set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
+ set_partitioning(cpi, mi_8x8, mi_row, mi_col);
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1);
+ } else if (cpi->sf.partition_by_variance) {
+ choose_partitioning(cpi, cm->mi_grid_visible, mi_row, mi_col);
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1);
+ } else {
+ if ((cpi->common.current_video_frame
+ % cpi->sf.last_partitioning_redo_frequency) == 0
+ || cm->prev_mi == 0
+ || cpi->common.show_frame == 0
+ || cpi->common.frame_type == KEY_FRAME
+ || cpi->is_src_frame_alt_ref) {
+ // If required set upper and lower partition size limits
+ if (cpi->sf.auto_min_max_partition_size) {
+ set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
+ rd_auto_partition_range(cpi, mi_row, mi_col,
+ &cpi->sf.min_partition_size,
+ &cpi->sf.max_partition_size);
+ }
+ rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1, INT64_MAX);
+ } else {
+ copy_partitioning(cpi, mi_8x8, prev_mi_8x8);
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1);
+ }
+ }
+ } else {
+ // If required set upper and lower partition size limits
+ if (cpi->sf.auto_min_max_partition_size) {
+ set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
+ rd_auto_partition_range(cpi, mi_row, mi_col,
+ &cpi->sf.min_partition_size,
+ &cpi->sf.max_partition_size);
+ }
+ rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1, INT64_MAX);
+ }
+ }
+}
+
+static void init_encode_frame_mb_context(VP9_COMP *cpi) {
+ MACROBLOCK *const x = &cpi->mb;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+
+ x->act_zbin_adj = 0;
+ cpi->seg0_idx = 0;
+
+ xd->mode_info_stride = cm->mode_info_stride;
+
+ // reset intra mode contexts
+ if (cm->frame_type == KEY_FRAME)
+ vp9_init_mbmode_probs(cm);
+
+ // Copy data over into macro block data structures.
+ vp9_setup_src_planes(x, cpi->Source, 0, 0);
+
+ // TODO(jkoleszar): are these initializations required?
+ setup_pre_planes(xd, 0, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]],
+ 0, 0, NULL);
+ setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
+
+ setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+
+ xd->this_mi->mbmi.mode = DC_PRED;
+ xd->this_mi->mbmi.uv_mode = DC_PRED;
+
+  vp9_zero(cpi->y_mode_count);
+  vp9_zero(cpi->y_uv_mode_count);
+  vp9_zero(cm->counts.inter_mode);
+ vp9_zero(cpi->partition_count);
+ vp9_zero(cpi->intra_inter_count);
+ vp9_zero(cpi->comp_inter_count);
+ vp9_zero(cpi->single_ref_count);
+ vp9_zero(cpi->comp_ref_count);
+ vp9_zero(cm->counts.tx);
+ vp9_zero(cm->counts.mbskip);
+
+ // Note: this memset assumes above_context[0], [1] and [2]
+ // are allocated as part of the same buffer.
+ vpx_memset(cm->above_context[0], 0,
+ sizeof(ENTROPY_CONTEXT) * 2 * MAX_MB_PLANE * aligned_mi_cols);
+ vpx_memset(cm->above_seg_context, 0,
+ sizeof(PARTITION_CONTEXT) * aligned_mi_cols);
+}
+
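+// Lossless coding swaps the scaled DCT for the Walsh-Hadamard transform,
+// which is exactly invertible in integer arithmetic, and disables the loop
+// filter and coefficient optimization so reconstruction stays bit-exact.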
+static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
+ if (lossless) {
+ // printf("Switching to lossless\n");
+ cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
+ cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
+ cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
+ cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
+ cpi->mb.optimize = 0;
+ cpi->common.lf.filter_level = 0;
+ cpi->zbin_mode_boost_enabled = 0;
+ cpi->common.tx_mode = ONLY_4X4;
+ } else {
+ // printf("Not lossless\n");
+ cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
+ cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
+ cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
+ cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
+ }
+}
+
+static void switch_tx_mode(VP9_COMP *cpi) {
+ if (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
+ cpi->common.tx_mode >= ALLOW_32X32)
+ cpi->common.tx_mode = ALLOW_32X32;
+}
+
+static void encode_frame_internal(VP9_COMP *cpi) {
+ int mi_row;
+ MACROBLOCK * const x = &cpi->mb;
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCKD * const xd = &x->e_mbd;
+ int totalrate;
+
+// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
+// cpi->common.current_video_frame, cpi->common.show_frame,
+// cm->frame_type);
+
+// debug output
+#if DBG_PRNT_SEGMAP
+ {
+ FILE *statsfile;
+ statsfile = fopen("segmap2.stt", "a");
+ fprintf(statsfile, "\n");
+ fclose(statsfile);
+ }
+#endif
+
+ totalrate = 0;
+
+ // Reset frame count of inter 0,0 motion vector usage.
+ cpi->inter_zz_count = 0;
+
+ vp9_zero(cm->counts.switchable_interp);
+ vp9_zero(cpi->txfm_stepdown_count);
+
+ xd->mi_8x8 = cm->mi_grid_visible;
+ // required for vp9_frame_init_quantizer
+ xd->this_mi =
+ xd->mi_8x8[0] = cm->mi;
+ xd->mic_stream_ptr = cm->mi;
+
+ xd->last_mi = cm->prev_mi;
+
+
+ vp9_zero(cpi->NMVcount);
+ vp9_zero(cpi->coef_counts);
+ vp9_zero(cm->counts.eob_branch);
+
+ cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
+ && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
+ switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);
+
+ vp9_frame_init_quantizer(cpi);
+
+ vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q);
+ vp9_initialize_me_consts(cpi, cm->base_qindex);
+ switch_tx_mode(cpi);
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ // Initialize encode frame context.
+ init_encode_frame_mb_context(cpi);
+
+ // Build a frame level activity map
+ build_activity_map(cpi);
+ }
+
+ // Re-initialize encode frame context.
+ init_encode_frame_mb_context(cpi);
+
+ vp9_zero(cpi->rd_comp_pred_diff);
+ vp9_zero(cpi->rd_filter_diff);
+ vp9_zero(cpi->rd_tx_select_diff);
+ vp9_zero(cpi->rd_tx_select_threshes);
+
+ set_prev_mi(cm);
+
+ {
+ struct vpx_usec_timer emr_timer;
+ vpx_usec_timer_start(&emr_timer);
+
+ {
+ // Take tiles into account and give start/end MB
+ int tile_col, tile_row;
+ TOKENEXTRA *tp = cpi->tok;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int tile_rows = 1 << cm->log2_tile_rows;
+
+ for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(cm, tile_row);
+
+ for (tile_col = 0; tile_col < tile_cols; tile_col++) {
+ TOKENEXTRA *tp_old = tp;
+
+ // For each row of SBs in the frame
+ vp9_get_tile_col_offsets(cm, tile_col);
+ for (mi_row = cm->cur_tile_mi_row_start;
+ mi_row < cm->cur_tile_mi_row_end; mi_row += 8)
+ encode_sb_row(cpi, mi_row, &tp, &totalrate);
+
+ cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old);
+ assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols));
+ }
+ }
+ }
+
+ vpx_usec_timer_mark(&emr_timer);
+ cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
+ }
+
+ if (cpi->sf.skip_encode_sb) {
+ int j;
+ unsigned int intra_count = 0, inter_count = 0;
+ for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
+ intra_count += cpi->intra_inter_count[j][0];
+ inter_count += cpi->intra_inter_count[j][1];
+ }
+ cpi->sf.skip_encode_frame = ((intra_count << 2) < inter_count);
+ cpi->sf.skip_encode_frame &= (cm->frame_type != KEY_FRAME);
+ cpi->sf.skip_encode_frame &= cm->show_frame;
+ } else {
+ cpi->sf.skip_encode_frame = 0;
+ }
+
+ // 256 rate units to the bit,
+ // projected_frame_size in units of BYTES
+ cpi->projected_frame_size = totalrate >> 8;
+
+#if 0
+ // Keep record of the total distortion this time around for future use
+ cpi->last_frame_distortion = cpi->frame_distortion;
+#endif
+
+}
+
+static int check_dual_ref_flags(VP9_COMP *cpi) {
+ const int ref_flags = cpi->ref_frame_flags;
+
+ if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
+ return 0;
+ } else {
+ return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
+ + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
+ }
+}
+
+static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) {
+ int x, y;
+
+ for (y = 0; y < ymbs; y++) {
+ for (x = 0; x < xmbs; x++) {
+ if (!mi_8x8[y * mis + x]->mbmi.skip_coeff)
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs,
+ TX_SIZE tx_size) {
+ int x, y;
+
+ for (y = 0; y < ymbs; y++) {
+ for (x = 0; x < xmbs; x++)
+ mi_8x8[y * mis + x]->mbmi.tx_size = tx_size;
+ }
+}
+
+static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ int mis, TX_SIZE max_tx_size, int bw, int bh,
+ int mi_row, int mi_col, BLOCK_SIZE bsize) {
+ VP9_COMMON * const cm = &cpi->common;
+ MB_MODE_INFO * const mbmi = &mi_8x8[0]->mbmi;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ if (mbmi->tx_size > max_tx_size) {
+ const int ymbs = MIN(bh, cm->mi_rows - mi_row);
+ const int xmbs = MIN(bw, cm->mi_cols - mi_col);
+
+ assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) ||
+ get_skip_flag(mi_8x8, mis, ymbs, xmbs));
+ set_txfm_flag(mi_8x8, mis, ymbs, xmbs, max_tx_size);
+ }
+}
+
+static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ TX_SIZE max_tx_size, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ VP9_COMMON * const cm = &cpi->common;
+ const int mis = cm->mode_info_stride;
+ int bw, bh;
+ const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
+ bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];
+
+ if (bw == bs && bh == bs) {
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, bs, mi_row,
+ mi_col, bsize);
+ } else if (bw == bs && bh < bs) {
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, hbs, mi_row,
+ mi_col, bsize);
+ reset_skip_txfm_size_b(cpi, mi_8x8 + hbs * mis, mis, max_tx_size, bs, hbs,
+ mi_row + hbs, mi_col, bsize);
+ } else if (bw < bs && bh == bs) {
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, hbs, bs, mi_row,
+ mi_col, bsize);
+ reset_skip_txfm_size_b(cpi, mi_8x8 + hbs, mis, max_tx_size, hbs, bs, mi_row,
+ mi_col + hbs, bsize);
+
+ } else {
+ const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
+ int n;
+
+ assert(bw < bs && bh < bs);
+
+ for (n = 0; n < 4; n++) {
+ const int mi_dc = hbs * (n & 1);
+ const int mi_dr = hbs * (n >> 1);
+
+ reset_skip_txfm_size_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size,
+ mi_row + mi_dr, mi_col + mi_dc, subsize);
+ }
+ }
+}
+
+static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
+ VP9_COMMON * const cm = &cpi->common;
+ int mi_row, mi_col;
+ const int mis = cm->mode_info_stride;
+// MODE_INFO *mi, *mi_ptr = cm->mi;
+ MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible;
+
+ for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
+ mi_8x8 = mi_ptr;
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) {
+ reset_skip_txfm_size_sb(cpi, mi_8x8, txfm_max, mi_row, mi_col,
+ BLOCK_64X64);
+ }
+ }
+}
+
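+// Map the current frame onto one of four coarse categories used to index
+// the encoder's per-frame-type adaptation thresholds (nothing here is
+// written to the bitstream):
+//   0 - key frame
+//   1 - golden or alt-ref update
+//   2 - normal inter frame
+//   3 - alt-ref overlay (the source frame coincides with the alt-ref)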
+static int get_frame_type(VP9_COMP *cpi) {
+ int frame_type;
+ if (cpi->common.frame_type == KEY_FRAME)
+ frame_type = 0;
+ else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame)
+ frame_type = 3;
+ else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
+ frame_type = 1;
+ else
+ frame_type = 2;
+ return frame_type;
+}
+
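+// Choose the frame-level transform size mode. For the step-down counting
+// path below, a worked example: with txfm_stepdown_count = {92, 5, 3, 0}
+// the total is 100, fraction = 92 / 100 = 0.92 > 0.90, so the frame is
+// coded with ALLOW_32X32 instead of per-block TX_MODE_SELECT signalling.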
+static void select_tx_mode(VP9_COMP *cpi) {
+ if (cpi->oxcf.lossless) {
+ cpi->common.tx_mode = ONLY_4X4;
+ } else if (cpi->common.current_video_frame == 0) {
+ cpi->common.tx_mode = TX_MODE_SELECT;
+ } else {
+ if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
+ cpi->common.tx_mode = ALLOW_32X32;
+ } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
+ int frame_type = get_frame_type(cpi);
+ cpi->common.tx_mode =
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32]
+ > cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
+ ALLOW_32X32 : TX_MODE_SELECT;
+ } else {
+ unsigned int total = 0;
+ int i;
+ for (i = 0; i < TX_SIZES; ++i)
+ total += cpi->txfm_stepdown_count[i];
+ if (total) {
+ double fraction = (double)cpi->txfm_stepdown_count[0] / total;
+ cpi->common.tx_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
+ // printf("fraction = %f\n", fraction);
+ } // else keep unchanged
+ }
+ }
+}
+
+void vp9_encode_frame(VP9_COMP *cpi) {
+ VP9_COMMON * const cm = &cpi->common;
+
+  // In the longer term the encoder should be generalized to match the
+  // decoder, such that compound prediction is allowed when one of the 3
+  // buffers has a different sign bias; that buffer then becomes the fixed
+  // ref. However, this requires further work in the rd loop. For now the
+  // only supported encoder-side behavior is where the ALT ref buffer has
+  // the opposite sign bias to the other two.
+ if ((cm->ref_frame_sign_bias[ALTREF_FRAME]
+ == cm->ref_frame_sign_bias[GOLDEN_FRAME])
+ || (cm->ref_frame_sign_bias[ALTREF_FRAME]
+ == cm->ref_frame_sign_bias[LAST_FRAME])) {
+ cm->allow_comp_inter_inter = 0;
+ } else {
+ cm->allow_comp_inter_inter = 1;
+ cm->comp_fixed_ref = ALTREF_FRAME;
+ cm->comp_var_ref[0] = LAST_FRAME;
+ cm->comp_var_ref[1] = GOLDEN_FRAME;
+ }
+
+ if (cpi->sf.RD) {
+ int i, pred_type;
+ INTERPOLATIONFILTERTYPE filter_type;
+ /*
+ * This code does a single RD pass over the whole frame assuming
+ * either compound, single or hybrid prediction as per whatever has
+ * worked best for that type of frame in the past.
+     * It also predicts whether another coding mode would have worked
+     * better than this coding mode. If that is the case, it remembers
+     * that for subsequent frames.
+     * It does the same analysis for transform size selection.
+ */
+ int frame_type = get_frame_type(cpi);
+
+ /* prediction (compound, single or hybrid) mode selection */
+ if (frame_type == 3 || !cm->allow_comp_inter_inter)
+ pred_type = SINGLE_PREDICTION_ONLY;
+ else if (cpi->rd_prediction_type_threshes[frame_type][1]
+ > cpi->rd_prediction_type_threshes[frame_type][0]
+ && cpi->rd_prediction_type_threshes[frame_type][1]
+ > cpi->rd_prediction_type_threshes[frame_type][2]
+ && check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
+ pred_type = COMP_PREDICTION_ONLY;
+ else if (cpi->rd_prediction_type_threshes[frame_type][0]
+ > cpi->rd_prediction_type_threshes[frame_type][2])
+ pred_type = SINGLE_PREDICTION_ONLY;
+ else
+ pred_type = HYBRID_PREDICTION;
+
+ /* filter type selection */
+ // FIXME(rbultje) for some odd reason, we often select smooth_filter
+ // as default filter for ARF overlay frames. This is a REALLY BAD
+ // IDEA so we explicitly disable it here.
+ if (frame_type != 3 &&
+ cpi->rd_filter_threshes[frame_type][1] >
+ cpi->rd_filter_threshes[frame_type][0] &&
+ cpi->rd_filter_threshes[frame_type][1] >
+ cpi->rd_filter_threshes[frame_type][2] &&
+ cpi->rd_filter_threshes[frame_type][1] >
+ cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) {
+ filter_type = EIGHTTAP_SMOOTH;
+ } else if (cpi->rd_filter_threshes[frame_type][2] >
+ cpi->rd_filter_threshes[frame_type][0] &&
+ cpi->rd_filter_threshes[frame_type][2] >
+ cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) {
+ filter_type = EIGHTTAP_SHARP;
+ } else if (cpi->rd_filter_threshes[frame_type][0] >
+ cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) {
+ filter_type = EIGHTTAP;
+ } else {
+ filter_type = SWITCHABLE;
+ }
+
+ cpi->mb.e_mbd.lossless = 0;
+ if (cpi->oxcf.lossless) {
+ cpi->mb.e_mbd.lossless = 1;
+ }
+
+ /* transform size selection (4x4, 8x8, 16x16 or select-per-mb) */
+ select_tx_mode(cpi);
+ cpi->common.comp_pred_mode = pred_type;
+ cpi->common.mcomp_filter_type = filter_type;
+ encode_frame_internal(cpi);
+
+ for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
+ const int diff = (int) (cpi->rd_comp_pred_diff[i] / cpi->common.MBs);
+ cpi->rd_prediction_type_threshes[frame_type][i] += diff;
+ cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
+ }
+
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++) {
+ const int64_t diff = cpi->rd_filter_diff[i] / cpi->common.MBs;
+ cpi->rd_filter_threshes[frame_type][i] =
+ (cpi->rd_filter_threshes[frame_type][i] + diff) / 2;
+ }
+
+ for (i = 0; i < TX_MODES; ++i) {
+ int64_t pd = cpi->rd_tx_select_diff[i];
+ int diff;
+ if (i == TX_MODE_SELECT)
+ pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv,
+ 2048 * (TX_SIZES - 1), 0);
+ diff = (int) (pd / cpi->common.MBs);
+ cpi->rd_tx_select_threshes[frame_type][i] += diff;
+ cpi->rd_tx_select_threshes[frame_type][i] /= 2;
+ }
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ int single_count_zero = 0;
+ int comp_count_zero = 0;
+
+ for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
+ single_count_zero += cpi->comp_inter_count[i][0];
+ comp_count_zero += cpi->comp_inter_count[i][1];
+ }
+
+ if (comp_count_zero == 0) {
+ cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
+ vp9_zero(cpi->comp_inter_count);
+ } else if (single_count_zero == 0) {
+ cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
+ vp9_zero(cpi->comp_inter_count);
+ }
+ }
+
+ if (cpi->common.tx_mode == TX_MODE_SELECT) {
+ int count4x4 = 0;
+ int count8x8_lp = 0, count8x8_8x8p = 0;
+ int count16x16_16x16p = 0, count16x16_lp = 0;
+ int count32x32 = 0;
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
+ count4x4 += cm->counts.tx.p32x32[i][TX_4X4];
+ count4x4 += cm->counts.tx.p16x16[i][TX_4X4];
+ count4x4 += cm->counts.tx.p8x8[i][TX_4X4];
+
+ count8x8_lp += cm->counts.tx.p32x32[i][TX_8X8];
+ count8x8_lp += cm->counts.tx.p16x16[i][TX_8X8];
+ count8x8_8x8p += cm->counts.tx.p8x8[i][TX_8X8];
+
+ count16x16_16x16p += cm->counts.tx.p16x16[i][TX_16X16];
+ count16x16_lp += cm->counts.tx.p32x32[i][TX_16X16];
+ count32x32 += cm->counts.tx.p32x32[i][TX_32X32];
+ }
+
+ if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0
+ && count32x32 == 0) {
+ cpi->common.tx_mode = ALLOW_8X8;
+ reset_skip_txfm_size(cpi, TX_8X8);
+ } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0
+ && count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
+ cpi->common.tx_mode = ONLY_4X4;
+ reset_skip_txfm_size(cpi, TX_4X4);
+ } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
+ cpi->common.tx_mode = ALLOW_32X32;
+ } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
+ cpi->common.tx_mode = ALLOW_16X16;
+ reset_skip_txfm_size(cpi, TX_16X16);
+ }
+ }
+ } else {
+ encode_frame_internal(cpi);
+ }
+
+}
+
+static void sum_intra_stats(VP9_COMP *cpi, const MODE_INFO *mi) {
+ const MB_PREDICTION_MODE y_mode = mi->mbmi.mode;
+ const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
+ const BLOCK_SIZE bsize = mi->mbmi.sb_type;
+
+ ++cpi->y_uv_mode_count[y_mode][uv_mode];
+
+ if (bsize < BLOCK_8X8) {
+ int idx, idy;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high)
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide)
+ ++cpi->y_mode_count[0][mi->bmi[idy * 2 + idx].as_mode];
+ } else {
+ ++cpi->y_mode_count[size_group_lookup[bsize]][y_mode];
+ }
+}
+
+// Experimental stub function to create a per MB zbin adjustment based on
+// some previously calculated measure of MB activity.
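+// In the default (non-USE_ACT_INDEX) path the adjustment is a ratio of the
+// two blended terms: when act == activity_avg, a == b == 5 * activity_avg
+// and the expression evaluates to 0 (no adjustment); blocks busier than
+// average get a positive zbin offset (coarser quantization), flatter
+// blocks a negative one.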
+static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
+#if USE_ACT_INDEX
+ x->act_zbin_adj = *(x->mb_activity_ptr);
+#else
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
+
+ // Apply the masking to the RD multiplier.
+ a = act + 4 * cpi->activity_avg;
+ b = 4 * act + cpi->activity_avg;
+
+ if (act > cpi->activity_avg)
+ x->act_zbin_adj = (int) (((int64_t) b + (a >> 1)) / a) - 1;
+ else
+ x->act_zbin_adj = 1 - (int) (((int64_t) a + (b >> 1)) / b);
+#endif
+}
+static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
+ int mi_row, int mi_col, BLOCK_SIZE bsize) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD * const xd = &x->e_mbd;
+ MODE_INFO **mi_8x8 = xd->mi_8x8;
+ MODE_INFO *mi = mi_8x8[0];
+ MB_MODE_INFO *mbmi = &mi->mbmi;
+ unsigned int segment_id = mbmi->segment_id;
+ const int mis = cm->mode_info_stride;
+ const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ const int mi_height = num_8x8_blocks_high_lookup[bsize];
+ x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
+ x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
+ xd->q_index < QIDX_SKIP_THRESH);
+ if (x->skip_encode)
+ return;
+
+ if (cm->frame_type == KEY_FRAME) {
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ adjust_act_zbin(cpi, x);
+ vp9_update_zbin_extra(cpi, x);
+ }
+ } else {
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ // Adjust the zbin based on this MB rate.
+ adjust_act_zbin(cpi, x);
+ }
+
+ // Experimental code. Special case for gf and arf zeromv modes.
+ // Increase zbin size to suppress noise
+ cpi->zbin_mode_boost = 0;
+ if (cpi->zbin_mode_boost_enabled) {
+ if (is_inter_block(mbmi)) {
+ if (mbmi->mode == ZEROMV) {
+ if (mbmi->ref_frame[0] != LAST_FRAME)
+ cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ else
+ cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ } else if (mbmi->sb_type < BLOCK_8X8) {
+ cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
+ } else {
+ cpi->zbin_mode_boost = MV_ZBIN_BOOST;
+ }
+ } else {
+ cpi->zbin_mode_boost = INTRA_ZBIN_BOOST;
+ }
+ }
+
+ vp9_update_zbin_extra(cpi, x);
+ }
+
+ if (!is_inter_block(mbmi)) {
+ vp9_encode_intra_block_y(x, MAX(bsize, BLOCK_8X8));
+ vp9_encode_intra_block_uv(x, MAX(bsize, BLOCK_8X8));
+ if (output_enabled)
+ sum_intra_stats(cpi, mi);
+ } else {
+ int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->ref_frame[0])];
+ YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx];
+ YV12_BUFFER_CONFIG *second_ref_fb = NULL;
+ if (mbmi->ref_frame[1] > 0) {
+ idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->ref_frame[1])];
+ second_ref_fb = &cm->yv12_fb[idx];
+ }
+
+ assert(cm->frame_type != KEY_FRAME);
+
+ setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col,
+ &xd->scale_factor[0]);
+ setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
+ &xd->scale_factor[1]);
+
+
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
+ }
+
+ if (!is_inter_block(mbmi)) {
+ vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
+ } else if (!x->skip) {
+ vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
+ vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
+ } else {
+ int mb_skip_context = xd->left_available ? mi_8x8[-1]->mbmi.skip_coeff : 0;
+ mb_skip_context += mi_8x8[-mis] ? mi_8x8[-mis]->mbmi.skip_coeff : 0;
+
+ mbmi->skip_coeff = 1;
+ if (output_enabled)
+ cm->counts.mbskip[mb_skip_context][1]++;
+ reset_skip_context(xd, MAX(bsize, BLOCK_8X8));
+ }
+
+ if (output_enabled) {
+ if (cm->tx_mode == TX_MODE_SELECT &&
+ mbmi->sb_type >= BLOCK_8X8 &&
+ !(is_inter_block(mbmi) &&
+ (mbmi->skip_coeff ||
+ vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)))) {
+ const uint8_t context = vp9_get_pred_context_tx_size(xd);
+ update_tx_counts(bsize, context, mbmi->tx_size, &cm->counts.tx);
+ } else {
+ int x, y;
+ TX_SIZE sz = tx_mode_to_biggest_tx_size[cm->tx_mode];
+ assert(sizeof(tx_mode_to_biggest_tx_size) /
+ sizeof(tx_mode_to_biggest_tx_size[0]) == TX_MODES);
+ // The new intra coding scheme requires no change of transform size
+ if (is_inter_block(&mi->mbmi)) {
+ if (sz == TX_32X32 && bsize < BLOCK_32X32)
+ sz = TX_16X16;
+ if (sz == TX_16X16 && bsize < BLOCK_16X16)
+ sz = TX_8X8;
+ if (sz == TX_8X8 && bsize < BLOCK_8X8)
+ sz = TX_4X4;
+ } else if (bsize >= BLOCK_8X8) {
+ sz = mbmi->tx_size;
+ } else {
+ sz = TX_4X4;
+ }
+
+ for (y = 0; y < mi_height; y++)
+ for (x = 0; x < mi_width; x++)
+ if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
+ mi_8x8[mis * y + x]->mbmi.tx_size = sz;
+ }
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_encodeframe.h b/libvpx/vp9/encoder/vp9_encodeframe.h
new file mode 100644
index 0000000..3991969
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodeframe.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_ENCODEFRAME_H_
+#define VP9_ENCODER_VP9_ENCODEFRAME_H_
+
+struct macroblock;
+struct yv12_buffer_config;
+
+void vp9_setup_src_planes(struct macroblock *x,
+ const struct yv12_buffer_config *src,
+ int mb_row, int mb_col);
+
+#endif // VP9_ENCODER_VP9_ENCODEFRAME_H_
diff --git a/libvpx/vp9/encoder/vp9_encodeintra.c b/libvpx/vp9/encoder/vp9_encodeintra.c
new file mode 100644
index 0000000..c5e5dff
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodeintra.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+#include "vp9_rtcd.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/encoder/vp9_encodeintra.h"
+
+int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred) {
+ MB_MODE_INFO * mbmi = &x->e_mbd.mi_8x8[0]->mbmi;
+ x->skip_encode = 0;
+ mbmi->mode = DC_PRED;
+ mbmi->ref_frame[0] = INTRA_FRAME;
+ mbmi->tx_size = use_16x16_pred ? (mbmi->sb_type >= BLOCK_16X16 ? TX_16X16
+ : TX_8X8)
+ : TX_4X4;
+ vp9_encode_intra_block_y(x, mbmi->sb_type);
+ return vp9_get_mb_ss(x->plane[0].src_diff);
+}
diff --git a/libvpx/vp9/encoder/vp9_encodeintra.h b/libvpx/vp9/encoder/vp9_encodeintra.h
new file mode 100644
index 0000000..e217924
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodeintra.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_ENCODEINTRA_H_
+#define VP9_ENCODER_VP9_ENCODEINTRA_H_
+
+#include "vp9/encoder/vp9_onyx_int.h"
+
+int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred);
+void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg);
+
+#endif // VP9_ENCODER_VP9_ENCODEINTRA_H_
diff --git a/libvpx/vp9/encoder/vp9_encodemb.c b/libvpx/vp9/encoder/vp9_encodemb.c
new file mode 100644
index 0000000..8dd80a5
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodemb.c
@@ -0,0 +1,731 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9_rtcd.h"
+
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+
+void vp9_subtract_block_c(int rows, int cols,
+ int16_t *diff_ptr, ptrdiff_t diff_stride,
+ const uint8_t *src_ptr, ptrdiff_t src_stride,
+ const uint8_t *pred_ptr, ptrdiff_t pred_stride) {
+ int r, c;
+
+ for (r = 0; r < rows; r++) {
+ for (c = 0; c < cols; c++)
+ diff_ptr[c] = src_ptr[c] - pred_ptr[c];
+
+ diff_ptr += diff_stride;
+ pred_ptr += pred_stride;
+ src_ptr += src_stride;
+ }
+}
+
+static void inverse_transform_b_4x4_add(MACROBLOCKD *xd, int eob,
+ int16_t *dqcoeff, uint8_t *dest,
+ int stride) {
+ if (eob <= 1)
+ xd->inv_txm4x4_1_add(dqcoeff, dest, stride);
+ else
+ xd->inv_txm4x4_add(dqcoeff, dest, stride);
+}
+
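+// The eob thresholds pick progressively cheaper inverse transforms: eob <= 1
+// means only the DC coefficient survived quantization, while eob <= 10 is
+// expected to confine the non-zero coefficients to the low-frequency region
+// that the idct10 variants handle.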
+static void inverse_transform_b_8x8_add(int eob,
+ int16_t *dqcoeff, uint8_t *dest,
+ int stride) {
+ if (eob <= 1)
+ vp9_short_idct8x8_1_add(dqcoeff, dest, stride);
+ else if (eob <= 10)
+ vp9_short_idct10_8x8_add(dqcoeff, dest, stride);
+ else
+ vp9_short_idct8x8_add(dqcoeff, dest, stride);
+}
+
+static void inverse_transform_b_16x16_add(int eob,
+ int16_t *dqcoeff, uint8_t *dest,
+ int stride) {
+ if (eob <= 1)
+ vp9_short_idct16x16_1_add(dqcoeff, dest, stride);
+ else if (eob <= 10)
+ vp9_short_idct10_16x16_add(dqcoeff, dest, stride);
+ else
+ vp9_short_idct16x16_add(dqcoeff, dest, stride);
+}
+
+static void subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+ struct macroblock_plane *const p = &x->plane[plane];
+ const MACROBLOCKD *const xd = &x->e_mbd;
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ const int bw = plane_block_width(bsize, pd);
+ const int bh = plane_block_height(bsize, pd);
+
+ vp9_subtract_block(bh, bw, p->src_diff, bw,
+ p->src.buf, p->src.stride,
+ pd->dst.buf, pd->dst.stride);
+}
+
+void vp9_subtract_sby(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ subtract_plane(x, bsize, 0);
+}
+
+void vp9_subtract_sbuv(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ int i;
+
+ for (i = 1; i < MAX_MB_PLANE; i++)
+ subtract_plane(x, bsize, i);
+}
+
+void vp9_subtract_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ vp9_subtract_sby(x, bsize);
+ vp9_subtract_sbuv(x, bsize);
+}
+
+
+#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
+typedef struct vp9_token_state vp9_token_state;
+
+struct vp9_token_state {
+ int rate;
+ int error;
+ int next;
+ signed char token;
+ short qc;
+};
+
+// TODO: experiment to find optimal multiplier values
+#define Y1_RD_MULT 4
+#define UV_RD_MULT 2
+
+static const int plane_rd_mult[4] = {
+ Y1_RD_MULT,
+ UV_RD_MULT,
+};
+
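+// RDCOST folds rate and distortion into one comparable cost. RDTRUNC only
+// matters on exact ties: it derives a deterministic tie-breaker from the
+// low bits of the rounded rate term so equal-cost trellis branches are
+// resolved consistently.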
+#define UPDATE_RD_COST()\
+{\
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);\
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);\
+ if (rd_cost0 == rd_cost1) {\
+ rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);\
+ rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);\
+ }\
+}
+
+// This function is a placeholder for now but may ultimately need
+// to scan previous tokens to work out the correct context.
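+// It temporarily overwrites the cached energy class at scan[idx] with that
+// of the candidate token, reads the context for the following position,
+// then restores the cache, so the probe has no side effects.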
+static int trellis_get_coeff_context(const int16_t *scan,
+ const int16_t *nb,
+ int idx, int token,
+ uint8_t *token_cache) {
+ int bak = token_cache[scan[idx]], pt;
+ token_cache[scan[idx]] = vp9_pt_energy_class[token];
+ pt = get_coef_context(nb, token_cache, idx + 1);
+ token_cache[scan[idx]] = bak;
+ return pt;
+}
+
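+// Trellis (Viterbi) coefficient optimization: each non-zero coefficient
+// contributes two candidate states -- the quantizer's output as-is, and,
+// where rounding one step towards zero changes the token, the reduced
+// value -- and the cheapest rate-distortion path from the EOB back to the
+// first coefficient is kept, possibly zeroing coefficients and moving the
+// EOB earlier.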
+static void optimize_b(MACROBLOCK *mb,
+ int plane, int block, BLOCK_SIZE plane_bsize,
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
+ TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ struct macroblockd_plane *pd = &xd->plane[plane];
+ const int ref = is_inter_block(&xd->this_mi->mbmi);
+ vp9_token_state tokens[1025][2];
+ unsigned best_index[1025][2];
+ const int16_t *coeff_ptr = BLOCK_OFFSET(mb->plane[plane].coeff, block);
+ int16_t *qcoeff_ptr;
+ int16_t *dqcoeff_ptr;
+ int eob = pd->eobs[block], final_eob, sz = 0;
+ const int i0 = 0;
+ int rc, x, next, i;
+ int64_t rdmult, rddiv, rd_cost0, rd_cost1;
+ int rate0, rate1, error0, error1, t0, t1;
+ int best, band, pt;
+ PLANE_TYPE type = pd->plane_type;
+ int err_mult = plane_rd_mult[type];
+ int default_eob;
+ const int16_t *scan, *nb;
+ const int mul = 1 + (tx_size == TX_32X32);
+ uint8_t token_cache[1024];
+ const int ib = txfrm_block_to_raster_block(plane_bsize, tx_size, block);
+ const int16_t *dequant_ptr = pd->dequant;
+ const uint8_t * band_translate;
+
+ assert((!type && !plane) || (type && plane));
+ dqcoeff_ptr = BLOCK_OFFSET(pd->dqcoeff, block);
+ qcoeff_ptr = BLOCK_OFFSET(pd->qcoeff, block);
+ switch (tx_size) {
+ default:
+ case TX_4X4:
+ default_eob = 16;
+ scan = get_scan_4x4(get_tx_type_4x4(type, xd, ib));
+ band_translate = vp9_coefband_trans_4x4;
+ break;
+ case TX_8X8:
+ scan = get_scan_8x8(get_tx_type_8x8(type, xd));
+ default_eob = 64;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ case TX_16X16:
+ scan = get_scan_16x16(get_tx_type_16x16(type, xd));
+ default_eob = 256;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ case TX_32X32:
+ scan = vp9_default_scan_32x32;
+ default_eob = 1024;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ assert(eob <= default_eob);
+
+ /* Now set up a Viterbi trellis to evaluate alternative roundings. */
+ rdmult = mb->rdmult * err_mult;
+ if (mb->e_mbd.mi_8x8[0]->mbmi.ref_frame[0] == INTRA_FRAME)
+ rdmult = (rdmult * 9) >> 4;
+ rddiv = mb->rddiv;
+ /* Initialize the sentinel node of the trellis. */
+ tokens[eob][0].rate = 0;
+ tokens[eob][0].error = 0;
+ tokens[eob][0].next = default_eob;
+ tokens[eob][0].token = DCT_EOB_TOKEN;
+ tokens[eob][0].qc = 0;
+ *(tokens[eob] + 1) = *(tokens[eob] + 0);
+ next = eob;
+ for (i = 0; i < eob; i++)
+ token_cache[scan[i]] = vp9_pt_energy_class[vp9_dct_value_tokens_ptr[
+ qcoeff_ptr[scan[i]]].token];
+ nb = vp9_get_coef_neighbors_handle(scan);
+
+ for (i = eob; i-- > i0;) {
+ int base_bits, d2, dx;
+
+ rc = scan[i];
+ x = qcoeff_ptr[rc];
+ /* Only add a trellis state for non-zero coefficients. */
+ if (x) {
+ int shortcut = 0;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ /* Evaluate the first possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ t0 = (vp9_dct_value_tokens_ptr + x)->token;
+ /* Consider both possible successor states. */
+ if (next < default_eob) {
+ band = get_coef_band(band_translate, i + 1);
+ pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
+ rate0 +=
+ mb->token_costs[tx_size][type][ref][band][0][pt]
+ [tokens[next][0].token];
+ rate1 +=
+ mb->token_costs[tx_size][type][ref][band][0][pt]
+ [tokens[next][1].token];
+ }
+ UPDATE_RD_COST();
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp9_dct_value_cost_ptr + x);
+ dx = mul * (dqcoeff_ptr[rc] - coeff_ptr[rc]);
+ d2 = dx * dx;
+ tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][0].error = d2 + (best ? error1 : error0);
+ tokens[i][0].next = next;
+ tokens[i][0].token = t0;
+ tokens[i][0].qc = x;
+ best_index[i][0] = best;
+
+ /* Evaluate the second possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+
+ if ((abs(x)*dequant_ptr[rc != 0] > abs(coeff_ptr[rc]) * mul) &&
+ (abs(x)*dequant_ptr[rc != 0] < abs(coeff_ptr[rc]) * mul +
+ dequant_ptr[rc != 0]))
+ shortcut = 1;
+ else
+ shortcut = 0;
+
+ if (shortcut) {
+ sz = -(x < 0);
+ x -= 2 * sz + 1;
+ }
+
+ /* Consider both possible successor states. */
+ if (!x) {
+ /* If we reduced this coefficient to zero, check to see if
+ * we need to move the EOB back here.
+ */
+ t0 = tokens[next][0].token == DCT_EOB_TOKEN ?
+ DCT_EOB_TOKEN : ZERO_TOKEN;
+ t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
+ DCT_EOB_TOKEN : ZERO_TOKEN;
+ } else {
+ t0 = t1 = (vp9_dct_value_tokens_ptr + x)->token;
+ }
+ if (next < default_eob) {
+ band = get_coef_band(band_translate, i + 1);
+ if (t0 != DCT_EOB_TOKEN) {
+ pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
+ rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt]
+ [tokens[next][0].token];
+ }
+ if (t1 != DCT_EOB_TOKEN) {
+ pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache);
+ rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt]
+ [tokens[next][1].token];
+ }
+ }
+
+ UPDATE_RD_COST();
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp9_dct_value_cost_ptr + x);
+
+ if (shortcut) {
+ dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
+ d2 = dx * dx;
+ }
+ tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][1].error = d2 + (best ? error1 : error0);
+ tokens[i][1].next = next;
+ tokens[i][1].token = best ? t1 : t0;
+ tokens[i][1].qc = x;
+ best_index[i][1] = best;
+ /* Finally, make this the new head of the trellis. */
+ next = i;
+ }
+ /* There's no choice to make for a zero coefficient, so we don't
+ * add a new trellis node, but we do need to update the costs.
+ */
+ else {
+ band = get_coef_band(band_translate, i + 1);
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ /* Update the cost of each path if we're past the EOB token. */
+ if (t0 != DCT_EOB_TOKEN) {
+ tokens[next][0].rate +=
+ mb->token_costs[tx_size][type][ref][band][1][0][t0];
+ tokens[next][0].token = ZERO_TOKEN;
+ }
+ if (t1 != DCT_EOB_TOKEN) {
+ tokens[next][1].rate +=
+ mb->token_costs[tx_size][type][ref][band][1][0][t1];
+ tokens[next][1].token = ZERO_TOKEN;
+ }
+ best_index[i][0] = best_index[i][1] = 0;
+ /* Don't update next, because we didn't add a new node. */
+ }
+ }
+
+ /* Now pick the best path through the whole trellis. */
+ band = get_coef_band(band_translate, i + 1);
+ pt = combine_entropy_contexts(*a, *l);
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ rate0 += mb->token_costs[tx_size][type][ref][band][0][pt][t0];
+ rate1 += mb->token_costs[tx_size][type][ref][band][0][pt][t1];
+ UPDATE_RD_COST();
+ best = rd_cost1 < rd_cost0;
+ final_eob = i0 - 1;
+ vpx_memset(qcoeff_ptr, 0, sizeof(*qcoeff_ptr) * (16 << (tx_size * 2)));
+ vpx_memset(dqcoeff_ptr, 0, sizeof(*dqcoeff_ptr) * (16 << (tx_size * 2)));
+ for (i = next; i < eob; i = next) {
+ x = tokens[i][best].qc;
+ if (x) {
+ final_eob = i;
+ }
+ rc = scan[i];
+ qcoeff_ptr[rc] = x;
+ dqcoeff_ptr[rc] = (x * dequant_ptr[rc != 0]) / mul;
+
+ next = tokens[i][best].next;
+ best = best_index[i][best];
+ }
+ final_eob++;
+
+ xd->plane[plane].eobs[block] = final_eob;
+ *a = *l = (final_eob > 0);
+}
+
+void vp9_optimize_b(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, MACROBLOCK *mb, struct optimize_ctx *ctx) {
+ int x, y;
+ txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
+ optimize_b(mb, plane, block, plane_bsize,
+ &ctx->ta[plane][x], &ctx->tl[plane][y], tx_size);
+}
+
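+// Seed the per-plane entropy contexts for the optimizer. For transforms
+// larger than 4x4, the 2/4/8 neighbouring 4x4 contexts are collapsed into
+// a single flag by reading them back as one 16/32/64-bit word and testing
+// the whole word against zero.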
+static void optimize_init_b(int plane, BLOCK_SIZE bsize,
+ struct encode_b_args *args) {
+ const MACROBLOCKD *xd = &args->x->e_mbd;
+ const struct macroblockd_plane* const pd = &xd->plane[plane];
+ const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+ const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+ const MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) : mbmi->tx_size;
+ int i;
+
+ switch (tx_size) {
+ case TX_4X4:
+ vpx_memcpy(args->ctx->ta[plane], pd->above_context,
+ sizeof(ENTROPY_CONTEXT) * num_4x4_w);
+ vpx_memcpy(args->ctx->tl[plane], pd->left_context,
+ sizeof(ENTROPY_CONTEXT) * num_4x4_h);
+ break;
+ case TX_8X8:
+ for (i = 0; i < num_4x4_w; i += 2)
+ args->ctx->ta[plane][i] = !!*(uint16_t *)&pd->above_context[i];
+ for (i = 0; i < num_4x4_h; i += 2)
+ args->ctx->tl[plane][i] = !!*(uint16_t *)&pd->left_context[i];
+ break;
+ case TX_16X16:
+ for (i = 0; i < num_4x4_w; i += 4)
+ args->ctx->ta[plane][i] = !!*(uint32_t *)&pd->above_context[i];
+ for (i = 0; i < num_4x4_h; i += 4)
+ args->ctx->tl[plane][i] = !!*(uint32_t *)&pd->left_context[i];
+ break;
+ case TX_32X32:
+ for (i = 0; i < num_4x4_w; i += 8)
+ args->ctx->ta[plane][i] = !!*(uint64_t *)&pd->above_context[i];
+ for (i = 0; i < num_4x4_h; i += 8)
+ args->ctx->tl[plane][i] = !!*(uint64_t *)&pd->left_context[i];
+ break;
+ default:
+ assert(0);
+ }
+}
+
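+// Forward transform and quantization for one block. The incoming index
+// counts 4x4 units, so it is rescaled to transform-sized units first
+// (e.g. block >>= 6 for TX_32X32, since a 32x32 transform spans 64 4x4
+// blocks) before the pixel offsets xoff/yoff are derived from it.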
+void vp9_xform_quant(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg) {
+ struct encode_b_args* const args = arg;
+ MACROBLOCK* const x = args->x;
+ MACROBLOCKD* const xd = &x->e_mbd;
+ struct macroblock_plane *const p = &x->plane[plane];
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ int16_t *coeff = BLOCK_OFFSET(p->coeff, block);
+ int16_t *qcoeff = BLOCK_OFFSET(pd->qcoeff, block);
+ int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+ const int16_t *scan, *iscan;
+ uint16_t *eob = &pd->eobs[block];
+ const int bwl = b_width_log2(plane_bsize), bw = 1 << bwl;
+ const int twl = bwl - tx_size, twmask = (1 << twl) - 1;
+ int xoff, yoff;
+ int16_t *src_diff;
+
+ switch (tx_size) {
+ case TX_32X32:
+ scan = vp9_default_scan_32x32;
+ iscan = vp9_default_iscan_32x32;
+ block >>= 6;
+ xoff = 32 * (block & twmask);
+ yoff = 32 * (block >> twl);
+ src_diff = p->src_diff + 4 * bw * yoff + xoff;
+ if (x->use_lp32x32fdct)
+ vp9_short_fdct32x32_rd(src_diff, coeff, bw * 8);
+ else
+ vp9_short_fdct32x32(src_diff, coeff, bw * 8);
+ vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, p->zbin_extra, eob, scan, iscan);
+ break;
+ case TX_16X16:
+ scan = vp9_default_scan_16x16;
+ iscan = vp9_default_iscan_16x16;
+ block >>= 4;
+ xoff = 16 * (block & twmask);
+ yoff = 16 * (block >> twl);
+ src_diff = p->src_diff + 4 * bw * yoff + xoff;
+ x->fwd_txm16x16(src_diff, coeff, bw * 8);
+ vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, p->zbin_extra, eob, scan, iscan);
+ break;
+ case TX_8X8:
+ scan = vp9_default_scan_8x8;
+ iscan = vp9_default_iscan_8x8;
+ block >>= 2;
+ xoff = 8 * (block & twmask);
+ yoff = 8 * (block >> twl);
+ src_diff = p->src_diff + 4 * bw * yoff + xoff;
+ x->fwd_txm8x8(src_diff, coeff, bw * 8);
+ vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, p->zbin_extra, eob, scan, iscan);
+ break;
+ case TX_4X4:
+ scan = vp9_default_scan_4x4;
+ iscan = vp9_default_iscan_4x4;
+ xoff = 4 * (block & twmask);
+ yoff = 4 * (block >> twl);
+ src_diff = p->src_diff + 4 * bw * yoff + xoff;
+ x->fwd_txm4x4(src_diff, coeff, bw * 8);
+ vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, p->zbin_extra, eob, scan, iscan);
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg) {
+ struct encode_b_args *const args = arg;
+ MACROBLOCK *const x = args->x;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size,
+ block);
+
+ int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+ uint8_t *const dst = raster_block_offset_uint8(plane_bsize, raster_block,
+ pd->dst.buf, pd->dst.stride);
+ vp9_xform_quant(plane, block, plane_bsize, tx_size, arg);
+
+ if (x->optimize)
+ vp9_optimize_b(plane, block, plane_bsize, tx_size, x, args->ctx);
+
+ if (x->skip_encode || pd->eobs[block] == 0)
+ return;
+
+ switch (tx_size) {
+ case TX_32X32:
+ vp9_short_idct32x32_add(dqcoeff, dst, pd->dst.stride);
+ break;
+ case TX_16X16:
+ inverse_transform_b_16x16_add(pd->eobs[block], dqcoeff, dst,
+ pd->dst.stride);
+ break;
+ case TX_8X8:
+ inverse_transform_b_8x8_add(pd->eobs[block], dqcoeff, dst,
+ pd->dst.stride);
+ break;
+ case TX_4X4:
+ // this is like vp9_short_idct4x4 but has a special case around eob<=1
+ // which is significant (not just an optimization) for the lossless
+ // case.
+ inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff,
+ dst, pd->dst.stride);
+ break;
+ default:
+ assert(!"Invalid transform size");
+ }
+}
+
+void vp9_encode_sby(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {x, &ctx};
+
+ vp9_subtract_sby(x, bsize);
+ if (x->optimize)
+ optimize_init_b(0, bsize, &arg);
+
+ foreach_transformed_block_in_plane(xd, bsize, 0, encode_block, &arg);
+}
+
+void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {x, &ctx};
+
+ vp9_subtract_sb(x, bsize);
+
+ if (x->optimize) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; ++i)
+ optimize_init_b(i, bsize, &arg);
+ }
+
+ foreach_transformed_block(xd, bsize, encode_block, &arg);
+}
+
+void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg) {
+ struct encode_b_args* const args = arg;
+ MACROBLOCK *const x = args->x;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ struct macroblock_plane *const p = &x->plane[plane];
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ int16_t *coeff = BLOCK_OFFSET(p->coeff, block);
+ int16_t *qcoeff = BLOCK_OFFSET(pd->qcoeff, block);
+ int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+ const int16_t *scan, *iscan;
+ TX_TYPE tx_type;
+ MB_PREDICTION_MODE mode;
+ const int bwl = b_width_log2(plane_bsize), bw = 1 << bwl;
+ const int twl = bwl - tx_size, twmask = (1 << twl) - 1;
+ int xoff, yoff;
+ uint8_t *src, *dst;
+ int16_t *src_diff;
+ uint16_t *eob = &pd->eobs[block];
+
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
+ extend_for_intra(xd, plane_bsize, plane, block, tx_size);
+
+ // if (x->optimize)
+ // vp9_optimize_b(plane, block, plane_bsize, tx_size, x, args->ctx);
+
+ switch (tx_size) {
+ case TX_32X32:
+ scan = vp9_default_scan_32x32;
+ iscan = vp9_default_iscan_32x32;
+ mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
+ block >>= 6;
+ xoff = 32 * (block & twmask);
+ yoff = 32 * (block >> twl);
+ dst = pd->dst.buf + yoff * pd->dst.stride + xoff;
+ src = p->src.buf + yoff * p->src.stride + xoff;
+ src_diff = p->src_diff + 4 * bw * yoff + xoff;
+ vp9_predict_intra_block(xd, block, bwl, TX_32X32, mode,
+ dst, pd->dst.stride, dst, pd->dst.stride);
+ vp9_subtract_block(32, 32, src_diff, bw * 4,
+ src, p->src.stride, dst, pd->dst.stride);
+ if (x->use_lp32x32fdct)
+ vp9_short_fdct32x32_rd(src_diff, coeff, bw * 8);
+ else
+ vp9_short_fdct32x32(src_diff, coeff, bw * 8);
+ vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, p->zbin_extra, eob, scan, iscan);
+ if (!x->skip_encode && *eob)
+ vp9_short_idct32x32_add(dqcoeff, dst, pd->dst.stride);
+ break;
+ case TX_16X16:
+ tx_type = get_tx_type_16x16(pd->plane_type, xd);
+ scan = get_scan_16x16(tx_type);
+ iscan = get_iscan_16x16(tx_type);
+ mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
+ block >>= 4;
+ xoff = 16 * (block & twmask);
+ yoff = 16 * (block >> twl);
+ dst = pd->dst.buf + yoff * pd->dst.stride + xoff;
+ src = p->src.buf + yoff * p->src.stride + xoff;
+ src_diff = p->src_diff + 4 * bw * yoff + xoff;
+ vp9_predict_intra_block(xd, block, bwl, TX_16X16, mode,
+ dst, pd->dst.stride, dst, pd->dst.stride);
+ vp9_subtract_block(16, 16, src_diff, bw * 4,
+ src, p->src.stride, dst, pd->dst.stride);
+ if (tx_type != DCT_DCT)
+ vp9_short_fht16x16(src_diff, coeff, bw * 4, tx_type);
+ else
+ x->fwd_txm16x16(src_diff, coeff, bw * 8);
+ vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, p->zbin_extra, eob, scan, iscan);
+ if (!x->skip_encode && *eob) {
+ if (tx_type == DCT_DCT)
+ inverse_transform_b_16x16_add(*eob, dqcoeff, dst, pd->dst.stride);
+ else
+ vp9_short_iht16x16_add(dqcoeff, dst, pd->dst.stride, tx_type);
+ }
+ break;
+ case TX_8X8:
+ tx_type = get_tx_type_8x8(pd->plane_type, xd);
+ scan = get_scan_8x8(tx_type);
+ iscan = get_iscan_8x8(tx_type);
+ mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
+ block >>= 2;
+ xoff = 8 * (block & twmask);
+ yoff = 8 * (block >> twl);
+ dst = pd->dst.buf + yoff * pd->dst.stride + xoff;
+ src = p->src.buf + yoff * p->src.stride + xoff;
+ src_diff = p->src_diff + 4 * bw * yoff + xoff;
+ vp9_predict_intra_block(xd, block, bwl, TX_8X8, mode,
+ dst, pd->dst.stride, dst, pd->dst.stride);
+ vp9_subtract_block(8, 8, src_diff, bw * 4,
+ src, p->src.stride, dst, pd->dst.stride);
+ if (tx_type != DCT_DCT)
+ vp9_short_fht8x8(src_diff, coeff, bw * 4, tx_type);
+ else
+ x->fwd_txm8x8(src_diff, coeff, bw * 8);
+ vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, p->zbin_extra, eob, scan, iscan);
+ if (!x->skip_encode && *eob) {
+ if (tx_type == DCT_DCT)
+ inverse_transform_b_8x8_add(*eob, dqcoeff, dst, pd->dst.stride);
+ else
+ vp9_short_iht8x8_add(dqcoeff, dst, pd->dst.stride, tx_type);
+ }
+ break;
+ case TX_4X4:
+ tx_type = get_tx_type_4x4(pd->plane_type, xd, block);
+ scan = get_scan_4x4(tx_type);
+ iscan = get_iscan_4x4(tx_type);
+ if (mbmi->sb_type < BLOCK_8X8 && plane == 0)
+ mode = xd->this_mi->bmi[block].as_mode;
+ else
+ mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
+
+ xoff = 4 * (block & twmask);
+ yoff = 4 * (block >> twl);
+ dst = pd->dst.buf + yoff * pd->dst.stride + xoff;
+ src = p->src.buf + yoff * p->src.stride + xoff;
+ src_diff = p->src_diff + 4 * bw * yoff + xoff;
+ vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode,
+ dst, pd->dst.stride, dst, pd->dst.stride);
+ vp9_subtract_block(4, 4, src_diff, bw * 4,
+ src, p->src.stride, dst, pd->dst.stride);
+ if (tx_type != DCT_DCT)
+ vp9_short_fht4x4(src_diff, coeff, bw * 4, tx_type);
+ else
+ x->fwd_txm4x4(src_diff, coeff, bw * 8);
+ vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, p->zbin_extra, eob, scan, iscan);
+ if (!x->skip_encode && *eob) {
+ if (tx_type == DCT_DCT)
+ // this is like vp9_short_idct4x4 but has a special case around eob<=1
+ // which is significant (not just an optimization) for the lossless
+ // case.
+ inverse_transform_b_4x4_add(xd, *eob, dqcoeff, dst, pd->dst.stride);
+ else
+ vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type);
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+void vp9_encode_intra_block_y(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ MACROBLOCKD* const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {x, &ctx};
+
+ foreach_transformed_block_in_plane(xd, bsize, 0, vp9_encode_block_intra,
+ &arg);
+}
+void vp9_encode_intra_block_uv(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ MACROBLOCKD* const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {x, &ctx};
+ foreach_transformed_block_uv(xd, bsize, vp9_encode_block_intra, &arg);
+}
+
diff --git a/libvpx/vp9/encoder/vp9_encodemb.h b/libvpx/vp9/encoder/vp9_encodemb.h
new file mode 100644
index 0000000..54e69fd
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodemb.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_ENCODEMB_H_
+#define VP9_ENCODER_VP9_ENCODEMB_H_
+
+#include "./vpx_config.h"
+#include "vp9/encoder/vp9_block.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+typedef enum {
+ RD_DC_PRED = DC_PRED,
+ RD_V_PRED = V_PRED,
+ RD_H_PRED = H_PRED,
+ RD_D45_PRED = D45_PRED,
+ RD_D135_PRED = D135_PRED,
+ RD_D117_PRED = D117_PRED,
+ RD_D153_PRED = D153_PRED,
+ RD_D207_PRED = D207_PRED,
+ RD_D63_PRED = D63_PRED,
+ RD_TM_PRED = TM_PRED,
+ RD_NEARESTMV = NEARESTMV,
+ RD_NEARMV = NEARMV,
+ RD_ZEROMV = ZEROMV,
+ RD_NEWMV = NEWMV,
+ RD_I4X4_PRED,
+ RD_SPLITMV,
+ RD_MODE_COUNT
+} RD_PREDICTION_MODE;
+
+typedef struct {
+ RD_PREDICTION_MODE mode;
+ MV_REFERENCE_FRAME ref_frame;
+ MV_REFERENCE_FRAME second_ref_frame;
+} MODE_DEFINITION;
+
+struct optimize_ctx {
+ ENTROPY_CONTEXT ta[MAX_MB_PLANE][16];
+ ENTROPY_CONTEXT tl[MAX_MB_PLANE][16];
+};
+
+struct encode_b_args {
+ MACROBLOCK *x;
+ struct optimize_ctx *ctx;
+};
+
+void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
+void vp9_encode_sby(MACROBLOCK *x, BLOCK_SIZE bsize);
+
+void vp9_xform_quant(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg);
+
+void vp9_subtract_sby(MACROBLOCK *x, BLOCK_SIZE bsize);
+void vp9_subtract_sbuv(MACROBLOCK *x, BLOCK_SIZE bsize);
+void vp9_subtract_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
+
+void vp9_encode_intra_block_y(MACROBLOCK *x, BLOCK_SIZE bsize);
+void vp9_encode_intra_block_uv(MACROBLOCK *x, BLOCK_SIZE bsize);
+
+
+#endif // VP9_ENCODER_VP9_ENCODEMB_H_
diff --git a/libvpx/vp9/encoder/vp9_encodemv.c b/libvpx/vp9/encoder/vp9_encodemv.c
new file mode 100644
index 0000000..ed3a2bb
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodemv.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_systemdependent.h"
+
+#include <math.h>
+
+#ifdef ENTROPY_STATS
+extern unsigned int active_section;
+#endif
+
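+// A motion vector component is coded as a sign, a magnitude class and a
+// class-dependent offset split into integer (d), fractional (fr) and
+// high-precision (hp) parts, i.e. mag = 1 + ((d << 3) | (fr << 1) | hp)
+// within the class. For example, assuming class 0 covers the 16 smallest
+// magnitudes: comp == -3 gives sign = 1 and offset = mag - 1 = 2, so
+// d = 0, fr = 1, hp = 0.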
+static void encode_mv_component(vp9_writer* w, int comp,
+ const nmv_component* mvcomp, int usehp) {
+ int offset;
+ const int sign = comp < 0;
+ const int mag = sign ? -comp : comp;
+ const int mv_class = vp9_get_mv_class(mag - 1, &offset);
+ const int d = offset >> 3; // int mv data
+ const int fr = (offset >> 1) & 3; // fractional mv data
+ const int hp = offset & 1; // high precision mv data
+
+ assert(comp != 0);
+
+ // Sign
+ vp9_write(w, sign, mvcomp->sign);
+
+ // Class
+ write_token(w, vp9_mv_class_tree, mvcomp->classes,
+ &vp9_mv_class_encodings[mv_class]);
+
+ // Integer bits
+ if (mv_class == MV_CLASS_0) {
+ write_token(w, vp9_mv_class0_tree, mvcomp->class0,
+ &vp9_mv_class0_encodings[d]);
+ } else {
+ int i;
+ const int n = mv_class + CLASS0_BITS - 1; // number of bits
+ for (i = 0; i < n; ++i)
+ vp9_write(w, (d >> i) & 1, mvcomp->bits[i]);
+ }
+
+ // Fractional bits
+ write_token(w, vp9_mv_fp_tree,
+ mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
+ &vp9_mv_fp_encodings[fr]);
+
+ // High precision bit
+ if (usehp)
+ vp9_write(w, hp,
+ mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
+}
+
+
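+// Fill mvcost with the estimated cost of coding each signed component
+// value (in the encoder's 256-units-per-bit scale). Note the negative
+// indices below: mvcost is expected to point at the centre element of a
+// buffer spanning [-MV_MAX, MV_MAX].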
+static void build_nmv_component_cost_table(int *mvcost,
+ const nmv_component* const mvcomp,
+ int usehp) {
+ int i, v;
+ int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
+ int bits_cost[MV_OFFSET_BITS][2];
+ int class0_fp_cost[CLASS0_SIZE][4], fp_cost[4];
+ int class0_hp_cost[2], hp_cost[2];
+
+ sign_cost[0] = vp9_cost_zero(mvcomp->sign);
+ sign_cost[1] = vp9_cost_one(mvcomp->sign);
+ vp9_cost_tokens(class_cost, mvcomp->classes, vp9_mv_class_tree);
+ vp9_cost_tokens(class0_cost, mvcomp->class0, vp9_mv_class0_tree);
+ for (i = 0; i < MV_OFFSET_BITS; ++i) {
+ bits_cost[i][0] = vp9_cost_zero(mvcomp->bits[i]);
+ bits_cost[i][1] = vp9_cost_one(mvcomp->bits[i]);
+ }
+
+ for (i = 0; i < CLASS0_SIZE; ++i)
+ vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp9_mv_fp_tree);
+ vp9_cost_tokens(fp_cost, mvcomp->fp, vp9_mv_fp_tree);
+
+ if (usehp) {
+ class0_hp_cost[0] = vp9_cost_zero(mvcomp->class0_hp);
+ class0_hp_cost[1] = vp9_cost_one(mvcomp->class0_hp);
+ hp_cost[0] = vp9_cost_zero(mvcomp->hp);
+ hp_cost[1] = vp9_cost_one(mvcomp->hp);
+ }
+ mvcost[0] = 0;
+ for (v = 1; v <= MV_MAX; ++v) {
+ int z, c, o, d, e, f, cost = 0;
+ z = v - 1;
+ c = vp9_get_mv_class(z, &o);
+ cost += class_cost[c];
+ d = (o >> 3); /* int mv data */
+ f = (o >> 1) & 3; /* fractional pel mv data */
+ e = (o & 1); /* high precision mv data */
+ if (c == MV_CLASS_0) {
+ cost += class0_cost[d];
+ } else {
+ int i, b;
+ b = c + CLASS0_BITS - 1; /* number of bits */
+ for (i = 0; i < b; ++i)
+ cost += bits_cost[i][((d >> i) & 1)];
+ }
+ if (c == MV_CLASS_0) {
+ cost += class0_fp_cost[d][f];
+ } else {
+ cost += fp_cost[f];
+ }
+ if (usehp) {
+ if (c == MV_CLASS_0) {
+ cost += class0_hp_cost[e];
+ } else {
+ cost += hp_cost[e];
+ }
+ }
+ mvcost[v] = cost + sign_cost[0];
+ mvcost[-v] = cost + sign_cost[1];
+ }
+}
+
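+// Decide whether signalling an updated probability pays for itself. The
+// update costs one flag plus a 7-bit literal -- 7 * 256 plus the flag-cost
+// delta, in the same units as cost_branch256 -- so the new probability is
+// kept only when the branch-cost saving exceeds that overhead.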
+static int update_mv(vp9_writer *w, const unsigned int ct[2],
+ vp9_prob *cur_p, vp9_prob new_p, vp9_prob upd_p) {
+ vp9_prob mod_p = new_p | 1;
+ const int cur_b = cost_branch256(ct, *cur_p);
+ const int mod_b = cost_branch256(ct, mod_p);
+ const int cost = 7 * 256 + (vp9_cost_one(upd_p) - vp9_cost_zero(upd_p));
+ if (cur_b - mod_b > cost) {
+ *cur_p = mod_p;
+ vp9_write(w, 1, upd_p);
+ vp9_write_literal(w, mod_p >> 1, 7);
+ return 1;
+ } else {
+ vp9_write(w, 0, upd_p);
+ return 0;
+ }
+}
+
+static void counts_to_nmv_context(
+ nmv_context_counts *nmv_count,
+ nmv_context *prob,
+ int usehp,
+ unsigned int (*branch_ct_joint)[2],
+ unsigned int (*branch_ct_sign)[2],
+ unsigned int (*branch_ct_classes)[MV_CLASSES - 1][2],
+ unsigned int (*branch_ct_class0)[CLASS0_SIZE - 1][2],
+ unsigned int (*branch_ct_bits)[MV_OFFSET_BITS][2],
+ unsigned int (*branch_ct_class0_fp)[CLASS0_SIZE][4 - 1][2],
+ unsigned int (*branch_ct_fp)[4 - 1][2],
+ unsigned int (*branch_ct_class0_hp)[2],
+ unsigned int (*branch_ct_hp)[2]) {
+ int i, j, k;
+ vp9_tree_probs_from_distribution(vp9_mv_joint_tree,
+ prob->joints,
+ branch_ct_joint,
+ nmv_count->joints, 0);
+ for (i = 0; i < 2; ++i) {
+ const uint32_t s0 = nmv_count->comps[i].sign[0];
+ const uint32_t s1 = nmv_count->comps[i].sign[1];
+
+ prob->comps[i].sign = get_binary_prob(s0, s1);
+ branch_ct_sign[i][0] = s0;
+ branch_ct_sign[i][1] = s1;
+ vp9_tree_probs_from_distribution(vp9_mv_class_tree,
+ prob->comps[i].classes,
+ branch_ct_classes[i],
+ nmv_count->comps[i].classes, 0);
+ vp9_tree_probs_from_distribution(vp9_mv_class0_tree,
+ prob->comps[i].class0,
+ branch_ct_class0[i],
+ nmv_count->comps[i].class0, 0);
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ const uint32_t b0 = nmv_count->comps[i].bits[j][0];
+ const uint32_t b1 = nmv_count->comps[i].bits[j][1];
+
+ prob->comps[i].bits[j] = get_binary_prob(b0, b1);
+ branch_ct_bits[i][j][0] = b0;
+ branch_ct_bits[i][j][1] = b1;
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ for (k = 0; k < CLASS0_SIZE; ++k) {
+ vp9_tree_probs_from_distribution(vp9_mv_fp_tree,
+ prob->comps[i].class0_fp[k],
+ branch_ct_class0_fp[i][k],
+ nmv_count->comps[i].class0_fp[k], 0);
+ }
+ vp9_tree_probs_from_distribution(vp9_mv_fp_tree,
+ prob->comps[i].fp,
+ branch_ct_fp[i],
+ nmv_count->comps[i].fp, 0);
+ }
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ const uint32_t c0_hp0 = nmv_count->comps[i].class0_hp[0];
+ const uint32_t c0_hp1 = nmv_count->comps[i].class0_hp[1];
+ const uint32_t hp0 = nmv_count->comps[i].hp[0];
+ const uint32_t hp1 = nmv_count->comps[i].hp[1];
+
+ prob->comps[i].class0_hp = get_binary_prob(c0_hp0, c0_hp1);
+ branch_ct_class0_hp[i][0] = c0_hp0;
+ branch_ct_class0_hp[i][1] = c0_hp1;
+
+ prob->comps[i].hp = get_binary_prob(hp0, hp1);
+ branch_ct_hp[i][0] = hp0;
+ branch_ct_hp[i][1] = hp1;
+ }
+ }
+}
+
+void vp9_write_nmv_probs(VP9_COMP* const cpi, int usehp, vp9_writer* const bc) {
+ int i, j;
+ nmv_context prob;
+ unsigned int branch_ct_joint[MV_JOINTS - 1][2];
+ unsigned int branch_ct_sign[2][2];
+ unsigned int branch_ct_classes[2][MV_CLASSES - 1][2];
+ unsigned int branch_ct_class0[2][CLASS0_SIZE - 1][2];
+ unsigned int branch_ct_bits[2][MV_OFFSET_BITS][2];
+ unsigned int branch_ct_class0_fp[2][CLASS0_SIZE][4 - 1][2];
+ unsigned int branch_ct_fp[2][4 - 1][2];
+ unsigned int branch_ct_class0_hp[2][2];
+ unsigned int branch_ct_hp[2][2];
+ nmv_context *mvc = &cpi->common.fc.nmvc;
+
+ counts_to_nmv_context(&cpi->NMVcount, &prob, usehp,
+ branch_ct_joint, branch_ct_sign, branch_ct_classes,
+ branch_ct_class0, branch_ct_bits,
+ branch_ct_class0_fp, branch_ct_fp,
+ branch_ct_class0_hp, branch_ct_hp);
+
+ for (j = 0; j < MV_JOINTS - 1; ++j)
+ update_mv(bc, branch_ct_joint[j], &mvc->joints[j], prob.joints[j],
+ NMV_UPDATE_PROB);
+
+ for (i = 0; i < 2; ++i) {
+ update_mv(bc, branch_ct_sign[i], &mvc->comps[i].sign,
+ prob.comps[i].sign, NMV_UPDATE_PROB);
+ for (j = 0; j < MV_CLASSES - 1; ++j)
+ update_mv(bc, branch_ct_classes[i][j], &mvc->comps[i].classes[j],
+ prob.comps[i].classes[j], NMV_UPDATE_PROB);
+
+ for (j = 0; j < CLASS0_SIZE - 1; ++j)
+ update_mv(bc, branch_ct_class0[i][j], &mvc->comps[i].class0[j],
+ prob.comps[i].class0[j], NMV_UPDATE_PROB);
+
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ update_mv(bc, branch_ct_bits[i][j], &mvc->comps[i].bits[j],
+ prob.comps[i].bits[j], NMV_UPDATE_PROB);
+ }
+
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ int k;
+ for (k = 0; k < 3; ++k)
+ update_mv(bc, branch_ct_class0_fp[i][j][k],
+ &mvc->comps[i].class0_fp[j][k],
+ prob.comps[i].class0_fp[j][k], NMV_UPDATE_PROB);
+ }
+
+ for (j = 0; j < 3; ++j)
+ update_mv(bc, branch_ct_fp[i][j], &mvc->comps[i].fp[j],
+ prob.comps[i].fp[j], NMV_UPDATE_PROB);
+ }
+
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ update_mv(bc, branch_ct_class0_hp[i], &mvc->comps[i].class0_hp,
+ prob.comps[i].class0_hp, NMV_UPDATE_PROB);
+ update_mv(bc, branch_ct_hp[i], &mvc->comps[i].hp,
+ prob.comps[i].hp, NMV_UPDATE_PROB);
+ }
+ }
+}
+
+void vp9_encode_mv(VP9_COMP* cpi, vp9_writer* w,
+ const MV* mv, const MV* ref,
+ const nmv_context* mvctx, int usehp) {
+ const MV diff = {mv->row - ref->row,
+ mv->col - ref->col};
+ const MV_JOINT_TYPE j = vp9_get_mv_joint(&diff);
+ usehp = usehp && vp9_use_mv_hp(ref);
+
+ write_token(w, vp9_mv_joint_tree, mvctx->joints, &vp9_mv_joint_encodings[j]);
+ if (mv_joint_vertical(j))
+ encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
+
+ if (mv_joint_horizontal(j))
+ encode_mv_component(w, diff.col, &mvctx->comps[1], usehp);
+
+ // If auto_mv_step_size is enabled then keep track of the largest
+ // motion vector component used.
+ if (!cpi->dummy_packing && cpi->sf.auto_mv_step_size) {
+ unsigned int maxv = MAX(abs(mv->row), abs(mv->col)) >> 3;
+ cpi->max_mv_magnitude = MAX(maxv, cpi->max_mv_magnitude);
+ }
+}
+
+void vp9_build_nmv_cost_table(int *mvjoint,
+ int *mvcost[2],
+ const nmv_context* const mvctx,
+ int usehp,
+ int mvc_flag_v,
+ int mvc_flag_h) {
+ vp9_clear_system_state();
+ vp9_cost_tokens(mvjoint, mvctx->joints, vp9_mv_joint_tree);
+ if (mvc_flag_v)
+ build_nmv_component_cost_table(mvcost[0], &mvctx->comps[0], usehp);
+ if (mvc_flag_h)
+ build_nmv_component_cost_table(mvcost[1], &mvctx->comps[1], usehp);
+}
+
+void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
+ MODE_INFO *mi = x->e_mbd.mi_8x8[0];
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ MV diff;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
+ int idx, idy;
+
+ if (mbmi->sb_type < BLOCK_8X8) {
+ PARTITION_INFO *pi = x->partition_info;
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ const int i = idy * 2 + idx;
+ if (pi->bmi[i].mode == NEWMV) {
+ diff.row = mi->bmi[i].as_mv[0].as_mv.row - best_ref_mv->as_mv.row;
+ diff.col = mi->bmi[i].as_mv[0].as_mv.col - best_ref_mv->as_mv.col;
+ vp9_inc_mv(&diff, &cpi->NMVcount);
+
+ if (mi->mbmi.ref_frame[1] > INTRA_FRAME) {
+ diff.row = mi->bmi[i].as_mv[1].as_mv.row -
+ second_best_ref_mv->as_mv.row;
+ diff.col = mi->bmi[i].as_mv[1].as_mv.col -
+ second_best_ref_mv->as_mv.col;
+ vp9_inc_mv(&diff, &cpi->NMVcount);
+ }
+ }
+ }
+ }
+ } else if (mbmi->mode == NEWMV) {
+ diff.row = mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row;
+ diff.col = mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col;
+ vp9_inc_mv(&diff, &cpi->NMVcount);
+
+ if (mbmi->ref_frame[1] > INTRA_FRAME) {
+ diff.row = mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row;
+ diff.col = mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col;
+ vp9_inc_mv(&diff, &cpi->NMVcount);
+ }
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_encodemv.h b/libvpx/vp9/encoder/vp9_encodemv.h
new file mode 100644
index 0000000..2789ce1
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodemv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_ENCODEMV_H_
+#define VP9_ENCODER_VP9_ENCODEMV_H_
+
+#include "vp9/encoder/vp9_onyx_int.h"
+
+void vp9_write_nmv_probs(VP9_COMP* const, int usehp, vp9_writer* const);
+
+void vp9_encode_mv(VP9_COMP *cpi, vp9_writer* w, const MV* mv, const MV* ref,
+ const nmv_context* mvctx, int usehp);
+
+void vp9_build_nmv_cost_table(int *mvjoint,
+ int *mvcost[2],
+ const nmv_context* const mvctx,
+ int usehp,
+ int mvc_flag_v,
+ int mvc_flag_h);
+void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv, int_mv *second_best_ref_mv);
+
+#endif // VP9_ENCODER_VP9_ENCODEMV_H_
diff --git a/libvpx/vp9/encoder/vp9_firstpass.c b/libvpx/vp9/encoder/vp9_firstpass.c
new file mode 100644
index 0000000..9cf7b83
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_firstpass.c
@@ -0,0 +1,2654 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "math.h"
+#include "limits.h"
+#include "vp9/encoder/vp9_block.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/encoder/vp9_encodeintra.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_firstpass.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/encoder/vp9_encodeframe.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/common/vp9_extend.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_scale/yv12config.h"
+#include <stdio.h>
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "./vpx_scale_rtcd.h"
+// TODO(jkoleszar): for setup_dst_planes
+#include "vp9/common/vp9_reconinter.h"
+
+#define OUTPUT_FPF 0
+
+#define IIFACTOR 12.5
+#define IIKFACTOR1 12.5
+#define IIKFACTOR2 15.0
+#define RMAX 512.0
+#define GF_RMAX 96.0
+#define ERR_DIVISOR 150.0
+#define MIN_DECAY_FACTOR 0.1
+
+#define KF_MB_INTRA_MIN 150
+#define GF_MB_INTRA_MIN 100
+
+#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)
+
+#define POW1 (double)cpi->oxcf.two_pass_vbrbias/100.0
+#define POW2 (double)cpi->oxcf.two_pass_vbrbias/100.0
+
+static void swap_yv12(YV12_BUFFER_CONFIG *a, YV12_BUFFER_CONFIG *b) {
+ YV12_BUFFER_CONFIG temp = *a;
+ *a = *b;
+ *b = temp;
+}
+
+static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame);
+
+static int select_cq_level(int qindex) {
+ int ret_val = QINDEX_RANGE - 1;
+ int i;
+
+ double target_q = (vp9_convert_qindex_to_q(qindex) * 0.5847) + 1.0;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ if (target_q <= vp9_convert_qindex_to_q(i)) {
+ ret_val = i;
+ break;
+ }
+ }
+
+ return ret_val;
+}
+
+
+// Resets the first pass file position to the given position.
+static void reset_fpf_position(VP9_COMP *cpi, FIRSTPASS_STATS *position) {
+ cpi->twopass.stats_in = position;
+}
+
+static int lookup_next_frame_stats(VP9_COMP *cpi, FIRSTPASS_STATS *next_frame) {
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
+ return EOF;
+
+ *next_frame = *cpi->twopass.stats_in;
+ return 1;
+}
+
+// Read frame stats at an offset from the current position
+static int read_frame_stats(VP9_COMP *cpi,
+ FIRSTPASS_STATS *frame_stats,
+ int offset) {
+ FIRSTPASS_STATS *fps_ptr = cpi->twopass.stats_in;
+
+ // Check legality of offset
+ if (offset >= 0) {
+ if (&fps_ptr[offset] >= cpi->twopass.stats_in_end)
+ return EOF;
+ } else if (offset < 0) {
+ if (&fps_ptr[offset] < cpi->twopass.stats_in_start)
+ return EOF;
+ }
+
+ *frame_stats = fps_ptr[offset];
+ return 1;
+}
+
+static int input_stats(VP9_COMP *cpi, FIRSTPASS_STATS *fps) {
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
+ return EOF;
+
+ *fps = *cpi->twopass.stats_in;
+ cpi->twopass.stats_in =
+ (void *)((char *)cpi->twopass.stats_in + sizeof(FIRSTPASS_STATS));
+ return 1;
+}
+
+static void output_stats(const VP9_COMP *cpi,
+ struct vpx_codec_pkt_list *pktlist,
+ FIRSTPASS_STATS *stats) {
+ struct vpx_codec_cx_pkt pkt;
+ pkt.kind = VPX_CODEC_STATS_PKT;
+ pkt.data.twopass_stats.buf = stats;
+ pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS);
+ vpx_codec_pkt_list_add(pktlist, &pkt);
+
+// TEMP debug code
+#if OUTPUT_FPF
+
+ {
+ FILE *fpfile;
+ fpfile = fopen("firstpass.stt", "a");
+
+    fprintf(fpfile, "%12.0f %12.0f %12.0f %12.0f %12.0f %12.4f %12.4f"
+ "%12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f"
+ "%12.0f %12.0f %12.4f %12.0f %12.0f %12.4f\n",
+ stats->frame,
+ stats->intra_error,
+ stats->coded_error,
+ stats->sr_coded_error,
+ stats->ssim_weighted_pred_err,
+ stats->pcnt_inter,
+ stats->pcnt_motion,
+ stats->pcnt_second_ref,
+ stats->pcnt_neutral,
+ stats->MVr,
+ stats->mvr_abs,
+ stats->MVc,
+ stats->mvc_abs,
+ stats->MVrv,
+ stats->MVcv,
+ stats->mv_in_out_count,
+ stats->new_mv_count,
+ stats->count,
+ stats->duration);
+ fclose(fpfile);
+ }
+#endif
+}
+
+static void zero_stats(FIRSTPASS_STATS *section) {
+ section->frame = 0.0;
+ section->intra_error = 0.0;
+ section->coded_error = 0.0;
+ section->sr_coded_error = 0.0;
+ section->ssim_weighted_pred_err = 0.0;
+ section->pcnt_inter = 0.0;
+ section->pcnt_motion = 0.0;
+ section->pcnt_second_ref = 0.0;
+ section->pcnt_neutral = 0.0;
+ section->MVr = 0.0;
+ section->mvr_abs = 0.0;
+ section->MVc = 0.0;
+ section->mvc_abs = 0.0;
+ section->MVrv = 0.0;
+ section->MVcv = 0.0;
+ section->mv_in_out_count = 0.0;
+ section->new_mv_count = 0.0;
+ section->count = 0.0;
+ section->duration = 1.0;
+}
+
+static void accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) {
+ section->frame += frame->frame;
+ section->intra_error += frame->intra_error;
+ section->coded_error += frame->coded_error;
+ section->sr_coded_error += frame->sr_coded_error;
+ section->ssim_weighted_pred_err += frame->ssim_weighted_pred_err;
+ section->pcnt_inter += frame->pcnt_inter;
+ section->pcnt_motion += frame->pcnt_motion;
+ section->pcnt_second_ref += frame->pcnt_second_ref;
+ section->pcnt_neutral += frame->pcnt_neutral;
+ section->MVr += frame->MVr;
+ section->mvr_abs += frame->mvr_abs;
+ section->MVc += frame->MVc;
+ section->mvc_abs += frame->mvc_abs;
+ section->MVrv += frame->MVrv;
+ section->MVcv += frame->MVcv;
+ section->mv_in_out_count += frame->mv_in_out_count;
+ section->new_mv_count += frame->new_mv_count;
+ section->count += frame->count;
+ section->duration += frame->duration;
+}
+
+static void subtract_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) {
+ section->frame -= frame->frame;
+ section->intra_error -= frame->intra_error;
+ section->coded_error -= frame->coded_error;
+ section->sr_coded_error -= frame->sr_coded_error;
+ section->ssim_weighted_pred_err -= frame->ssim_weighted_pred_err;
+ section->pcnt_inter -= frame->pcnt_inter;
+ section->pcnt_motion -= frame->pcnt_motion;
+ section->pcnt_second_ref -= frame->pcnt_second_ref;
+ section->pcnt_neutral -= frame->pcnt_neutral;
+ section->MVr -= frame->MVr;
+ section->mvr_abs -= frame->mvr_abs;
+ section->MVc -= frame->MVc;
+ section->mvc_abs -= frame->mvc_abs;
+ section->MVrv -= frame->MVrv;
+ section->MVcv -= frame->MVcv;
+ section->mv_in_out_count -= frame->mv_in_out_count;
+ section->new_mv_count -= frame->new_mv_count;
+ section->count -= frame->count;
+ section->duration -= frame->duration;
+}
+
+static void avg_stats(FIRSTPASS_STATS *section) {
+ if (section->count < 1.0)
+ return;
+
+ section->intra_error /= section->count;
+ section->coded_error /= section->count;
+ section->sr_coded_error /= section->count;
+ section->ssim_weighted_pred_err /= section->count;
+ section->pcnt_inter /= section->count;
+ section->pcnt_second_ref /= section->count;
+ section->pcnt_neutral /= section->count;
+ section->pcnt_motion /= section->count;
+ section->MVr /= section->count;
+ section->mvr_abs /= section->count;
+ section->MVc /= section->count;
+ section->mvc_abs /= section->count;
+ section->MVrv /= section->count;
+ section->MVcv /= section->count;
+ section->mv_in_out_count /= section->count;
+ section->duration /= section->count;
+}
+
+// Calculate a modified error used in distributing bits between easier
+// and harder frames.
+static double calculate_modified_err(VP9_COMP *cpi,
+                                     FIRSTPASS_STATS *this_frame) {
+ const FIRSTPASS_STATS *const stats = &cpi->twopass.total_stats;
+ const double av_err = stats->ssim_weighted_pred_err / stats->count;
+ const double this_err = this_frame->ssim_weighted_pred_err;
+ return av_err * pow(this_err / DOUBLE_DIVIDE_CHECK(av_err),
+ this_err > av_err ? POW1 : POW2);
+}
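+
+// Worked example with assumed numbers: if the clip average
+// ssim_weighted_pred_err is 100 and this frame scores 400, a vbr bias of
+// 100 (POW1 = POW2 = 1.0) returns 100 * (400 / 100)^1.0 = 400, i.e. the
+// raw error. A bias of 50 (power 0.5) returns 100 * 2.0 = 200, moving
+// bits from hard frames towards easier ones.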
+
+static const double weight_table[256] = {
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.031250, 0.062500, 0.093750, 0.125000, 0.156250, 0.187500, 0.218750,
+ 0.250000, 0.281250, 0.312500, 0.343750, 0.375000, 0.406250, 0.437500, 0.468750,
+ 0.500000, 0.531250, 0.562500, 0.593750, 0.625000, 0.656250, 0.687500, 0.718750,
+ 0.750000, 0.781250, 0.812500, 0.843750, 0.875000, 0.906250, 0.937500, 0.968750,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000
+};
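+
+// The table above maps raw 8-bit luma to a weight: values up to 32 get a
+// fixed 0.02, values 33..63 ramp up linearly in steps of 1/32, and 64 and
+// above count fully as 1.0. A mostly-black frame therefore produces a
+// small average weight, which scales down ssim_weighted_pred_err in
+// vp9_first_pass() below.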
+
+static double simple_weight(YV12_BUFFER_CONFIG *source) {
+ int i, j;
+
+ uint8_t *src = source->y_buffer;
+ double sum_weights = 0.0;
+
+  // Loop through the raw Y plane, examining levels and creating a weight
+  // for the image.
+ i = source->y_height;
+ do {
+ j = source->y_width;
+ do {
+ sum_weights += weight_table[ *src];
+ src++;
+ } while (--j);
+ src -= source->y_width;
+ src += source->y_stride;
+ } while (--i);
+
+ sum_weights /= (source->y_height * source->y_width);
+
+ return sum_weights;
+}
+
+
+// This function returns the current per frame maximum bitrate target.
+static int frame_max_bits(VP9_COMP *cpi) {
+ // Max allocation for a single frame based on the max section guidelines
+ // passed in and how many bits are left.
+ // For VBR base this on the bits and frames left plus the
+ // two_pass_vbrmax_section rate passed in by the user.
+ const double max_bits = (1.0 * cpi->twopass.bits_left /
+ (cpi->twopass.total_stats.count - cpi->common.current_video_frame)) *
+ (cpi->oxcf.two_pass_vbrmax_section / 100.0);
+
+ // Trap case where we are out of bits.
+ return MAX((int)max_bits, 0);
+}
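+
+// Example with assumed numbers: 2,000,000 bits left across 100 remaining
+// frames with two_pass_vbrmax_section = 400 (i.e. 400% of the average
+// section rate) allows up to 20000 * 4 = 80000 bits for one frame.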
+
+void vp9_init_first_pass(VP9_COMP *cpi) {
+ zero_stats(&cpi->twopass.total_stats);
+}
+
+void vp9_end_first_pass(VP9_COMP *cpi) {
+ output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.total_stats);
+}
+
+static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+                             YV12_BUFFER_CONFIG *recon_buffer,
+                             int *best_motion_err, int recon_yoffset) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ // Set up pointers for this macro block recon buffer
+ xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
+
+ switch (xd->this_mi->mbmi.sb_type) {
+ case BLOCK_8X8:
+ vp9_mse8x8(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ (unsigned int *)(best_motion_err));
+ break;
+ case BLOCK_16X8:
+ vp9_mse16x8(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ (unsigned int *)(best_motion_err));
+ break;
+ case BLOCK_8X16:
+ vp9_mse8x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ (unsigned int *)(best_motion_err));
+ break;
+ default:
+ vp9_mse16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ (unsigned int *)(best_motion_err));
+ break;
+ }
+}
+
+static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *ref_mv, MV *best_mv,
+ YV12_BUFFER_CONFIG *recon_buffer,
+ int *best_motion_err, int recon_yoffset) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int num00;
+
+ int_mv tmp_mv;
+ int_mv ref_mv_full;
+
+ int tmp_err;
+ int step_param = 3;
+ int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
+ int n;
+ vp9_variance_fn_ptr_t v_fn_ptr =
+ cpi->fn_ptr[xd->this_mi->mbmi.sb_type];
+ int new_mv_mode_penalty = 256;
+
+ int sr = 0;
+ int quart_frm = MIN(cpi->common.width, cpi->common.height);
+
+  // Refine the motion search range according to the frame dimension
+  // for first pass test.
+ while ((quart_frm << sr) < MAX_FULL_PEL_VAL)
+ sr++;
+ if (sr)
+ sr--;
+
+ step_param += sr;
+ further_steps -= sr;
+
+ // override the default variance function to use MSE
+ switch (xd->this_mi->mbmi.sb_type) {
+ case BLOCK_8X8:
+ v_fn_ptr.vf = vp9_mse8x8;
+ break;
+ case BLOCK_16X8:
+ v_fn_ptr.vf = vp9_mse16x8;
+ break;
+ case BLOCK_8X16:
+ v_fn_ptr.vf = vp9_mse8x16;
+ break;
+ default:
+ v_fn_ptr.vf = vp9_mse16x16;
+ break;
+ }
+
+ // Set up pointers for this macro block recon buffer
+ xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
+
+ // Initial step/diamond search centred on best mv
+ tmp_mv.as_int = 0;
+ ref_mv_full.as_mv.col = ref_mv->as_mv.col >> 3;
+ ref_mv_full.as_mv.row = ref_mv->as_mv.row >> 3;
+ tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv, step_param,
+ x->sadperbit16, &num00, &v_fn_ptr,
+ x->nmvjointcost,
+ x->mvcost, ref_mv);
+ if (tmp_err < INT_MAX - new_mv_mode_penalty)
+ tmp_err += new_mv_mode_penalty;
+
+ if (tmp_err < *best_motion_err) {
+ *best_motion_err = tmp_err;
+ best_mv->row = tmp_mv.as_mv.row;
+ best_mv->col = tmp_mv.as_mv.col;
+ }
+
+ // Further step/diamond searches as necessary
+ n = num00;
+ num00 = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00)
+ num00--;
+ else {
+ tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv,
+ step_param + n, x->sadperbit16,
+ &num00, &v_fn_ptr,
+ x->nmvjointcost,
+ x->mvcost, ref_mv);
+ if (tmp_err < INT_MAX - new_mv_mode_penalty)
+ tmp_err += new_mv_mode_penalty;
+
+ if (tmp_err < *best_motion_err) {
+ *best_motion_err = tmp_err;
+ best_mv->row = tmp_mv.as_mv.row;
+ best_mv->col = tmp_mv.as_mv.col;
+ }
+ }
+ }
+}
+
+void vp9_first_pass(VP9_COMP *cpi) {
+ int mb_row, mb_col;
+ MACROBLOCK *const x = &cpi->mb;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ int recon_yoffset, recon_uvoffset;
+ const int lst_yv12_idx = cm->ref_frame_map[cpi->lst_fb_idx];
+ const int gld_yv12_idx = cm->ref_frame_map[cpi->gld_fb_idx];
+ YV12_BUFFER_CONFIG *const lst_yv12 = &cm->yv12_fb[lst_yv12_idx];
+ YV12_BUFFER_CONFIG *const new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+ YV12_BUFFER_CONFIG *const gld_yv12 = &cm->yv12_fb[gld_yv12_idx];
+ const int recon_y_stride = lst_yv12->y_stride;
+ const int recon_uv_stride = lst_yv12->uv_stride;
+ int64_t intra_error = 0;
+ int64_t coded_error = 0;
+ int64_t sr_coded_error = 0;
+
+ int sum_mvr = 0, sum_mvc = 0;
+ int sum_mvr_abs = 0, sum_mvc_abs = 0;
+ int sum_mvrs = 0, sum_mvcs = 0;
+ int mvcount = 0;
+ int intercount = 0;
+ int second_ref_count = 0;
+ int intrapenalty = 256;
+ int neutral_count = 0;
+ int new_mv_count = 0;
+ int sum_in_vectors = 0;
+ uint32_t lastmv_as_int = 0;
+
+ int_mv zero_ref_mv;
+
+ zero_ref_mv.as_int = 0;
+
+ vp9_clear_system_state(); // __asm emms;
+
+ vp9_setup_src_planes(x, cpi->Source, 0, 0);
+ setup_pre_planes(xd, 0, lst_yv12, 0, 0, NULL);
+ setup_dst_planes(xd, new_yv12, 0, 0);
+
+ x->partition_info = x->pi;
+ xd->mi_8x8 = cm->mi_grid_visible;
+ // required for vp9_frame_init_quantizer
+ xd->this_mi =
+ xd->mi_8x8[0] = cm->mi;
+ xd->mic_stream_ptr = cm->mi;
+
+ setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+
+ vp9_frame_init_quantizer(cpi);
+
+ // Initialise the MV cost table to the defaults
+ // if( cm->current_video_frame == 0)
+ // if ( 0 )
+ {
+ vp9_init_mv_probs(cm);
+ vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q);
+ }
+
+ // for each macroblock row in image
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ int_mv best_ref_mv;
+
+ best_ref_mv.as_int = 0;
+
+ // reset above block coeffs
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8);
+
+    // Set up limit values for motion vectors to prevent them extending
+    // outside the UMV borders.
+ x->mv_row_min = -((mb_row * 16) + (VP9BORDERINPIXELS - 8));
+ x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
+ + (VP9BORDERINPIXELS - 8);
+
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ int this_error;
+ int gf_motion_error = INT_MAX;
+ int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
+
+ xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
+ xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
+ xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset;
+ xd->left_available = (mb_col != 0);
+
+ if (mb_col * 2 + 1 < cm->mi_cols) {
+ if (mb_row * 2 + 1 < cm->mi_rows) {
+ xd->this_mi->mbmi.sb_type = BLOCK_16X16;
+ } else {
+ xd->this_mi->mbmi.sb_type = BLOCK_16X8;
+ }
+ } else {
+ if (mb_row * 2 + 1 < cm->mi_rows) {
+ xd->this_mi->mbmi.sb_type = BLOCK_8X16;
+ } else {
+ xd->this_mi->mbmi.sb_type = BLOCK_8X8;
+ }
+ }
+ xd->this_mi->mbmi.ref_frame[0] = INTRA_FRAME;
+      set_mi_row_col(cm, xd,
+                     mb_row << 1,
+                     1 << mi_height_log2(xd->this_mi->mbmi.sb_type),
+                     mb_col << 1,
+                     1 << mi_width_log2(xd->this_mi->mbmi.sb_type));
+
+ // do intra 16x16 prediction
+ this_error = vp9_encode_intra(x, use_dc_pred);
+
+ // "intrapenalty" below deals with situations where the intra and inter error scores are very low (eg a plain black frame)
+ // We do not have special cases in first pass for 0,0 and nearest etc so all inter modes carry an overhead cost estimate fot the mv.
+ // When the error score is very low this causes us to pick all or lots of INTRA modes and throw lots of key frames.
+ // This penalty adds a cost matching that of a 0,0 mv to the intra case.
+ this_error += intrapenalty;
+
+ // Cumulative intra error total
+ intra_error += (int64_t)this_error;
+
+      // Set up limit values for motion vectors to prevent them extending
+      // outside the UMV borders.
+ x->mv_col_min = -((mb_col * 16) + (VP9BORDERINPIXELS - 8));
+ x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
+ + (VP9BORDERINPIXELS - 8);
+
+ // Other than for the first frame do a motion search
+ if (cm->current_video_frame > 0) {
+ int tmp_err;
+ int motion_error = INT_MAX;
+ int_mv mv, tmp_mv;
+
+ // Simple 0,0 motion with no mv overhead
+ zz_motion_search(cpi, x, lst_yv12, &motion_error, recon_yoffset);
+ mv.as_int = tmp_mv.as_int = 0;
+
+ // Test last reference frame using the previous best mv as the
+ // starting point (best reference) for the search
+ first_pass_motion_search(cpi, x, &best_ref_mv,
+ &mv.as_mv, lst_yv12,
+ &motion_error, recon_yoffset);
+
+        // If the current best reference mv is not centred on 0,0 then do
+        // a 0,0 based search as well.
+ if (best_ref_mv.as_int) {
+ tmp_err = INT_MAX;
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv,
+ lst_yv12, &tmp_err, recon_yoffset);
+
+ if (tmp_err < motion_error) {
+ motion_error = tmp_err;
+ mv.as_int = tmp_mv.as_int;
+ }
+ }
+
+ // Experimental search in an older reference frame
+ if (cm->current_video_frame > 1) {
+ // Simple 0,0 motion with no mv overhead
+ zz_motion_search(cpi, x, gld_yv12,
+ &gf_motion_error, recon_yoffset);
+
+ first_pass_motion_search(cpi, x, &zero_ref_mv,
+ &tmp_mv.as_mv, gld_yv12,
+ &gf_motion_error, recon_yoffset);
+
+ if ((gf_motion_error < motion_error) &&
+ (gf_motion_error < this_error)) {
+ second_ref_count++;
+ }
+
+ // Reset to last frame as reference buffer
+ xd->plane[0].pre[0].buf = lst_yv12->y_buffer + recon_yoffset;
+ xd->plane[1].pre[0].buf = lst_yv12->u_buffer + recon_uvoffset;
+ xd->plane[2].pre[0].buf = lst_yv12->v_buffer + recon_uvoffset;
+
+          // In accumulating a score for the older reference frame take
+          // the best of the motion predicted score and the intra coded
+          // error (just as will be done for the accumulation of
+          // "coded_error" for the last frame).
+ if (gf_motion_error < this_error)
+ sr_coded_error += gf_motion_error;
+ else
+ sr_coded_error += this_error;
+ } else
+ sr_coded_error += motion_error;
+
+ /* Intra assumed best */
+ best_ref_mv.as_int = 0;
+
+ if (motion_error <= this_error) {
+ // Keep a count of cases where the inter and intra were
+ // very close and very low. This helps with scene cut
+ // detection for example in cropped clips with black bars
+ // at the sides or top and bottom.
+ if ((((this_error - intrapenalty) * 9) <=
+ (motion_error * 10)) &&
+ (this_error < (2 * intrapenalty))) {
+ neutral_count++;
+ }
+
+ mv.as_mv.row <<= 3;
+ mv.as_mv.col <<= 3;
+ this_error = motion_error;
+ vp9_set_mbmode_and_mvs(x, NEWMV, &mv);
+ xd->this_mi->mbmi.tx_size = TX_4X4;
+ xd->this_mi->mbmi.ref_frame[0] = LAST_FRAME;
+ xd->this_mi->mbmi.ref_frame[1] = NONE;
+ vp9_build_inter_predictors_sby(xd, mb_row << 1,
+ mb_col << 1,
+ xd->this_mi->mbmi.sb_type);
+ vp9_encode_sby(x, xd->this_mi->mbmi.sb_type);
+ sum_mvr += mv.as_mv.row;
+ sum_mvr_abs += abs(mv.as_mv.row);
+ sum_mvc += mv.as_mv.col;
+ sum_mvc_abs += abs(mv.as_mv.col);
+ sum_mvrs += mv.as_mv.row * mv.as_mv.row;
+ sum_mvcs += mv.as_mv.col * mv.as_mv.col;
+ intercount++;
+
+ best_ref_mv.as_int = mv.as_int;
+
+          // Was the vector non-zero?
+ if (mv.as_int) {
+ mvcount++;
+
+            // Was it different from the last non-zero vector?
+ if (mv.as_int != lastmv_as_int)
+ new_mv_count++;
+ lastmv_as_int = mv.as_int;
+
+ // Does the Row vector point inwards or outwards
+ if (mb_row < cm->mb_rows / 2) {
+ if (mv.as_mv.row > 0)
+ sum_in_vectors--;
+ else if (mv.as_mv.row < 0)
+ sum_in_vectors++;
+ } else if (mb_row > cm->mb_rows / 2) {
+ if (mv.as_mv.row > 0)
+ sum_in_vectors++;
+ else if (mv.as_mv.row < 0)
+ sum_in_vectors--;
+ }
+
+            // Does the column vector point inwards or outwards
+ if (mb_col < cm->mb_cols / 2) {
+ if (mv.as_mv.col > 0)
+ sum_in_vectors--;
+ else if (mv.as_mv.col < 0)
+ sum_in_vectors++;
+ } else if (mb_col > cm->mb_cols / 2) {
+ if (mv.as_mv.col > 0)
+ sum_in_vectors++;
+ else if (mv.as_mv.col < 0)
+ sum_in_vectors--;
+ }
+ }
+ }
+ } else
+ sr_coded_error += (int64_t)this_error;
+
+ coded_error += (int64_t)this_error;
+
+ // adjust to the next column of macroblocks
+ x->plane[0].src.buf += 16;
+ x->plane[1].src.buf += 8;
+ x->plane[2].src.buf += 8;
+
+ recon_yoffset += 16;
+ recon_uvoffset += 8;
+ }
+
+ // adjust to the next row of mbs
+ x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
+ x->plane[1].src.buf += 8 * x->plane[1].src.stride - 8 * cm->mb_cols;
+    x->plane[2].src.buf += 8 * x->plane[2].src.stride - 8 * cm->mb_cols;
+
+ vp9_clear_system_state(); // __asm emms;
+ }
+
+ vp9_clear_system_state(); // __asm emms;
+ {
+ double weight = 0.0;
+
+ FIRSTPASS_STATS fps;
+
+ fps.frame = cm->current_video_frame;
+ fps.intra_error = (double)(intra_error >> 8);
+ fps.coded_error = (double)(coded_error >> 8);
+ fps.sr_coded_error = (double)(sr_coded_error >> 8);
+ weight = simple_weight(cpi->Source);
+
+ if (weight < 0.1)
+ weight = 0.1;
+
+ fps.ssim_weighted_pred_err = fps.coded_error * weight;
+
+ fps.pcnt_inter = 0.0;
+ fps.pcnt_motion = 0.0;
+ fps.MVr = 0.0;
+ fps.mvr_abs = 0.0;
+ fps.MVc = 0.0;
+ fps.mvc_abs = 0.0;
+ fps.MVrv = 0.0;
+ fps.MVcv = 0.0;
+ fps.mv_in_out_count = 0.0;
+ fps.new_mv_count = 0.0;
+ fps.count = 1.0;
+
+ fps.pcnt_inter = 1.0 * (double)intercount / cm->MBs;
+ fps.pcnt_second_ref = 1.0 * (double)second_ref_count / cm->MBs;
+ fps.pcnt_neutral = 1.0 * (double)neutral_count / cm->MBs;
+
+ if (mvcount > 0) {
+ fps.MVr = (double)sum_mvr / (double)mvcount;
+ fps.mvr_abs = (double)sum_mvr_abs / (double)mvcount;
+ fps.MVc = (double)sum_mvc / (double)mvcount;
+ fps.mvc_abs = (double)sum_mvc_abs / (double)mvcount;
+      fps.MVrv = ((double)sum_mvrs -
+                  (fps.MVr * fps.MVr / (double)mvcount)) / (double)mvcount;
+      fps.MVcv = ((double)sum_mvcs -
+                  (fps.MVc * fps.MVc / (double)mvcount)) / (double)mvcount;
+ fps.mv_in_out_count = (double)sum_in_vectors / (double)(mvcount * 2);
+ fps.new_mv_count = new_mv_count;
+
+ fps.pcnt_motion = 1.0 * (double)mvcount / cpi->common.MBs;
+ }
+
+ // TODO: handle the case when duration is set to 0, or something less
+ // than the full time between subsequent values of cpi->source_time_stamp.
+ fps.duration = (double)(cpi->source->ts_end
+ - cpi->source->ts_start);
+
+ // don't want to do output stats with a stack variable!
+ cpi->twopass.this_frame_stats = fps;
+ output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.this_frame_stats);
+ accumulate_stats(&cpi->twopass.total_stats, &fps);
+ }
+
+  // Copy the previous Last Frame back into gf and arf buffers if
+  // the prediction is good enough... but also don't allow it to lag too far.
+ if ((cpi->twopass.sr_update_lag > 3) ||
+ ((cm->current_video_frame > 0) &&
+ (cpi->twopass.this_frame_stats.pcnt_inter > 0.20) &&
+ ((cpi->twopass.this_frame_stats.intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.this_frame_stats.coded_error)) >
+ 2.0))) {
+ vp8_yv12_copy_frame(lst_yv12, gld_yv12);
+ cpi->twopass.sr_update_lag = 1;
+ } else
+ cpi->twopass.sr_update_lag++;
+
+ // swap frame pointers so last frame refers to the frame we just compressed
+ swap_yv12(lst_yv12, new_yv12);
+
+ vp9_extend_frame_borders(lst_yv12, cm->subsampling_x, cm->subsampling_y);
+
+  // Special case for the first frame. Copy into the GF buffer as a
+  // second reference.
+ if (cm->current_video_frame == 0)
+ vp8_yv12_copy_frame(lst_yv12, gld_yv12);
+
+ // use this to see what the first pass reconstruction looks like
+ if (0) {
+ char filename[512];
+ FILE *recon_file;
+ sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
+
+ if (cm->current_video_frame == 0)
+ recon_file = fopen(filename, "wb");
+ else
+ recon_file = fopen(filename, "ab");
+
+ (void)fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file);
+ fclose(recon_file);
+ }
+
+ cm->current_video_frame++;
+}
+
+// Estimate a cost per mb attributable to overheads such as the coding of
+// modes and motion vectors.
+// Currently simplistic in its assumptions for testing.
+
+static double bitcost(double prob) {
+ return -(log(prob) / log(2.0));
+}
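+
+// bitcost() is -log2(prob): e.g. bitcost(0.5) = 1.0 bit and
+// bitcost(0.25) = 2.0 bits. It prices the first-pass mode frequencies in
+// the (currently disabled) estimate below.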
+
+static int64_t estimate_modemvcost(VP9_COMP *cpi,
+ FIRSTPASS_STATS *fpstats) {
+#if 0
+ int mv_cost;
+ int mode_cost;
+
+ double av_pct_inter = fpstats->pcnt_inter / fpstats->count;
+ double av_pct_motion = fpstats->pcnt_motion / fpstats->count;
+ double av_intra = (1.0 - av_pct_inter);
+
+ double zz_cost;
+ double motion_cost;
+ double intra_cost;
+
+ zz_cost = bitcost(av_pct_inter - av_pct_motion);
+ motion_cost = bitcost(av_pct_motion);
+ intra_cost = bitcost(av_intra);
+
+ // Estimate of extra bits per mv overhead for mbs
+ // << 9 is the normalization to the (bits * 512) used in vp9_bits_per_mb
+ mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9;
+
+ // Crude estimate of overhead cost from modes
+ // << 9 is the normalization to (bits * 512) used in vp9_bits_per_mb
+ mode_cost =
+ (int)((((av_pct_inter - av_pct_motion) * zz_cost) +
+ (av_pct_motion * motion_cost) +
+ (av_intra * intra_cost)) * cpi->common.MBs) << 9;
+
+ // return mv_cost + mode_cost;
+ // TODO PGW Fix overhead costs for extended Q range
+#endif
+ return 0;
+}
+
+static double calc_correction_factor(double err_per_mb,
+ double err_divisor,
+ double pt_low,
+ double pt_high,
+ int q) {
+ const double error_term = err_per_mb / err_divisor;
+
+ // Adjustment based on actual quantizer to power term.
+ const double power_term = MIN(vp9_convert_qindex_to_q(q) * 0.01 + pt_low,
+ pt_high);
+
+ // Calculate correction factor
+ if (power_term < 1.0)
+ assert(error_term >= 0.0);
+
+ return fclamp(pow(error_term, power_term), 0.05, 5.0);
+}
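+
+// Illustrative evaluation with assumed inputs: err_per_mb = 300 and
+// err_divisor = 150.0 give error_term = 2.0; a quantizer where
+// vp9_convert_qindex_to_q(q) = 20 with pt_low = 0.4 gives
+// power_term = 0.6, so the factor is clamp(2.0^0.6, 0.05, 5.0) ~= 1.52.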
+
+// Given a current maxQ value sets a range for future values.
+// PGW TODO..
+// This code removes direct dependency on QIndex to determine the range
+// (now uses the actual quantizer) but has not been tuned.
+static void adjust_maxq_qrange(VP9_COMP *cpi) {
+ int i;
+ // Set the max corresponding to cpi->avg_q * 2.0
+ double q = cpi->avg_q * 2.0;
+ cpi->twopass.maxq_max_limit = cpi->worst_quality;
+ for (i = cpi->best_quality; i <= cpi->worst_quality; i++) {
+ cpi->twopass.maxq_max_limit = i;
+ if (vp9_convert_qindex_to_q(i) >= q)
+ break;
+ }
+
+ // Set the min corresponding to cpi->avg_q * 0.5
+ q = cpi->avg_q * 0.5;
+ cpi->twopass.maxq_min_limit = cpi->best_quality;
+ for (i = cpi->worst_quality; i >= cpi->best_quality; i--) {
+ cpi->twopass.maxq_min_limit = i;
+ if (vp9_convert_qindex_to_q(i) <= q)
+ break;
+ }
+}
+
+static int estimate_max_q(VP9_COMP *cpi,
+                          FIRSTPASS_STATS *fpstats,
+                          int section_target_bandwidth) {
+ int q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb;
+
+ double section_err = fpstats->coded_error / fpstats->count;
+ double sr_correction;
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double speed_correction = 1.0;
+
+  if (section_target_bandwidth <= 0)
+    return cpi->twopass.maxq_max_limit;          // Highest value allowed
+
+  target_norm_bits_per_mb = section_target_bandwidth < (1 << 20)
+      ? (512 * section_target_bandwidth) / num_mbs
+      : 512 * (section_target_bandwidth / num_mbs);
+
+ // Look at the drop in prediction quality between the last frame
+ // and the GF buffer (which contained an older frame).
+ if (fpstats->sr_coded_error > fpstats->coded_error) {
+ double sr_err_diff = (fpstats->sr_coded_error - fpstats->coded_error) /
+ (fpstats->count * cpi->common.MBs);
+ sr_correction = fclamp(pow(sr_err_diff / 32.0, 0.25), 0.75, 1.25);
+ } else {
+ sr_correction = 0.75;
+ }
+
+ // Calculate a corrective factor based on a rolling ratio of bits spent
+ // vs target bits
+ if (cpi->rolling_target_bits > 0 &&
+ cpi->active_worst_quality < cpi->worst_quality) {
+ double rolling_ratio = (double)cpi->rolling_actual_bits /
+ (double)cpi->rolling_target_bits;
+
+ if (rolling_ratio < 0.95)
+ cpi->twopass.est_max_qcorrection_factor -= 0.005;
+ else if (rolling_ratio > 1.05)
+ cpi->twopass.est_max_qcorrection_factor += 0.005;
+
+ cpi->twopass.est_max_qcorrection_factor = fclamp(
+ cpi->twopass.est_max_qcorrection_factor, 0.1, 10.0);
+ }
+
+ // Corrections for higher compression speed settings
+ // (reduced compression expected)
+ // FIXME(jimbankoski): Once we settle on vp9 speed features we need to
+ // change this code.
+ if (cpi->compressor_speed == 1)
+ speed_correction = cpi->oxcf.cpu_used <= 5 ?
+ 1.04 + (/*cpi->oxcf.cpu_used*/0 * 0.04) :
+ 1.25;
+
+ // Try and pick a max Q that will be high enough to encode the
+ // content at the given rate.
+ for (q = cpi->twopass.maxq_min_limit; q < cpi->twopass.maxq_max_limit; q++) {
+ int bits_per_mb_at_this_q;
+
+ err_correction_factor = calc_correction_factor(err_per_mb,
+ ERR_DIVISOR, 0.4, 0.90, q) *
+ sr_correction * speed_correction *
+ cpi->twopass.est_max_qcorrection_factor;
+
+ bits_per_mb_at_this_q = vp9_bits_per_mb(INTER_FRAME, q,
+ err_correction_factor);
+
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
+ break;
+ }
+
+ // Restriction on active max q for constrained quality mode.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY &&
+ q < cpi->cq_target_quality)
+ q = cpi->cq_target_quality;
+
+ // Adjust maxq_min_limit and maxq_max_limit limits based on
+ // average q observed in clip for non kf/gf/arf frames
+ // Give average a chance to settle though.
+ // PGW TODO.. This code is broken for the extended Q range
+ if (cpi->ni_frames > ((int)cpi->twopass.total_stats.count >> 8) &&
+ cpi->ni_frames > 25)
+ adjust_maxq_qrange(cpi);
+
+ return q;
+}
+
+// For cq mode estimate a cq level that matches the observed
+// complexity and data rate.
+static int estimate_cq(VP9_COMP *cpi,
+                       FIRSTPASS_STATS *fpstats,
+                       int section_target_bandwidth) {
+ int q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb;
+
+ double section_err = (fpstats->coded_error / fpstats->count);
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double sr_err_diff;
+ double sr_correction;
+ double speed_correction = 1.0;
+ double clip_iiratio;
+ double clip_iifactor;
+
+  target_norm_bits_per_mb = (section_target_bandwidth < (1 << 20))
+                            ? (512 * section_target_bandwidth) / num_mbs
+                            : 512 * (section_target_bandwidth / num_mbs);
+
+ // Corrections for higher compression speed settings
+ // (reduced compression expected)
+ if (cpi->compressor_speed == 1) {
+ if (cpi->oxcf.cpu_used <= 5)
+ speed_correction = 1.04 + (/*cpi->oxcf.cpu_used*/ 0 * 0.04);
+ else
+ speed_correction = 1.25;
+ }
+
+ // Look at the drop in prediction quality between the last frame
+ // and the GF buffer (which contained an older frame).
+ if (fpstats->sr_coded_error > fpstats->coded_error) {
+ sr_err_diff =
+ (fpstats->sr_coded_error - fpstats->coded_error) /
+ (fpstats->count * cpi->common.MBs);
+ sr_correction = (sr_err_diff / 32.0);
+ sr_correction = pow(sr_correction, 0.25);
+ if (sr_correction < 0.75)
+ sr_correction = 0.75;
+ else if (sr_correction > 1.25)
+ sr_correction = 1.25;
+ } else {
+ sr_correction = 0.75;
+ }
+
+ // II ratio correction factor for clip as a whole
+ clip_iiratio = cpi->twopass.total_stats.intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats.coded_error);
+ clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025);
+ if (clip_iifactor < 0.80)
+ clip_iifactor = 0.80;
+
+ // Try and pick a Q that can encode the content at the given rate.
+ for (q = 0; q < MAXQ; q++) {
+ int bits_per_mb_at_this_q;
+
+ // Error per MB based correction factor
+ err_correction_factor =
+ calc_correction_factor(err_per_mb, 100.0, 0.4, 0.90, q) *
+ sr_correction * speed_correction * clip_iifactor;
+
+ bits_per_mb_at_this_q =
+ vp9_bits_per_mb(INTER_FRAME, q, err_correction_factor);
+
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
+ break;
+ }
+
+ // Clip value to range "best allowed to (worst allowed - 1)"
+ q = select_cq_level(q);
+ if (q >= cpi->worst_quality)
+ q = cpi->worst_quality - 1;
+ if (q < cpi->best_quality)
+ q = cpi->best_quality;
+
+ return q;
+}
+
+extern void vp9_new_framerate(VP9_COMP *cpi, double framerate);
+
+void vp9_init_second_pass(VP9_COMP *cpi) {
+ FIRSTPASS_STATS this_frame;
+ FIRSTPASS_STATS *start_pos;
+
+ double lower_bounds_min_rate = FRAME_OVERHEAD_BITS * cpi->oxcf.framerate;
+ double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
+ * cpi->oxcf.two_pass_vbrmin_section / 100);
+
+ if (two_pass_min_rate < lower_bounds_min_rate)
+ two_pass_min_rate = lower_bounds_min_rate;
+
+ zero_stats(&cpi->twopass.total_stats);
+ zero_stats(&cpi->twopass.total_left_stats);
+
+ if (!cpi->twopass.stats_in_end)
+ return;
+
+ cpi->twopass.total_stats = *cpi->twopass.stats_in_end;
+ cpi->twopass.total_left_stats = cpi->twopass.total_stats;
+
+  // Each frame can have a different duration, as the frame rate in the source
+  // isn't guaranteed to be constant. The frame rate prior to the first frame
+  // encoded in the second pass is a guess. However the sum duration is not.
+  // It's calculated based on the actual durations of all frames from the
+  // first pass.
+ vp9_new_framerate(cpi, 10000000.0 * cpi->twopass.total_stats.count /
+ cpi->twopass.total_stats.duration);
+
+ cpi->output_framerate = cpi->oxcf.framerate;
+ cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats.duration *
+ cpi->oxcf.target_bandwidth / 10000000.0);
+ cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats.duration *
+ two_pass_min_rate / 10000000.0);
+
+ // Calculate a minimum intra value to be used in determining the IIratio
+ // scores used in the second pass. We have this minimum to make sure
+ // that clips that are static but "low complexity" in the intra domain
+ // are still boosted appropriately for KF/GF/ARF
+ cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs;
+ cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs;
+
+ // This variable monitors how far behind the second ref update is lagging
+ cpi->twopass.sr_update_lag = 1;
+
+  // Scan the first pass file and calculate an average Intra / Inter error
+  // score ratio for the sequence.
+ {
+ double sum_iiratio = 0.0;
+ double IIRatio;
+
+ start_pos = cpi->twopass.stats_in; // Note starting "file" position
+
+ while (input_stats(cpi, &this_frame) != EOF) {
+      IIRatio = this_frame.intra_error /
+                DOUBLE_DIVIDE_CHECK(this_frame.coded_error);
+ IIRatio = (IIRatio < 1.0) ? 1.0 : (IIRatio > 20.0) ? 20.0 : IIRatio;
+ sum_iiratio += IIRatio;
+ }
+
+ cpi->twopass.avg_iiratio = sum_iiratio /
+ DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats.count);
+
+ // Reset file position
+ reset_fpf_position(cpi, start_pos);
+ }
+
+  // Scan the first pass file and calculate a modified total error based upon
+  // the bias/power function used to allocate bits.
+ {
+ start_pos = cpi->twopass.stats_in; // Note starting "file" position
+
+ cpi->twopass.modified_error_total = 0.0;
+ cpi->twopass.modified_error_used = 0.0;
+
+ while (input_stats(cpi, &this_frame) != EOF) {
+      cpi->twopass.modified_error_total +=
+          calculate_modified_err(cpi, &this_frame);
+ }
+ cpi->twopass.modified_error_left = cpi->twopass.modified_error_total;
+
+    reset_fpf_position(cpi, start_pos);  // Reset file position
+  }
+}
+
+void vp9_end_second_pass(VP9_COMP *cpi) {
+}
+
+// This function gives an estimate of how badly we believe the prediction
+// quality is decaying from frame to frame.
+static double get_prediction_decay_rate(VP9_COMP *cpi,
+ FIRSTPASS_STATS *next_frame) {
+ double prediction_decay_rate;
+ double second_ref_decay;
+ double mb_sr_err_diff;
+
+ // Initial basis is the % mbs inter coded
+ prediction_decay_rate = next_frame->pcnt_inter;
+
+ // Look at the observed drop in prediction quality between the last frame
+ // and the GF buffer (which contains an older frame).
+ mb_sr_err_diff = (next_frame->sr_coded_error - next_frame->coded_error) /
+ cpi->common.MBs;
+ if (mb_sr_err_diff <= 512.0) {
+ second_ref_decay = 1.0 - (mb_sr_err_diff / 512.0);
+ second_ref_decay = pow(second_ref_decay, 0.5);
+ if (second_ref_decay < 0.85)
+ second_ref_decay = 0.85;
+ else if (second_ref_decay > 1.0)
+ second_ref_decay = 1.0;
+ } else {
+ second_ref_decay = 0.85;
+ }
+
+ if (second_ref_decay < prediction_decay_rate)
+ prediction_decay_rate = second_ref_decay;
+
+ return prediction_decay_rate;
+}
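+
+// Worked example with assumed stats: pcnt_inter = 0.95 and a per-mb
+// second-ref error gap of 128 give second_ref_decay =
+// sqrt(1.0 - 128 / 512) ~= 0.87 (within the [0.85, 1.0] clamp), so the
+// returned decay rate is min(0.95, 0.87) = 0.87.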
+
+// Function to test for a condition where a complex transition is followed
+// by a static section. For example in slide shows where there is a fade
+// between slides. This is to help with more optimal kf and gf positioning.
+static int detect_transition_to_still(
+ VP9_COMP *cpi,
+ int frame_interval,
+ int still_interval,
+ double loop_decay_rate,
+ double last_decay_rate) {
+ int trans_to_still = 0;
+
+ // Break clause to detect very still sections after motion
+ // For example a static image after a fade or other transition
+ // instead of a clean scene cut.
+ if (frame_interval > MIN_GF_INTERVAL &&
+ loop_decay_rate >= 0.999 &&
+ last_decay_rate < 0.9) {
+ int j;
+ FIRSTPASS_STATS *position = cpi->twopass.stats_in;
+ FIRSTPASS_STATS tmp_next_frame;
+ double zz_inter;
+
+ // Look ahead a few frames to see if static condition
+ // persists...
+ for (j = 0; j < still_interval; j++) {
+ if (EOF == input_stats(cpi, &tmp_next_frame))
+ break;
+
+ zz_inter =
+ (tmp_next_frame.pcnt_inter - tmp_next_frame.pcnt_motion);
+ if (zz_inter < 0.999)
+ break;
+ }
+ // Reset file position
+ reset_fpf_position(cpi, position);
+
+ // Only if it does do we signal a transition to still
+ if (j == still_interval)
+ trans_to_still = 1;
+ }
+
+ return trans_to_still;
+}
+
+// This function detects a flash through the high relative pcnt_second_ref
+// score in the frame following a flash frame. The offset passed in should
+// reflect this.
+static int detect_flash(VP9_COMP *cpi, int offset) {
+ FIRSTPASS_STATS next_frame;
+
+ int flash_detected = 0;
+
+ // Read the frame data.
+ // The return is FALSE (no flash detected) if not a valid frame
+ if (read_frame_stats(cpi, &next_frame, offset) != EOF) {
+ // What we are looking for here is a situation where there is a
+ // brief break in prediction (such as a flash) but subsequent frames
+ // are reasonably well predicted by an earlier (pre flash) frame.
+ // The recovery after a flash is indicated by a high pcnt_second_ref
+    // compared to pcnt_inter.
+ if (next_frame.pcnt_second_ref > next_frame.pcnt_inter &&
+ next_frame.pcnt_second_ref >= 0.5)
+ flash_detected = 1;
+ }
+
+ return flash_detected;
+}
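+
+// Example with assumed stats: a post-flash frame with pcnt_inter = 0.30
+// but pcnt_second_ref = 0.60 is predicted far better by the older
+// (pre-flash) second reference, so it is flagged as flash recovery.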
+
+// Update the motion related elements to the GF arf boost calculation
+static void accumulate_frame_motion_stats(
+ FIRSTPASS_STATS *this_frame,
+ double *this_frame_mv_in_out,
+ double *mv_in_out_accumulator,
+ double *abs_mv_in_out_accumulator,
+ double *mv_ratio_accumulator) {
+ // double this_frame_mv_in_out;
+ double this_frame_mvr_ratio;
+ double this_frame_mvc_ratio;
+ double motion_pct;
+
+ // Accumulate motion stats.
+ motion_pct = this_frame->pcnt_motion;
+
+ // Accumulate Motion In/Out of frame stats
+ *this_frame_mv_in_out = this_frame->mv_in_out_count * motion_pct;
+ *mv_in_out_accumulator += this_frame->mv_in_out_count * motion_pct;
+ *abs_mv_in_out_accumulator +=
+ fabs(this_frame->mv_in_out_count * motion_pct);
+
+ // Accumulate a measure of how uniform (or conversely how random)
+ // the motion field is. (A ratio of absmv / mv)
+ if (motion_pct > 0.05) {
+ this_frame_mvr_ratio = fabs(this_frame->mvr_abs) /
+ DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVr));
+
+ this_frame_mvc_ratio = fabs(this_frame->mvc_abs) /
+ DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVc));
+
+ *mv_ratio_accumulator +=
+ (this_frame_mvr_ratio < this_frame->mvr_abs)
+ ? (this_frame_mvr_ratio * motion_pct)
+ : this_frame->mvr_abs * motion_pct;
+
+ *mv_ratio_accumulator +=
+ (this_frame_mvc_ratio < this_frame->mvc_abs)
+ ? (this_frame_mvc_ratio * motion_pct)
+ : this_frame->mvc_abs * motion_pct;
+  }
+}
+
+// Calculate a baseline boost number for the current frame.
+static double calc_frame_boost(
+ VP9_COMP *cpi,
+ FIRSTPASS_STATS *this_frame,
+ double this_frame_mv_in_out) {
+ double frame_boost;
+
+ // Underlying boost factor is based on inter intra error ratio
+ if (this_frame->intra_error > cpi->twopass.gf_intra_err_min)
+ frame_boost = (IIFACTOR * this_frame->intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
+ else
+ frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
+
+  // Increase boost for frames where new data is coming into the frame
+  // (e.g. zoom out). Slightly reduce boost if there is a net balance
+  // of motion out of the frame (zoom in).
+  // The range for this_frame_mv_in_out is -1.0 to +1.0.
+ if (this_frame_mv_in_out > 0.0)
+ frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
+ // In extreme case boost is halved
+ else
+ frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
+
+ // Clip to maximum
+ if (frame_boost > GF_RMAX)
+ frame_boost = GF_RMAX;
+
+ return frame_boost;
+}
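+
+// Example with assumed inputs: intra_error = 3000 and coded_error = 600
+// (with gf_intra_err_min below 3000) give a base boost of
+// 12.5 * 3000 / 600 = 62.5. A net inflow of this_frame_mv_in_out = 0.2
+// raises it by 40% to 87.5, still under the GF_RMAX cap of 96.0.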
+
+static int calc_arf_boost(VP9_COMP *cpi, int offset,
+ int f_frames, int b_frames,
+ int *f_boost, int *b_boost) {
+ FIRSTPASS_STATS this_frame;
+
+ int i;
+ double boost_score = 0.0;
+ double mv_ratio_accumulator = 0.0;
+ double decay_accumulator = 1.0;
+ double this_frame_mv_in_out = 0.0;
+ double mv_in_out_accumulator = 0.0;
+ double abs_mv_in_out_accumulator = 0.0;
+ int arf_boost;
+ int flash_detected = 0;
+
+ // Search forward from the proposed arf/next gf position
+ for (i = 0; i < f_frames; i++) {
+ if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF)
+ break;
+
+ // Update the motion related elements to the boost calculation
+ accumulate_frame_motion_stats(&this_frame,
+ &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+ // We want to discount the flash frame itself and the recovery
+ // frame that follows as both will have poor scores.
+ flash_detected = detect_flash(cpi, (i + offset)) ||
+ detect_flash(cpi, (i + offset + 1));
+
+ // Cumulative effect of prediction quality decay
+ if (!flash_detected) {
+ decay_accumulator *= get_prediction_decay_rate(cpi, &this_frame);
+ decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
+ ? MIN_DECAY_FACTOR : decay_accumulator;
+ }
+
+ boost_score += (decay_accumulator *
+ calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out));
+ }
+
+ *f_boost = (int)boost_score;
+
+ // Reset for backward looking loop
+ boost_score = 0.0;
+ mv_ratio_accumulator = 0.0;
+ decay_accumulator = 1.0;
+ this_frame_mv_in_out = 0.0;
+ mv_in_out_accumulator = 0.0;
+ abs_mv_in_out_accumulator = 0.0;
+
+ // Search backward towards last gf position
+ for (i = -1; i >= -b_frames; i--) {
+ if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF)
+ break;
+
+ // Update the motion related elements to the boost calculation
+ accumulate_frame_motion_stats(&this_frame,
+ &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+    // We want to discount the flash frame itself and the recovery
+ // frame that follows as both will have poor scores.
+ flash_detected = detect_flash(cpi, (i + offset)) ||
+ detect_flash(cpi, (i + offset + 1));
+
+ // Cumulative effect of prediction quality decay
+ if (!flash_detected) {
+ decay_accumulator *= get_prediction_decay_rate(cpi, &this_frame);
+ decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
+ ? MIN_DECAY_FACTOR : decay_accumulator;
+ }
+
+ boost_score += (decay_accumulator *
+ calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out));
+  }
+
+ *b_boost = (int)boost_score;
+
+ arf_boost = (*f_boost + *b_boost);
+ if (arf_boost < ((b_frames + f_frames) * 20))
+ arf_boost = ((b_frames + f_frames) * 20);
+
+ return arf_boost;
+}
+
+#if CONFIG_MULTIPLE_ARF
+// Work out the frame coding order for a GF or an ARF group.
+// The current implementation codes frames in their natural order for a
+// GF group, and inserts additional ARFs into an ARF group using a
+// binary split approach.
+// NOTE: this function is currently implemented recursively.
+static void schedule_frames(VP9_COMP *cpi, const int start, const int end,
+ const int arf_idx, const int gf_or_arf_group,
+ const int level) {
+ int i, abs_end, half_range;
+ int *cfo = cpi->frame_coding_order;
+ int idx = cpi->new_frame_coding_order_period;
+
+ // If (end < 0) an ARF should be coded at position (-end).
+ assert(start >= 0);
+
+ // printf("start:%d end:%d\n", start, end);
+
+ // GF Group: code frames in logical order.
+ if (gf_or_arf_group == 0) {
+ assert(end >= start);
+ for (i = start; i <= end; ++i) {
+ cfo[idx] = i;
+ cpi->arf_buffer_idx[idx] = arf_idx;
+ cpi->arf_weight[idx] = -1;
+ ++idx;
+ }
+ cpi->new_frame_coding_order_period = idx;
+ return;
+ }
+
+ // ARF Group: work out the ARF schedule.
+ // Mark ARF frames as negative.
+ if (end < 0) {
+ // printf("start:%d end:%d\n", -end, -end);
+ // ARF frame is at the end of the range.
+ cfo[idx] = end;
+ // What ARF buffer does this ARF use as predictor.
+ cpi->arf_buffer_idx[idx] = (arf_idx > 2) ? (arf_idx - 1) : 2;
+ cpi->arf_weight[idx] = level;
+ ++idx;
+ abs_end = -end;
+ } else {
+ abs_end = end;
+ }
+
+ half_range = (abs_end - start) >> 1;
+
+ // ARFs may not be adjacent, they must be separated by at least
+ // MIN_GF_INTERVAL non-ARF frames.
+ if ((start + MIN_GF_INTERVAL) >= (abs_end - MIN_GF_INTERVAL)) {
+ // printf("start:%d end:%d\n", start, abs_end);
+ // Update the coding order and active ARF.
+ for (i = start; i <= abs_end; ++i) {
+ cfo[idx] = i;
+ cpi->arf_buffer_idx[idx] = arf_idx;
+ cpi->arf_weight[idx] = -1;
+ ++idx;
+ }
+ cpi->new_frame_coding_order_period = idx;
+ } else {
+ // Place a new ARF at the mid-point of the range.
+ cpi->new_frame_coding_order_period = idx;
+ schedule_frames(cpi, start, -(start + half_range), arf_idx + 1,
+ gf_or_arf_group, level + 1);
+ schedule_frames(cpi, start + half_range + 1, abs_end, arf_idx,
+ gf_or_arf_group, level + 1);
+ }
+}
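+
+// Sketch of the resulting order (illustrative, assuming MIN_GF_INTERVAL
+// permits the split): for an ARF group passed as (start = 0, end = -N)
+// the ARF at frame N is scheduled first, then [0, N/2] and [N/2+1, N]
+// are scheduled recursively, each new mid-point becoming an ARF one
+// level deeper.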
+
+#define FIXED_ARF_GROUP_SIZE 16
+
+void define_fixed_arf_period(VP9_COMP *cpi) {
+ int i;
+ int max_level = INT_MIN;
+
+ assert(cpi->multi_arf_enabled);
+ assert(cpi->oxcf.lag_in_frames >= FIXED_ARF_GROUP_SIZE);
+
+ // Save the weight of the last frame in the sequence before next
+ // sequence pattern overwrites it.
+ cpi->this_frame_weight = cpi->arf_weight[cpi->sequence_number];
+ assert(cpi->this_frame_weight >= 0);
+
+ // Initialize frame coding order variables.
+ cpi->new_frame_coding_order_period = 0;
+ cpi->next_frame_in_order = 0;
+ cpi->arf_buffered = 0;
+ vp9_zero(cpi->frame_coding_order);
+ vp9_zero(cpi->arf_buffer_idx);
+ vpx_memset(cpi->arf_weight, -1, sizeof(cpi->arf_weight));
+
+ if (cpi->twopass.frames_to_key <= (FIXED_ARF_GROUP_SIZE + 8)) {
+ // Setup a GF group close to the keyframe.
+ cpi->source_alt_ref_pending = 0;
+ cpi->baseline_gf_interval = cpi->twopass.frames_to_key;
+ schedule_frames(cpi, 0, (cpi->baseline_gf_interval - 1), 2, 0, 0);
+ } else {
+ // Setup a fixed period ARF group.
+ cpi->source_alt_ref_pending = 1;
+ cpi->baseline_gf_interval = FIXED_ARF_GROUP_SIZE;
+ schedule_frames(cpi, 0, -(cpi->baseline_gf_interval - 1), 2, 1, 0);
+ }
+
+ // Replace level indicator of -1 with correct level.
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] > max_level) {
+ max_level = cpi->arf_weight[i];
+ }
+ }
+ ++max_level;
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] == -1) {
+ cpi->arf_weight[i] = max_level;
+ }
+ }
+ cpi->max_arf_level = max_level;
+#if 0
+ printf("\nSchedule: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->frame_coding_order[i]);
+ }
+ printf("\n");
+ printf("ARFref: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_buffer_idx[i]);
+ }
+ printf("\n");
+ printf("Weight: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_weight[i]);
+ }
+ printf("\n");
+#endif
+}
+#endif
+
+// Analyse and define a gf/arf group.
+static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ FIRSTPASS_STATS next_frame = { 0 };
+ FIRSTPASS_STATS *start_pos;
+ int i;
+ double boost_score = 0.0;
+ double old_boost_score = 0.0;
+ double gf_group_err = 0.0;
+ double gf_first_frame_err = 0.0;
+ double mod_frame_err = 0.0;
+
+ double mv_ratio_accumulator = 0.0;
+ double decay_accumulator = 1.0;
+ double zero_motion_accumulator = 1.0;
+
+ double loop_decay_rate = 1.00; // Starting decay rate
+ double last_loop_decay_rate = 1.00;
+
+ double this_frame_mv_in_out = 0.0;
+ double mv_in_out_accumulator = 0.0;
+ double abs_mv_in_out_accumulator = 0.0;
+ double mv_ratio_accumulator_thresh;
+ int max_bits = frame_max_bits(cpi); // Max for a single frame
+
+ unsigned int allow_alt_ref =
+ cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames;
+
+ int f_boost = 0;
+ int b_boost = 0;
+ int flash_detected;
+ int active_max_gf_interval;
+
+ cpi->twopass.gf_group_bits = 0;
+
+ vp9_clear_system_state(); // __asm emms;
+
+ start_pos = cpi->twopass.stats_in;
+
+ // Load stats for the current frame.
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+
+ // Note the error of the frame at the start of the group (this will be
+ // the GF frame error if we code a normal gf).
+ gf_first_frame_err = mod_frame_err;
+
+ // Special treatment if the current frame is a key frame (which is also
+ // a gf). If it is, then its error score (and hence bit allocation) needs
+ // to be subtracted out from the calculation for the GF group.
+ if (cpi->common.frame_type == KEY_FRAME)
+ gf_group_err -= gf_first_frame_err;
+
+ // Motion breakout threshold for loop below depends on image size.
+ mv_ratio_accumulator_thresh = (cpi->common.width + cpi->common.height) / 10.0;
+
+ // Work out a maximum interval for the GF.
+ // If the image appears completely static we can extend beyond this.
+ // The value chosen depends on the active Q range. At low Q we have
+ // bits to spare and are better with a smaller interval and smaller boost.
+ // At high Q when there are few bits to spare we are better with a longer
+ // interval to spread the cost of the GF.
+ active_max_gf_interval =
+ 12 + ((int)vp9_convert_qindex_to_q(cpi->active_worst_quality) >> 5);
+
+ if (active_max_gf_interval > cpi->max_gf_interval)
+ active_max_gf_interval = cpi->max_gf_interval;
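+ // For illustration (assumed q values, not the real qindex table):
+ // q = 8 -> 12 + (8 >> 5) = 12; q = 96 -> 12 + (96 >> 5) = 15;
+ // q = 200 -> 12 + (200 >> 5) = 18, clamped to max_gf_interval.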
+
+ i = 0;
+ while (((i < cpi->twopass.static_scene_max_gf_interval) ||
+ ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) &&
+ (i < cpi->twopass.frames_to_key)) {
+ i++; // Increment the loop counter
+
+ // Accumulate error score of frames in this gf group
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+ gf_group_err += mod_frame_err;
+
+ if (EOF == input_stats(cpi, &next_frame))
+ break;
+
+ // Test for the case where there is a brief flash but prediction
+ // quality relative to an earlier frame is then restored.
+ flash_detected = detect_flash(cpi, 0);
+
+ // Update the motion related elements to the boost calculation
+ accumulate_frame_motion_stats(&next_frame,
+ &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+ // Cumulative effect of prediction quality decay
+ if (!flash_detected) {
+ last_loop_decay_rate = loop_decay_rate;
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+ decay_accumulator = decay_accumulator * loop_decay_rate;
+
+ // Monitor for static sections.
+ if ((next_frame.pcnt_inter - next_frame.pcnt_motion) <
+ zero_motion_accumulator) {
+ zero_motion_accumulator =
+ (next_frame.pcnt_inter - next_frame.pcnt_motion);
+ }
+
+ // Break clause to detect very still sections after motion
+ // (for example a static image after a fade or other transition).
+ if (detect_transition_to_still(cpi, i, 5, loop_decay_rate,
+ last_loop_decay_rate)) {
+ allow_alt_ref = 0;
+ break;
+ }
+ }
+
+ // Calculate a boost number for this frame
+ boost_score +=
+ (decay_accumulator *
+ calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out));
+
+ // Break out conditions.
+ if (
+ // Break at cpi->max_gf_interval unless almost totally static
+ (i >= active_max_gf_interval && (zero_motion_accumulator < 0.995)) ||
+ (
+ // Don't break out with a very short interval
+ (i > MIN_GF_INTERVAL) &&
+ // Don't break out very close to a key frame
+ ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
+ ((boost_score > 125.0) || (next_frame.pcnt_inter < 0.75)) &&
+ (!flash_detected) &&
+ ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
+ (abs_mv_in_out_accumulator > 3.0) ||
+ (mv_in_out_accumulator < -2.0) ||
+ ((boost_score - old_boost_score) < IIFACTOR))
+ )) {
+ boost_score = old_boost_score;
+ break;
+ }
+
+ *this_frame = next_frame;
+
+ old_boost_score = boost_score;
+ }
+
+ cpi->gf_zeromotion_pct = (int)(zero_motion_accumulator * 1000.0);
+
+ // Don't allow a gf too near the next kf
+ if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) {
+ while (i < cpi->twopass.frames_to_key) {
+ i++;
+
+ if (EOF == input_stats(cpi, this_frame))
+ break;
+
+ if (i < cpi->twopass.frames_to_key) {
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+ gf_group_err += mod_frame_err;
+ }
+ }
+ }
+
+ // Set the interval until the next gf or arf.
+ cpi->baseline_gf_interval = i;
+
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ // Initialize frame coding order variables.
+ cpi->new_frame_coding_order_period = 0;
+ cpi->next_frame_in_order = 0;
+ cpi->arf_buffered = 0;
+ vp9_zero(cpi->frame_coding_order);
+ vp9_zero(cpi->arf_buffer_idx);
+ vpx_memset(cpi->arf_weight, -1, sizeof(cpi->arf_weight));
+ }
+#endif
+
+ // Should we use the alternate reference frame?
+ if (allow_alt_ref &&
+ (i < cpi->oxcf.lag_in_frames) &&
+ (i >= MIN_GF_INTERVAL) &&
+ // don't use an ARF very near the next kf
+ (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) &&
+ ((next_frame.pcnt_inter > 0.75) ||
+ (next_frame.pcnt_second_ref > 0.5)) &&
+ ((mv_in_out_accumulator / (double)i > -0.2) ||
+ (mv_in_out_accumulator > -2.0)) &&
+ (boost_score > 100)) {
+ // Alternative boost calculation for alt ref
+ cpi->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
+ cpi->source_alt_ref_pending = 1;
+
+#if CONFIG_MULTIPLE_ARF
+ // Set the ARF schedule.
+ if (cpi->multi_arf_enabled) {
+ schedule_frames(cpi, 0, -(cpi->baseline_gf_interval - 1), 2, 1, 0);
+ }
+#endif
+ } else {
+ cpi->gfu_boost = (int)boost_score;
+ cpi->source_alt_ref_pending = 0;
+#if CONFIG_MULTIPLE_ARF
+ // Set the GF schedule.
+ if (cpi->multi_arf_enabled) {
+ schedule_frames(cpi, 0, cpi->baseline_gf_interval - 1, 2, 0, 0);
+ assert(cpi->new_frame_coding_order_period == cpi->baseline_gf_interval);
+ }
+#endif
+ }
+
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled && (cpi->common.frame_type != KEY_FRAME)) {
+ int max_level = INT_MIN;
+ // Replace level indicator of -1 with correct level.
+ for (i = 0; i < cpi->frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] > max_level) {
+ max_level = cpi->arf_weight[i];
+ }
+ }
+ ++max_level;
+ for (i = 0; i < cpi->frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] == -1) {
+ cpi->arf_weight[i] = max_level;
+ }
+ }
+ cpi->max_arf_level = max_level;
+ }
+#if 0
+ if (cpi->multi_arf_enabled) {
+ printf("\nSchedule: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->frame_coding_order[i]);
+ }
+ printf("\n");
+ printf("ARFref: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_buffer_idx[i]);
+ }
+ printf("\n");
+ printf("Weight: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_weight[i]);
+ }
+ printf("\n");
+ }
+#endif
+#endif
+
+ // Now decide how many bits should be allocated to the GF group as a
+ // proportion of those remaining in the kf group.
+ // The final key frame group in the clip is treated as a special case
+ // where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left.
+ // This is also important for short clips where there may only be one
+ // key frame.
+ if (cpi->twopass.frames_to_key >= (int)(cpi->twopass.total_stats.count -
+ cpi->common.current_video_frame)) {
+ cpi->twopass.kf_group_bits =
+ (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0;
+ }
+
+ // Calculate the bits to be allocated to the group as a whole
+ if ((cpi->twopass.kf_group_bits > 0) &&
+ (cpi->twopass.kf_group_error_left > 0)) {
+ cpi->twopass.gf_group_bits =
+ (int64_t)(cpi->twopass.kf_group_bits *
+ (gf_group_err / cpi->twopass.kf_group_error_left));
+ } else
+ cpi->twopass.gf_group_bits = 0;
+
+ // Clamp the group bits to the range [0, kf_group_bits].
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
+ else if (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits)
+ cpi->twopass.gf_group_bits = cpi->twopass.kf_group_bits;
+
+ // Clip cpi->twopass.gf_group_bits based on user supplied data rate
+ // variability limit (cpi->oxcf.two_pass_vbrmax_section)
+ if (cpi->twopass.gf_group_bits >
+ (int64_t)max_bits * cpi->baseline_gf_interval)
+ cpi->twopass.gf_group_bits = (int64_t)max_bits * cpi->baseline_gf_interval;
+
+ // Reset the file position
+ reset_fpf_position(cpi, start_pos);
+
+ // Update the record of error used so far (only done once per gf group)
+ cpi->twopass.modified_error_used += gf_group_err;
+
+ // Assign bits to the arf or gf.
+ for (i = 0;
+ i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME);
+ ++i) {
+ int allocation_chunks;
+ int q = cpi->oxcf.fixed_q < 0 ? cpi->last_q[INTER_FRAME]
+ : cpi->oxcf.fixed_q;
+ int gf_bits;
+
+ int boost = (cpi->gfu_boost * vp9_gfboost_qadjust(q)) / 100;
+
+ // Set max and minimum boost and hence minimum allocation
+ boost = clamp(boost, 125, (cpi->baseline_gf_interval + 1) * 200);
+
+ if (cpi->source_alt_ref_pending && i == 0)
+ allocation_chunks = ((cpi->baseline_gf_interval + 1) * 100) + boost;
+ else
+ allocation_chunks = (cpi->baseline_gf_interval * 100) + (boost - 100);
+
+ // Prevent overflow
+ if (boost > 1023) {
+ int divisor = boost >> 10;
+ boost /= divisor;
+ allocation_chunks /= divisor;
+ }
+
+ // Calculate the number of bits to be spent on the gf or arf based on
+ // the boost number
+ gf_bits = (int)((double)boost * (cpi->twopass.gf_group_bits /
+ (double)allocation_chunks));
+
+ // If the frame that is to be boosted is simpler than the average for
+ // the gf/arf group then use an alternative calculation
+ // based on the error score of the frame itself
+ if (mod_frame_err < gf_group_err / (double)cpi->baseline_gf_interval) {
+ double alt_gf_grp_bits =
+ (double)cpi->twopass.kf_group_bits *
+ (mod_frame_err * (double)cpi->baseline_gf_interval) /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.kf_group_error_left);
+
+ int alt_gf_bits = (int)((double)boost * (alt_gf_grp_bits /
+ (double)allocation_chunks));
+
+ if (gf_bits > alt_gf_bits)
+ gf_bits = alt_gf_bits;
+ }
+ // Else if it is harder than other frames in the group make sure it at
+ // least receives an allocation in keeping with its relative error
+ // score, otherwise it may be worse off than an "un-boosted" frame
+ else {
+ int alt_gf_bits = (int)((double)cpi->twopass.kf_group_bits *
+ mod_frame_err /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.kf_group_error_left));
+
+ if (alt_gf_bits > gf_bits)
+ gf_bits = alt_gf_bits;
+ }
+
+ // Don't allow a negative value for gf_bits.
+ if (gf_bits < 0)
+ gf_bits = 0;
+
+ // Add in minimum for a frame
+ gf_bits += cpi->min_frame_bandwidth;
+
+ if (i == 0) {
+ cpi->twopass.gf_bits = gf_bits;
+ }
+ if (i == 1 || (!cpi->source_alt_ref_pending
+ && (cpi->common.frame_type != KEY_FRAME))) {
+ // Per frame bit target for this frame
+ cpi->per_frame_bandwidth = gf_bits;
+ }
+ }
+
+ {
+ // Adjust KF group bits and error remaining
+ cpi->twopass.kf_group_error_left -= (int64_t)gf_group_err;
+ cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits;
+
+ if (cpi->twopass.kf_group_bits < 0)
+ cpi->twopass.kf_group_bits = 0;
+
+ // Note the error score left in the remaining frames of the group.
+ // For normal GFs we want to remove the error score for the first frame
+ // of the group (except in the key frame case, where this has already
+ // happened).
+ if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME)
+ cpi->twopass.gf_group_error_left = (int64_t)(gf_group_err
+ - gf_first_frame_err);
+ else
+ cpi->twopass.gf_group_error_left = (int64_t)gf_group_err;
+
+ cpi->twopass.gf_group_bits -= cpi->twopass.gf_bits
+ - cpi->min_frame_bandwidth;
+
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
+
+ // This condition could fail if there are two kfs very close together
+ // despite MIN_GF_INTERVAL and would cause a divide by 0 in the
+ // calculation of alt_extra_bits.
+ if (cpi->baseline_gf_interval >= 3) {
+ const int boost = cpi->source_alt_ref_pending ? b_boost : cpi->gfu_boost;
+
+ if (boost >= 150) {
+ int alt_extra_bits;
+ int pct_extra = (boost - 100) / 50;
+ pct_extra = (pct_extra > 20) ? 20 : pct_extra;
+
+ alt_extra_bits = (int)((cpi->twopass.gf_group_bits * pct_extra) / 100);
+ cpi->twopass.gf_group_bits -= alt_extra_bits;
+ }
+ }
+ }
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ FIRSTPASS_STATS sectionstats;
+
+ zero_stats(&sectionstats);
+ reset_fpf_position(cpi, start_pos);
+
+ for (i = 0; i < cpi->baseline_gf_interval; i++) {
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
+ }
+
+ avg_stats(&sectionstats);
+
+ cpi->twopass.section_intra_rating = (int)
+ (sectionstats.intra_error /
+ DOUBLE_DIVIDE_CHECK(sectionstats.coded_error));
+
+ reset_fpf_position(cpi, start_pos);
+ }
+}
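
The boost-to-bits mapping in the allocation loop above can be sanity-checked with a toy computation (all numbers invented; allocation_chunks follows the ARF branch of the code above):

#include <stdio.h>

int main(void) {
  /* Invented inputs: a 10-frame group with a clamped boost of 300. */
  const int baseline_gf_interval = 10;
  const int boost = 300;
  const double gf_group_bits = 500000.0;

  /* ARF branch: (interval + 1) * 100 chunks plus the boost chunks. */
  const int allocation_chunks = (baseline_gf_interval + 1) * 100 + boost;
  const int gf_bits = (int)(boost * (gf_group_bits / allocation_chunks));

  printf("chunks=%d arf_bits=%d (%.1f%% of the group)\n",
         allocation_chunks, gf_bits, 100.0 * gf_bits / gf_group_bits);
  return 0;
}

With these inputs the ARF gets 300 of 1400 chunks, about 107142 bits, roughly 21% of the group budget.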
+
+// Allocate bits to a normal frame that is neither a gf, an arf, nor a key frame.
+static void assign_std_frame_bits(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ int target_frame_size;
+
+ double modified_err;
+ double err_fraction;
+
+ // Max for a single frame.
+ int max_bits = frame_max_bits(cpi);
+
+ // Calculate modified prediction error used in bit allocation.
+ modified_err = calculate_modified_err(cpi, this_frame);
+
+ if (cpi->twopass.gf_group_error_left > 0)
+ // What portion of the remaining GF group error is used by this frame.
+ err_fraction = modified_err / cpi->twopass.gf_group_error_left;
+ else
+ err_fraction = 0.0;
+
+ // How many of those bits available for allocation should we give it?
+ target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction);
+
+ // Clip target size to 0 - max_bits (or cpi->twopass.gf_group_bits) at
+ // the top end.
+ if (target_frame_size < 0)
+ target_frame_size = 0;
+ else {
+ if (target_frame_size > max_bits)
+ target_frame_size = max_bits;
+
+ if (target_frame_size > cpi->twopass.gf_group_bits)
+ target_frame_size = (int)cpi->twopass.gf_group_bits;
+ }
+
+ // Adjust error and bits remaining.
+ cpi->twopass.gf_group_error_left -= (int64_t)modified_err;
+ cpi->twopass.gf_group_bits -= target_frame_size;
+
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
+
+ // Add in the minimum number of bits that is set aside for every frame.
+ target_frame_size += cpi->min_frame_bandwidth;
+
+ // Per frame bit target for this frame.
+ cpi->per_frame_bandwidth = target_frame_size;
+}
+
+// Make a damped adjustment to the active max q.
+static int adjust_active_maxq(int old_maxqi, int new_maxqi) {
+ int i;
+ const double old_q = vp9_convert_qindex_to_q(old_maxqi);
+ const double new_q = vp9_convert_qindex_to_q(new_maxqi);
+ const double target_q = ((old_q * 7.0) + new_q) / 8.0;
+
+ if (target_q > old_q) {
+ for (i = old_maxqi; i <= new_maxqi; i++)
+ if (vp9_convert_qindex_to_q(i) >= target_q)
+ return i;
+ } else {
+ for (i = old_maxqi; i >= new_maxqi; i--)
+ if (vp9_convert_qindex_to_q(i) <= target_q)
+ return i;
+ }
+
+ return new_maxqi;
+}
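
A minimal sketch of how the 7/8 damping above behaves, assuming a made-up monotone qindex-to-q mapping in place of the real vp9_convert_qindex_to_q() table:

#include <stdio.h>

static double q_of(int qi) { return 0.25 * qi + 1.0; }  /* stand-in mapping */

static int demo_adjust(int old_maxqi, int new_maxqi) {
  const double old_q = q_of(old_maxqi);
  const double new_q = q_of(new_maxqi);
  const double target_q = (old_q * 7.0 + new_q) / 8.0;  /* 7/8 damping */
  int i;
  if (target_q > old_q) {
    for (i = old_maxqi; i <= new_maxqi; i++)
      if (q_of(i) >= target_q) return i;
  } else {
    for (i = old_maxqi; i >= new_maxqi; i--)
      if (q_of(i) <= target_q) return i;
  }
  return new_maxqi;
}

int main(void) {
  /* A requested jump from qindex 40 to 120 moves only ~1/8 of the way
   * per call: with the linear stand-in mapping this prints 50. */
  printf("adjusted maxqi = %d\n", demo_adjust(40, 120));
  return 0;
}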
+
+void vp9_second_pass(VP9_COMP *cpi) {
+ int tmp_q;
+ int frames_left = (int)(cpi->twopass.total_stats.count -
+ cpi->common.current_video_frame);
+
+ FIRSTPASS_STATS this_frame;
+ FIRSTPASS_STATS this_frame_copy;
+
+ double this_frame_intra_error;
+ double this_frame_coded_error;
+
+ if (!cpi->twopass.stats_in)
+ return;
+
+ vp9_clear_system_state();
+
+ if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
+ cpi->active_worst_quality = cpi->oxcf.cq_level;
+ } else {
+ // Special case code for first frame.
+ if (cpi->common.current_video_frame == 0) {
+ int section_target_bandwidth =
+ (int)(cpi->twopass.bits_left / frames_left);
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
+
+ // Set a cq_level in constrained quality mode.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ int est_cq = estimate_cq(cpi, &cpi->twopass.total_left_stats,
+ section_target_bandwidth);
+
+ cpi->cq_target_quality = cpi->oxcf.cq_level;
+ if (est_cq > cpi->cq_target_quality)
+ cpi->cq_target_quality = est_cq;
+ }
+
+ // Guess at the maxq needed in the second pass.
+ cpi->twopass.maxq_max_limit = cpi->worst_quality;
+ cpi->twopass.maxq_min_limit = cpi->best_quality;
+
+ tmp_q = estimate_max_q(cpi, &cpi->twopass.total_left_stats,
+ section_target_bandwidth);
+
+ cpi->active_worst_quality = tmp_q;
+ cpi->ni_av_qi = tmp_q;
+ cpi->avg_q = vp9_convert_qindex_to_q(tmp_q);
+
+#ifndef ONE_SHOT_Q_ESTIMATE
+ // Limit the maxq value returned subsequently.
+ // This increases the risk of overspend or underspend if the initial
+ // estimate for the clip is bad, but helps prevent excessive
+ // variation in Q, especially near the end of a clip
+ // where, for example, a small overspend may cause Q to crash.
+ adjust_maxq_qrange(cpi);
+#endif
+ }
+
+#ifndef ONE_SHOT_Q_ESTIMATE
+ // The last few frames of a clip almost always have too few or too many
+ // bits, and for the sake of overly exact rate control we don't want to
+ // make radical adjustments to the allowed quantizer range just to use up
+ // a few surplus bits or get beneath the target rate.
+ else if ((cpi->common.current_video_frame <
+ (((unsigned int)cpi->twopass.total_stats.count * 255) >> 8)) &&
+ ((cpi->common.current_video_frame + cpi->baseline_gf_interval) <
+ (unsigned int)cpi->twopass.total_stats.count)) {
+ int section_target_bandwidth =
+ (int)(cpi->twopass.bits_left / frames_left);
+ if (frames_left < 1)
+ frames_left = 1;
+
+ tmp_q = estimate_max_q(
+ cpi,
+ &cpi->twopass.total_left_stats,
+ section_target_bandwidth);
+
+ // Make a damped adjustment to active max Q
+ cpi->active_worst_quality =
+ adjust_active_maxq(cpi->active_worst_quality, tmp_q);
+ }
+#endif
+ }
+ vp9_zero(this_frame);
+ if (EOF == input_stats(cpi, &this_frame))
+ return;
+
+ this_frame_intra_error = this_frame.intra_error;
+ this_frame_coded_error = this_frame.coded_error;
+
+ // Keyframe and section processing.
+ if (cpi->twopass.frames_to_key == 0) {
+ // Define next KF group and assign bits to it
+ this_frame_copy = this_frame;
+ find_next_key_frame(cpi, &this_frame_copy);
+ }
+
+ // Is this a GF / ARF? (Note that a KF is always also a GF.)
+ if (cpi->frames_till_gf_update_due == 0) {
+ // Define next gf group and assign bits to it
+ this_frame_copy = this_frame;
+
+ cpi->gf_zeromotion_pct = 0;
+
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ define_fixed_arf_period(cpi);
+ } else {
+#endif
+ define_gf_group(cpi, &this_frame_copy);
+#if CONFIG_MULTIPLE_ARF
+ }
+#endif
+
+ if (cpi->gf_zeromotion_pct > 995) {
+ // As long as max_thresh for encode breakout is small enough, it is ok
+ // to enable it for no-show frames, i.e. set enable_encode_breakout to 2.
+ if (!cpi->common.show_frame)
+ cpi->enable_encode_breakout = 0;
+ else
+ cpi->enable_encode_breakout = 2;
+ }
+
+ // If we are going to code an altref frame at the end of the group
+ // and the current frame is not a key frame...
+ // If the previous group used an arf, this frame has already benefited
+ // from that arf boost and should not be given extra bits.
+ // If the previous group was NOT coded using an arf, we may want to apply
+ // some boost to this GF as well.
+ if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)) {
+ // Assign a standard frame's worth of bits from those allocated
+ // to the GF group.
+ int bak = cpi->per_frame_bandwidth;
+ this_frame_copy = this_frame;
+ assign_std_frame_bits(cpi, &this_frame_copy);
+ cpi->per_frame_bandwidth = bak;
+ }
+ } else {
+ // Otherwise this is an ordinary frame.
+ // Assign bits from those allocated to the GF group.
+ this_frame_copy = this_frame;
+ assign_std_frame_bits(cpi, &this_frame_copy);
+ }
+
+ // Keep a globally available copy of this and the next frame's iiratio.
+ cpi->twopass.this_iiratio = (int)(this_frame_intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame_coded_error));
+ {
+ FIRSTPASS_STATS next_frame;
+ if (lookup_next_frame_stats(cpi, &next_frame) != EOF) {
+ cpi->twopass.next_iiratio = (int)(next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+ }
+ }
+
+ // Set nominal per second bandwidth for this frame
+ cpi->target_bandwidth = (int)(cpi->per_frame_bandwidth
+ * cpi->output_framerate);
+ if (cpi->target_bandwidth < 0)
+ cpi->target_bandwidth = 0;
+
+ cpi->twopass.frames_to_key--;
+
+ // Update the total stats remaining structure
+ subtract_stats(&cpi->twopass.total_left_stats, &this_frame);
+}
+
+static int test_candidate_kf(VP9_COMP *cpi,
+ FIRSTPASS_STATS *last_frame,
+ FIRSTPASS_STATS *this_frame,
+ FIRSTPASS_STATS *next_frame) {
+ int is_viable_kf = 0;
+
+ // Does the frame satisfy the primary criteria of a key frame?
+ // If so, then examine how well it predicts subsequent frames.
+ if ((this_frame->pcnt_second_ref < 0.10) &&
+ (next_frame->pcnt_second_ref < 0.10) &&
+ ((this_frame->pcnt_inter < 0.05) ||
+ (
+ ((this_frame->pcnt_inter - this_frame->pcnt_neutral) < .35) &&
+ ((this_frame->intra_error / DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < 2.5) &&
+ ((fabs(last_frame->coded_error - this_frame->coded_error) / DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > .40) ||
+ (fabs(last_frame->intra_error - this_frame->intra_error) / DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > .40) ||
+ ((next_frame->intra_error / DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > 3.5)
+ )
+ )
+ )
+ ) {
+ int i;
+ FIRSTPASS_STATS *start_pos;
+
+ FIRSTPASS_STATS local_next_frame;
+
+ double boost_score = 0.0;
+ double old_boost_score = 0.0;
+ double decay_accumulator = 1.0;
+ double next_iiratio;
+
+ local_next_frame = *next_frame;
+
+ // Note the starting file position so we can reset to it
+ start_pos = cpi->twopass.stats_in;
+
+ // Examine how well the key frame predicts subsequent frames
+ for (i = 0; i < 16; i++) {
+ next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error / DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error));
+
+ if (next_iiratio > RMAX)
+ next_iiratio = RMAX;
+
+ // Cumulative effect of decay in prediction quality
+ if (local_next_frame.pcnt_inter > 0.85)
+ decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
+ else
+ decay_accumulator = decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0);
+
+ // decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
+
+ // Keep a running total
+ boost_score += (decay_accumulator * next_iiratio);
+
+ // Test various breakout clauses
+ if ((local_next_frame.pcnt_inter < 0.05) ||
+ (next_iiratio < 1.5) ||
+ (((local_next_frame.pcnt_inter -
+ local_next_frame.pcnt_neutral) < 0.20) &&
+ (next_iiratio < 3.0)) ||
+ ((boost_score - old_boost_score) < 3.0) ||
+ (local_next_frame.intra_error < 200)
+ ) {
+ break;
+ }
+
+ old_boost_score = boost_score;
+
+ // Get the next frame details
+ if (EOF == input_stats(cpi, &local_next_frame))
+ break;
+ }
+
+ // If there is tolerable prediction for at least the next 3 frames then
+ // break out; else discard this potential key frame and move on.
+ if (boost_score > 30.0 && (i > 3))
+ is_viable_kf = 1;
+ else {
+ // Reset the file position
+ reset_fpf_position(cpi, start_pos);
+
+ is_viable_kf = 0;
+ }
+ }
+
+ return is_viable_kf;
+}
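
The prediction-quality loop above is easier to picture with constant inputs; a toy run (invented pcnt_inter and intra/coded error ratio, not first-pass data) shows the geometric decay of the accumulator and the resulting boost:

#include <stdio.h>

int main(void) {
  double decay_accumulator = 1.0, boost_score = 0.0;
  const double pcnt_inter = 0.90;    /* assumed; takes the > 0.85 branch */
  const double next_iiratio = 5.0;   /* assumed intra/coded error ratio */
  int i;

  for (i = 0; i < 16; i++) {
    decay_accumulator *= pcnt_inter; /* geometric decay, as above */
    boost_score += decay_accumulator * next_iiratio;
  }
  /* With these inputs boost_score ends near 36.7, comfortably past the
   * 30.0 viability threshold used by test_candidate_kf(). */
  printf("final boost_score = %.2f\n", boost_score);
  return 0;
}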
+static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ int i, j;
+ FIRSTPASS_STATS last_frame;
+ FIRSTPASS_STATS first_frame;
+ FIRSTPASS_STATS next_frame;
+ FIRSTPASS_STATS *start_position;
+
+ double decay_accumulator = 1.0;
+ double zero_motion_accumulator = 1.0;
+ double boost_score = 0;
+ double loop_decay_rate;
+
+ double kf_mod_err = 0.0;
+ double kf_group_err = 0.0;
+ double kf_group_intra_err = 0.0;
+ double kf_group_coded_err = 0.0;
+ double recent_loop_decay[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+
+ vp9_zero(next_frame);
+
+ vp9_clear_system_state(); // __asm emms;
+ start_position = cpi->twopass.stats_in;
+
+ cpi->common.frame_type = KEY_FRAME;
+
+ // Is this a forced key frame by interval?
+ cpi->this_key_frame_forced = cpi->next_key_frame_forced;
+
+ // Clear the alt ref active flag as this can never be active on a key frame
+ cpi->source_alt_ref_active = 0;
+
+ // Kf is always a gf so clear frames till next gf counter
+ cpi->frames_till_gf_update_due = 0;
+
+ cpi->twopass.frames_to_key = 1;
+
+ // Take a copy of the initial frame details
+ first_frame = *this_frame;
+
+ cpi->twopass.kf_group_bits = 0; // Total bits available to kf group
+ cpi->twopass.kf_group_error_left = 0; // Group modified error score.
+
+ kf_mod_err = calculate_modified_err(cpi, this_frame);
+
+ // Find the next keyframe.
+ i = 0;
+ while (cpi->twopass.stats_in < cpi->twopass.stats_in_end) {
+ // Accumulate kf group error
+ kf_group_err += calculate_modified_err(cpi, this_frame);
+
+ // These figures keep intra and coded error counts for all frames,
+ // including key frames, in the group. The effect of the key frame itself
+ // can be subtracted out using the first_frame data collected above.
+ kf_group_intra_err += this_frame->intra_error;
+ kf_group_coded_err += this_frame->coded_error;
+
+ // Load the next frame's stats.
+ last_frame = *this_frame;
+ input_stats(cpi, this_frame);
+
+ // Provided that we are not at the end of the file...
+ if (cpi->oxcf.auto_key
+ && lookup_next_frame_stats(cpi, &next_frame) != EOF) {
+ // Normal scene cut check
+ if (test_candidate_kf(cpi, &last_frame, this_frame, &next_frame))
+ break;
+
+ // How fast is prediction quality decaying
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+
+ // We want to know something about the recent past... rather than
+ // as used elsewhere where we are concerned with decay in prediction
+ // quality since the last GF or KF.
+ recent_loop_decay[i % 8] = loop_decay_rate;
+ decay_accumulator = 1.0;
+ for (j = 0; j < 8; j++)
+ decay_accumulator *= recent_loop_decay[j];
+
+ // Special check for a transition or high motion followed by a
+ // static scene.
+ if (detect_transition_to_still(cpi, i, cpi->key_frame_frequency - i,
+ loop_decay_rate, decay_accumulator))
+ break;
+
+ // Step on to the next frame
+ cpi->twopass.frames_to_key++;
+
+ // If we don't have a real key frame within the next two
+ // key_frame_frequency intervals then break out of the loop.
+ if (cpi->twopass.frames_to_key >= 2 * (int)cpi->key_frame_frequency)
+ break;
+ } else
+ cpi->twopass.frames_to_key++;
+
+ i++;
+ }
+
+ // If there is a max kf interval set by the user we must obey it.
+ // We already breakout of the loop above at 2x max.
+ // This code centers the extra kf if the actual natural
+ // interval is between 1x and 2x
+ if (cpi->oxcf.auto_key
+ && cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency) {
+ FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in;
+ FIRSTPASS_STATS tmp_frame;
+
+ cpi->twopass.frames_to_key /= 2;
+
+ // Copy first frame details
+ tmp_frame = first_frame;
+
+ // Reset to the start of the group
+ reset_fpf_position(cpi, start_position);
+
+ kf_group_err = 0;
+ kf_group_intra_err = 0;
+ kf_group_coded_err = 0;
+
+ // Rescan to get the correct error data for the forced kf group
+ for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+ // Accumulate kf group errors
+ kf_group_err += calculate_modified_err(cpi, &tmp_frame);
+ kf_group_intra_err += tmp_frame.intra_error;
+ kf_group_coded_err += tmp_frame.coded_error;
+
+ // Load the next frame's stats.
+ input_stats(cpi, &tmp_frame);
+ }
+
+ // Reset to the start of the group
+ reset_fpf_position(cpi, current_pos);
+
+ cpi->next_key_frame_forced = 1;
+ } else
+ cpi->next_key_frame_forced = 0;
+
+ // Special case for the last frame of the file
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) {
+ // Accumulate kf group error
+ kf_group_err += calculate_modified_err(cpi, this_frame);
+
+ // These figures keep intra and coded error counts for all frames,
+ // including key frames, in the group. The effect of the key frame itself
+ // can be subtracted out using the first_frame data collected above.
+ kf_group_intra_err += this_frame->intra_error;
+ kf_group_coded_err += this_frame->coded_error;
+ }
+
+ // Calculate the number of bits that should be assigned to the kf group.
+ if ((cpi->twopass.bits_left > 0) && (cpi->twopass.modified_error_left > 0.0)) {
+ // Max for a single normal frame (not key frame)
+ int max_bits = frame_max_bits(cpi);
+
+ // Maximum bits for the kf group
+ int64_t max_grp_bits;
+
+ // Default allocation based on bits left and relative
+ // complexity of the section
+ cpi->twopass.kf_group_bits = (int64_t)(cpi->twopass.bits_left *
+ (kf_group_err /
+ cpi->twopass.modified_error_left));
+
+ // Clip based on maximum per frame rate defined by the user.
+ max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
+ if (cpi->twopass.kf_group_bits > max_grp_bits)
+ cpi->twopass.kf_group_bits = max_grp_bits;
+ } else
+ cpi->twopass.kf_group_bits = 0;
+
+ // Reset the first pass file position
+ reset_fpf_position(cpi, start_position);
+
+ // Determine how big to make this keyframe based on how well the
+ // subsequent frames use inter blocks.
+ decay_accumulator = 1.0;
+ boost_score = 0.0;
+ loop_decay_rate = 1.00; // Starting decay rate
+
+ // Scan through the kf group collating various stats.
+ for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+ double r;
+
+ if (EOF == input_stats(cpi, &next_frame))
+ break;
+
+ // Monitor for static sections.
+ if ((next_frame.pcnt_inter - next_frame.pcnt_motion) <
+ zero_motion_accumulator) {
+ zero_motion_accumulator =
+ (next_frame.pcnt_inter - next_frame.pcnt_motion);
+ }
+
+ // For the first few frames collect data to decide kf boost.
+ if (i <= (cpi->max_gf_interval * 2)) {
+ if (next_frame.intra_error > cpi->twopass.kf_intra_err_min)
+ r = (IIKFACTOR2 * next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+ else
+ r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+
+ if (r > RMAX)
+ r = RMAX;
+
+ // How fast is prediction quality decaying
+ if (!detect_flash(cpi, 0)) {
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+ decay_accumulator = decay_accumulator * loop_decay_rate;
+ decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
+ ? MIN_DECAY_FACTOR : decay_accumulator;
+ }
+
+ boost_score += (decay_accumulator * r);
+ }
+ }
+
+ {
+ FIRSTPASS_STATS sectionstats;
+
+ zero_stats(&sectionstats);
+ reset_fpf_position(cpi, start_position);
+
+ for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
+ }
+
+ avg_stats(&sectionstats);
+
+ cpi->twopass.section_intra_rating = (int)
+ (sectionstats.intra_error
+ / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error));
+ }
+
+ // Reset the first pass file position
+ reset_fpf_position(cpi, start_position);
+
+ // Work out how many bits to allocate for the key frame itself
+ if (1) {
+ int kf_boost = (int)boost_score;
+ int allocation_chunks;
+ int alt_kf_bits;
+
+ if (kf_boost < (cpi->twopass.frames_to_key * 3))
+ kf_boost = (cpi->twopass.frames_to_key * 3);
+
+ if (kf_boost < 300) // Min KF boost
+ kf_boost = 300;
+
+ // Make a note of baseline boost and the zero motion
+ // accumulator value for use elsewhere.
+ cpi->kf_boost = kf_boost;
+ cpi->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
+
+ // We do three calculations for kf size.
+ // The first is based on the error score for the whole kf group.
+ // The second (optionally) is based on the key frame's own error if
+ // this is smaller than the average for the group.
+ // The final one ensures that the frame receives at least the
+ // allocation it would have received based on its own error score vs
+ // the error score remaining.
+ // Special case: if the sequence appears almost totally static,
+ // we want to spend almost all of the bits on the key frame.
+ // cpi->twopass.frames_to_key - 1 because the key frame itself is
+ // taken care of by kf_boost.
+ if (zero_motion_accumulator >= 0.99) {
+ allocation_chunks =
+ ((cpi->twopass.frames_to_key - 1) * 10) + kf_boost;
+ } else {
+ allocation_chunks =
+ ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost;
+ }
+
+ // Prevent overflow
+ if (kf_boost > 1028) {
+ int divisor = kf_boost >> 10;
+ kf_boost /= divisor;
+ allocation_chunks /= divisor;
+ }
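+ // (Illustration with assumed numbers: kf_boost = 5000 gives divisor = 4;
+ // a 5000/12000 chunk share and the scaled-down 1250/3000 share are both
+ // ~0.4167, so the allocation is unchanged but intermediates stay small.)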
+
+ cpi->twopass.kf_group_bits =
+ (cpi->twopass.kf_group_bits < 0) ? 0 : cpi->twopass.kf_group_bits;
+
+ // Calculate the number of bits to be spent on the key frame.
+ cpi->twopass.kf_bits =
+ (int)((double)kf_boost *
+ ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks));
+
+ // If the key frame is actually easier than the average for the
+ // kf group (which does sometimes happen, e.g. a blank intro frame),
+ // then use an alternate calculation based on the kf error score
+ // which should give a smaller key frame.
+ if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key) {
+ double alt_kf_grp_bits =
+ ((double)cpi->twopass.bits_left *
+ (kf_mod_err * (double)cpi->twopass.frames_to_key) /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left));
+
+ alt_kf_bits = (int)((double)kf_boost *
+ (alt_kf_grp_bits / (double)allocation_chunks));
+
+ if (cpi->twopass.kf_bits > alt_kf_bits) {
+ cpi->twopass.kf_bits = alt_kf_bits;
+ }
+ }
+ // Else if it is much harder than other frames in the group, make sure
+ // it at least receives an allocation in keeping with its relative
+ // error score.
+ else {
+ alt_kf_bits =
+ (int)((double)cpi->twopass.bits_left *
+ (kf_mod_err /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left)));
+
+ if (alt_kf_bits > cpi->twopass.kf_bits) {
+ cpi->twopass.kf_bits = alt_kf_bits;
+ }
+ }
+
+ cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits;
+ // Add in the minimum frame allowance
+ cpi->twopass.kf_bits += cpi->min_frame_bandwidth;
+
+ // Per frame bit target for this frame.
+ cpi->per_frame_bandwidth = cpi->twopass.kf_bits;
+ // Convert to a per second bitrate
+ cpi->target_bandwidth = (int)(cpi->twopass.kf_bits *
+ cpi->output_framerate);
+ }
+
+ // Note the total error score of the kf group minus the key frame itself
+ cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err);
+
+ // Adjust the count of total modified error left.
+ // The count of bits left is adjusted elsewhere based on real coded
+ // frame sizes.
+ cpi->twopass.modified_error_left -= kf_group_err;
+}
diff --git a/libvpx/vp9/encoder/vp9_firstpass.h b/libvpx/vp9/encoder/vp9_firstpass.h
new file mode 100644
index 0000000..2296a66
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_firstpass.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_FIRSTPASS_H_
+#define VP9_ENCODER_VP9_FIRSTPASS_H_
+
+void vp9_init_first_pass(VP9_COMP *cpi);
+void vp9_first_pass(VP9_COMP *cpi);
+void vp9_end_first_pass(VP9_COMP *cpi);
+
+void vp9_init_second_pass(VP9_COMP *cpi);
+void vp9_second_pass(VP9_COMP *cpi);
+void vp9_end_second_pass(VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_FIRSTPASS_H_
diff --git a/libvpx/vp9/encoder/vp9_lookahead.c b/libvpx/vp9/encoder/vp9_lookahead.c
new file mode 100644
index 0000000..81445a9
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_lookahead.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+#include "vpx_config.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/encoder/vp9_lookahead.h"
+#include "vp9/common/vp9_extend.h"
+
+struct lookahead_ctx {
+ unsigned int max_sz; /* Absolute size of the queue */
+ unsigned int sz; /* Number of buffers currently in the queue */
+ unsigned int read_idx; /* Read index */
+ unsigned int write_idx; /* Write index */
+ struct lookahead_entry *buf; /* Buffer list */
+};
+
+
+/* Return the buffer at the given absolute index and increment the index */
+static struct lookahead_entry * pop(struct lookahead_ctx *ctx,
+ unsigned int *idx) {
+ unsigned int index = *idx;
+ struct lookahead_entry *buf = ctx->buf + index;
+
+ assert(index < ctx->max_sz);
+ if (++index >= ctx->max_sz)
+ index -= ctx->max_sz;
+ *idx = index;
+ return buf;
+}
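
The wrap in pop() above is the whole of the ring-buffer bookkeeping; a standalone illustration (the advance() helper and the queue depth of 4 are invented for the demo):

#include <stdio.h>

/* Same index-advance logic as pop(): step forward, wrap at max_sz. */
static unsigned int advance(unsigned int idx, unsigned int max_sz) {
  if (++idx >= max_sz)
    idx -= max_sz;
  return idx;
}

int main(void) {
  unsigned int idx = 0, i;
  for (i = 0; i < 6; i++) {
    printf("%u ", idx);   /* prints: 0 1 2 3 0 1 */
    idx = advance(idx, 4);
  }
  printf("\n");
  return 0;
}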
+
+
+void vp9_lookahead_destroy(struct lookahead_ctx *ctx) {
+ if (ctx) {
+ if (ctx->buf) {
+ unsigned int i;
+
+ for (i = 0; i < ctx->max_sz; i++)
+ vp9_free_frame_buffer(&ctx->buf[i].img);
+ free(ctx->buf);
+ }
+ free(ctx);
+ }
+}
+
+
+struct lookahead_ctx * vp9_lookahead_init(unsigned int width,
+ unsigned int height,
+ unsigned int subsampling_x,
+ unsigned int subsampling_y,
+ unsigned int depth) {
+ struct lookahead_ctx *ctx = NULL;
+
+ // Clamp the lookahead queue depth
+ depth = clamp(depth, 1, MAX_LAG_BUFFERS);
+
+ // Allocate the lookahead structures
+ ctx = calloc(1, sizeof(*ctx));
+ if (ctx) {
+ unsigned int i;
+ ctx->max_sz = depth;
+ ctx->buf = calloc(depth, sizeof(*ctx->buf));
+ if (!ctx->buf)
+ goto bail;
+ for (i = 0; i < depth; i++)
+ if (vp9_alloc_frame_buffer(&ctx->buf[i].img,
+ width, height, subsampling_x, subsampling_y,
+ VP9BORDERINPIXELS))
+ goto bail;
+ }
+ return ctx;
+bail:
+ vp9_lookahead_destroy(ctx);
+ return NULL;
+}
+
+#define USE_PARTIAL_COPY 0
+
+int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end, unsigned int flags,
+ unsigned char *active_map) {
+ struct lookahead_entry *buf;
+#if USE_PARTIAL_COPY
+ int row, col, active_end;
+ int mb_rows = (src->y_height + 15) >> 4;
+ int mb_cols = (src->y_width + 15) >> 4;
+#endif
+
+ if (ctx->sz + 1 > ctx->max_sz)
+ return 1;
+ ctx->sz++;
+ buf = pop(ctx, &ctx->write_idx);
+
+#if USE_PARTIAL_COPY
+ // TODO(jkoleszar): This is disabled for now, as
+ // vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
+
+ // Only do this partial copy if the following conditions are all met:
+ // 1. Lookahead queue has a size of 1.
+ // 2. Active map is provided.
+ // 3. This is not a key frame, golden frame, or altref frame.
+ if (ctx->max_sz == 1 && active_map && !flags) {
+ for (row = 0; row < mb_rows; ++row) {
+ col = 0;
+
+ while (1) {
+ // Find the first active macroblock in this row.
+ for (; col < mb_cols; ++col) {
+ if (active_map[col])
+ break;
+ }
+
+ // No more active macroblocks in this row.
+ if (col == mb_cols)
+ break;
+
+ // Find the end of active region in this row.
+ active_end = col;
+
+ for (; active_end < mb_cols; ++active_end) {
+ if (!active_map[active_end])
+ break;
+ }
+
+ // Only copy this active region.
+ vp9_copy_and_extend_frame_with_rect(src, &buf->img,
+ row << 4,
+ col << 4, 16,
+ (active_end - col) << 4);
+
+ // Start again from the end of this active region.
+ col = active_end;
+ }
+
+ active_map += mb_cols;
+ }
+ } else {
+ vp9_copy_and_extend_frame(src, &buf->img);
+ }
+#else
+ // Partial copy not implemented yet
+ vp9_copy_and_extend_frame(src, &buf->img);
+#endif
+
+ buf->ts_start = ts_start;
+ buf->ts_end = ts_end;
+ buf->flags = flags;
+ return 0;
+}
+
+
+struct lookahead_entry * vp9_lookahead_pop(struct lookahead_ctx *ctx,
+ int drain) {
+ struct lookahead_entry *buf = NULL;
+
+ if (ctx->sz && (drain || ctx->sz == ctx->max_sz)) {
+ buf = pop(ctx, &ctx->read_idx);
+ ctx->sz--;
+ }
+ return buf;
+}
+
+
+struct lookahead_entry * vp9_lookahead_peek(struct lookahead_ctx *ctx,
+ int index) {
+ struct lookahead_entry *buf = NULL;
+
+ assert(index < (int)ctx->max_sz);
+ if (index < (int)ctx->sz) {
+ index += ctx->read_idx;
+ if (index >= (int)ctx->max_sz)
+ index -= ctx->max_sz;
+ buf = ctx->buf + index;
+ }
+ return buf;
+}
+
+unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) {
+ return ctx->sz;
+}
diff --git a/libvpx/vp9/encoder/vp9_lookahead.h b/libvpx/vp9/encoder/vp9_lookahead.h
new file mode 100644
index 0000000..c773f8f
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_lookahead.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_LOOKAHEAD_H_
+#define VP9_ENCODER_VP9_LOOKAHEAD_H_
+
+#include "vpx_scale/yv12config.h"
+#include "vpx/vpx_integer.h"
+
+#define MAX_LAG_BUFFERS 25
+
+struct lookahead_entry {
+ YV12_BUFFER_CONFIG img;
+ int64_t ts_start;
+ int64_t ts_end;
+ unsigned int flags;
+};
+
+
+struct lookahead_ctx;
+
+/**\brief Initializes the lookahead stage
+ *
+ * The lookahead stage is a queue of frame buffers on which some analysis
+ * may be done when buffers are enqueued.
+ */
+struct lookahead_ctx *vp9_lookahead_init(unsigned int width,
+ unsigned int height,
+ unsigned int subsampling_x,
+ unsigned int subsampling_y,
+ unsigned int depth);
+
+
+/**\brief Destroys the lookahead stage
+ */
+void vp9_lookahead_destroy(struct lookahead_ctx *ctx);
+
+
+/**\brief Enqueue a source buffer
+ *
+ * This function will copy the source image into a new framebuffer with
+ * the expected stride/border.
+ *
+ * If active_map is non-NULL and there is only one frame in the queue, then copy
+ * only active macroblocks.
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] src Pointer to the image to enqueue
+ * \param[in] ts_start Timestamp for the start of this frame
+ * \param[in] ts_end Timestamp for the end of this frame
+ * \param[in] flags Flags set on this frame
+ * \param[in] active_map Map that specifies which macroblock is active
+ */
+int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end, unsigned int flags,
+ unsigned char *active_map);
+
+
+/**\brief Get the next source buffer to encode
+ *
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] drain Flag indicating the buffer should be drained
+ * (return a buffer regardless of the current queue depth)
+ *
+ * \retval NULL, if drain set and queue is empty
+ * \retval NULL, if drain not set and queue not of the configured depth
+ */
+struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx,
+ int drain);
+
+
+/**\brief Get a future source buffer to encode
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] index Index of the frame to be returned, 0 == next frame
+ *
+ * \retval NULL, if no buffer exists at the specified index
+ */
+struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx,
+ int index);
+
+
+/**\brief Get the number of frames currently in the lookahead queue
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ */
+unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx);
+
+#endif // VP9_ENCODER_VP9_LOOKAHEAD_H_
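
A sketch of the intended call sequence for this API (it assumes a caller inside the libvpx tree that already owns a filled source buffer; the resolution, depth, and timestamps below are invented and error handling is elided):

#include "vp9/encoder/vp9_lookahead.h"

static void demo_lookahead_lifecycle(YV12_BUFFER_CONFIG *src,
                                     int64_t ts_start, int64_t ts_end) {
  struct lookahead_ctx *la =
      vp9_lookahead_init(1280, 720, 0, 0, /* depth = */ 8);
  if (!la)
    return;

  /* Enqueue: copies the source image into an internal buffer. */
  if (!vp9_lookahead_push(la, src, ts_start, ts_end, 0, NULL)) {
    /* Peek at the next frame without removing it... */
    struct lookahead_entry *next = vp9_lookahead_peek(la, 0);
    (void)next;
    /* ...then drain it for encoding (drain forces a return even
     * before the queue reaches its configured depth). */
    struct lookahead_entry *buf = vp9_lookahead_pop(la, /* drain = */ 1);
    (void)buf;
  }

  vp9_lookahead_destroy(la);
}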
diff --git a/libvpx/vp9/encoder/vp9_mbgraph.c b/libvpx/vp9/encoder/vp9_mbgraph.c
new file mode 100644
index 0000000..5a671f2
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_mbgraph.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+
+#include <vpx_mem/vpx_mem.h>
+#include <vp9/encoder/vp9_encodeintra.h>
+#include <vp9/encoder/vp9_rdopt.h>
+#include <vp9/common/vp9_blockd.h>
+#include <vp9/common/vp9_reconinter.h>
+#include <vp9/common/vp9_reconintra.h>
+#include <vp9/common/vp9_systemdependent.h>
+#include <vp9/encoder/vp9_segmentation.h>
+
+static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
+ int_mv *ref_mv,
+ int_mv *dst_mv,
+ int mb_row,
+ int mb_col) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
+ unsigned int best_err;
+
+ const int tmp_col_min = x->mv_col_min;
+ const int tmp_col_max = x->mv_col_max;
+ const int tmp_row_min = x->mv_row_min;
+ const int tmp_row_max = x->mv_row_max;
+ int_mv ref_full;
+
+ // Further step/diamond searches as necessary
+ int step_param = cpi->sf.reduce_first_step_size +
+ (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2);
+ step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2));
+
+ vp9_clamp_mv_min_max(x, &ref_mv->as_mv);
+
+ ref_full.as_mv.col = ref_mv->as_mv.col >> 3;
+ ref_full.as_mv.row = ref_mv->as_mv.row >> 3;
+
+ /*cpi->sf.search_method == HEX*/
+ best_err = vp9_hex_search(x, &ref_full, step_param, x->errorperbit,
+ 0, &v_fn_ptr,
+ 0, ref_mv, dst_mv);
+
+ // Try sub-pixel MC
+ // if (bestsme > error_thresh && bestsme < INT_MAX)
+ {
+ int distortion;
+ unsigned int sse;
+ best_err = cpi->find_fractional_mv_step(
+ x,
+ dst_mv, ref_mv,
+ x->errorperbit, &v_fn_ptr,
+ 0, cpi->sf.subpel_iters_per_step, NULL, NULL,
+ &distortion, &sse);
+ }
+
+ vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
+ vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
+ best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
+ INT_MAX);
+
+ /* restore UMV window */
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ return best_err;
+}
+
+static int do_16x16_motion_search(VP9_COMP *cpi, int_mv *ref_mv, int_mv *dst_mv,
+ int mb_row, int mb_col) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ unsigned int err, tmp_err;
+ int_mv tmp_mv;
+
+ // Try zero MV first
+ // FIXME should really use something like near/nearest MV and/or MV prediction
+ err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ INT_MAX);
+ dst_mv->as_int = 0;
+
+ // Test last reference frame using the previous best mv as the
+ // starting point (best reference) for the search
+ tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv, mb_row, mb_col);
+ if (tmp_err < err) {
+ err = tmp_err;
+ dst_mv->as_int = tmp_mv.as_int;
+ }
+
+ // If the current best reference mv is not centred on 0,0 then do a
+ // 0,0-based search as well.
+ if (ref_mv->as_int) {
+ unsigned int tmp_err;
+ int_mv zero_ref_mv, tmp_mv;
+
+ zero_ref_mv.as_int = 0;
+ tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv,
+ mb_row, mb_col);
+ if (tmp_err < err) {
+ dst_mv->as_int = tmp_mv.as_int;
+ err = tmp_err;
+ }
+ }
+
+ return err;
+}
+
+static int do_16x16_zerozero_search(VP9_COMP *cpi, int_mv *dst_mv) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ unsigned int err;
+
+ // Try zero MV first
+ // FIXME should really use something like near/nearest MV and/or MV prediction
+ err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ INT_MAX);
+
+ dst_mv->as_int = 0;
+
+ return err;
+}
+
+static int find_best_16x16_intra(VP9_COMP *cpi,
+ int mb_y_offset,
+ MB_PREDICTION_MODE *pbest_mode) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_PREDICTION_MODE best_mode = -1, mode;
+ unsigned int best_err = INT_MAX;
+
+ // calculate SATD for each intra prediction mode;
+ // we're intentionally not doing 4x4, we just want a rough estimate
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ unsigned int err;
+
+ xd->this_mi->mbmi.mode = mode;
+ vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode,
+ x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride);
+ err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err);
+
+ // find best
+ if (err < best_err) {
+ best_err = err;
+ best_mode = mode;
+ }
+ }
+
+ if (pbest_mode)
+ *pbest_mode = best_mode;
+
+ return best_err;
+}
+
+static void update_mbgraph_mb_stats(VP9_COMP *cpi,
+ MBGRAPH_MB_STATS *stats,
+ YV12_BUFFER_CONFIG *buf,
+ int mb_y_offset,
+ YV12_BUFFER_CONFIG *golden_ref,
+ int_mv *prev_golden_ref_mv,
+ int gld_y_offset,
+ YV12_BUFFER_CONFIG *alt_ref,
+ int_mv *prev_alt_ref_mv,
+ int arf_y_offset,
+ int mb_row,
+ int mb_col) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int intra_error;
+ VP9_COMMON *cm = &cpi->common;
+
+ // FIXME in practice we're completely ignoring chroma here
+ x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
+ x->plane[0].src.stride = buf->y_stride;
+
+ xd->plane[0].dst.buf = cm->yv12_fb[cm->new_fb_idx].y_buffer + mb_y_offset;
+ xd->plane[0].dst.stride = cm->yv12_fb[cm->new_fb_idx].y_stride;
+
+ // do intra 16x16 prediction
+ intra_error = find_best_16x16_intra(cpi, mb_y_offset,
+ &stats->ref[INTRA_FRAME].m.mode);
+ if (intra_error <= 0)
+ intra_error = 1;
+ stats->ref[INTRA_FRAME].err = intra_error;
+
+ // Golden frame MV search, if it exists and is different from the
+ // last frame.
+ if (golden_ref) {
+ int g_motion_error;
+ xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
+ xd->plane[0].pre[0].stride = golden_ref->y_stride;
+ g_motion_error = do_16x16_motion_search(cpi,
+ prev_golden_ref_mv,
+ &stats->ref[GOLDEN_FRAME].m.mv,
+ mb_row, mb_col);
+ stats->ref[GOLDEN_FRAME].err = g_motion_error;
+ } else {
+ stats->ref[GOLDEN_FRAME].err = INT_MAX;
+ stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
+ }
+
+ // Alt-ref frame MV search, if it exists and is different from the
+ // last/golden frame.
+ if (alt_ref) {
+ int a_motion_error;
+ xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
+ xd->plane[0].pre[0].stride = alt_ref->y_stride;
+ a_motion_error = do_16x16_zerozero_search(cpi,
+ &stats->ref[ALTREF_FRAME].m.mv);
+
+ stats->ref[ALTREF_FRAME].err = a_motion_error;
+ } else {
+ stats->ref[ALTREF_FRAME].err = INT_MAX;
+ stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
+ }
+}
+
+static void update_mbgraph_frame_stats(VP9_COMP *cpi,
+ MBGRAPH_FRAME_STATS *stats,
+ YV12_BUFFER_CONFIG *buf,
+ YV12_BUFFER_CONFIG *golden_ref,
+ YV12_BUFFER_CONFIG *alt_ref) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ VP9_COMMON *const cm = &cpi->common;
+
+ int mb_col, mb_row, offset = 0;
+ int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
+ int_mv arf_top_mv, gld_top_mv;
+ MODE_INFO mi_local = { { 0 } };
+
+ // Set up limit values for motion vectors to prevent them extending outside the UMV borders
+ arf_top_mv.as_int = 0;
+ gld_top_mv.as_int = 0;
+ x->mv_row_min = -(VP9BORDERINPIXELS - 8 - VP9_INTERP_EXTEND);
+ x->mv_row_max = (cm->mb_rows - 1) * 8 + VP9BORDERINPIXELS
+ - 8 - VP9_INTERP_EXTEND;
+ xd->up_available = 0;
+ xd->plane[0].dst.stride = buf->y_stride;
+ xd->plane[0].pre[0].stride = buf->y_stride;
+ xd->plane[1].dst.stride = buf->uv_stride;
+ xd->this_mi = &mi_local;
+ mi_local.mbmi.sb_type = BLOCK_16X16;
+ mi_local.mbmi.ref_frame[0] = LAST_FRAME;
+ mi_local.mbmi.ref_frame[1] = NONE;
+
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ int_mv arf_left_mv, gld_left_mv;
+ int mb_y_in_offset = mb_y_offset;
+ int arf_y_in_offset = arf_y_offset;
+ int gld_y_in_offset = gld_y_offset;
+
+ // Set up limit values for motion vectors to prevent them extending outside the UMV borders
+ arf_left_mv.as_int = arf_top_mv.as_int;
+ gld_left_mv.as_int = gld_top_mv.as_int;
+ x->mv_col_min = -(VP9BORDERINPIXELS - 8 - VP9_INTERP_EXTEND);
+ x->mv_col_max = (cm->mb_cols - 1) * 8 + VP9BORDERINPIXELS
+ - 8 - VP9_INTERP_EXTEND;
+ xd->left_available = 0;
+
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
+
+ update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
+ golden_ref, &gld_left_mv, gld_y_in_offset,
+ alt_ref, &arf_left_mv, arf_y_in_offset,
+ mb_row, mb_col);
+ arf_left_mv.as_int = mb_stats->ref[ALTREF_FRAME].m.mv.as_int;
+ gld_left_mv.as_int = mb_stats->ref[GOLDEN_FRAME].m.mv.as_int;
+ if (mb_col == 0) {
+ arf_top_mv.as_int = arf_left_mv.as_int;
+ gld_top_mv.as_int = gld_left_mv.as_int;
+ }
+ xd->left_available = 1;
+ mb_y_in_offset += 16;
+ gld_y_in_offset += 16;
+ arf_y_in_offset += 16;
+ x->mv_col_min -= 16;
+ x->mv_col_max -= 16;
+ }
+ xd->up_available = 1;
+ mb_y_offset += buf->y_stride * 16;
+ gld_y_offset += golden_ref->y_stride * 16;
+ if (alt_ref)
+ arf_y_offset += alt_ref->y_stride * 16;
+ x->mv_row_min -= 16;
+ x->mv_row_max -= 16;
+ offset += cm->mb_cols;
+ }
+}
+
+// void separate_arf_mbs_byzz
+static void separate_arf_mbs(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ int mb_col, mb_row, offset, i;
+ int ncnt[4] = { 0 };
+ int n_frames = cpi->mbgraph_n_frames;
+
+ int *arf_not_zz;
+
+ CHECK_MEM_ERROR(cm, arf_not_zz,
+ vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz),
+ 1));
+
+ // We are not interested in results beyond the alt ref itself.
+ if (n_frames > cpi->frames_till_gf_update_due)
+ n_frames = cpi->frames_till_gf_update_due;
+
+ // defer cost to reference frames
+ for (i = n_frames - 1; i >= 0; i--) {
+ MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+
+ for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
+ offset += cm->mb_cols, mb_row++) {
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];
+
+ int altref_err = mb_stats->ref[ALTREF_FRAME].err;
+ int intra_err = mb_stats->ref[INTRA_FRAME ].err;
+ int golden_err = mb_stats->ref[GOLDEN_FRAME].err;
+
+ // Test for altref vs intra and gf and that its mv was 0,0.
+ if (altref_err > 1000 ||
+ altref_err > intra_err ||
+ altref_err > golden_err) {
+ arf_not_zz[offset + mb_col]++;
+ }
+ }
+ }
+ }
+
+ for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
+ offset += cm->mb_cols, mb_row++) {
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ // If any of the blocks in the sequence failed then the MB
+ // goes in segment 0
+ if (arf_not_zz[offset + mb_col]) {
+ ncnt[0]++;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col] = 0;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + 1] = 0;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols] = 0;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols + 1] = 0;
+ } else {
+ cpi->segmentation_map[offset * 4 + 2 * mb_col] = 1;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + 1] = 1;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols] = 1;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols + 1] = 1;
+ ncnt[1]++;
+ }
+ }
+ }
+
+ // Only bother with segmentation if over 10% of the MBs in static segment
+ // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
+ if (1) {
+ // Note % of blocks that are marked as static
+ if (cm->MBs)
+ cpi->static_mb_pct = (ncnt[1] * 100) / cm->MBs;
+
+ // This error case should not be reachable as this function should
+ // never be called with the common data structure uninitialized.
+ else
+ cpi->static_mb_pct = 0;
+
+ cpi->seg0_cnt = ncnt[0];
+ vp9_enable_segmentation((VP9_PTR)cpi);
+ } else {
+ cpi->static_mb_pct = 0;
+ vp9_disable_segmentation((VP9_PTR)cpi);
+ }
+
+ // Free locally allocated storage.
+ vpx_free(arf_not_zz);
+}
+
+void vp9_update_mbgraph_stats(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ int i, n_frames = vp9_lookahead_depth(cpi->lookahead);
+ YV12_BUFFER_CONFIG *golden_ref =
+ &cm->yv12_fb[cm->ref_frame_map[cpi->gld_fb_idx]];
+
+  // We need to look ahead beyond where the ARF transitions into being a GF,
+  // so exit if we don't look ahead beyond that point.
+ if (n_frames <= cpi->frames_till_gf_update_due)
+ return;
+ if (n_frames > (int)cpi->frames_till_alt_ref_frame)
+ n_frames = cpi->frames_till_alt_ref_frame;
+ if (n_frames > MAX_LAG_BUFFERS)
+ n_frames = MAX_LAG_BUFFERS;
+
+ cpi->mbgraph_n_frames = n_frames;
+ for (i = 0; i < n_frames; i++) {
+ MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+ vpx_memset(frame_stats->mb_stats, 0,
+ cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
+ }
+
+ // do motion search to find contribution of each reference to data
+ // later on in this GF group
+ // FIXME really, the GF/last MC search should be done forward, and
+ // the ARF MC search backwards, to get optimal results for MV caching
+ for (i = 0; i < n_frames; i++) {
+ MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+ struct lookahead_entry *q_cur = vp9_lookahead_peek(cpi->lookahead, i);
+
+ assert(q_cur != NULL);
+
+ update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
+ golden_ref, cpi->Source);
+ }
+
+ vp9_clear_system_state(); // __asm emms;
+
+ separate_arf_mbs(cpi);
+}
diff --git a/libvpx/vp9/encoder/vp9_mbgraph.h b/libvpx/vp9/encoder/vp9_mbgraph.h
new file mode 100644
index 0000000..c5bca4d
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_mbgraph.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_MBGRAPH_H_
+#define VP9_ENCODER_VP9_MBGRAPH_H_
+
+void vp9_update_mbgraph_stats(VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_MBGRAPH_H_
diff --git a/libvpx/vp9/encoder/vp9_mcomp.c b/libvpx/vp9/encoder/vp9_mcomp.c
new file mode 100644
index 0000000..1360088
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_mcomp.c
@@ -0,0 +1,2160 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+
+#include "./vpx_config.h"
+
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_common.h"
+
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_mcomp.h"
+
+// #define NEW_DIAMOND_SEARCH
+
+void vp9_clamp_mv_min_max(MACROBLOCK *x, MV *mv) {
+ const int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0);
+ const int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0);
+ const int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL;
+ const int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL;
+
+ // Get intersection of UMV window and valid MV window to reduce # of checks
+ // in diamond search.
+ if (x->mv_col_min < col_min)
+ x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max)
+ x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min)
+ x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max)
+ x->mv_row_max = row_max;
+}
+
+int vp9_init_search_range(VP9_COMP *cpi, int size) {
+ int sr = 0;
+
+  // Minimum search size regardless of the passed-in value.
+ size = MAX(16, size);
+
+ while ((size << sr) < MAX_FULL_PEL_VAL)
+ sr++;
+
+ if (sr)
+ sr--;
+
+ sr += cpi->sf.reduce_first_step_size;
+ sr = MIN(sr, (cpi->sf.max_step_search_steps - 2));
+ return sr;
+}
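+
+// For example, assuming MAX_FULL_PEL_VAL is 1023 (an assumed value; the
+// real one comes from vp9_mcomp.h), size = 64 leaves the loop at sr = 4,
+// which the decrement then reduces to 3 before the speed-feature adjustment
+// and clamp above are applied.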
+
+int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
+ int weight) {
+ MV v;
+ v.row = mv->as_mv.row - ref->as_mv.row;
+ v.col = mv->as_mv.col - ref->as_mv.col;
+ return ROUND_POWER_OF_TWO((mvjcost[vp9_get_mv_joint(&v)] +
+ mvcost[0][v.row] +
+ mvcost[1][v.col]) * weight, 7);
+}
+
+static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
+ int error_per_bit) {
+ if (mvcost) {
+ MV v;
+ v.row = mv->as_mv.row - ref->as_mv.row;
+ v.col = mv->as_mv.col - ref->as_mv.col;
+ return ROUND_POWER_OF_TWO((mvjcost[vp9_get_mv_joint(&v)] +
+ mvcost[0][v.row] +
+ mvcost[1][v.col]) * error_per_bit, 13);
+ }
+ return 0;
+}
+
+static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvjsadcost,
+ int *mvsadcost[2], int error_per_bit) {
+ if (mvsadcost) {
+ MV v;
+ v.row = mv->as_mv.row - ref->as_mv.row;
+ v.col = mv->as_mv.col - ref->as_mv.col;
+ return ROUND_POWER_OF_TWO((mvjsadcost[vp9_get_mv_joint(&v)] +
+ mvsadcost[0][v.row] +
+ mvsadcost[1][v.col]) * error_per_bit, 8);
+ }
+ return 0;
+}
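+
+// The three cost helpers above share the same fixed-point form,
+// ROUND_POWER_OF_TWO((joint + row + col component costs) * factor, shift),
+// differing only in the weighting factor and the shift (7, 13 and 8), which
+// match the units of each caller: bit cost, RD error cost and SAD cost.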
+
+void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride) {
+ int len;
+ int search_site_count = 0;
+
+ // Generate offsets for 4 search sites per step.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = 0;
+ search_site_count++;
+
+ for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = -len;
+ x->ss[search_site_count].offset = -len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = len;
+ x->ss[search_site_count].offset = len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = -len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = len;
+ search_site_count++;
+ }
+
+ x->ss_count = search_site_count;
+ x->searches_per_step = 4;
+}
+
+void vp9_init3smotion_compensation(MACROBLOCK *x, int stride) {
+ int len;
+ int search_site_count = 0;
+
+ // Generate offsets for 8 search sites per step.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = 0;
+ search_site_count++;
+
+ for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = -len;
+ x->ss[search_site_count].offset = -len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = len;
+ x->ss[search_site_count].offset = len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = -len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -len;
+ x->ss[search_site_count].mv.row = -len;
+ x->ss[search_site_count].offset = -len * stride - len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = len;
+ x->ss[search_site_count].mv.row = -len;
+ x->ss[search_site_count].offset = -len * stride + len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -len;
+ x->ss[search_site_count].mv.row = len;
+ x->ss[search_site_count].offset = len * stride - len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = len;
+ x->ss[search_site_count].mv.row = len;
+ x->ss[search_site_count].offset = len * stride + len;
+ search_site_count++;
+ }
+
+ x->ss_count = search_site_count;
+ x->searches_per_step = 8;
+}
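+
+// For a given len, the eight sites generated above form a square ring
+// around the center; e.g. for len = 2, the mvs (as {row, col}) are
+// {-2, 0}, {2, 0}, {0, -2}, {0, 2}, {-2, -2}, {-2, 2}, {2, -2}, {2, 2},
+// and each 'offset' is the matching buffer displacement row * stride + col.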
+
+/*
+ * To avoid the penalty of crossing a cache line during a read, preload the
+ * reference area into a small aligned buffer so that reads from it never
+ * straddle a cache line. This reduces the CPU cycles spent reading ref data
+ * in the sub-pixel filter functions.
+ * TODO: Currently, since the sub-pixel search range here is -3 ~ +3, we copy
+ * a 22-row x 32-col area, which is enough for a 16x16 macroblock. Later, for
+ * SPLITMV, we could reduce the area.
+ */
+
+/* estimated cost of a motion vector (r,c) */
+#define MVC(r, c) \
+ (mvcost ? \
+ ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
+ mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
+ error_per_bit + 4096) >> 13 : 0)
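+// MVC is round((joint + row + col costs) * error_per_bit / 2^13); the 4096
+// term is half of 2^13: e.g. a summed table cost of 100 with an
+// error_per_bit of 164 (an arbitrary illustrative value) gives
+// (100 * 164 + 4096) >> 13 = 2.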
+
+
+#define SP(x) (((x) & 7) << 1) // convert motion vector component to offset
+ // for svf calc
+
+#define IFMVCV(r, c, s, e) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) \
+ s \
+ else \
+ e;
+
+/* pointer to the predictor base for a motion vector */
+#define PRE(r, c) (y + (((r) >> 3) * y_stride + ((c) >> 3) -(offset)))
+
+/* returns subpixel variance error function */
+#define DIST(r, c) \
+ vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, src_stride, &sse)
+
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER(v, r, c) \
+ IFMVCV(r, c, { \
+ thismse = (DIST(r, c)); \
+ if ((v = MVC(r, c) + thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ }, \
+ v = INT_MAX;)
+
+#define FIRST_LEVEL_CHECKS \
+ { \
+ unsigned int left, right, up, down, diag; \
+ CHECK_BETTER(left, tr, tc - hstep); \
+ CHECK_BETTER(right, tr, tc + hstep); \
+ CHECK_BETTER(up, tr - hstep, tc); \
+ CHECK_BETTER(down, tr + hstep, tc); \
+ whichdir = (left < right ? 0 : 1) + \
+ (up < down ? 0 : 2); \
+ switch (whichdir) { \
+ case 0: \
+ CHECK_BETTER(diag, tr - hstep, tc - hstep); \
+ break; \
+ case 1: \
+ CHECK_BETTER(diag, tr - hstep, tc + hstep); \
+ break; \
+ case 2: \
+ CHECK_BETTER(diag, tr + hstep, tc - hstep); \
+ break; \
+ case 3: \
+ CHECK_BETTER(diag, tr + hstep, tc + hstep); \
+ break; \
+ } \
+ }
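+// FIRST_LEVEL_CHECKS probes the four cardinal neighbors at distance hstep,
+// then one diagonal chosen from the better of left/right and up/down: five
+// points per precision level rather than a full eight-point ring.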
+
+#define SECOND_LEVEL_CHECKS \
+ { \
+ int kr, kc; \
+ unsigned int second; \
+ if (tr != br && tc != bc) { \
+ kr = br - tr; \
+ kc = bc - tc; \
+ CHECK_BETTER(second, tr + kr, tc + 2 * kc); \
+ CHECK_BETTER(second, tr + 2 * kr, tc + kc); \
+ } else if (tr == br && tc != bc) { \
+ kc = bc - tc; \
+ CHECK_BETTER(second, tr + hstep, tc + 2 * kc); \
+ CHECK_BETTER(second, tr - hstep, tc + 2 * kc); \
+ switch (whichdir) { \
+ case 0: \
+ case 1: \
+ CHECK_BETTER(second, tr + hstep, tc + kc); \
+ break; \
+ case 2: \
+ case 3: \
+ CHECK_BETTER(second, tr - hstep, tc + kc); \
+ break; \
+ } \
+ } else if (tr != br && tc == bc) { \
+ kr = br - tr; \
+ CHECK_BETTER(second, tr + 2 * kr, tc + hstep); \
+ CHECK_BETTER(second, tr + 2 * kr, tc - hstep); \
+ switch (whichdir) { \
+ case 0: \
+ case 2: \
+ CHECK_BETTER(second, tr + kr, tc + hstep); \
+ break; \
+ case 1: \
+ case 3: \
+ CHECK_BETTER(second, tr + kr, tc - hstep); \
+ break; \
+ } \
+ } \
+ }
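+// SECOND_LEVEL_CHECKS extends the probe in the direction the first level
+// moved: if both coordinates changed, the two points beyond the winning
+// diagonal; if only one changed, points bracketing the winning direction.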
+
+int vp9_find_best_sub_pixel_iterative(MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int forced_stop,
+ int iters_per_step,
+ int *mvjcost, int *mvcost[2],
+ int *distortion,
+ unsigned int *sse1) {
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ unsigned int besterr = INT_MAX;
+ unsigned int sse;
+ unsigned int whichdir;
+ unsigned int halfiters = iters_per_step;
+ unsigned int quarteriters = iters_per_step;
+ unsigned int eighthiters = iters_per_step;
+ int thismse;
+
+ uint8_t *y = xd->plane[0].pre[0].buf +
+ (bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
+ bestmv->as_mv.col;
+
+ const int y_stride = xd->plane[0].pre[0].stride;
+
+ int rr = ref_mv->as_mv.row;
+ int rc = ref_mv->as_mv.col;
+ int br = bestmv->as_mv.row << 3;
+ int bc = bestmv->as_mv.col << 3;
+ int hstep = 4;
+ const int minc = MAX(x->mv_col_min << 3, ref_mv->as_mv.col - MV_MAX);
+ const int maxc = MIN(x->mv_col_max << 3, ref_mv->as_mv.col + MV_MAX);
+ const int minr = MAX(x->mv_row_min << 3, ref_mv->as_mv.row - MV_MAX);
+ const int maxr = MIN(x->mv_row_max << 3, ref_mv->as_mv.row + MV_MAX);
+
+ int tr = br;
+ int tc = bc;
+
+ const int offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+
+ // calculate central point error
+ besterr = vfp->vf(y, y_stride, z, src_stride, sse1);
+ *distortion = besterr;
+ besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+
+  // TODO: Each subsequent iteration checks at least one point (two, if the
+  // diagonal was selected) in common with the last iteration.
+ while (halfiters--) {
+ // 1/2 pel
+ FIRST_LEVEL_CHECKS;
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+ tr = br;
+ tc = bc;
+ }
+
+  // TODO: Each subsequent iteration checks at least one point (two, if the
+  // diagonal was selected) in common with the last iteration. 1/4 pel:
+
+ // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
+ if (forced_stop != 2) {
+ hstep >>= 1;
+ while (quarteriters--) {
+ FIRST_LEVEL_CHECKS;
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+ tr = br;
+ tc = bc;
+ }
+ }
+
+ if (xd->allow_high_precision_mv && vp9_use_mv_hp(&ref_mv->as_mv) &&
+ forced_stop == 0) {
+ hstep >>= 1;
+ while (eighthiters--) {
+ FIRST_LEVEL_CHECKS;
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+ tr = br;
+ tc = bc;
+ }
+ }
+
+ bestmv->as_mv.row = br;
+ bestmv->as_mv.col = bc;
+
+ if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
+ (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3)))
+ return INT_MAX;
+
+ return besterr;
+}
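+
+// The _tree variant below runs the same half/quarter/eighth progression but
+// replaces the convergence loop with a single FIRST_LEVEL_CHECKS pass plus
+// an optional SECOND_LEVEL_CHECKS refinement at each precision.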
+
+int vp9_find_best_sub_pixel_tree(MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int forced_stop,
+ int iters_per_step,
+ int *mvjcost, int *mvcost[2],
+ int *distortion,
+ unsigned int *sse1) {
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int rr, rc, br, bc, hstep;
+ int tr, tc;
+ unsigned int besterr = INT_MAX;
+ unsigned int sse;
+ unsigned int whichdir;
+ int thismse;
+ int maxc, minc, maxr, minr;
+ int y_stride;
+ int offset;
+ unsigned int halfiters = iters_per_step;
+ unsigned int quarteriters = iters_per_step;
+ unsigned int eighthiters = iters_per_step;
+
+ uint8_t *y = xd->plane[0].pre[0].buf +
+ (bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
+ bestmv->as_mv.col;
+
+ y_stride = xd->plane[0].pre[0].stride;
+
+ rr = ref_mv->as_mv.row;
+ rc = ref_mv->as_mv.col;
+ br = bestmv->as_mv.row << 3;
+ bc = bestmv->as_mv.col << 3;
+ hstep = 4;
+ minc = MAX(x->mv_col_min << 3,
+ (ref_mv->as_mv.col) - ((1 << MV_MAX_BITS) - 1));
+ maxc = MIN(x->mv_col_max << 3,
+ (ref_mv->as_mv.col) + ((1 << MV_MAX_BITS) - 1));
+ minr = MAX(x->mv_row_min << 3,
+ (ref_mv->as_mv.row) - ((1 << MV_MAX_BITS) - 1));
+ maxr = MIN(x->mv_row_max << 3,
+ (ref_mv->as_mv.row) + ((1 << MV_MAX_BITS) - 1));
+
+ tr = br;
+ tc = bc;
+
+ offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+
+ // calculate central point error
+ besterr = vfp->vf(y, y_stride, z, src_stride, sse1);
+ *distortion = besterr;
+ besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+
+ // 1/2 pel
+ FIRST_LEVEL_CHECKS;
+ if (halfiters > 1) {
+ SECOND_LEVEL_CHECKS;
+ }
+ tr = br;
+ tc = bc;
+
+ // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
+ if (forced_stop != 2) {
+ hstep >>= 1;
+ FIRST_LEVEL_CHECKS;
+ if (quarteriters > 1) {
+ SECOND_LEVEL_CHECKS;
+ }
+ tr = br;
+ tc = bc;
+ }
+
+ if (xd->allow_high_precision_mv && vp9_use_mv_hp(&ref_mv->as_mv) &&
+ forced_stop == 0) {
+ hstep >>= 1;
+ FIRST_LEVEL_CHECKS;
+ if (eighthiters > 1) {
+ SECOND_LEVEL_CHECKS;
+ }
+ tr = br;
+ tc = bc;
+ }
+
+ bestmv->as_mv.row = br;
+ bestmv->as_mv.col = bc;
+
+ if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
+ (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3)))
+ return INT_MAX;
+
+ return besterr;
+}
+
+#undef DIST
+/* returns subpixel variance error function */
+#define DIST(r, c) \
+ vfp->svaf(PRE(r, c), y_stride, SP(c), SP(r), \
+ z, src_stride, &sse, second_pred)
+
+int vp9_find_best_sub_pixel_comp_iterative(MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int forced_stop,
+ int iters_per_step,
+ int *mvjcost, int *mvcost[2],
+ int *distortion,
+ unsigned int *sse1,
+ const uint8_t *second_pred,
+ int w, int h) {
+ uint8_t *const z = x->plane[0].src.buf;
+ const int src_stride = x->plane[0].src.stride;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ unsigned int besterr = INT_MAX;
+ unsigned int sse;
+ unsigned int whichdir;
+ unsigned int halfiters = iters_per_step;
+ unsigned int quarteriters = iters_per_step;
+ unsigned int eighthiters = iters_per_step;
+ int thismse;
+
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64);
+ uint8_t *const y = xd->plane[0].pre[0].buf +
+ (bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
+ bestmv->as_mv.col;
+
+ const int y_stride = xd->plane[0].pre[0].stride;
+
+ int rr = ref_mv->as_mv.row;
+ int rc = ref_mv->as_mv.col;
+ int br = bestmv->as_mv.row << 3;
+ int bc = bestmv->as_mv.col << 3;
+ int hstep = 4;
+ const int minc = MAX(x->mv_col_min << 3, ref_mv->as_mv.col - MV_MAX);
+ const int maxc = MIN(x->mv_col_max << 3, ref_mv->as_mv.col + MV_MAX);
+ const int minr = MAX(x->mv_row_min << 3, ref_mv->as_mv.row - MV_MAX);
+ const int maxr = MIN(x->mv_row_max << 3, ref_mv->as_mv.row + MV_MAX);
+
+ int tr = br;
+ int tc = bc;
+
+ const int offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+
+ // calculate central point error
+  // TODO(yunqingwang): the central point error was already calculated in the
+  // full-pixel search and could be passed into this function.
+ comp_avg_pred(comp_pred, second_pred, w, h, y, y_stride);
+ besterr = vfp->vf(comp_pred, w, z, src_stride, sse1);
+ *distortion = besterr;
+ besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+
+  // Each subsequent iteration checks at least one point (two, if the
+  // diagonal was selected) in common with the last iteration.
+ while (halfiters--) {
+ // 1/2 pel
+ FIRST_LEVEL_CHECKS;
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+ tr = br;
+ tc = bc;
+ }
+
+  // Each subsequent iteration checks at least one point (two, if the
+  // diagonal was selected) in common with the last iteration. 1/4 pel:
+
+ // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
+ if (forced_stop != 2) {
+ hstep >>= 1;
+ while (quarteriters--) {
+ FIRST_LEVEL_CHECKS;
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+ tr = br;
+ tc = bc;
+ }
+ }
+
+ if (xd->allow_high_precision_mv && vp9_use_mv_hp(&ref_mv->as_mv) &&
+ forced_stop == 0) {
+ hstep >>= 1;
+ while (eighthiters--) {
+ FIRST_LEVEL_CHECKS;
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+ tr = br;
+ tc = bc;
+ }
+ }
+ bestmv->as_mv.row = br;
+ bestmv->as_mv.col = bc;
+
+ if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
+ (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3)))
+ return INT_MAX;
+
+ return besterr;
+}
+
+int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int forced_stop,
+ int iters_per_step,
+ int *mvjcost, int *mvcost[2],
+ int *distortion,
+ unsigned int *sse1,
+ const uint8_t *second_pred,
+ int w, int h) {
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int rr, rc, br, bc, hstep;
+ int tr, tc;
+ unsigned int besterr = INT_MAX;
+ unsigned int sse;
+ unsigned int whichdir;
+ int thismse;
+ int maxc, minc, maxr, minr;
+ int y_stride;
+ int offset;
+ unsigned int halfiters = iters_per_step;
+ unsigned int quarteriters = iters_per_step;
+ unsigned int eighthiters = iters_per_step;
+
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64);
+ uint8_t *y = xd->plane[0].pre[0].buf +
+ (bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
+ bestmv->as_mv.col;
+
+ y_stride = xd->plane[0].pre[0].stride;
+
+ rr = ref_mv->as_mv.row;
+ rc = ref_mv->as_mv.col;
+ br = bestmv->as_mv.row << 3;
+ bc = bestmv->as_mv.col << 3;
+ hstep = 4;
+ minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) -
+ ((1 << MV_MAX_BITS) - 1));
+ maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) +
+ ((1 << MV_MAX_BITS) - 1));
+ minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) -
+ ((1 << MV_MAX_BITS) - 1));
+ maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) +
+ ((1 << MV_MAX_BITS) - 1));
+
+ tr = br;
+ tc = bc;
+
+
+ offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+
+ // calculate central point error
+  // TODO(yunqingwang): the central point error was already calculated in the
+  // full-pixel search and could be passed into this function.
+ comp_avg_pred(comp_pred, second_pred, w, h, y, y_stride);
+ besterr = vfp->vf(comp_pred, w, z, src_stride, sse1);
+ *distortion = besterr;
+ besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+
+  // Each subsequent iteration checks at least one point (two, if the
+  // diagonal was selected) in common with the last iteration.
+  // 1/2 pel
+ FIRST_LEVEL_CHECKS;
+ if (halfiters > 1) {
+ SECOND_LEVEL_CHECKS;
+ }
+ tr = br;
+ tc = bc;
+
+  // Each subsequent iteration checks at least one point (two, if the
+  // diagonal was selected) in common with the last iteration. 1/4 pel:
+
+ // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
+ if (forced_stop != 2) {
+ hstep >>= 1;
+ FIRST_LEVEL_CHECKS;
+ if (quarteriters > 1) {
+ SECOND_LEVEL_CHECKS;
+ }
+ tr = br;
+ tc = bc;
+ }
+
+ if (xd->allow_high_precision_mv && vp9_use_mv_hp(&ref_mv->as_mv) &&
+ forced_stop == 0) {
+ hstep >>= 1;
+ FIRST_LEVEL_CHECKS;
+ if (eighthiters > 1) {
+ SECOND_LEVEL_CHECKS;
+ }
+ tr = br;
+ tc = bc;
+ }
+ bestmv->as_mv.row = br;
+ bestmv->as_mv.col = bc;
+
+ if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
+ (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3)))
+ return INT_MAX;
+
+ return besterr;
+}
+
+#undef MVC
+#undef PRE
+#undef DIST
+#undef IFMVCV
+#undef CHECK_BETTER
+#undef SP
+
+#define CHECK_BOUNDS(range) \
+ {\
+ all_in = 1;\
+ all_in &= ((br-range) >= x->mv_row_min);\
+ all_in &= ((br+range) <= x->mv_row_max);\
+ all_in &= ((bc-range) >= x->mv_col_min);\
+ all_in &= ((bc+range) <= x->mv_col_max);\
+ }
+
+#define CHECK_POINT \
+ {\
+ if (this_mv.as_mv.col < x->mv_col_min) continue;\
+ if (this_mv.as_mv.col > x->mv_col_max) continue;\
+ if (this_mv.as_mv.row < x->mv_row_min) continue;\
+ if (this_mv.as_mv.row > x->mv_row_max) continue;\
+ }
+
+#define CHECK_BETTER \
+ {\
+ if (thissad < bestsad)\
+ {\
+ if (use_mvcost) \
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, \
+ mvjsadcost, mvsadcost, \
+ sad_per_bit);\
+ if (thissad < bestsad)\
+ {\
+ bestsad = thissad;\
+ best_site = i;\
+ }\
+ }\
+ }
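+// This CHECK_BETTER compares the raw SAD first and adds the motion vector
+// cost only when the raw SAD already beats the best; since the cost can
+// only increase thissad, losing candidates skip the cost lookups entirely.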
+
+#define get_next_chkpts(list, i, n) \
+ list[0] = ((i) == 0 ? (n) - 1 : (i) - 1); \
+ list[1] = (i); \
+ list[2] = ((i) == (n) - 1 ? 0 : (i) + 1);
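+
+// get_next_chkpts fills 'list' with index i and its ring neighbors i - 1
+// and i + 1 (modulo n), so only the three candidates adjacent to the
+// previous best are re-checked as the pattern walks around one scale.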
+
+#define MAX_PATTERN_SCALES 11
+#define MAX_PATTERN_CANDIDATES 8 // max number of candidates per scale
+#define PATTERN_CANDIDATES_REF 3 // number of refinement candidates
+
+// Generic pattern search function that searches over multiple scales.
+// Each scale can have a different number and shape of candidates, as
+// indicated by the num_candidates and candidates arrays passed into this
+// function.
+static int vp9_pattern_search(MACROBLOCK *x,
+ int_mv *ref_mv,
+ int search_param,
+ int sad_per_bit,
+ int do_init_search,
+ int do_refine,
+ const vp9_variance_fn_ptr_t *vfp,
+ int use_mvcost,
+ int_mv *center_mv, int_mv *best_mv,
+ const int num_candidates[MAX_PATTERN_SCALES],
+ const MV candidates[MAX_PATTERN_SCALES]
+ [MAX_PATTERN_CANDIDATES]) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
+ 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ };
+ int i, j, s, t;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ int br, bc;
+ int_mv this_mv;
+ int bestsad = INT_MAX;
+ int thissad;
+ uint8_t *base_offset;
+ uint8_t *this_offset;
+ int k = -1;
+ int all_in;
+ int best_site = -1;
+ int_mv fcenter_mv;
+ int best_init_s = search_param_to_steps[search_param];
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // adjust ref_mv to make sure it is within MV range
+ clamp_mv(&ref_mv->as_mv,
+ x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ br = ref_mv->as_mv.row;
+ bc = ref_mv->as_mv.col;
+
+ // Work out the start point for the search
+ base_offset = (uint8_t *)(xd->plane[0].pre[0].buf);
+ this_offset = base_offset + (br * in_what_stride) + bc;
+ this_mv.as_mv.row = br;
+ this_mv.as_mv.col = bc;
+ bestsad = vfp->sdf(what, what_stride, this_offset,
+ in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+  // Search all possible scales up to the search param around the center
+  // point, and pick the scale of the best point as the starting scale for
+  // further steps around it.
+ if (do_init_search) {
+ s = best_init_s;
+ best_init_s = -1;
+ for (t = 0; t <= s; ++t) {
+ best_site = -1;
+ CHECK_BOUNDS((1 << t))
+ if (all_in) {
+ for (i = 0; i < num_candidates[t]; i++) {
+ this_mv.as_mv.row = br + candidates[t][i].row;
+ this_mv.as_mv.col = bc + candidates[t][i].col;
+ this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+ bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < num_candidates[t]; i++) {
+ this_mv.as_mv.row = br + candidates[t][i].row;
+ this_mv.as_mv.col = bc + candidates[t][i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+ bestsad);
+ CHECK_BETTER
+ }
+ }
+ if (best_site == -1) {
+ continue;
+ } else {
+ best_init_s = t;
+ k = best_site;
+ }
+ }
+ if (best_init_s != -1) {
+ br += candidates[best_init_s][k].row;
+ bc += candidates[best_init_s][k].col;
+ }
+ }
+
+ // If the center point is still the best, just skip this and move to
+ // the refinement step.
+ if (best_init_s != -1) {
+ s = best_init_s;
+ best_site = -1;
+ do {
+      // No need to search all the points the first time if the initial
+      // search was used.
+ if (!do_init_search || s != best_init_s) {
+ CHECK_BOUNDS((1 << s))
+ if (all_in) {
+ for (i = 0; i < num_candidates[s]; i++) {
+ this_mv.as_mv.row = br + candidates[s][i].row;
+ this_mv.as_mv.col = bc + candidates[s][i].col;
+ this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+ bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < num_candidates[s]; i++) {
+ this_mv.as_mv.row = br + candidates[s][i].row;
+ this_mv.as_mv.col = bc + candidates[s][i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+ bestsad);
+ CHECK_BETTER
+ }
+ }
+
+ if (best_site == -1) {
+ continue;
+ } else {
+ br += candidates[s][best_site].row;
+ bc += candidates[s][best_site].col;
+ k = best_site;
+ }
+ }
+
+ do {
+ int next_chkpts_indices[PATTERN_CANDIDATES_REF];
+ best_site = -1;
+ CHECK_BOUNDS((1 << s))
+
+ get_next_chkpts(next_chkpts_indices, k, num_candidates[s]);
+ if (all_in) {
+ for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
+ this_mv.as_mv.row = br +
+ candidates[s][next_chkpts_indices[i]].row;
+ this_mv.as_mv.col = bc +
+ candidates[s][next_chkpts_indices[i]].col;
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+ bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
+ this_mv.as_mv.row = br +
+ candidates[s][next_chkpts_indices[i]].row;
+ this_mv.as_mv.col = bc +
+ candidates[s][next_chkpts_indices[i]].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+ bestsad);
+ CHECK_BETTER
+ }
+ }
+
+ if (best_site != -1) {
+ k = next_chkpts_indices[best_site];
+ br += candidates[s][k].row;
+ bc += candidates[s][k].col;
+ }
+ } while (best_site != -1);
+ } while (s--);
+ }
+
+ // Check 4 1-away neighbors if do_refine is true.
+ // For most well-designed schemes do_refine will not be necessary.
+ if (do_refine) {
+ static const MV neighbors[4] = {
+ {0, -1}, { -1, 0}, {1, 0}, {0, 1},
+ };
+ for (j = 0; j < 16; j++) {
+ best_site = -1;
+ CHECK_BOUNDS(1)
+ if (all_in) {
+ for (i = 0; i < 4; i++) {
+ this_mv.as_mv.row = br + neighbors[i].row;
+ this_mv.as_mv.col = bc + neighbors[i].col;
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+ bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < 4; i++) {
+ this_mv.as_mv.row = br + neighbors[i].row;
+ this_mv.as_mv.col = bc + neighbors[i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
+ this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+ bestsad);
+ CHECK_BETTER
+ }
+ }
+
+ if (best_site == -1) {
+ break;
+ } else {
+ br += neighbors[best_site].row;
+ bc += neighbors[best_site].col;
+ }
+ }
+ }
+
+ best_mv->as_mv.row = br;
+ best_mv->as_mv.col = bc;
+
+ this_offset = base_offset + (best_mv->as_mv.row * (in_what_stride)) +
+ best_mv->as_mv.col;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+ if (bestsad == INT_MAX)
+ return INT_MAX;
+  return vfp->vf(what, what_stride, this_offset, in_what_stride,
+                 (unsigned int *)(&bestsad)) +
+         (use_mvcost ? mv_err_cost(&this_mv, center_mv, x->nmvjointcost,
+                                   x->mvcost, x->errorperbit)
+                     : 0);
+}
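+
+// vp9_pattern_search thus has up to three phases: an optional scan over all
+// scales to pick a starting scale, a descent that re-checks only the three
+// neighbors of the previous best while the scale shrinks, and an optional
+// four-neighbor full-pel refinement.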
+
+
+int vp9_hex_search(MACROBLOCK *x,
+ int_mv *ref_mv,
+ int search_param,
+ int sad_per_bit,
+ int do_init_search,
+ const vp9_variance_fn_ptr_t *vfp,
+ int use_mvcost,
+ int_mv *center_mv, int_mv *best_mv) {
+ // First scale has 8-closest points, the rest have 6 points in hex shape
+ // at increasing scales
+ static const int hex_num_candidates[MAX_PATTERN_SCALES] = {
+ 8, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6
+ };
+ // Note that the largest candidate step at each scale is 2^scale
+ static const MV hex_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+ {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, { 0, 1}, { -1, 1}, {-1, 0}},
+ {{-1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0}},
+ {{-2, -4}, {2, -4}, {4, 0}, {2, 4}, { -2, 4}, { -4, 0}},
+ {{-4, -8}, {4, -8}, {8, 0}, {4, 8}, { -4, 8}, { -8, 0}},
+ {{-8, -16}, {8, -16}, {16, 0}, {8, 16}, { -8, 16}, { -16, 0}},
+ {{-16, -32}, {16, -32}, {32, 0}, {16, 32}, { -16, 32}, { -32, 0}},
+ {{-32, -64}, {32, -64}, {64, 0}, {32, 64}, { -32, 64}, { -64, 0}},
+ {{-64, -128}, {64, -128}, {128, 0}, {64, 128}, { -64, 128}, { -128, 0}},
+ {{-128, -256}, {128, -256}, {256, 0}, {128, 256}, { -128, 256}, { -256, 0}},
+ {{-256, -512}, {256, -512}, {512, 0}, {256, 512}, { -256, 512}, { -512, 0}},
+ {{-512, -1024}, {512, -1024}, {1024, 0}, {512, 1024}, { -512, 1024},
+ { -1024, 0}},
+ };
+ return
+ vp9_pattern_search(x, ref_mv, search_param, sad_per_bit,
+ do_init_search, 0, vfp, use_mvcost,
+ center_mv, best_mv,
+ hex_num_candidates, hex_candidates);
+}
+
+int vp9_bigdia_search(MACROBLOCK *x,
+ int_mv *ref_mv,
+ int search_param,
+ int sad_per_bit,
+ int do_init_search,
+ const vp9_variance_fn_ptr_t *vfp,
+ int use_mvcost,
+ int_mv *center_mv,
+ int_mv *best_mv) {
+ // First scale has 4-closest points, the rest have 8 points in diamond
+ // shape at increasing scales
+ static const int bigdia_num_candidates[MAX_PATTERN_SCALES] = {
+ 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ };
+ // Note that the largest candidate step at each scale is 2^scale
+ static const MV bigdia_candidates[MAX_PATTERN_SCALES]
+ [MAX_PATTERN_CANDIDATES] = {
+ {{0, -1}, {1, 0}, { 0, 1}, {-1, 0}},
+ {{-1, -1}, {0, -2}, {1, -1}, {2, 0}, {1, 1}, {0, 2}, {-1, 1}, {-2, 0}},
+ {{-2, -2}, {0, -4}, {2, -2}, {4, 0}, {2, 2}, {0, 4}, {-2, 2}, {-4, 0}},
+ {{-4, -4}, {0, -8}, {4, -4}, {8, 0}, {4, 4}, {0, 8}, {-4, 4}, {-8, 0}},
+ {{-8, -8}, {0, -16}, {8, -8}, {16, 0}, {8, 8}, {0, 16}, {-8, 8}, {-16, 0}},
+ {{-16, -16}, {0, -32}, {16, -16}, {32, 0}, {16, 16}, {0, 32},
+ {-16, 16}, {-32, 0}},
+ {{-32, -32}, {0, -64}, {32, -32}, {64, 0}, {32, 32}, {0, 64},
+ {-32, 32}, {-64, 0}},
+ {{-64, -64}, {0, -128}, {64, -64}, {128, 0}, {64, 64}, {0, 128},
+ {-64, 64}, {-128, 0}},
+ {{-128, -128}, {0, -256}, {128, -128}, {256, 0}, {128, 128}, {0, 256},
+ {-128, 128}, {-256, 0}},
+ {{-256, -256}, {0, -512}, {256, -256}, {512, 0}, {256, 256}, {0, 512},
+ {-256, 256}, {-512, 0}},
+ {{-512, -512}, {0, -1024}, {512, -512}, {1024, 0}, {512, 512}, {0, 1024},
+ {-512, 512}, {-1024, 0}},
+ };
+ return
+ vp9_pattern_search(x, ref_mv, search_param, sad_per_bit,
+ do_init_search, 0, vfp, use_mvcost,
+ center_mv, best_mv,
+ bigdia_num_candidates, bigdia_candidates);
+}
+
+int vp9_square_search(MACROBLOCK *x,
+ int_mv *ref_mv,
+ int search_param,
+ int sad_per_bit,
+ int do_init_search,
+ const vp9_variance_fn_ptr_t *vfp,
+ int use_mvcost,
+ int_mv *center_mv,
+ int_mv *best_mv) {
+ // All scales have 8 closest points in square shape
+ static const int square_num_candidates[MAX_PATTERN_SCALES] = {
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ };
+ // Note that the largest candidate step at each scale is 2^scale
+ static const MV square_candidates[MAX_PATTERN_SCALES]
+ [MAX_PATTERN_CANDIDATES] = {
+ {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}},
+ {{-2, -2}, {0, -2}, {2, -2}, {2, 0}, {2, 2}, {0, 2}, {-2, 2}, {-2, 0}},
+ {{-4, -4}, {0, -4}, {4, -4}, {4, 0}, {4, 4}, {0, 4}, {-4, 4}, {-4, 0}},
+ {{-8, -8}, {0, -8}, {8, -8}, {8, 0}, {8, 8}, {0, 8}, {-8, 8}, {-8, 0}},
+ {{-16, -16}, {0, -16}, {16, -16}, {16, 0}, {16, 16}, {0, 16},
+ {-16, 16}, {-16, 0}},
+ {{-32, -32}, {0, -32}, {32, -32}, {32, 0}, {32, 32}, {0, 32},
+ {-32, 32}, {-32, 0}},
+ {{-64, -64}, {0, -64}, {64, -64}, {64, 0}, {64, 64}, {0, 64},
+ {-64, 64}, {-64, 0}},
+ {{-128, -128}, {0, -128}, {128, -128}, {128, 0}, {128, 128}, {0, 128},
+ {-128, 128}, {-128, 0}},
+ {{-256, -256}, {0, -256}, {256, -256}, {256, 0}, {256, 256}, {0, 256},
+ {-256, 256}, {-256, 0}},
+ {{-512, -512}, {0, -512}, {512, -512}, {512, 0}, {512, 512}, {0, 512},
+ {-512, 512}, {-512, 0}},
+ {{-1024, -1024}, {0, -1024}, {1024, -1024}, {1024, 0}, {1024, 1024},
+ {0, 1024}, {-1024, 1024}, {-1024, 0}},
+ };
+ return
+ vp9_pattern_search(x, ref_mv, search_param, sad_per_bit,
+ do_init_search, 0, vfp, use_mvcost,
+ center_mv, best_mv,
+ square_num_candidates, square_candidates);
+}
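+
+// The three wrappers above differ only in their candidate tables: hex uses
+// 8 then 6 points per scale, bigdia 4 then 8, and square 8 throughout, with
+// the largest candidate step at each scale being 2^scale.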
+
+#undef CHECK_BOUNDS
+#undef CHECK_POINT
+#undef CHECK_BETTER
+
+int vp9_diamond_search_sad_c(MACROBLOCK *x,
+ int_mv *ref_mv, int_mv *best_mv,
+ int search_param, int sad_per_bit, int *num00,
+ vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
+ int *mvcost[2], int_mv *center_mv) {
+ int i, j, step;
+
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *best_address;
+
+ int tot_steps;
+ int_mv this_mv;
+
+ int bestsad = INT_MAX;
+ int best_site = 0;
+ int last_site = 0;
+
+ int ref_row, ref_col;
+ int this_row_offset, this_col_offset;
+ search_site *ss;
+
+ uint8_t *check_here;
+ int thissad;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ clamp_mv(&ref_mv->as_mv,
+ x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
+ *num00 = 0;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Work out the start point for the search
+ in_what = (uint8_t *)(xd->plane[0].pre[0].buf +
+ (ref_row * (xd->plane[0].pre[0].stride)) + ref_col);
+ best_address = in_what;
+
+ // Check the starting position
+ bestsad = fn_ptr->sdf(what, what_stride, in_what,
+ in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+  // search_param determines the length of the initial step and hence the
+  // number of iterations:
+  // 0 = initial step (MAX_FIRST_STEP) pel
+  // 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
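+  // For example, assuming MAX_FIRST_STEP is 1 << (MAX_MVSEARCH_STEPS - 1),
+  // search_param = 2 starts the diamond at a quarter of the maximum first
+  // step and runs two fewer steps.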
+ ss = &x->ss[search_param * x->searches_per_step];
+ tot_steps = (x->ss_count / x->searches_per_step) - search_param;
+
+ i = 1;
+
+ for (step = 0; step < tot_steps; step++) {
+ for (j = 0; j < x->searches_per_step; j++) {
+ // Trap illegal vectors
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
+
+ if ((this_col_offset > x->mv_col_min) &&
+ (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) &&
+ (this_row_offset < x->mv_row_max)) {
+ check_here = ss[i].offset + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride,
+ bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = i;
+ }
+ }
+ }
+
+ i++;
+ }
+
+ if (best_site != last_site) {
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ last_site = best_site;
+#if defined(NEW_DIAMOND_SEARCH)
+ while (1) {
+ this_row_offset = best_mv->as_mv.row + ss[best_site].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[best_site].mv.col;
+ if ((this_col_offset > x->mv_col_min) &&
+ (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) &&
+ (this_row_offset < x->mv_row_max)) {
+ check_here = ss[best_site].offset + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride,
+ bestsad);
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ continue;
+ }
+ }
+ }
+ break;
+      }
+#endif
+ } else if (best_address == in_what)
+ (*num00)++;
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad == INT_MAX)
+ return INT_MAX;
+
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) + mv_err_cost(&this_mv, center_mv, mvjcost,
+ mvcost, x->errorperbit);
+}
+
+int vp9_diamond_search_sadx4(MACROBLOCK *x,
+ int_mv *ref_mv, int_mv *best_mv, int search_param,
+ int sad_per_bit, int *num00,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2], int_mv *center_mv) {
+ int i, j, step;
+
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *best_address;
+
+ int tot_steps;
+ int_mv this_mv;
+
+ unsigned int bestsad = INT_MAX;
+ int best_site = 0;
+ int last_site = 0;
+
+ int ref_row;
+ int ref_col;
+ int this_row_offset;
+ int this_col_offset;
+ search_site *ss;
+
+ uint8_t *check_here;
+ unsigned int thissad;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ clamp_mv(&ref_mv->as_mv,
+ x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
+ *num00 = 0;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Work out the start point for the search
+ in_what = (uint8_t *)(xd->plane[0].pre[0].buf +
+ (ref_row * (xd->plane[0].pre[0].stride)) + ref_col);
+ best_address = in_what;
+
+ // Check the starting position
+ bestsad = fn_ptr->sdf(what, what_stride,
+ in_what, in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+  // search_param determines the length of the initial step and hence the
+  // number of iterations:
+  // 0 = initial step (MAX_FIRST_STEP) pel
+  // 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
+ ss = &x->ss[search_param * x->searches_per_step];
+ tot_steps = (x->ss_count / x->searches_per_step) - search_param;
+
+ i = 1;
+
+ for (step = 0; step < tot_steps; step++) {
+ int all_in = 1, t;
+
+    // To know whether all neighbor points are within the bounds, 4 bounds
+    // checks are enough instead of checking 4 bounds for each point.
+ all_in &= ((best_mv->as_mv.row + ss[i].mv.row) > x->mv_row_min);
+ all_in &= ((best_mv->as_mv.row + ss[i + 1].mv.row) < x->mv_row_max);
+ all_in &= ((best_mv->as_mv.col + ss[i + 2].mv.col) > x->mv_col_min);
+ all_in &= ((best_mv->as_mv.col + ss[i + 3].mv.col) < x->mv_col_max);
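+    // The first four sites of a step are its extreme row and column
+    // displacements, so these four comparisons bound every site in the step.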
+
+ if (all_in) {
+ unsigned int sad_array[4];
+
+ for (j = 0; j < x->searches_per_step; j += 4) {
+ unsigned char const *block_offset[4];
+
+ for (t = 0; t < 4; t++)
+ block_offset[t] = ss[i + t].offset + best_address;
+
+ fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
+ sad_array);
+
+ for (t = 0; t < 4; t++, i++) {
+ if (sad_array[t] < bestsad) {
+ this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
+ this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
+ sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (sad_array[t] < bestsad) {
+ bestsad = sad_array[t];
+ best_site = i;
+ }
+ }
+ }
+ }
+ } else {
+ for (j = 0; j < x->searches_per_step; j++) {
+ // Trap illegal vectors
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
+
+        if ((this_col_offset > x->mv_col_min) &&
+            (this_col_offset < x->mv_col_max) &&
+            (this_row_offset > x->mv_row_min) &&
+            (this_row_offset < x->mv_row_max)) {
+          check_here = ss[i].offset + best_address;
+          thissad = fn_ptr->sdf(what, what_stride, check_here,
+                                in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = i;
+ }
+ }
+ }
+ i++;
+ }
+ }
+ if (best_site != last_site) {
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ last_site = best_site;
+#if defined(NEW_DIAMOND_SEARCH)
+ while (1) {
+ this_row_offset = best_mv->as_mv.row + ss[best_site].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[best_site].mv.col;
+ if ((this_col_offset > x->mv_col_min) &&
+ (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) &&
+ (this_row_offset < x->mv_row_max)) {
+ check_here = ss[best_site].offset + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride,
+ bestsad);
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ continue;
+ }
+ }
+ }
+ break;
+      }
+#endif
+ } else if (best_address == in_what)
+ (*num00)++;
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad == INT_MAX)
+ return INT_MAX;
+
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) + mv_err_cost(&this_mv,
+ center_mv, mvjcost, mvcost, x->errorperbit);
+}
+
+/* do_refine: If last step (1-away) of n-step search doesn't pick the center
+ point as the best match, we will do a final 1-away diamond
+ refining search */
+
+int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *mvp_full, int step_param,
+ int sadpb, int further_steps,
+ int do_refine, vp9_variance_fn_ptr_t *fn_ptr,
+ int_mv *ref_mv, int_mv *dst_mv) {
+ int_mv temp_mv;
+ int thissme, n, num00;
+ int bestsme = cpi->diamond_search_sad(x, mvp_full, &temp_mv,
+ step_param, sadpb, &num00,
+ fn_ptr, x->nmvjointcost,
+ x->mvcost, ref_mv);
+ dst_mv->as_int = temp_mv.as_int;
+
+ n = num00;
+ num00 = 0;
+
+  /* If there won't be more n-step searches, check to see if the refining
+     search is needed. */
+ if (n > further_steps)
+ do_refine = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00)
+ num00--;
+ else {
+ thissme = cpi->diamond_search_sad(x, mvp_full, &temp_mv,
+ step_param + n, sadpb, &num00,
+ fn_ptr, x->nmvjointcost, x->mvcost,
+ ref_mv);
+
+ /* check to see if refining search is needed. */
+ if (num00 > (further_steps - n))
+ do_refine = 0;
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ dst_mv->as_int = temp_mv.as_int;
+ }
+ }
+ }
+
+ /* final 1-away diamond refining search */
+ if (do_refine == 1) {
+ int search_range = 8;
+ int_mv best_mv;
+ best_mv.as_int = dst_mv->as_int;
+ thissme = cpi->refining_search_sad(x, &best_mv, sadpb, search_range,
+ fn_ptr, x->nmvjointcost, x->mvcost,
+ ref_mv);
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ dst_mv->as_int = best_mv.as_int;
+ }
+ }
+ return bestsme;
+}
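+
+// The num00 value returned by each diamond search counts the initial steps
+// in which the center point stayed best; the loop above uses it to skip
+// smaller-step searches that would start and finish at the same point.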
+
+int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv,
+ int sad_per_bit, int distance,
+ vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
+ int *mvcost[2],
+ int_mv *center_mv, int n) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ int mv_stride = xd->plane[0].pre[0].stride;
+ uint8_t *bestaddress;
+ int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
+ int_mv this_mv;
+ int bestsad = INT_MAX;
+ int r, c;
+
+ uint8_t *check_here;
+ int thissad;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // Work out the mid point for the search
+ in_what = xd->plane[0].pre[0].buf;
+ bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col;
+
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Baseline value at the centre
+ bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
+ in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+  // Apply further limits to prevent us from using vectors that stretch
+  // beyond the UMV border.
+ col_min = MAX(col_min, x->mv_col_min);
+ col_max = MIN(col_max, x->mv_col_max);
+ row_min = MAX(row_min, x->mv_row_min);
+ row_max = MIN(row_max, x->mv_row_max);
+
+ for (r = row_min; r < row_max; r++) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
+
+ for (c = col_min; c < col_max; c++) {
+      thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride,
+                            bestsad);
+
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+
+ check_here++;
+ }
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+ else
+ return INT_MAX;
+}
+
+int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
+ int sad_per_bit, int distance,
+ vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
+ int *mvcost[2], int_mv *center_mv, int n) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ int mv_stride = xd->plane[0].pre[0].stride;
+ uint8_t *bestaddress;
+ int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int r, c;
+
+ uint8_t *check_here;
+ unsigned int thissad;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+
+ unsigned int sad_array[3];
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // Work out the mid point for the search
+ in_what = xd->plane[0].pre[0].buf;
+ bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col;
+
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Baseline value at the centre
+ bestsad = fn_ptr->sdf(what, what_stride,
+ bestaddress, in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+  // Apply further limits to prevent us from using vectors that stretch
+  // beyond the UMV border.
+ col_min = MAX(col_min, x->mv_col_min);
+ col_max = MIN(col_max, x->mv_col_max);
+ row_min = MAX(row_min, x->mv_row_min);
+ row_max = MIN(row_max, x->mv_row_max);
+
+ for (r = row_min; r < row_max; r++) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
+ c = col_min;
+
+ while ((c + 2) < col_max) {
+ int i;
+
+ fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
+
+ for (i = 0; i < 3; i++) {
+ thissad = sad_array[i];
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+ }
+
+ while (c < col_max) {
+      thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride,
+                            bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+ else
+ return INT_MAX;
+}
+
+int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
+ int sad_per_bit, int distance,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv, int n) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ int mv_stride = xd->plane[0].pre[0].stride;
+ uint8_t *bestaddress;
+ int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int r, c;
+
+ uint8_t *check_here;
+ unsigned int thissad;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+
+ DECLARE_ALIGNED_ARRAY(16, uint32_t, sad_array8, 8);
+ unsigned int sad_array[3];
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // Work out the mid point for the search
+ in_what = xd->plane[0].pre[0].buf;
+ bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col;
+
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Baseline value at the centre
+ bestsad = fn_ptr->sdf(what, what_stride,
+ bestaddress, in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+  // Apply further limits to prevent us from using vectors that stretch
+  // beyond the UMV border.
+ col_min = MAX(col_min, x->mv_col_min);
+ col_max = MIN(col_max, x->mv_col_max);
+ row_min = MAX(row_min, x->mv_row_min);
+ row_max = MIN(row_max, x->mv_row_max);
+
+ for (r = row_min; r < row_max; r++) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
+ c = col_min;
+
+ while ((c + 7) < col_max) {
+ int i;
+
+ fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8);
+
+ for (i = 0; i < 8; i++) {
+ thissad = (unsigned int)sad_array8[i];
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+ }
+
+ while ((c + 2) < col_max && fn_ptr->sdx3f != NULL) {
+ int i;
+
+ fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
+
+ for (i = 0; i < 3; i++) {
+ thissad = sad_array[i];
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+ }
+
+ while (c < col_max) {
+      thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride,
+                            bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+ else
+ return INT_MAX;
+}
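+
+// vp9_full_search_sadx8 sweeps each row with the widest SAD function
+// available: eight columns at a time via sdx8f, then three at a time via
+// sdx3f, and finally one at a time via sdf for the tail of the row.
+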
+int vp9_refining_search_sad_c(MACROBLOCK *x,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2], int_mv *center_mv) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
+ int i, j;
+ int this_row_offset, this_col_offset;
+
+ int what_stride = x->plane[0].src.stride;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *what = x->plane[0].src.buf;
+ uint8_t *best_address = xd->plane[0].pre[0].buf +
+ (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) +
+ ref_mv->as_mv.col;
+ uint8_t *check_here;
+ unsigned int thissad;
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit);
+
+ for (i = 0; i < search_range; i++) {
+ int best_site = -1;
+
+ for (j = 0; j < 4; j++) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
+ mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
+ }
+ }
+ }
+ }
+
+ if (best_site == -1)
+ break;
+ else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride + neighbors[best_site].col;
+ }
+ }
+
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+ else
+ return INT_MAX;
+}
+
+int vp9_refining_search_sadx4(MACROBLOCK *x,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2], int_mv *center_mv) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
+ int i, j;
+ int this_row_offset, this_col_offset;
+
+ int what_stride = x->plane[0].src.stride;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *what = x->plane[0].src.buf;
+ uint8_t *best_address = xd->plane[0].pre[0].buf +
+ (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) +
+ ref_mv->as_mv.col;
+ uint8_t *check_here;
+ unsigned int thissad;
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit);
+
+ for (i = 0; i < search_range; i++) {
+ int best_site = -1;
+ int all_in = ((ref_mv->as_mv.row - 1) > x->mv_row_min) &
+ ((ref_mv->as_mv.row + 1) < x->mv_row_max) &
+ ((ref_mv->as_mv.col - 1) > x->mv_col_min) &
+ ((ref_mv->as_mv.col + 1) < x->mv_col_max);
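+    // Note: the bitwise '&' (rather than '&&') keeps this bounds check
+    // branch-free: each comparison yields 0 or 1, so the conjunction can be
+    // computed without short-circuit jumps.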
+
+ if (all_in) {
+ unsigned int sad_array[4];
+ unsigned char const *block_offset[4];
+ block_offset[0] = best_address - in_what_stride;
+ block_offset[1] = best_address - 1;
+ block_offset[2] = best_address + 1;
+ block_offset[3] = best_address + in_what_stride;
+
+ fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
+
+ for (j = 0; j < 4; j++) {
+ if (sad_array[j] < bestsad) {
+ this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
+ this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
+ sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
+ mvsadcost, error_per_bit);
+
+ if (sad_array[j] < bestsad) {
+ bestsad = sad_array[j];
+ best_site = j;
+ }
+ }
+ }
+ } else {
+ for (j = 0; j < 4; j++) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
+ mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
+ }
+ }
+ }
+ }
+ }
+
+ if (best_site == -1)
+ break;
+ else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride + neighbors[best_site].col;
+ }
+ }
+
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+ else
+ return INT_MAX;
+}
+
+/* This function is called when we do joint motion search in comp_inter_inter
+ * mode.
+ */
+int vp9_refining_search_8p_c(MACROBLOCK *x,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2], int_mv *center_mv,
+ const uint8_t *second_pred, int w, int h) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0},
+ {-1, -1}, {1, -1}, {-1, 1}, {1, 1}};
+ int i, j;
+ int this_row_offset, this_col_offset;
+
+ int what_stride = x->plane[0].src.stride;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *what = x->plane[0].src.buf;
+ uint8_t *best_address = xd->plane[0].pre[0].buf +
+ (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) +
+ ref_mv->as_mv.col;
+ uint8_t *check_here;
+ unsigned int thissad;
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ /* Get compound pred by averaging two pred blocks. */
+ bestsad = fn_ptr->sdaf(what, what_stride, best_address, in_what_stride,
+ second_pred, 0x7fffffff) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit);
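+
+  /* Conceptually, sdaf computes the SAD against the per-pixel average of the
+   * reference block and second_pred, roughly:
+   *   avg[i] = (ref[i] + second_pred[i] + 1) >> 1;
+   *   sad  += abs(src[i] - avg[i]);
+   */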
+
+ for (i = 0; i < search_range; i++) {
+ int best_site = -1;
+
+ for (j = 0; j < 8; j++) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+
+ if ((this_col_offset > x->mv_col_min) &&
+ (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) &&
+ (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col +
+ best_address;
+
+ /* Get compound block and use it to calculate SAD. */
+ thissad = fn_ptr->sdaf(what, what_stride, check_here, in_what_stride,
+ second_pred, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
+ mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
+ }
+ }
+ }
+ }
+
+ if (best_site == -1) {
+ break;
+ } else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride +
+ neighbors[best_site].col;
+ }
+ }
+
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX) {
+ // FIXME(rbultje, yunqing): add full-pixel averaging variance functions
+ // so we don't have to use the subpixel with xoff=0,yoff=0 here.
+ return fn_ptr->svaf(best_address, in_what_stride, 0, 0,
+ what, what_stride, (unsigned int *)(&thissad),
+ second_pred) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+ } else {
+ return INT_MAX;
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_mcomp.h b/libvpx/vp9/encoder/vp9_mcomp.h
new file mode 100644
index 0000000..3598fa0
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_mcomp.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_MCOMP_H_
+#define VP9_ENCODER_VP9_MCOMP_H_
+
+#include "vp9/encoder/vp9_block.h"
+#include "vp9/encoder/vp9_variance.h"
+
+// The maximum number of steps in a step search given the largest
+// allowed initial step
+#define MAX_MVSEARCH_STEPS 11
+// Max full pel mv specified in 1 pel units
+#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1)
+// Maximum size of the first step in full pel units
+#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1))
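+// With MAX_MVSEARCH_STEPS == 11, MAX_FULL_PEL_VAL == 2047 and
+// MAX_FIRST_STEP == 1024 (both in full-pel units).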
+
+void vp9_clamp_mv_min_max(MACROBLOCK *x, MV *mv);
+int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost,
+ int *mvcost[2], int weight);
+void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride);
+void vp9_init3smotion_compensation(MACROBLOCK *x, int stride);
+
+struct VP9_COMP;
+int vp9_init_search_range(struct VP9_COMP *cpi, int size);
+
+// Runs sequence of diamond searches in smaller steps for RD
+int vp9_full_pixel_diamond(struct VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *mvp_full, int step_param,
+ int sadpb, int further_steps, int do_refine,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int_mv *ref_mv, int_mv *dst_mv);
+
+int vp9_hex_search(MACROBLOCK *x,
+ int_mv *ref_mv,
+ int search_param,
+ int error_per_bit,
+ int do_init_search,
+ const vp9_variance_fn_ptr_t *vf,
+ int use_mvcost,
+ int_mv *center_mv,
+ int_mv *best_mv);
+int vp9_bigdia_search(MACROBLOCK *x,
+ int_mv *ref_mv,
+ int search_param,
+ int error_per_bit,
+ int do_init_search,
+ const vp9_variance_fn_ptr_t *vf,
+ int use_mvcost,
+ int_mv *center_mv,
+ int_mv *best_mv);
+int vp9_square_search(MACROBLOCK *x,
+ int_mv *ref_mv,
+ int search_param,
+ int error_per_bit,
+ int do_init_search,
+ const vp9_variance_fn_ptr_t *vf,
+ int use_mvcost,
+ int_mv *center_mv,
+ int_mv *best_mv);
+
+typedef int (fractional_mv_step_fp) (
+ MACROBLOCK *x,
+ int_mv *bestmv,
+ int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int forced_stop, // 0 - full, 1 - qtr only, 2 - half only
+ int iters_per_step,
+ int *mvjcost,
+ int *mvcost[2],
+ int *distortion,
+ unsigned int *sse);
+extern fractional_mv_step_fp vp9_find_best_sub_pixel_iterative;
+extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree;
+
+typedef int (fractional_mv_step_comp_fp) (
+ MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int forced_stop, // 0 - full, 1 - qtr only, 2 - half only
+ int iters_per_step,
+ int *mvjcost, int *mvcost[2],
+ int *distortion, unsigned int *sse1,
+ const uint8_t *second_pred,
+ int w, int h);
+extern fractional_mv_step_comp_fp vp9_find_best_sub_pixel_comp_iterative;
+extern fractional_mv_step_comp_fp vp9_find_best_sub_pixel_comp_tree;
+
+typedef int (*vp9_full_search_fn_t)(MACROBLOCK *x,
+ int_mv *ref_mv, int sad_per_bit,
+ int distance, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv, int n);
+
+typedef int (*vp9_refining_search_fn_t)(MACROBLOCK *x,
+ int_mv *ref_mv, int sad_per_bit,
+ int distance,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv);
+
+typedef int (*vp9_diamond_search_fn_t)(MACROBLOCK *x,
+ int_mv *ref_mv, int_mv *best_mv,
+ int search_param, int sad_per_bit,
+ int *num00,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv);
+
+int vp9_refining_search_8p_c(MACROBLOCK *x,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv, const uint8_t *second_pred,
+ int w, int h);
+#endif // VP9_ENCODER_VP9_MCOMP_H_
diff --git a/libvpx/vp9/encoder/vp9_modecosts.c b/libvpx/vp9/encoder/vp9_modecosts.c
new file mode 100644
index 0000000..a5dfaed
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_modecosts.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_treewriter.h"
+#include "vp9/common/vp9_entropymode.h"
+
+
+void vp9_init_mode_costs(VP9_COMP *c) {
+ VP9_COMMON *const cm = &c->common;
+ const vp9_tree_p KT = vp9_intra_mode_tree;
+ int i, j;
+
+ for (i = 0; i < INTRA_MODES; i++) {
+ for (j = 0; j < INTRA_MODES; j++) {
+ vp9_cost_tokens((int *)c->mb.y_mode_costs[i][j], vp9_kf_y_mode_prob[i][j],
+ KT);
+ }
+ }
+
+ // TODO(rbultje) separate tables for superblock costing?
+ vp9_cost_tokens(c->mb.mbmode_cost, cm->fc.y_mode_prob[1],
+ vp9_intra_mode_tree);
+ vp9_cost_tokens(c->mb.intra_uv_mode_cost[1],
+ cm->fc.uv_mode_prob[INTRA_MODES - 1], vp9_intra_mode_tree);
+ vp9_cost_tokens(c->mb.intra_uv_mode_cost[0],
+ vp9_kf_uv_mode_prob[INTRA_MODES - 1],
+ vp9_intra_mode_tree);
+
+ for (i = 0; i <= SWITCHABLE_FILTERS; ++i)
+ vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i],
+ cm->fc.switchable_interp_prob[i],
+ vp9_switchable_interp_tree);
+}
diff --git a/libvpx/vp9/encoder/vp9_modecosts.h b/libvpx/vp9/encoder/vp9_modecosts.h
new file mode 100644
index 0000000..f43033e
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_modecosts.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_MODECOSTS_H_
+#define VP9_ENCODER_VP9_MODECOSTS_H_
+
+void vp9_init_mode_costs(VP9_COMP *x);
+
+#endif // VP9_ENCODER_VP9_MODECOSTS_H_
diff --git a/libvpx/vp9/encoder/vp9_onyx_if.c b/libvpx/vp9/encoder/vp9_onyx_if.c
new file mode 100644
index 0000000..883b31e
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_onyx_if.c
@@ -0,0 +1,4201 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_config.h"
+#include "vp9/common/vp9_filter.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_firstpass.h"
+#include "vp9/encoder/vp9_psnr.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_extend.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_tile_common.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "./vp9_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#if CONFIG_VP9_POSTPROC
+#include "vp9/common/vp9_postproc.h"
+#endif
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/vpx_timer.h"
+
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/encoder/vp9_mbgraph.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/encoder/vp9_bitstream.h"
+#include "vp9/encoder/vp9_picklpf.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/encoder/vp9_temporal_filter.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <limits.h>
+
+extern void print_tree_update_probs();
+
+static void set_default_lf_deltas(struct loopfilter *lf);
+
+#define DEFAULT_INTERP_FILTER SWITCHABLE
+
+#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */
+
+#define ALTREF_HIGH_PRECISION_MV 1 /* whether to use high precision mv
+ for altref computation */
+#define HIGH_PRECISION_MV_QTHRESH 200 /* Q threshold for use of high precision
+ mv. Choose a very high value for
+ now so that HIGH_PRECISION is always
+ chosen */
+
+#if CONFIG_INTERNAL_STATS
+#include "math.h"
+
+extern double vp9_calc_ssim(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, int lumamask,
+ double *weight);
+
+
+extern double vp9_calc_ssimg(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, double *ssim_y,
+ double *ssim_u, double *ssim_v);
+
+
+#endif
+
+// #define OUTPUT_YUV_REC
+
+#ifdef OUTPUT_YUV_SRC
+FILE *yuv_file;
+#endif
+#ifdef OUTPUT_YUV_REC
+FILE *yuv_rec_file;
+#endif
+
+#if 0
+FILE *framepsnr;
+FILE *kf_list;
+FILE *keyfile;
+#endif
+
+
+#ifdef ENTROPY_STATS
+extern int intra_mode_stats[INTRA_MODES]
+ [INTRA_MODES]
+ [INTRA_MODES];
+#endif
+
+#ifdef MODE_STATS
+extern void init_tx_count_stats();
+extern void write_tx_count_stats();
+extern void init_switchable_interp_stats();
+extern void write_switchable_interp_stats();
+#endif
+
+#ifdef SPEEDSTATS
+unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+#endif
+
+#if defined(SECTIONBITS_OUTPUT)
+extern unsigned __int64 Sectionbits[500];
+#endif
+
+extern void vp9_init_quantizer(VP9_COMP *cpi);
+
+// Tables relating active max Q to active min Q
+static int kf_low_motion_minq[QINDEX_RANGE];
+static int kf_high_motion_minq[QINDEX_RANGE];
+static int gf_low_motion_minq[QINDEX_RANGE];
+static int gf_high_motion_minq[QINDEX_RANGE];
+static int inter_minq[QINDEX_RANGE];
+
+static INLINE void Scale2Ratio(int mode, int *hr, int *hs) {
+ switch (mode) {
+ case NORMAL:
+ *hr = 1;
+ *hs = 1;
+ break;
+ case FOURFIVE:
+ *hr = 4;
+ *hs = 5;
+ break;
+ case THREEFIVE:
+ *hr = 3;
+ *hs = 5;
+ break;
+ case ONETWO:
+ *hr = 1;
+ *hs = 2;
+ break;
+ default:
+ *hr = 1;
+ *hs = 1;
+ assert(0);
+ break;
+ }
+}
+
+// Functions to compute the active minq lookup table entries based on a
+// formulaic approach to facilitate easier adjustment of the Q tables.
+// The formulae were derived from computing a 3rd order polynomial best
+// fit to the original data (after plotting real maxq vs minq (not q index))
+static int calculate_minq_index(double maxq,
+ double x3, double x2, double x1, double c) {
+ int i;
+ const double minqtarget = MIN(((x3 * maxq + x2) * maxq + x1) * maxq + c,
+ maxq);
+
+ // Special case handling to deal with the step from q2.0
+ // down to lossless mode represented by q 1.0.
+ if (minqtarget <= 2.0)
+ return 0;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ if (minqtarget <= vp9_convert_qindex_to_q(i))
+ return i;
+ }
+
+ return QINDEX_RANGE - 1;
+}
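+
+// Note: the minqtarget expression above is the Horner form of the cubic
+//   minqtarget = x3 * maxq^3 + x2 * maxq^2 + x1 * maxq + c,
+// clamped so that the target never exceeds maxq itself.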
+
+static void init_minq_luts(void) {
+ int i;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ const double maxq = vp9_convert_qindex_to_q(i);
+
+ kf_low_motion_minq[i] = calculate_minq_index(maxq,
+ 0.000001,
+ -0.0004,
+ 0.15,
+ 0.0);
+ kf_high_motion_minq[i] = calculate_minq_index(maxq,
+ 0.000002,
+ -0.0012,
+ 0.5,
+ 0.0);
+
+ gf_low_motion_minq[i] = calculate_minq_index(maxq,
+ 0.0000015,
+ -0.0009,
+ 0.33,
+ 0.0);
+ gf_high_motion_minq[i] = calculate_minq_index(maxq,
+ 0.0000021,
+ -0.00125,
+ 0.45,
+ 0.0);
+ inter_minq[i] = calculate_minq_index(maxq,
+ 0.00000271,
+ -0.00113,
+ 0.697,
+ 0.0);
+ }
+}
+
+static void set_mvcost(MACROBLOCK *mb) {
+ if (mb->e_mbd.allow_high_precision_mv) {
+ mb->mvcost = mb->nmvcost_hp;
+ mb->mvsadcost = mb->nmvsadcost_hp;
+ } else {
+ mb->mvcost = mb->nmvcost;
+ mb->mvsadcost = mb->nmvsadcost;
+ }
+}
+
+void vp9_initialize_enc() {
+ static int init_done = 0;
+
+ if (!init_done) {
+ vp9_initialize_common();
+ vp9_tokenize_initialize();
+ vp9_init_quant_tables();
+ vp9_init_me_luts();
+ init_minq_luts();
+ // init_base_skip_probs();
+ init_done = 1;
+ }
+}
+
+static void setup_features(VP9_COMMON *cm) {
+ struct loopfilter *const lf = &cm->lf;
+ struct segmentation *const seg = &cm->seg;
+
+ // Set up default state for MB feature flags
+ seg->enabled = 0;
+
+ seg->update_map = 0;
+ seg->update_data = 0;
+ vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
+
+ vp9_clearall_segfeatures(seg);
+
+ lf->mode_ref_delta_enabled = 0;
+ lf->mode_ref_delta_update = 0;
+ vp9_zero(lf->ref_deltas);
+ vp9_zero(lf->mode_deltas);
+ vp9_zero(lf->last_ref_deltas);
+ vp9_zero(lf->last_mode_deltas);
+
+ set_default_lf_deltas(lf);
+}
+
+static void dealloc_compressor_data(VP9_COMP *cpi) {
+  // Delete segmentation map
+ vpx_free(cpi->segmentation_map);
+ cpi->segmentation_map = 0;
+ vpx_free(cpi->common.last_frame_seg_map);
+ cpi->common.last_frame_seg_map = 0;
+ vpx_free(cpi->coding_context.last_frame_seg_map_copy);
+ cpi->coding_context.last_frame_seg_map_copy = 0;
+
+ vpx_free(cpi->active_map);
+ cpi->active_map = 0;
+
+ vp9_free_frame_buffers(&cpi->common);
+
+ vp9_free_frame_buffer(&cpi->last_frame_uf);
+ vp9_free_frame_buffer(&cpi->scaled_source);
+ vp9_free_frame_buffer(&cpi->alt_ref_buffer);
+ vp9_lookahead_destroy(cpi->lookahead);
+
+ vpx_free(cpi->tok);
+ cpi->tok = 0;
+
+ // Activity mask based per mb zbin adjustments
+ vpx_free(cpi->mb_activity_map);
+ cpi->mb_activity_map = 0;
+ vpx_free(cpi->mb_norm_activity_map);
+ cpi->mb_norm_activity_map = 0;
+
+ vpx_free(cpi->mb.pip);
+ cpi->mb.pip = 0;
+}
+
+// Computes a q delta (in "q index" terms) to get from a starting q value
+// to a target q value
+static int compute_qdelta(VP9_COMP *cpi, double qstart, double qtarget) {
+ int i;
+ int start_index = cpi->worst_quality;
+ int target_index = cpi->worst_quality;
+
+ // Convert the average q value to an index.
+ for (i = cpi->best_quality; i < cpi->worst_quality; i++) {
+ start_index = i;
+ if (vp9_convert_qindex_to_q(i) >= qstart)
+ break;
+ }
+
+ // Convert the q target to an index
+ for (i = cpi->best_quality; i < cpi->worst_quality; i++) {
+ target_index = i;
+ if (vp9_convert_qindex_to_q(i) >= qtarget)
+ break;
+ }
+
+ return target_index - start_index;
+}
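+
+// Example: if qstart maps to q index 40 and qtarget maps to q index 30, the
+// returned delta is -10; a negative delta therefore requests a lower q.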
+
+static void configure_static_seg_features(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ struct segmentation *seg = &cm->seg;
+
+ int high_q = (int)(cpi->avg_q > 48.0);
+ int qi_delta;
+
+ // Disable and clear down for KF
+ if (cm->frame_type == KEY_FRAME) {
+ // Clear down the global segmentation map
+ vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+ seg->update_map = 0;
+ seg->update_data = 0;
+ cpi->static_mb_pct = 0;
+
+ // Disable segmentation
+ vp9_disable_segmentation((VP9_PTR)cpi);
+
+ // Clear down the segment features.
+ vp9_clearall_segfeatures(seg);
+ } else if (cpi->refresh_alt_ref_frame) {
+ // If this is an alt ref frame
+ // Clear down the global segmentation map
+ vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+ seg->update_map = 0;
+ seg->update_data = 0;
+ cpi->static_mb_pct = 0;
+
+ // Disable segmentation and individual segment features by default
+ vp9_disable_segmentation((VP9_PTR)cpi);
+ vp9_clearall_segfeatures(seg);
+
+ // Scan frames from current to arf frame.
+ // This function re-enables segmentation if appropriate.
+ vp9_update_mbgraph_stats(cpi);
+
+ // If segmentation was enabled set those features needed for the
+ // arf itself.
+ if (seg->enabled) {
+ seg->update_map = 1;
+ seg->update_data = 1;
+
+ qi_delta = compute_qdelta(cpi, cpi->avg_q, (cpi->avg_q * 0.875));
+ vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, (qi_delta - 2));
+ vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+
+ vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+ vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+
+ // Where relevant assume segment data is delta data
+ seg->abs_delta = SEGMENT_DELTADATA;
+
+ }
+ } else if (seg->enabled) {
+ // All other frames if segmentation has been enabled
+
+ // First normal frame in a valid gf or alt ref group
+ if (cpi->frames_since_golden == 0) {
+ // Set up segment features for normal frames in an arf group
+ if (cpi->source_alt_ref_active) {
+ seg->update_map = 0;
+ seg->update_data = 1;
+ seg->abs_delta = SEGMENT_DELTADATA;
+
+ qi_delta = compute_qdelta(cpi, cpi->avg_q,
+ (cpi->avg_q * 1.125));
+ vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, (qi_delta + 2));
+ vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+
+ vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+ vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+
+ // Segment coding disabled for compred testing
+ if (high_q || (cpi->static_mb_pct == 100)) {
+ vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+ vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+ }
+ } else {
+ // Disable segmentation and clear down features if alt ref
+ // is not active for this group
+
+ vp9_disable_segmentation((VP9_PTR)cpi);
+
+ vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+
+ seg->update_map = 0;
+ seg->update_data = 0;
+
+ vp9_clearall_segfeatures(seg);
+ }
+ } else if (cpi->is_src_frame_alt_ref) {
+ // Special case where we are coding over the top of a previous
+ // alt ref frame.
+ // Segment coding disabled for compred testing
+
+ // Enable ref frame features for segment 0 as well
+ vp9_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
+ vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+
+ // All mbs should use ALTREF_FRAME
+ vp9_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
+ vp9_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ vp9_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
+ vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+
+ // Skip all MBs if high Q (0,0 mv and skip coeffs)
+ if (high_q) {
+ vp9_enable_segfeature(seg, 0, SEG_LVL_SKIP);
+ vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+ }
+ // Enable data update
+ seg->update_data = 1;
+ } else {
+ // All other frames.
+
+      // No updates - leave things as they are.
+ seg->update_map = 0;
+ seg->update_data = 0;
+ }
+ }
+}
+
+#ifdef ENTROPY_STATS
+void vp9_update_mode_context_stats(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ int i, j;
+ unsigned int (*inter_mode_counts)[INTER_MODES - 1][2] =
+ cm->fc.inter_mode_counts;
+ int64_t (*mv_ref_stats)[INTER_MODES - 1][2] = cpi->mv_ref_stats;
+ FILE *f;
+
+ // Read the past stats counters
+ f = fopen("mode_context.bin", "rb");
+ if (!f) {
+ vpx_memset(cpi->mv_ref_stats, 0, sizeof(cpi->mv_ref_stats));
+ } else {
+ fread(cpi->mv_ref_stats, sizeof(cpi->mv_ref_stats), 1, f);
+ fclose(f);
+ }
+
+ // Add in the values for this frame
+ for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
+ for (j = 0; j < INTER_MODES - 1; j++) {
+ mv_ref_stats[i][j][0] += (int64_t)inter_mode_counts[i][j][0];
+ mv_ref_stats[i][j][1] += (int64_t)inter_mode_counts[i][j][1];
+ }
+ }
+
+ // Write back the accumulated stats
+ f = fopen("mode_context.bin", "wb");
+ fwrite(cpi->mv_ref_stats, sizeof(cpi->mv_ref_stats), 1, f);
+ fclose(f);
+}
+
+void print_mode_context(VP9_COMP *cpi) {
+ FILE *f = fopen("vp9_modecont.c", "a");
+ int i, j;
+
+ fprintf(f, "#include \"vp9_entropy.h\"\n");
+ fprintf(
+ f,
+ "const int inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1] =");
+ fprintf(f, "{\n");
+ for (j = 0; j < INTER_MODE_CONTEXTS; j++) {
+ fprintf(f, " {/* %d */ ", j);
+ fprintf(f, " ");
+ for (i = 0; i < INTER_MODES - 1; i++) {
+ int this_prob;
+ int64_t count = cpi->mv_ref_stats[j][i][0] + cpi->mv_ref_stats[j][i][1];
+ if (count)
+ this_prob = ((cpi->mv_ref_stats[j][i][0] * 256) + (count >> 1)) / count;
+ else
+ this_prob = 128;
+
+ // context probs
+ fprintf(f, "%5d, ", this_prob);
+ }
+ fprintf(f, " },\n");
+ }
+
+ fprintf(f, "};\n");
+ fclose(f);
+}
+#endif // ENTROPY_STATS
+
+// DEBUG: Print out the segment id of each MB in the current frame.
+static void print_seg_map(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ int row, col;
+ int map_index = 0;
+  FILE *statsfile = fopen("segmap.stt", "a");
+
+  if (!statsfile)
+    return;
+
+  fprintf(statsfile, "%10d\n", cm->current_video_frame);
+
+ for (row = 0; row < cpi->common.mi_rows; row++) {
+ for (col = 0; col < cpi->common.mi_cols; col++) {
+ fprintf(statsfile, "%10d", cpi->segmentation_map[map_index]);
+ map_index++;
+ }
+ fprintf(statsfile, "\n");
+ }
+ fprintf(statsfile, "\n");
+
+ fclose(statsfile);
+}
+
+static void update_reference_segmentation_map(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ int row, col;
+ MODE_INFO **mi_8x8, **mi_8x8_ptr = cm->mi_grid_visible;
+ uint8_t *cache_ptr = cm->last_frame_seg_map, *cache;
+
+ for (row = 0; row < cm->mi_rows; row++) {
+ mi_8x8 = mi_8x8_ptr;
+ cache = cache_ptr;
+ for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++)
+ cache[0] = mi_8x8[0]->mbmi.segment_id;
+ mi_8x8_ptr += cm->mode_info_stride;
+ cache_ptr += cm->mi_cols;
+ }
+}
+
+static void set_default_lf_deltas(struct loopfilter *lf) {
+ lf->mode_ref_delta_enabled = 1;
+ lf->mode_ref_delta_update = 1;
+
+ vp9_zero(lf->ref_deltas);
+ vp9_zero(lf->mode_deltas);
+
+ // Test of ref frame deltas
+ lf->ref_deltas[INTRA_FRAME] = 2;
+ lf->ref_deltas[LAST_FRAME] = 0;
+ lf->ref_deltas[GOLDEN_FRAME] = -2;
+ lf->ref_deltas[ALTREF_FRAME] = -2;
+
+ lf->mode_deltas[0] = 0; // Zero
+ lf->mode_deltas[1] = 0; // New mv
+}
+
+static void set_rd_speed_thresholds(VP9_COMP *cpi, int mode) {
+ SPEED_FEATURES *sf = &cpi->sf;
+ int i;
+
+ // Set baseline threshold values
+ for (i = 0; i < MAX_MODES; ++i)
+ sf->thresh_mult[i] = mode == 0 ? -500 : 0;
+
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_NEARESTG] = 0;
+ sf->thresh_mult[THR_NEARESTA] = 0;
+
+ sf->thresh_mult[THR_NEWMV] += 1000;
+ sf->thresh_mult[THR_COMP_NEARESTLA] += 1000;
+ sf->thresh_mult[THR_NEARMV] += 1000;
+ sf->thresh_mult[THR_COMP_NEARESTGA] += 1000;
+
+ sf->thresh_mult[THR_DC] += 1000;
+
+ sf->thresh_mult[THR_NEWG] += 1000;
+ sf->thresh_mult[THR_NEWA] += 1000;
+ sf->thresh_mult[THR_NEARA] += 1000;
+
+ sf->thresh_mult[THR_TM] += 1000;
+
+ sf->thresh_mult[THR_COMP_NEARLA] += 1500;
+ sf->thresh_mult[THR_COMP_NEWLA] += 2000;
+ sf->thresh_mult[THR_NEARG] += 1000;
+ sf->thresh_mult[THR_COMP_NEARGA] += 1500;
+ sf->thresh_mult[THR_COMP_NEWGA] += 2000;
+
+ sf->thresh_mult[THR_SPLITMV] += 2500;
+ sf->thresh_mult[THR_SPLITG] += 2500;
+ sf->thresh_mult[THR_SPLITA] += 2500;
+ sf->thresh_mult[THR_COMP_SPLITLA] += 4500;
+ sf->thresh_mult[THR_COMP_SPLITGA] += 4500;
+
+ sf->thresh_mult[THR_ZEROMV] += 2000;
+ sf->thresh_mult[THR_ZEROG] += 2000;
+ sf->thresh_mult[THR_ZEROA] += 2000;
+ sf->thresh_mult[THR_COMP_ZEROLA] += 2500;
+ sf->thresh_mult[THR_COMP_ZEROGA] += 2500;
+
+ sf->thresh_mult[THR_B_PRED] += 2500;
+ sf->thresh_mult[THR_H_PRED] += 2000;
+ sf->thresh_mult[THR_V_PRED] += 2000;
+ sf->thresh_mult[THR_D45_PRED ] += 2500;
+ sf->thresh_mult[THR_D135_PRED] += 2500;
+ sf->thresh_mult[THR_D117_PRED] += 2500;
+ sf->thresh_mult[THR_D153_PRED] += 2500;
+ sf->thresh_mult[THR_D207_PRED] += 2500;
+ sf->thresh_mult[THR_D63_PRED] += 2500;
+
+ if (cpi->sf.skip_lots_of_modes) {
+ for (i = 0; i < MAX_MODES; ++i)
+ sf->thresh_mult[i] = INT_MAX;
+
+ sf->thresh_mult[THR_DC] = 2000;
+ sf->thresh_mult[THR_TM] = 2000;
+ sf->thresh_mult[THR_NEWMV] = 4000;
+ sf->thresh_mult[THR_NEWG] = 4000;
+ sf->thresh_mult[THR_NEWA] = 4000;
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_NEARESTG] = 0;
+ sf->thresh_mult[THR_NEARESTA] = 0;
+ sf->thresh_mult[THR_NEARMV] = 2000;
+ sf->thresh_mult[THR_NEARG] = 2000;
+ sf->thresh_mult[THR_NEARA] = 2000;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = 2000;
+ sf->thresh_mult[THR_SPLITMV] = 2500;
+ sf->thresh_mult[THR_SPLITG] = 2500;
+ sf->thresh_mult[THR_SPLITA] = 2500;
+ sf->recode_loop = 0;
+ }
+
+ /* disable frame modes if flags not set */
+ if (!(cpi->ref_frame_flags & VP9_LAST_FLAG)) {
+ sf->thresh_mult[THR_NEWMV ] = INT_MAX;
+ sf->thresh_mult[THR_NEARESTMV] = INT_MAX;
+ sf->thresh_mult[THR_ZEROMV ] = INT_MAX;
+ sf->thresh_mult[THR_NEARMV ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITMV ] = INT_MAX;
+ }
+ if (!(cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
+ sf->thresh_mult[THR_NEARESTG ] = INT_MAX;
+ sf->thresh_mult[THR_ZEROG ] = INT_MAX;
+ sf->thresh_mult[THR_NEARG ] = INT_MAX;
+ sf->thresh_mult[THR_NEWG ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITG ] = INT_MAX;
+ }
+ if (!(cpi->ref_frame_flags & VP9_ALT_FLAG)) {
+ sf->thresh_mult[THR_NEARESTA ] = INT_MAX;
+ sf->thresh_mult[THR_ZEROA ] = INT_MAX;
+ sf->thresh_mult[THR_NEARA ] = INT_MAX;
+ sf->thresh_mult[THR_NEWA ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITA ] = INT_MAX;
+ }
+
+ if ((cpi->ref_frame_flags & (VP9_LAST_FLAG | VP9_ALT_FLAG)) !=
+ (VP9_LAST_FLAG | VP9_ALT_FLAG)) {
+ sf->thresh_mult[THR_COMP_ZEROLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEWLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = INT_MAX;
+ }
+ if ((cpi->ref_frame_flags & (VP9_GOLD_FLAG | VP9_ALT_FLAG)) !=
+ (VP9_GOLD_FLAG | VP9_ALT_FLAG)) {
+ sf->thresh_mult[THR_COMP_ZEROGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARESTGA] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEWGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = INT_MAX;
+ }
+
+ if (sf->disable_splitmv == 1) {
+ sf->thresh_mult[THR_SPLITMV ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITG ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITA ] = INT_MAX;
+
+ sf->thresh_mult[THR_COMP_SPLITLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = INT_MAX;
+ }
+}
+
+void vp9_set_speed_features(VP9_COMP *cpi) {
+ SPEED_FEATURES *sf = &cpi->sf;
+ int mode = cpi->compressor_speed;
+ int speed = cpi->speed;
+ int i;
+
+  // Only modes 0 and 1 supported for now in experimental code base
+ if (mode > 1)
+ mode = 1;
+
+ // Initialise default mode frequency sampling variables
+ for (i = 0; i < MAX_MODES; i ++) {
+ cpi->mode_check_freq[i] = 0;
+ cpi->mode_test_hit_counts[i] = 0;
+ cpi->mode_chosen_counts[i] = 0;
+ }
+
+ // best quality defaults
+ sf->RD = 1;
+ sf->search_method = NSTEP;
+ sf->auto_filter = 1;
+ sf->recode_loop = 1;
+ sf->subpel_search_method = SUBPEL_TREE;
+ sf->subpel_iters_per_step = 2;
+ sf->optimize_coefficients = !cpi->oxcf.lossless;
+ sf->reduce_first_step_size = 0;
+ sf->auto_mv_step_size = 0;
+ sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
+ sf->comp_inter_joint_search_thresh = BLOCK_4X4;
+ sf->adaptive_rd_thresh = 0;
+ sf->use_lastframe_partitioning = 0;
+ sf->tx_size_search_method = USE_FULL_RD;
+ sf->use_lp32x32fdct = 0;
+ sf->adaptive_motion_search = 0;
+ sf->use_avoid_tested_higherror = 0;
+ sf->reference_masking = 0;
+ sf->skip_lots_of_modes = 0;
+ sf->partition_by_variance = 0;
+ sf->use_one_partition_size_always = 0;
+ sf->less_rectangular_check = 0;
+ sf->use_square_partition_only = 0;
+ sf->auto_min_max_partition_size = 0;
+ sf->auto_min_max_partition_interval = 0;
+ sf->auto_min_max_partition_count = 0;
+ sf->max_partition_size = BLOCK_64X64;
+ sf->min_partition_size = BLOCK_4X4;
+ sf->adjust_partitioning_from_last_frame = 0;
+ sf->last_partitioning_redo_frequency = 4;
+ sf->disable_splitmv = 0;
+ sf->mode_search_skip_flags = 0;
+ sf->disable_split_var_thresh = 0;
+ sf->disable_filter_search_var_thresh = 0;
+ sf->intra_y_mode_mask = ALL_INTRA_MODES;
+ sf->intra_uv_mode_mask = ALL_INTRA_MODES;
+ sf->use_rd_breakout = 0;
+ sf->skip_encode_sb = 0;
+ sf->use_uv_intra_rd_estimate = 0;
+ sf->use_fast_lpf_pick = 0;
+ sf->use_fast_coef_updates = 0;
+ sf->using_small_partition_info = 0;
+ sf->mode_skip_start = MAX_MODES; // Mode index at which mode skip mask set
+
+#if CONFIG_MULTIPLE_ARF
+ // Switch segmentation off.
+ sf->static_segmentation = 0;
+#else
+ sf->static_segmentation = 0;
+#endif
+
+ switch (mode) {
+ case 0: // best quality mode
+ break;
+
+ case 1:
+#if CONFIG_MULTIPLE_ARF
+ // Switch segmentation off.
+ sf->static_segmentation = 0;
+#else
+ sf->static_segmentation = 0;
+#endif
+ sf->use_avoid_tested_higherror = 1;
+ sf->adaptive_rd_thresh = MIN((speed + 1), 4);
+
+ if (speed == 1) {
+ sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
+ sf->less_rectangular_check = 1;
+ sf->tx_size_search_method = ((cpi->common.frame_type == KEY_FRAME ||
+ cpi->common.intra_only ||
+ cpi->common.show_frame == 0) ?
+ USE_FULL_RD :
+ USE_LARGESTALL);
+ sf->use_square_partition_only = !(cpi->common.frame_type == KEY_FRAME ||
+ cpi->common.intra_only ||
+ cpi->common.show_frame == 0);
+ sf->disable_splitmv =
+ (MIN(cpi->common.width, cpi->common.height) >= 720)? 1 : 0;
+ sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH |
+ FLAG_SKIP_INTRA_BESTINTER |
+ FLAG_SKIP_COMP_BESTINTRA |
+ FLAG_SKIP_INTRA_LOWVAR;
+ sf->use_uv_intra_rd_estimate = 1;
+ sf->use_rd_breakout = 1;
+ sf->skip_encode_sb = 1;
+ sf->use_lp32x32fdct = 1;
+ sf->adaptive_motion_search = 1;
+ sf->auto_mv_step_size = 1;
+
+ sf->auto_min_max_partition_size = 1;
+ sf->auto_min_max_partition_interval = 1;
+ // FIXME(jingning): temporarily turn off disable_split_var_thresh
+ // during refactoring process. will get this back after finishing
+ // the main framework of partition search type.
+ sf->disable_split_var_thresh = 0;
+ sf->disable_filter_search_var_thresh = 16;
+
+ sf->intra_y_mode_mask = INTRA_DC_TM_H_V;
+ sf->intra_uv_mode_mask = INTRA_DC_TM_H_V;
+ sf->use_fast_coef_updates = 1;
+ sf->mode_skip_start = 9;
+ }
+ if (speed == 2) {
+ sf->less_rectangular_check = 1;
+ sf->use_square_partition_only = 1;
+ sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
+ sf->use_lastframe_partitioning = 1;
+ sf->adjust_partitioning_from_last_frame = 1;
+ sf->last_partitioning_redo_frequency = 3;
+ sf->tx_size_search_method = ((cpi->common.frame_type == KEY_FRAME ||
+ cpi->common.intra_only ||
+ cpi->common.show_frame == 0) ?
+ USE_FULL_RD :
+ USE_LARGESTALL);
+ sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH |
+ FLAG_SKIP_INTRA_BESTINTER |
+ FLAG_SKIP_COMP_BESTINTRA |
+ FLAG_SKIP_COMP_REFMISMATCH |
+ FLAG_SKIP_INTRA_LOWVAR |
+ FLAG_EARLY_TERMINATE;
+ sf->intra_y_mode_mask = INTRA_DC_TM;
+ sf->intra_uv_mode_mask = INTRA_DC_TM;
+ sf->use_uv_intra_rd_estimate = 1;
+ sf->use_rd_breakout = 1;
+ sf->skip_encode_sb = 1;
+ sf->use_lp32x32fdct = 1;
+ sf->adaptive_motion_search = 1;
+ sf->using_small_partition_info = 0;
+ sf->disable_splitmv =
+ (MIN(cpi->common.width, cpi->common.height) >= 720)? 1 : 0;
+ sf->auto_mv_step_size = 1;
+ sf->search_method = SQUARE;
+ sf->subpel_iters_per_step = 1;
+ sf->use_fast_lpf_pick = 1;
+ sf->auto_min_max_partition_size = 1;
+ sf->auto_min_max_partition_interval = 2;
+ sf->disable_split_var_thresh = 32;
+ sf->disable_filter_search_var_thresh = 32;
+ sf->use_fast_coef_updates = 2;
+ sf->mode_skip_start = 9;
+ }
+ if (speed == 3) {
+ sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
+ sf->partition_by_variance = 1;
+ sf->tx_size_search_method = ((cpi->common.frame_type == KEY_FRAME ||
+ cpi->common.intra_only ||
+ cpi->common.show_frame == 0) ?
+ USE_FULL_RD :
+ USE_LARGESTALL);
+ sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH |
+ FLAG_SKIP_INTRA_BESTINTER |
+ FLAG_SKIP_COMP_BESTINTRA |
+ FLAG_SKIP_COMP_REFMISMATCH |
+ FLAG_SKIP_INTRA_LOWVAR |
+ FLAG_EARLY_TERMINATE;
+ sf->use_rd_breakout = 1;
+ sf->skip_encode_sb = 1;
+ sf->use_lp32x32fdct = 1;
+ sf->disable_splitmv = 1;
+ sf->auto_mv_step_size = 1;
+ sf->search_method = BIGDIA;
+ sf->subpel_iters_per_step = 1;
+ sf->disable_split_var_thresh = 64;
+ sf->disable_filter_search_var_thresh = 64;
+ sf->intra_y_mode_mask = INTRA_DC_ONLY;
+ sf->intra_uv_mode_mask = INTRA_DC_ONLY;
+ sf->use_fast_coef_updates = 2;
+ sf->mode_skip_start = 9;
+ }
+ if (speed == 4) {
+ sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
+ sf->use_one_partition_size_always = 1;
+ sf->always_this_block_size = BLOCK_16X16;
+ sf->tx_size_search_method = ((cpi->common.frame_type == KEY_FRAME ||
+ cpi->common.intra_only ||
+ cpi->common.show_frame == 0) ?
+ USE_FULL_RD :
+ USE_LARGESTALL);
+ sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH |
+ FLAG_SKIP_INTRA_BESTINTER |
+ FLAG_SKIP_COMP_BESTINTRA |
+ FLAG_SKIP_COMP_REFMISMATCH |
+ FLAG_SKIP_INTRA_LOWVAR |
+ FLAG_EARLY_TERMINATE;
+ sf->use_rd_breakout = 1;
+ sf->use_lp32x32fdct = 1;
+ sf->optimize_coefficients = 0;
+ sf->auto_mv_step_size = 1;
+ // sf->reduce_first_step_size = 1;
+ // sf->reference_masking = 1;
+
+ sf->disable_splitmv = 1;
+ sf->search_method = HEX;
+ sf->subpel_iters_per_step = 1;
+ sf->disable_split_var_thresh = 64;
+ sf->disable_filter_search_var_thresh = 96;
+ sf->intra_y_mode_mask = INTRA_DC_ONLY;
+ sf->intra_uv_mode_mask = INTRA_DC_ONLY;
+ sf->use_fast_coef_updates = 2;
+ sf->mode_skip_start = 9;
+ }
+ break;
+
+  } /* switch */
+
+ // Set rd thresholds based on mode and speed setting
+ set_rd_speed_thresholds(cpi, mode);
+
+ // Slow quant, dct and trellis not worthwhile for first pass
+ // so make sure they are always turned off.
+ if (cpi->pass == 1) {
+ sf->optimize_coefficients = 0;
+ }
+
+ cpi->mb.fwd_txm16x16 = vp9_short_fdct16x16;
+ cpi->mb.fwd_txm8x8 = vp9_short_fdct8x8;
+ cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
+ cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
+ if (cpi->oxcf.lossless || cpi->mb.e_mbd.lossless) {
+ cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
+ cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
+ }
+
+ cpi->mb.quantize_b_4x4 = vp9_regular_quantize_b_4x4;
+
+ if (cpi->sf.subpel_search_method == SUBPEL_ITERATIVE) {
+ cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_iterative;
+ cpi->find_fractional_mv_step_comp = vp9_find_best_sub_pixel_comp_iterative;
+ } else if (cpi->sf.subpel_search_method == SUBPEL_TREE) {
+ cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_tree;
+ cpi->find_fractional_mv_step_comp = vp9_find_best_sub_pixel_comp_tree;
+ }
+
+ cpi->mb.optimize = cpi->sf.optimize_coefficients == 1 && cpi->pass != 1;
+
+#ifdef SPEEDSTATS
+ frames_at_speed[cpi->speed]++;
+#endif
+}
+
+static void alloc_raw_frame_buffers(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ cpi->lookahead = vp9_lookahead_init(cpi->oxcf.width, cpi->oxcf.height,
+ cm->subsampling_x, cm->subsampling_y,
+ cpi->oxcf.lag_in_frames);
+ if (!cpi->lookahead)
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate lag buffers");
+
+ if (vp9_realloc_frame_buffer(&cpi->alt_ref_buffer,
+ cpi->oxcf.width, cpi->oxcf.height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate altref buffer");
+}
+
+static int alloc_partition_data(VP9_COMP *cpi) {
+ vpx_free(cpi->mb.pip);
+
+ cpi->mb.pip = vpx_calloc(cpi->common.mode_info_stride *
+ (cpi->common.mi_rows + MI_BLOCK_SIZE),
+ sizeof(PARTITION_INFO));
+ if (!cpi->mb.pip)
+ return 1;
+
+ cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
+
+ return 0;
+}
+
+void vp9_alloc_compressor_data(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ if (vp9_alloc_frame_buffers(cm, cm->width, cm->height))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
+
+ if (alloc_partition_data(cpi))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate partition data");
+
+ if (vp9_alloc_frame_buffer(&cpi->last_frame_uf,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate last frame buffer");
+
+ if (vp9_alloc_frame_buffer(&cpi->scaled_source,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate scaled source buffer");
+
+ vpx_free(cpi->tok);
+
+ {
+ unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
+
+ CHECK_MEM_ERROR(cm, cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
+ }
+
+  // Data used for real-time video-conferencing mode to see if gf needs
+  // refreshing
+ cpi->inter_zz_count = 0;
+ cpi->gf_bad_count = 0;
+ cpi->gf_update_recommended = 0;
+
+ vpx_free(cpi->mb_activity_map);
+ CHECK_MEM_ERROR(cm, cpi->mb_activity_map,
+ vpx_calloc(sizeof(unsigned int),
+ cm->mb_rows * cm->mb_cols));
+
+ vpx_free(cpi->mb_norm_activity_map);
+ CHECK_MEM_ERROR(cm, cpi->mb_norm_activity_map,
+ vpx_calloc(sizeof(unsigned int),
+ cm->mb_rows * cm->mb_cols));
+}
+
+
+static void update_frame_size(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ vp9_update_frame_size(cm);
+
+ // Update size of buffers local to this frame
+ if (vp9_realloc_frame_buffer(&cpi->last_frame_uf,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to reallocate last frame buffer");
+
+ if (vp9_realloc_frame_buffer(&cpi->scaled_source,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to reallocate scaled source buffer");
+
+ {
+ int y_stride = cpi->scaled_source.y_stride;
+
+ if (cpi->sf.search_method == NSTEP) {
+ vp9_init3smotion_compensation(&cpi->mb, y_stride);
+ } else if (cpi->sf.search_method == DIAMOND) {
+ vp9_init_dsmotion_compensation(&cpi->mb, y_stride);
+ }
+ }
+}
+
+
+// TODO: perhaps change the number of steps exposed to the outside world when
+// setting max and min limits. Also this will likely want refining for the
+// extended Q range.
+//
+// Table that converts 0-63 Q range values passed in outside to the Qindex
+// range used internally.
+static const int q_trans[] = {
+ 0, 4, 8, 12, 16, 20, 24, 28,
+ 32, 36, 40, 44, 48, 52, 56, 60,
+ 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124,
+ 128, 132, 136, 140, 144, 148, 152, 156,
+ 160, 164, 168, 172, 176, 180, 184, 188,
+ 192, 196, 200, 204, 208, 212, 216, 220,
+ 224, 228, 232, 236, 240, 244, 249, 255,
+};
+
+int vp9_reverse_trans(int x) {
+ int i;
+
+ for (i = 0; i < 64; i++)
+ if (q_trans[i] >= x)
+ return i;
+
+ return 63;
+}
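+
+// Example: vp9_reverse_trans(50) returns 13, the smallest i for which
+// q_trans[i] >= 50 (q_trans[13] == 52).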
+void vp9_new_framerate(VP9_COMP *cpi, double framerate) {
+ if (framerate < 0.1)
+ framerate = 30;
+
+ cpi->oxcf.framerate = framerate;
+ cpi->output_framerate = cpi->oxcf.framerate;
+  cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
+                                   cpi->output_framerate);
+  cpi->av_per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
+                                      cpi->output_framerate);
+  cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
+                                   cpi->oxcf.two_pass_vbrmin_section / 100);
+
+  cpi->min_frame_bandwidth = MAX(cpi->min_frame_bandwidth,
+                                 FRAME_OVERHEAD_BITS);
+
+ // Set Maximum gf/arf interval
+ cpi->max_gf_interval = 16;
+
+ // Extended interval for genuinely static scenes
+ cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
+
+ // Special conditions when alt ref frame enabled in lagged compress mode
+ if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
+ if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
+ cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+
+ if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
+ cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+ }
+
+ if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval)
+ cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
+}
+
+static int64_t rescale(int val, int64_t num, int denom) {
+ int64_t llnum = num;
+ int64_t llden = denom;
+ int64_t llval = val;
+
+ return (llval * llnum / llden);
+}
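+
+// Note: rescale() is used below to convert buffer-level settings expressed in
+// milliseconds into bits, e.g. level_ms * target_bandwidth / 1000, doing the
+// arithmetic in 64 bits to avoid overflow.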
+
+static void set_tile_limits(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ int min_log2_tile_cols, max_log2_tile_cols;
+ vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+
+ cm->log2_tile_cols = clamp(cpi->oxcf.tile_columns,
+ min_log2_tile_cols, max_log2_tile_cols);
+ cm->log2_tile_rows = cpi->oxcf.tile_rows;
+}
+
+static void init_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *const cm = &cpi->common;
+ int i;
+
+ cpi->oxcf = *oxcf;
+ cpi->goldfreq = 7;
+
+ cm->version = oxcf->version;
+
+ cm->width = oxcf->width;
+ cm->height = oxcf->height;
+ cm->subsampling_x = 0;
+ cm->subsampling_y = 0;
+ vp9_alloc_compressor_data(cpi);
+
+  // vp9_change_config() below includes all joint functionality
+ vp9_change_config(ptr, oxcf);
+
+ // Initialize active best and worst q and average q values.
+ cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
+ cpi->active_best_quality = cpi->oxcf.best_allowed_q;
+ cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
+
+ // Initialise the starting buffer levels
+ cpi->buffer_level = cpi->oxcf.starting_buffer_level;
+ cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
+
+ cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
+ cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
+ cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
+ cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
+
+ cpi->total_actual_bits = 0;
+ cpi->total_target_vs_actual = 0;
+
+ cpi->static_mb_pct = 0;
+
+ cpi->lst_fb_idx = 0;
+ cpi->gld_fb_idx = 1;
+ cpi->alt_fb_idx = 2;
+
+ cpi->current_layer = 0;
+ cpi->use_svc = 0;
+
+ set_tile_limits(cpi);
+
+ cpi->fixed_divide[0] = 0;
+ for (i = 1; i < 512; i++)
+ cpi->fixed_divide[i] = 0x80000 / i;
+}
+
+
+void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *const cm = &cpi->common;
+
+ if (!cpi || !oxcf)
+ return;
+
+ if (cm->version != oxcf->version) {
+ cm->version = oxcf->version;
+ }
+
+ cpi->oxcf = *oxcf;
+
+ switch (cpi->oxcf.Mode) {
+ // Real time and one pass deprecated in test code base
+ case MODE_FIRSTPASS:
+ cpi->pass = 1;
+ cpi->compressor_speed = 1;
+ break;
+
+ case MODE_SECONDPASS:
+ cpi->pass = 2;
+ cpi->compressor_speed = 1;
+ cpi->oxcf.cpu_used = clamp(cpi->oxcf.cpu_used, -5, 5);
+ break;
+
+ case MODE_SECONDPASS_BEST:
+ cpi->pass = 2;
+ cpi->compressor_speed = 0;
+ break;
+ }
+
+ cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
+ cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
+ cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
+
+ cpi->oxcf.lossless = oxcf->lossless;
+ if (cpi->oxcf.lossless) {
+ cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
+ cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
+ } else {
+ cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
+ cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
+ }
+
+ cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
+
+ cpi->ref_frame_flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;
+
+ // cpi->use_golden_frame_only = 0;
+ // cpi->use_last_frame_only = 0;
+ cpi->refresh_golden_frame = 0;
+ cpi->refresh_last_frame = 1;
+ cm->refresh_frame_context = 1;
+ cm->reset_frame_context = 0;
+
+ setup_features(cm);
+ cpi->mb.e_mbd.allow_high_precision_mv = 0; // Default mv precision adaptation
+ set_mvcost(&cpi->mb);
+
+ {
+ int i;
+
+ for (i = 0; i < MAX_SEGMENTS; i++)
+ cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
+ }
+
+ // At the moment the first order values may not be > MAXQ
+ cpi->oxcf.fixed_q = MIN(cpi->oxcf.fixed_q, MAXQ);
+
+ // local file playback mode == really big buffer
+ if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
+ cpi->oxcf.starting_buffer_level = 60000;
+ cpi->oxcf.optimal_buffer_level = 60000;
+ cpi->oxcf.maximum_buffer_size = 240000;
+ }
+
+ // Convert target bandwidth from Kbit/s to Bit/s
+ cpi->oxcf.target_bandwidth *= 1000;
+
+ cpi->oxcf.starting_buffer_level = rescale(cpi->oxcf.starting_buffer_level,
+ cpi->oxcf.target_bandwidth, 1000);
+
+ // Set or reset optimal and maximum buffer levels.
+ if (cpi->oxcf.optimal_buffer_level == 0)
+ cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
+ else
+ cpi->oxcf.optimal_buffer_level = rescale(cpi->oxcf.optimal_buffer_level,
+ cpi->oxcf.target_bandwidth, 1000);
+
+ if (cpi->oxcf.maximum_buffer_size == 0)
+ cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
+ else
+ cpi->oxcf.maximum_buffer_size = rescale(cpi->oxcf.maximum_buffer_size,
+ cpi->oxcf.target_bandwidth, 1000);
+
+ // Set up frame rate and related parameters rate control values.
+ vp9_new_framerate(cpi, cpi->oxcf.framerate);
+
+ // Set absolute upper and lower quality limits
+ cpi->worst_quality = cpi->oxcf.worst_allowed_q;
+ cpi->best_quality = cpi->oxcf.best_allowed_q;
+
+ // active values should only be modified if out of new range
+ cpi->active_worst_quality = clamp(cpi->active_worst_quality,
+ cpi->oxcf.best_allowed_q,
+ cpi->oxcf.worst_allowed_q);
+
+ cpi->active_best_quality = clamp(cpi->active_best_quality,
+ cpi->oxcf.best_allowed_q,
+ cpi->oxcf.worst_allowed_q);
+
+ cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
+
+ cpi->cq_target_quality = cpi->oxcf.cq_level;
+
+ cm->mcomp_filter_type = DEFAULT_INTERP_FILTER;
+
+ cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
+
+ cm->display_width = cpi->oxcf.width;
+ cm->display_height = cpi->oxcf.height;
+
+ // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs)
+ cpi->oxcf.Sharpness = MIN(7, cpi->oxcf.Sharpness);
+
+ cpi->common.lf.sharpness_level = cpi->oxcf.Sharpness;
+
+ if (cpi->initial_width) {
+ // Increasing the size of the frame beyond the first seen frame, or some
+ // otherwise signalled maximum size, is not supported.
+ // TODO(jkoleszar): exit gracefully.
+ assert(cm->width <= cpi->initial_width);
+ assert(cm->height <= cpi->initial_height);
+ }
+ update_frame_size(cpi);
+
+ if (cpi->oxcf.fixed_q >= 0) {
+ cpi->last_q[0] = cpi->oxcf.fixed_q;
+ cpi->last_q[1] = cpi->oxcf.fixed_q;
+ cpi->last_boosted_qindex = cpi->oxcf.fixed_q;
+ }
+
+ cpi->speed = cpi->oxcf.cpu_used;
+
+ if (cpi->oxcf.lag_in_frames == 0) {
+    // Force allow_lag to 0 if lag_in_frames is 0.
+ cpi->oxcf.allow_lag = 0;
+ } else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
+ // Limit on lag buffers as these are not currently dynamically allocated
+ cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
+ }
+
+ // YX Temp
+#if CONFIG_MULTIPLE_ARF
+ vp9_zero(cpi->alt_ref_source);
+#else
+ cpi->alt_ref_source = NULL;
+#endif
+ cpi->is_src_frame_alt_ref = 0;
+
+#if 0
+ // Experimental RD Code
+ cpi->frame_distortion = 0;
+ cpi->last_frame_distortion = 0;
+#endif
+
+ set_tile_limits(cpi);
+}
+
+#define M_LOG2_E 0.693147180559945309417
+#define log2f(x) (log (x) / (float) M_LOG2_E)
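+// (Despite its name, the M_LOG2_E constant above is the value of ln 2, so
+// log(x) / M_LOG2_E evaluates to log2(x) as intended.)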
+
+static void cal_nmvjointsadcost(int *mvjointsadcost) {
+ mvjointsadcost[0] = 600;
+ mvjointsadcost[1] = 300;
+ mvjointsadcost[2] = 300;
+  mvjointsadcost[3] = 300;
+}
+
+static void cal_nmvsadcosts(int *mvsadcost[2]) {
+ int i = 1;
+
+ mvsadcost[0][0] = 0;
+ mvsadcost[1][0] = 0;
+
+ do {
+ double z = 256 * (2 * (log2f(8 * i) + .6));
+ mvsadcost[0][i] = (int)z;
+ mvsadcost[1][i] = (int)z;
+ mvsadcost[0][-i] = (int)z;
+ mvsadcost[1][-i] = (int)z;
+ } while (++i <= MV_MAX);
+}
+
+static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
+ int i = 1;
+
+ mvsadcost[0][0] = 0;
+ mvsadcost[1][0] = 0;
+
+ do {
+ double z = 256 * (2 * (log2f(8 * i) + .6));
+ mvsadcost[0][i] = (int)z;
+ mvsadcost[1][i] = (int)z;
+ mvsadcost[0][-i] = (int)z;
+ mvsadcost[1][-i] = (int)z;
+ } while (++i <= MV_MAX);
+}
+
+VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
+ int i, j;
+ volatile union {
+ VP9_COMP *cpi;
+ VP9_PTR ptr;
+ } ctx;
+
+ VP9_COMP *cpi;
+ VP9_COMMON *cm;
+
+ cpi = ctx.cpi = vpx_memalign(32, sizeof(VP9_COMP));
+ // Check that the CPI instance is valid
+ if (!cpi)
+ return 0;
+
+ cm = &cpi->common;
+
+ vp9_zero(*cpi);
+
+ if (setjmp(cm->error.jmp)) {
+ VP9_PTR ptr = ctx.ptr;
+
+ ctx.cpi->common.error.setjmp = 0;
+ vp9_remove_compressor(&ptr);
+ return 0;
+ }
+
+ cm->error.setjmp = 1;
+
+ CHECK_MEM_ERROR(cm, cpi->mb.ss, vpx_calloc(sizeof(search_site),
+ (MAX_MVSEARCH_STEPS * 8) + 1));
+
+ vp9_create_common(cm);
+
+ init_config((VP9_PTR)cpi, oxcf);
+
+ cm->current_video_frame = 0;
+ cpi->kf_overspend_bits = 0;
+ cpi->kf_bitrate_adjustment = 0;
+ cpi->frames_till_gf_update_due = 0;
+ cpi->gf_overspend_bits = 0;
+ cpi->non_gf_bitrate_adjustment = 0;
+
+ // Set reference frame sign bias for ALTREF frame to 1 (for now)
+ cm->ref_frame_sign_bias[ALTREF_FRAME] = 1;
+
+ cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
+
+ cpi->gold_is_last = 0;
+ cpi->alt_is_last = 0;
+ cpi->gold_is_alt = 0;
+
+ // Spatial scalability
+ cpi->number_spatial_layers = oxcf->ss_number_layers;
+
+ // Create the encoder segmentation map and set all entries to 0
+ CHECK_MEM_ERROR(cm, cpi->segmentation_map,
+ vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+
+  // And a placeholder structure for the coding context, for use if we
+  // want to save and restore it
+ CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
+ vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+
+ CHECK_MEM_ERROR(cm, cpi->active_map, vpx_calloc(cm->MBs, 1));
+ vpx_memset(cpi->active_map, 1, cm->MBs);
+ cpi->active_map_enabled = 0;
+
+ for (i = 0; i < (sizeof(cpi->mbgraph_stats) /
+ sizeof(cpi->mbgraph_stats[0])); i++) {
+ CHECK_MEM_ERROR(cm, cpi->mbgraph_stats[i].mb_stats,
+ vpx_calloc(cm->MBs *
+ sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
+ }
+
+#ifdef ENTROPY_STATS
+ if (cpi->pass != 1)
+ init_context_counters();
+#endif
+
+#ifdef MODE_STATS
+ init_tx_count_stats();
+ init_switchable_interp_stats();
+#endif
+
+  /* Initialize the feed-forward activity masking. */
+ cpi->activity_avg = 90 << 12;
+
+ cpi->frames_since_key = 8; // Give a sensible default for the first frame.
+ cpi->key_frame_frequency = cpi->oxcf.key_freq;
+ cpi->this_key_frame_forced = 0;
+ cpi->next_key_frame_forced = 0;
+
+ cpi->source_alt_ref_pending = 0;
+ cpi->source_alt_ref_active = 0;
+ cpi->refresh_alt_ref_frame = 0;
+
+#if CONFIG_MULTIPLE_ARF
+ // Turn multiple ARF usage on/off. This is a quick hack for the initial test
+ // version. It should eventually be set via the codec API.
+ cpi->multi_arf_enabled = 1;
+
+ if (cpi->multi_arf_enabled) {
+ cpi->sequence_number = 0;
+ cpi->frame_coding_order_period = 0;
+ vp9_zero(cpi->frame_coding_order);
+ vp9_zero(cpi->arf_buffer_idx);
+ }
+#endif
+
+ cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
+#if CONFIG_INTERNAL_STATS
+ cpi->b_calculate_ssimg = 0;
+
+ cpi->count = 0;
+ cpi->bytes = 0;
+
+ if (cpi->b_calculate_psnr) {
+ cpi->total_sq_error = 0.0;
+ cpi->total_sq_error2 = 0.0;
+ cpi->total_y = 0.0;
+ cpi->total_u = 0.0;
+ cpi->total_v = 0.0;
+ cpi->total = 0.0;
+ cpi->totalp_y = 0.0;
+ cpi->totalp_u = 0.0;
+ cpi->totalp_v = 0.0;
+ cpi->totalp = 0.0;
+ cpi->tot_recode_hits = 0;
+ cpi->summed_quality = 0;
+ cpi->summed_weights = 0;
+ cpi->summedp_quality = 0;
+ cpi->summedp_weights = 0;
+ }
+
+ if (cpi->b_calculate_ssimg) {
+ cpi->total_ssimg_y = 0;
+ cpi->total_ssimg_u = 0;
+ cpi->total_ssimg_v = 0;
+ cpi->total_ssimg_all = 0;
+ }
+
+#endif
+
+ cpi->first_time_stamp_ever = INT64_MAX;
+
+ cpi->frames_till_gf_update_due = 0;
+ cpi->key_frame_count = 1;
+
+ cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
+ cpi->ni_tot_qi = 0;
+ cpi->ni_frames = 0;
+ cpi->tot_q = 0.0;
+ cpi->avg_q = vp9_convert_qindex_to_q(cpi->oxcf.worst_allowed_q);
+ cpi->total_byte_count = 0;
+
+ cpi->rate_correction_factor = 1.0;
+ cpi->key_frame_rate_correction_factor = 1.0;
+ cpi->gf_rate_correction_factor = 1.0;
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
+
+ cal_nmvjointsadcost(cpi->mb.nmvjointsadcost);
+ cpi->mb.nmvcost[0] = &cpi->mb.nmvcosts[0][MV_MAX];
+ cpi->mb.nmvcost[1] = &cpi->mb.nmvcosts[1][MV_MAX];
+ cpi->mb.nmvsadcost[0] = &cpi->mb.nmvsadcosts[0][MV_MAX];
+ cpi->mb.nmvsadcost[1] = &cpi->mb.nmvsadcosts[1][MV_MAX];
+ cal_nmvsadcosts(cpi->mb.nmvsadcost);
+
+ cpi->mb.nmvcost_hp[0] = &cpi->mb.nmvcosts_hp[0][MV_MAX];
+ cpi->mb.nmvcost_hp[1] = &cpi->mb.nmvcosts_hp[1][MV_MAX];
+ cpi->mb.nmvsadcost_hp[0] = &cpi->mb.nmvsadcosts_hp[0][MV_MAX];
+ cpi->mb.nmvsadcost_hp[1] = &cpi->mb.nmvsadcosts_hp[1][MV_MAX];
+ cal_nmvsadcosts_hp(cpi->mb.nmvsadcost_hp);
+
+ for (i = 0; i < KEY_FRAME_CONTEXT; i++)
+ cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
+
+#ifdef OUTPUT_YUV_SRC
+ yuv_file = fopen("bd.yuv", "ab");
+#endif
+#ifdef OUTPUT_YUV_REC
+ yuv_rec_file = fopen("rec.yuv", "wb");
+#endif
+
+#if 0
+ framepsnr = fopen("framepsnr.stt", "a");
+ kf_list = fopen("kf_list.stt", "w");
+#endif
+
+ cpi->output_pkt_list = oxcf->output_pkt_list;
+
+ cpi->enable_encode_breakout = 1;
+
+ if (cpi->pass == 1) {
+ vp9_init_first_pass(cpi);
+ } else if (cpi->pass == 2) {
+ size_t packet_sz = sizeof(FIRSTPASS_STATS);
+ int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
+
+ cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
+ cpi->twopass.stats_in = cpi->twopass.stats_in_start;
+ cpi->twopass.stats_in_end = (void *)((char *)cpi->twopass.stats_in
+ + (packets - 1) * packet_sz);
+ vp9_init_second_pass(cpi);
+ }
+
+ vp9_set_speed_features(cpi);
+
+ // Default rd threshold factors for mode selection
+ for (i = 0; i < BLOCK_SIZES; ++i)
+ for (j = 0; j < MAX_MODES; ++j)
+ cpi->rd_thresh_freq_fact[i][j] = 32;
+
+#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SVFHH, SVFHV, SVFHHV, \
+ SDX3F, SDX8F, SDX4DF)\
+ cpi->fn_ptr[BT].sdf = SDF; \
+ cpi->fn_ptr[BT].sdaf = SDAF; \
+ cpi->fn_ptr[BT].vf = VF; \
+ cpi->fn_ptr[BT].svf = SVF; \
+ cpi->fn_ptr[BT].svaf = SVAF; \
+ cpi->fn_ptr[BT].svf_halfpix_h = SVFHH; \
+ cpi->fn_ptr[BT].svf_halfpix_v = SVFHV; \
+ cpi->fn_ptr[BT].svf_halfpix_hv = SVFHHV; \
+ cpi->fn_ptr[BT].sdx3f = SDX3F; \
+ cpi->fn_ptr[BT].sdx8f = SDX8F; \
+ cpi->fn_ptr[BT].sdx4df = SDX4DF;
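+  // BFP fills in one row of the per-block-size function pointer table:
+  // plain and averaging SAD, variance, sub-pel variance, half-pel
+  // shortcuts, and the x3/x8/x4d multi-candidate SAD variants. NULL
+  // entries below simply have no specialized implementation for that
+  // block size.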
+
+ BFP(BLOCK_32X16, vp9_sad32x16, vp9_sad32x16_avg,
+ vp9_variance32x16, vp9_sub_pixel_variance32x16,
+ vp9_sub_pixel_avg_variance32x16, NULL, NULL,
+ NULL, NULL, NULL,
+ vp9_sad32x16x4d)
+
+ BFP(BLOCK_16X32, vp9_sad16x32, vp9_sad16x32_avg,
+ vp9_variance16x32, vp9_sub_pixel_variance16x32,
+ vp9_sub_pixel_avg_variance16x32, NULL, NULL,
+ NULL, NULL, NULL,
+ vp9_sad16x32x4d)
+
+ BFP(BLOCK_64X32, vp9_sad64x32, vp9_sad64x32_avg,
+ vp9_variance64x32, vp9_sub_pixel_variance64x32,
+ vp9_sub_pixel_avg_variance64x32, NULL, NULL,
+ NULL, NULL, NULL,
+ vp9_sad64x32x4d)
+
+ BFP(BLOCK_32X64, vp9_sad32x64, vp9_sad32x64_avg,
+ vp9_variance32x64, vp9_sub_pixel_variance32x64,
+ vp9_sub_pixel_avg_variance32x64, NULL, NULL,
+ NULL, NULL, NULL,
+ vp9_sad32x64x4d)
+
+ BFP(BLOCK_32X32, vp9_sad32x32, vp9_sad32x32_avg,
+ vp9_variance32x32, vp9_sub_pixel_variance32x32,
+ vp9_sub_pixel_avg_variance32x32, vp9_variance_halfpixvar32x32_h,
+ vp9_variance_halfpixvar32x32_v,
+ vp9_variance_halfpixvar32x32_hv, vp9_sad32x32x3, vp9_sad32x32x8,
+ vp9_sad32x32x4d)
+
+ BFP(BLOCK_64X64, vp9_sad64x64, vp9_sad64x64_avg,
+ vp9_variance64x64, vp9_sub_pixel_variance64x64,
+ vp9_sub_pixel_avg_variance64x64, vp9_variance_halfpixvar64x64_h,
+ vp9_variance_halfpixvar64x64_v,
+ vp9_variance_halfpixvar64x64_hv, vp9_sad64x64x3, vp9_sad64x64x8,
+ vp9_sad64x64x4d)
+
+ BFP(BLOCK_16X16, vp9_sad16x16, vp9_sad16x16_avg,
+ vp9_variance16x16, vp9_sub_pixel_variance16x16,
+ vp9_sub_pixel_avg_variance16x16, vp9_variance_halfpixvar16x16_h,
+ vp9_variance_halfpixvar16x16_v,
+ vp9_variance_halfpixvar16x16_hv, vp9_sad16x16x3, vp9_sad16x16x8,
+ vp9_sad16x16x4d)
+
+ BFP(BLOCK_16X8, vp9_sad16x8, vp9_sad16x8_avg,
+ vp9_variance16x8, vp9_sub_pixel_variance16x8,
+ vp9_sub_pixel_avg_variance16x8, NULL, NULL, NULL,
+ vp9_sad16x8x3, vp9_sad16x8x8, vp9_sad16x8x4d)
+
+ BFP(BLOCK_8X16, vp9_sad8x16, vp9_sad8x16_avg,
+ vp9_variance8x16, vp9_sub_pixel_variance8x16,
+ vp9_sub_pixel_avg_variance8x16, NULL, NULL, NULL,
+ vp9_sad8x16x3, vp9_sad8x16x8, vp9_sad8x16x4d)
+
+ BFP(BLOCK_8X8, vp9_sad8x8, vp9_sad8x8_avg,
+ vp9_variance8x8, vp9_sub_pixel_variance8x8,
+ vp9_sub_pixel_avg_variance8x8, NULL, NULL, NULL,
+ vp9_sad8x8x3, vp9_sad8x8x8, vp9_sad8x8x4d)
+
+ BFP(BLOCK_8X4, vp9_sad8x4, vp9_sad8x4_avg,
+ vp9_variance8x4, vp9_sub_pixel_variance8x4,
+ vp9_sub_pixel_avg_variance8x4, NULL, NULL,
+ NULL, NULL, vp9_sad8x4x8,
+ vp9_sad8x4x4d)
+
+ BFP(BLOCK_4X8, vp9_sad4x8, vp9_sad4x8_avg,
+ vp9_variance4x8, vp9_sub_pixel_variance4x8,
+ vp9_sub_pixel_avg_variance4x8, NULL, NULL,
+ NULL, NULL, vp9_sad4x8x8,
+ vp9_sad4x8x4d)
+
+ BFP(BLOCK_4X4, vp9_sad4x4, vp9_sad4x4_avg,
+ vp9_variance4x4, vp9_sub_pixel_variance4x4,
+ vp9_sub_pixel_avg_variance4x4, NULL, NULL, NULL,
+ vp9_sad4x4x3, vp9_sad4x4x8, vp9_sad4x4x4d)
+
+ cpi->full_search_sad = vp9_full_search_sad;
+ cpi->diamond_search_sad = vp9_diamond_search_sad;
+ cpi->refining_search_sad = vp9_refining_search_sad;
+
+ // make sure frame 1 is okay
+ cpi->error_bins[0] = cpi->common.MBs;
+
+ /* vp9_init_quantizer() is first called here. Add check in
+ * vp9_frame_init_quantizer() so that vp9_init_quantizer is only
+ * called later when needed. This will avoid unnecessary calls of
+ * vp9_init_quantizer() for every frame.
+ */
+ vp9_init_quantizer(cpi);
+
+ vp9_loop_filter_init(cm);
+
+ cpi->common.error.setjmp = 0;
+
+  vp9_zero(cpi->y_uv_mode_count);
+
+#ifdef MODE_TEST_HIT_STATS
+  vp9_zero(cpi->mode_test_hits);
+#endif
+
+ return (VP9_PTR) cpi;
+}
+
+void vp9_remove_compressor(VP9_PTR *ptr) {
+ VP9_COMP *cpi = (VP9_COMP *)(*ptr);
+ int i;
+
+ if (!cpi)
+ return;
+
+  if (cpi->common.current_video_frame > 0) {
+ if (cpi->pass == 2) {
+ vp9_end_second_pass(cpi);
+ }
+
+#ifdef ENTROPY_STATS
+ if (cpi->pass != 1) {
+ print_context_counters();
+ print_tree_update_probs();
+ print_mode_context(cpi);
+ }
+#endif
+
+#ifdef MODE_STATS
+ if (cpi->pass != 1) {
+ write_tx_count_stats();
+ write_switchable_interp_stats();
+ }
+#endif
+
+#if CONFIG_INTERNAL_STATS
+
+ vp9_clear_system_state();
+
+ // printf("\n8x8-4x4:%d-%d\n", cpi->t8x8_count, cpi->t4x4_count);
+ if (cpi->pass != 1) {
+ FILE *f = fopen("opsnr.stt", "a");
+ double time_encoded = (cpi->last_end_time_stamp_seen
+ - cpi->first_time_stamp_ever) / 10000000.000;
+    double total_encode_time = (cpi->time_receive_data +
+                                cpi->time_compress_data) / 1000.000;
+ double dr = (double)cpi->bytes * (double) 8 / (double)1000 / time_encoded;
+
+ if (cpi->b_calculate_psnr) {
+ YV12_BUFFER_CONFIG *lst_yv12 =
+ &cpi->common.yv12_fb[cpi->common.ref_frame_map[cpi->lst_fb_idx]];
+ double samples = 3.0 / 2 * cpi->count *
+ lst_yv12->y_width * lst_yv12->y_height;
+ double total_psnr = vp9_mse2psnr(samples, 255.0, cpi->total_sq_error);
+ double total_psnr2 = vp9_mse2psnr(samples, 255.0, cpi->total_sq_error2);
+ double total_ssim = 100 * pow(cpi->summed_quality /
+ cpi->summed_weights, 8.0);
+ double total_ssimp = 100 * pow(cpi->summedp_quality /
+ cpi->summedp_weights, 8.0);
+
+ fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
+ "VPXSSIM\tVPSSIMP\t Time(ms)\n");
+ fprintf(f, "%7.2f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%8.0f\n",
+ dr, cpi->total / cpi->count, total_psnr,
+ cpi->totalp / cpi->count, total_psnr2, total_ssim, total_ssimp,
+ total_encode_time);
+// fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%8.0f %10ld\n",
+// dr, cpi->total / cpi->count, total_psnr,
+// cpi->totalp / cpi->count, total_psnr2, total_ssim,
+// total_encode_time, cpi->tot_recode_hits);
+ }
+
+ if (cpi->b_calculate_ssimg) {
+ fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t Time(ms)\n");
+ fprintf(f, "%7.2f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr,
+ cpi->total_ssimg_y / cpi->count, cpi->total_ssimg_u / cpi->count,
+              cpi->total_ssimg_v / cpi->count,
+              cpi->total_ssimg_all / cpi->count, total_encode_time);
+// fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f %10ld\n", dr,
+// cpi->total_ssimg_y / cpi->count, cpi->total_ssimg_u / cpi->count,
+// cpi->total_ssimg_v / cpi->count, cpi->total_ssimg_all / cpi->count, total_encode_time, cpi->tot_recode_hits);
+ }
+
+ fclose(f);
+ }
+
+#endif
+
+#ifdef MODE_TEST_HIT_STATS
+ if (cpi->pass != 1) {
+ double norm_per_pixel_mode_tests = 0;
+ double norm_counts[BLOCK_SIZES];
+ int i;
+ int sb64_per_frame;
+ int norm_factors[BLOCK_SIZES] =
+ {256, 128, 128, 64, 32, 32, 16, 8, 8, 4, 2, 2, 1};
+ FILE *f = fopen("mode_hit_stats.stt", "a");
+
+ // On average, how many mode tests do we do
+ for (i = 0; i < BLOCK_SIZES; ++i) {
+ norm_counts[i] = (double)cpi->mode_test_hits[i] /
+ (double)norm_factors[i];
+ norm_per_pixel_mode_tests += norm_counts[i];
+ }
+ // Convert to a number per 64x64 and per frame
+ sb64_per_frame = ((cpi->common.height + 63) / 64) *
+ ((cpi->common.width + 63) / 64);
+ norm_per_pixel_mode_tests =
+ norm_per_pixel_mode_tests /
+ (double)(cpi->common.current_video_frame * sb64_per_frame);
+
+ fprintf(f, "%6.4f\n", norm_per_pixel_mode_tests);
+ fclose(f);
+ }
+#endif
+
+#ifdef ENTROPY_STATS
+ {
+ int i, j, k;
+ FILE *fmode = fopen("vp9_modecontext.c", "w");
+
+ fprintf(fmode, "\n#include \"vp9_entropymode.h\"\n\n");
+ fprintf(fmode, "const unsigned int vp9_kf_default_bmode_counts ");
+ fprintf(fmode, "[INTRA_MODES][INTRA_MODES]"
+ "[INTRA_MODES] =\n{\n");
+
+ for (i = 0; i < INTRA_MODES; i++) {
+
+ fprintf(fmode, " { // Above Mode : %d\n", i);
+
+ for (j = 0; j < INTRA_MODES; j++) {
+
+ fprintf(fmode, " {");
+
+ for (k = 0; k < INTRA_MODES; k++) {
+ if (!intra_mode_stats[i][j][k])
+ fprintf(fmode, " %5d, ", 1);
+ else
+ fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
+ }
+
+ fprintf(fmode, "}, // left_mode %d\n", j);
+
+ }
+
+ fprintf(fmode, " },\n");
+
+ }
+
+ fprintf(fmode, "};\n");
+ fclose(fmode);
+ }
+#endif
+
+
+#if defined(SECTIONBITS_OUTPUT)
+
+ if (0) {
+ int i;
+ FILE *f = fopen("tokenbits.stt", "a");
+
+ for (i = 0; i < 28; i++)
+ fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
+
+ fprintf(f, "\n");
+ fclose(f);
+ }
+
+#endif
+
+#if 0
+ {
+ printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
+    printf("\n_frames receive_data encode_mb_row compress_frame Total\n");
+ printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame,
+ cpi->time_receive_data / 1000, cpi->time_encode_sb_row / 1000,
+ cpi->time_compress_data / 1000,
+ (cpi->time_receive_data + cpi->time_compress_data) / 1000);
+ }
+#endif
+
+ }
+
+ dealloc_compressor_data(cpi);
+ vpx_free(cpi->mb.ss);
+ vpx_free(cpi->tok);
+
+ for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]); i++) {
+ vpx_free(cpi->mbgraph_stats[i].mb_stats);
+ }
+
+ vp9_remove_common(&cpi->common);
+ vpx_free(cpi);
+ *ptr = 0;
+
+#ifdef OUTPUT_YUV_SRC
+ fclose(yuv_file);
+#endif
+#ifdef OUTPUT_YUV_REC
+ fclose(yuv_rec_file);
+#endif
+
+#if 0
+
+ if (keyfile)
+ fclose(keyfile);
+
+ if (framepsnr)
+ fclose(framepsnr);
+
+ if (kf_list)
+ fclose(kf_list);
+
+#endif
+
+}
+
+
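+// Sum of squared error between two planes: the interior is covered in
+// 16x16 tiles via vp9_mse16x16, and the right/bottom strips that do not
+// fill a whole tile fall back to the scalar loops below.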
+static uint64_t calc_plane_error(uint8_t *orig, int orig_stride,
+ uint8_t *recon, int recon_stride,
+ unsigned int cols, unsigned int rows) {
+ unsigned int row, col;
+ uint64_t total_sse = 0;
+ int diff;
+
+ for (row = 0; row + 16 <= rows; row += 16) {
+ for (col = 0; col + 16 <= cols; col += 16) {
+ unsigned int sse;
+
+ vp9_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
+ total_sse += sse;
+ }
+
+ /* Handle odd-sized width */
+ if (col < cols) {
+ unsigned int border_row, border_col;
+ uint8_t *border_orig = orig;
+ uint8_t *border_recon = recon;
+
+ for (border_row = 0; border_row < 16; border_row++) {
+ for (border_col = col; border_col < cols; border_col++) {
+ diff = border_orig[border_col] - border_recon[border_col];
+ total_sse += diff * diff;
+ }
+
+ border_orig += orig_stride;
+ border_recon += recon_stride;
+ }
+ }
+
+ orig += orig_stride * 16;
+ recon += recon_stride * 16;
+ }
+
+ /* Handle odd-sized height */
+ for (; row < rows; row++) {
+ for (col = 0; col < cols; col++) {
+ diff = orig[col] - recon[col];
+ total_sse += diff * diff;
+ }
+
+ orig += orig_stride;
+ recon += recon_stride;
+ }
+
+ return total_sse;
+}
+
+
+static void generate_psnr_packet(VP9_COMP *cpi) {
+ YV12_BUFFER_CONFIG *orig = cpi->Source;
+ YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+ struct vpx_codec_cx_pkt pkt;
+ uint64_t sse;
+ int i;
+ unsigned int width = orig->y_crop_width;
+ unsigned int height = orig->y_crop_height;
+
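+  // Packet index convention: [0] accumulates the combined Y+U+V totals,
+  // while [1]..[3] hold the per-plane Y, U and V values respectively.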
+ pkt.kind = VPX_CODEC_PSNR_PKT;
+ sse = calc_plane_error(orig->y_buffer, orig->y_stride,
+ recon->y_buffer, recon->y_stride,
+ width, height);
+ pkt.data.psnr.sse[0] = sse;
+ pkt.data.psnr.sse[1] = sse;
+ pkt.data.psnr.samples[0] = width * height;
+ pkt.data.psnr.samples[1] = width * height;
+
+ width = orig->uv_crop_width;
+ height = orig->uv_crop_height;
+
+ sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
+ recon->u_buffer, recon->uv_stride,
+ width, height);
+ pkt.data.psnr.sse[0] += sse;
+ pkt.data.psnr.sse[2] = sse;
+ pkt.data.psnr.samples[0] += width * height;
+ pkt.data.psnr.samples[2] = width * height;
+
+ sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
+ recon->v_buffer, recon->uv_stride,
+ width, height);
+ pkt.data.psnr.sse[0] += sse;
+ pkt.data.psnr.sse[3] = sse;
+ pkt.data.psnr.samples[0] += width * height;
+ pkt.data.psnr.samples[3] = width * height;
+
+ for (i = 0; i < 4; i++)
+ pkt.data.psnr.psnr[i] = vp9_mse2psnr(pkt.data.psnr.samples[i], 255.0,
+ (double)pkt.data.psnr.sse[i]);
+
+ vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
+}
+
+
+int vp9_use_as_reference(VP9_PTR ptr, int ref_frame_flags) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+
+ if (ref_frame_flags > 7)
+ return -1;
+
+ cpi->ref_frame_flags = ref_frame_flags;
+ return 0;
+}
+
+int vp9_update_reference(VP9_PTR ptr, int ref_frame_flags) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+
+ if (ref_frame_flags > 7)
+ return -1;
+
+ cpi->refresh_golden_frame = 0;
+ cpi->refresh_alt_ref_frame = 0;
+ cpi->refresh_last_frame = 0;
+
+ if (ref_frame_flags & VP9_LAST_FLAG)
+ cpi->refresh_last_frame = 1;
+
+ if (ref_frame_flags & VP9_GOLD_FLAG)
+ cpi->refresh_golden_frame = 1;
+
+ if (ref_frame_flags & VP9_ALT_FLAG)
+ cpi->refresh_alt_ref_frame = 1;
+
+ return 0;
+}
+
+int vp9_copy_reference_enc(VP9_PTR ptr, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *cm = &cpi->common;
+ int ref_fb_idx;
+
+ if (ref_frame_flag == VP9_LAST_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->lst_fb_idx];
+ else if (ref_frame_flag == VP9_GOLD_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->gld_fb_idx];
+ else if (ref_frame_flag == VP9_ALT_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->alt_fb_idx];
+ else
+ return -1;
+
+ vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
+
+ return 0;
+}
+
+int vp9_get_reference_enc(VP9_PTR ptr, int index, YV12_BUFFER_CONFIG **fb) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *cm = &cpi->common;
+
+ if (index < 0 || index >= NUM_REF_FRAMES)
+ return -1;
+
+ *fb = &cm->yv12_fb[cm->ref_frame_map[index]];
+ return 0;
+}
+
+int vp9_set_reference_enc(VP9_PTR ptr, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *cm = &cpi->common;
+
+ int ref_fb_idx;
+
+ if (ref_frame_flag == VP9_LAST_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->lst_fb_idx];
+ else if (ref_frame_flag == VP9_GOLD_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->gld_fb_idx];
+ else if (ref_frame_flag == VP9_ALT_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->alt_fb_idx];
+ else
+ return -1;
+
+ vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
+
+ return 0;
+}
+
+int vp9_update_entropy(VP9_PTR comp, int update) {
+ ((VP9_COMP *)comp)->common.refresh_frame_context = update;
+ return 0;
+}
+
+
+#ifdef OUTPUT_YUV_SRC
+void vp9_write_yuv_frame(YV12_BUFFER_CONFIG *s) {
+ uint8_t *src = s->y_buffer;
+ int h = s->y_height;
+
+ do {
+ fwrite(src, s->y_width, 1, yuv_file);
+ src += s->y_stride;
+ } while (--h);
+
+ src = s->u_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ src = s->v_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
+}
+#endif
+
+#ifdef OUTPUT_YUV_REC
+void vp9_write_yuv_rec_frame(VP9_COMMON *cm) {
+ YV12_BUFFER_CONFIG *s = cm->frame_to_show;
+ uint8_t *src = s->y_buffer;
+ int h = cm->height;
+
+ do {
+ fwrite(src, s->y_width, 1, yuv_rec_file);
+ src += s->y_stride;
+ } while (--h);
+
+ src = s->u_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_rec_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ src = s->v_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_rec_file);
+ src += s->uv_stride;
+ } while (--h);
+
+#if CONFIG_ALPHA
+ if (s->alpha_buffer) {
+ src = s->alpha_buffer;
+ h = s->alpha_height;
+ do {
+ fwrite(src, s->alpha_width, 1, yuv_rec_file);
+ src += s->alpha_stride;
+ } while (--h);
+ }
+#endif
+
+ fflush(yuv_rec_file);
+}
+#endif
+
+static void scale_and_extend_frame(YV12_BUFFER_CONFIG *src_fb,
+ YV12_BUFFER_CONFIG *dst_fb) {
+ const int in_w = src_fb->y_crop_width;
+ const int in_h = src_fb->y_crop_height;
+ const int out_w = dst_fb->y_crop_width;
+ const int out_h = dst_fb->y_crop_height;
+ int x, y, i;
+
+ uint8_t *srcs[4] = {src_fb->y_buffer, src_fb->u_buffer, src_fb->v_buffer,
+ src_fb->alpha_buffer};
+ int src_strides[4] = {src_fb->y_stride, src_fb->uv_stride, src_fb->uv_stride,
+ src_fb->alpha_stride};
+
+ uint8_t *dsts[4] = {dst_fb->y_buffer, dst_fb->u_buffer, dst_fb->v_buffer,
+ dst_fb->alpha_buffer};
+ int dst_strides[4] = {dst_fb->y_stride, dst_fb->uv_stride, dst_fb->uv_stride,
+ dst_fb->alpha_stride};
+
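+  // Walk the output in 16x16 tiles. The source position is tracked in
+  // 1/16-pel fixed point: the low 4 bits of x_q4/y_q4 select one of the
+  // 16 8-tap sub-pel filters, and 16 * in_w / out_w is the per-pixel
+  // step. Chroma planes use factor == 2 for the 4:2:0 subsampling.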
+ for (y = 0; y < out_h; y += 16) {
+ for (x = 0; x < out_w; x += 16) {
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ const int factor = i == 0 ? 1 : 2;
+ const int x_q4 = x * (16 / factor) * in_w / out_w;
+ const int y_q4 = y * (16 / factor) * in_h / out_h;
+ const int src_stride = src_strides[i];
+ const int dst_stride = dst_strides[i];
+ uint8_t *src = srcs[i] + y / factor * in_h / out_h * src_stride +
+ x / factor * in_w / out_w;
+ uint8_t *dst = dsts[i] + y / factor * dst_stride + x / factor;
+
+ vp9_convolve8(src, src_stride, dst, dst_stride,
+ vp9_sub_pel_filters_8[x_q4 & 0xf], 16 * in_w / out_w,
+ vp9_sub_pel_filters_8[y_q4 & 0xf], 16 * in_h / out_h,
+ 16 / factor, 16 / factor);
+ }
+ }
+ }
+
+ vp8_yv12_extend_frame_borders(dst_fb);
+}
+
+
+static void update_alt_ref_frame_stats(VP9_COMP *cpi) {
+  // A refresh on this frame means later frames don't refresh unless
+  // explicitly requested by the user.
+ cpi->frames_since_golden = 0;
+
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled)
+#endif
+ // Clear the alternate reference update pending flag.
+ cpi->source_alt_ref_pending = 0;
+
+ // Set the alternate reference frame active flag
+ cpi->source_alt_ref_active = 1;
+}
+static void update_golden_frame_stats(VP9_COMP *cpi) {
+ // Update the Golden frame usage counts.
+ if (cpi->refresh_golden_frame) {
+    // A refresh on this frame means later frames don't refresh unless
+    // explicitly requested by the user.
+ cpi->refresh_golden_frame = 0;
+ cpi->frames_since_golden = 0;
+
+ // ******** Fixed Q test code only ************
+    // If we are going to use the ALT reference for the next group of
+    // frames, set a flag to say so.
+ if (cpi->oxcf.fixed_q >= 0 &&
+ cpi->oxcf.play_alternate && !cpi->refresh_alt_ref_frame) {
+ cpi->source_alt_ref_pending = 1;
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+
+ // TODO(ivan): for SVC encoder, GF automatic update is disabled by using a
+ // large GF_interval
+ if (cpi->use_svc) {
+ cpi->frames_till_gf_update_due = INT_MAX;
+ }
+ }
+
+ if (!cpi->source_alt_ref_pending)
+ cpi->source_alt_ref_active = 0;
+
+ // Decrement count down till next gf
+ if (cpi->frames_till_gf_update_due > 0)
+ cpi->frames_till_gf_update_due--;
+
+ } else if (!cpi->refresh_alt_ref_frame) {
+ // Decrement count down till next gf
+ if (cpi->frames_till_gf_update_due > 0)
+ cpi->frames_till_gf_update_due--;
+
+ if (cpi->frames_till_alt_ref_frame)
+ cpi->frames_till_alt_ref_frame--;
+
+ cpi->frames_since_golden++;
+ }
+}
+
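+// Returns the smallest qindex whose real Q reaches 30.0; this fixed
+// quantizer is what the first pass encodes with.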
+static int find_fp_qindex(void) {
+ int i;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ if (vp9_convert_qindex_to_q(i) >= 30.0) {
+ break;
+ }
+ }
+
+ if (i == QINDEX_RANGE)
+ i--;
+
+ return i;
+}
+
+static void Pass1Encode(VP9_COMP *cpi, unsigned long *size,
+                        unsigned char *dest, unsigned int *frame_flags) {
+ (void) size;
+ (void) dest;
+ (void) frame_flags;
+
+ vp9_set_quantizer(cpi, find_fp_qindex());
+ vp9_first_pass(cpi);
+}
+
+#define WRITE_RECON_BUFFER 0
+#if WRITE_RECON_BUFFER
+void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) {
+
+ // write the frame
+ FILE *yframe;
+ int i;
+ char filename[255];
+
+ sprintf(filename, "cx\\y%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->y_height; i++)
+ fwrite(frame->y_buffer + i * frame->y_stride,
+ frame->y_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "cx\\u%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->u_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "cx\\v%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->v_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+}
+#endif
+
+static double compute_edge_pixel_proportion(YV12_BUFFER_CONFIG *frame) {
+#define EDGE_THRESH 128
+ int i, j;
+ int num_edge_pels = 0;
+ int num_pels = (frame->y_height - 2) * (frame->y_width - 2);
+ uint8_t *prev = frame->y_buffer + 1;
+ uint8_t *curr = frame->y_buffer + 1 + frame->y_stride;
+ uint8_t *next = frame->y_buffer + 1 + 2 * frame->y_stride;
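+  // The v and h terms below are 3x3 Sobel gradients (up to sign):
+  //       [ -1 0 +1 ]            [ +1 +2 +1 ]
+  //   v ~ [ -2 0 +2 ]    and h ~ [  0  0  0 ]
+  //       [ -1 0 +1 ]            [ -1 -2 -1 ]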
+ for (i = 1; i < frame->y_height - 1; i++) {
+ for (j = 1; j < frame->y_width - 1; j++) {
+ /* Sobel hor and ver gradients */
+ int v = 2 * (curr[1] - curr[-1]) + (prev[1] - prev[-1]) + (next[1] - next[-1]);
+ int h = 2 * (prev[0] - next[0]) + (prev[1] - next[1]) + (prev[-1] - next[-1]);
+ h = (h < 0 ? -h : h);
+ v = (v < 0 ? -v : v);
+ if (h > EDGE_THRESH || v > EDGE_THRESH)
+ num_edge_pels++;
+ curr++;
+ prev++;
+ next++;
+ }
+ curr += frame->y_stride - frame->y_width + 2;
+ prev += frame->y_stride - frame->y_width + 2;
+ next += frame->y_stride - frame->y_width + 2;
+ }
+ return (double)num_edge_pels / num_pels;
+}
+
+// Function to test for conditions that indicate we should loop
+// back and recode a frame.
+static int recode_loop_test(VP9_COMP *cpi,
+ int high_limit, int low_limit,
+ int q, int maxq, int minq) {
+ int force_recode = 0;
+ VP9_COMMON *cm = &cpi->common;
+
+  // Is frame recode allowed at all?
+  // Yes if either recode mode 1 is selected, or mode 2 is selected
+  // and the frame is a key frame, golden frame or alt_ref_frame.
+ if ((cpi->sf.recode_loop == 1) ||
+ ((cpi->sf.recode_loop == 2) &&
+ ((cm->frame_type == KEY_FRAME) ||
+ cpi->refresh_golden_frame ||
+ cpi->refresh_alt_ref_frame))) {
+ // General over and under shoot tests
+ if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
+ ((cpi->projected_frame_size < low_limit) && (q > minq))) {
+ force_recode = 1;
+ }
+ // Special Constrained quality tests
+ else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ // Undershoot and below auto cq level
+ if (q > cpi->cq_target_quality &&
+ cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3)) {
+ force_recode = 1;
+ } else if (q > cpi->oxcf.cq_level &&
+ cpi->projected_frame_size < cpi->min_frame_bandwidth &&
+ cpi->active_best_quality > cpi->oxcf.cq_level) {
+ // Severe undershoot and between auto and user cq level
+ force_recode = 1;
+ cpi->active_best_quality = cpi->oxcf.cq_level;
+ }
+ }
+ }
+
+ return force_recode;
+}
+
+static void update_reference_frames(VP9_COMP * const cpi) {
+ VP9_COMMON * const cm = &cpi->common;
+
+ // At this point the new frame has been encoded.
+ // If any buffer copy / swapping is signaled it should be done here.
+ if (cm->frame_type == KEY_FRAME) {
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+ }
+#if CONFIG_MULTIPLE_ARF
+ else if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
+ !cpi->refresh_alt_ref_frame) {
+#else
+ else if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame &&
+ !cpi->use_svc) {
+#endif
+ /* Preserve the previously existing golden frame and update the frame in
+ * the alt ref slot instead. This is highly specific to the current use of
+ * alt-ref as a forward reference, and this needs to be generalized as
+ * other uses are implemented (like RTC/temporal scaling)
+ *
+ * The update to the buffer in the alt ref slot was signaled in
+ * vp9_pack_bitstream(), now swap the buffer pointers so that it's treated
+ * as the golden frame next time.
+ */
+ int tmp;
+
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+
+ tmp = cpi->alt_fb_idx;
+ cpi->alt_fb_idx = cpi->gld_fb_idx;
+ cpi->gld_fb_idx = tmp;
+ } else { /* For non key/golden frames */
+ if (cpi->refresh_alt_ref_frame) {
+ int arf_idx = cpi->alt_fb_idx;
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ arf_idx = cpi->arf_buffer_idx[cpi->sequence_number + 1];
+ }
+#endif
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
+ }
+
+ if (cpi->refresh_golden_frame) {
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
+ }
+ }
+
+ if (cpi->refresh_last_frame) {
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
+ }
+}
+
+static void loopfilter_frame(VP9_COMP *cpi, VP9_COMMON *cm) {
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ struct loopfilter *lf = &cm->lf;
+ if (xd->lossless) {
+ lf->filter_level = 0;
+ } else {
+ struct vpx_usec_timer timer;
+
+ vp9_clear_system_state();
+
+ vpx_usec_timer_start(&timer);
+
+ vp9_pick_filter_level(cpi->Source, cpi, cpi->sf.use_fast_lpf_pick);
+
+ vpx_usec_timer_mark(&timer);
+ cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
+ }
+
+ if (lf->filter_level > 0) {
+ vp9_set_alt_lf_level(cpi, lf->filter_level);
+ vp9_loop_filter_frame(cm, xd, lf->filter_level, 0, 0);
+ }
+
+ vp9_extend_frame_inner_borders(cm->frame_to_show,
+ cm->subsampling_x, cm->subsampling_y);
+}
+
+static void scale_references(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ int i;
+ int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
+ cpi->alt_fb_idx};
+
+ for (i = 0; i < 3; i++) {
+ YV12_BUFFER_CONFIG *ref = &cm->yv12_fb[cm->ref_frame_map[refs[i]]];
+
+ if (ref->y_crop_width != cm->width ||
+ ref->y_crop_height != cm->height) {
+ int new_fb = get_free_fb(cm);
+
+ vp9_realloc_frame_buffer(&cm->yv12_fb[new_fb],
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS);
+ scale_and_extend_frame(ref, &cm->yv12_fb[new_fb]);
+ cpi->scaled_ref_idx[i] = new_fb;
+ } else {
+ cpi->scaled_ref_idx[i] = cm->ref_frame_map[refs[i]];
+ cm->fb_idx_ref_cnt[cm->ref_frame_map[refs[i]]]++;
+ }
+ }
+}
+
+static void release_scaled_references(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ int i;
+
+ for (i = 0; i < 3; i++)
+ cm->fb_idx_ref_cnt[cpi->scaled_ref_idx[i]]--;
+}
+
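+// Folds a full token histogram into the smaller model distribution:
+// ZERO, ONE and TWO keep their own counts, every token from THREE up to
+// (but excluding) DCT_EOB_TOKEN is merged into the TWO_TOKEN bin, and
+// the EOB count maps to DCT_EOB_MODEL_TOKEN.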
+static void full_to_model_count(unsigned int *model_count,
+ unsigned int *full_count) {
+ int n;
+ model_count[ZERO_TOKEN] = full_count[ZERO_TOKEN];
+ model_count[ONE_TOKEN] = full_count[ONE_TOKEN];
+ model_count[TWO_TOKEN] = full_count[TWO_TOKEN];
+ for (n = THREE_TOKEN; n < DCT_EOB_TOKEN; ++n)
+ model_count[TWO_TOKEN] += full_count[n];
+ model_count[DCT_EOB_MODEL_TOKEN] = full_count[DCT_EOB_TOKEN];
+}
+
+static void full_to_model_counts(
+ vp9_coeff_count_model *model_count, vp9_coeff_count *full_count) {
+ int i, j, k, l;
+ for (i = 0; i < BLOCK_TYPES; ++i)
+ for (j = 0; j < REF_TYPES; ++j)
+ for (k = 0; k < COEF_BANDS; ++k)
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ if (l >= 3 && k == 0)
+ continue;
+ full_to_model_count(model_count[i][j][k][l], full_count[i][j][k][l]);
+ }
+}
+
+
+static void encode_frame_to_data_rate(VP9_COMP *cpi,
+ unsigned long *size,
+ unsigned char *dest,
+ unsigned int *frame_flags) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ TX_SIZE t;
+ int q;
+ int frame_over_shoot_limit;
+ int frame_under_shoot_limit;
+
+ int loop = 0;
+ int loop_count;
+
+ int q_low;
+ int q_high;
+
+ int top_index;
+ int bottom_index;
+ int active_worst_qchanged = 0;
+
+ int overshoot_seen = 0;
+ int undershoot_seen = 0;
+
+ SPEED_FEATURES *sf = &cpi->sf;
+ unsigned int max_mv_def = MIN(cpi->common.width, cpi->common.height);
+ struct segmentation *seg = &cm->seg;
+
+ /* Scale the source buffer, if required */
+ if (cm->mi_cols * 8 != cpi->un_scaled_source->y_width ||
+ cm->mi_rows * 8 != cpi->un_scaled_source->y_height) {
+ scale_and_extend_frame(cpi->un_scaled_source, &cpi->scaled_source);
+ cpi->Source = &cpi->scaled_source;
+ } else {
+ cpi->Source = cpi->un_scaled_source;
+ }
+
+ scale_references(cpi);
+
+ // Clear down mmx registers to allow floating point in what follows
+ vp9_clear_system_state();
+
+
+ // For an alt ref frame in 2 pass we skip the call to the second
+ // pass function that sets the target bandwidth so must set it here
+ if (cpi->refresh_alt_ref_frame) {
+ // Per frame bit target for the alt ref frame
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
+ // per second target bitrate
+ cpi->target_bandwidth = (int)(cpi->twopass.gf_bits *
+ cpi->output_framerate);
+ }
+
+ // Clear zbin over-quant value and mode boost values.
+ cpi->zbin_mode_boost = 0;
+
+ // Enable or disable mode based tweaking of the zbin
+ // For 2 Pass Only used where GF/ARF prediction quality
+ // is above a threshold
+ cpi->zbin_mode_boost = 0;
+
+ // if (cpi->oxcf.lossless)
+ cpi->zbin_mode_boost_enabled = 0;
+ // else
+ // cpi->zbin_mode_boost_enabled = 1;
+
+ // Current default encoder behaviour for the altref sign bias
+ cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = cpi->source_alt_ref_active;
+
+ // Check to see if a key frame is signaled
+  // For two pass with auto key frame enabled, cm->frame_type may already
+  // be set, but not for one pass.
+ if ((cm->current_video_frame == 0) ||
+ (cm->frame_flags & FRAMEFLAGS_KEY) ||
+ (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
+ // Key frame from VFW/auto-keyframe/first frame
+ cm->frame_type = KEY_FRAME;
+ }
+
+ // Set default state for segment based loop filter update flags
+ cm->lf.mode_ref_delta_update = 0;
+
+ // Initialize cpi->mv_step_param to default based on max resolution
+ cpi->mv_step_param = vp9_init_search_range(cpi, max_mv_def);
+ // Initialize cpi->max_mv_magnitude and cpi->mv_step_param if appropriate.
+ if (sf->auto_mv_step_size) {
+ if ((cpi->common.frame_type == KEY_FRAME) || cpi->common.intra_only) {
+ // initialize max_mv_magnitude for use in the first INTER frame
+ // after a key/intra-only frame
+ cpi->max_mv_magnitude = max_mv_def;
+ } else {
+ if (cm->show_frame)
+ // allow mv_steps to correspond to twice the max mv magnitude found
+ // in the previous frame, capped by the default max_mv_magnitude based
+ // on resolution
+ cpi->mv_step_param = vp9_init_search_range(
+ cpi, MIN(max_mv_def, 2 * cpi->max_mv_magnitude));
+ cpi->max_mv_magnitude = 0;
+ }
+ }
+
+ // Set various flags etc to special state if it is a key frame
+ if (cm->frame_type == KEY_FRAME) {
+ // Reset the loop filter deltas and segmentation map
+ setup_features(cm);
+
+ // If segmentation is enabled force a map update for key frames
+ if (seg->enabled) {
+ seg->update_map = 1;
+ seg->update_data = 1;
+ }
+
+ // The alternate reference frame cannot be active for a key frame
+ cpi->source_alt_ref_active = 0;
+
+ cm->error_resilient_mode = (cpi->oxcf.error_resilient_mode != 0);
+ cm->frame_parallel_decoding_mode =
+ (cpi->oxcf.frame_parallel_decoding_mode != 0);
+ if (cm->error_resilient_mode) {
+ cm->frame_parallel_decoding_mode = 1;
+ cm->reset_frame_context = 0;
+ cm->refresh_frame_context = 0;
+ }
+ }
+
+ // Configure experimental use of segmentation for enhanced coding of
+ // static regions if indicated.
+ // Only allowed for now in second pass of two pass (as requires lagged coding)
+ // and if the relevant speed feature flag is set.
+ if ((cpi->pass == 2) && (cpi->sf.static_segmentation)) {
+ configure_static_seg_features(cpi);
+ }
+
+ // Decide how big to make the frame
+ vp9_pick_frame_size(cpi);
+
+ vp9_clear_system_state();
+
+ // Set an active best quality and if necessary active worst quality
+ q = cpi->active_worst_quality;
+
+ if (cm->frame_type == KEY_FRAME) {
+#if !CONFIG_MULTIPLE_ARF
+ // Special case for key frames forced because we have reached
+ // the maximum key frame interval. Here force the Q to a range
+ // based on the ambient Q to reduce the risk of popping
+ if (cpi->this_key_frame_forced) {
+ int delta_qindex;
+ int qindex = cpi->last_boosted_qindex;
+ double last_boosted_q = vp9_convert_qindex_to_q(qindex);
+
+ delta_qindex = compute_qdelta(cpi, last_boosted_q,
+ (last_boosted_q * 0.75));
+
+ cpi->active_best_quality = MAX(qindex + delta_qindex,
+ cpi->best_quality);
+ } else {
+ int high = 5000;
+ int low = 400;
+ double q_adj_factor = 1.0;
+ double q_val;
+
+ // Baseline value derived from cpi->active_worst_quality and kf boost
+ if (cpi->kf_boost > high) {
+ cpi->active_best_quality = kf_low_motion_minq[q];
+ } else if (cpi->kf_boost < low) {
+ cpi->active_best_quality = kf_high_motion_minq[q];
+ } else {
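+        // Linearly interpolate between the low- and high-motion minq
+        // tables based on where kf_boost falls in [low, high]; the
+        // (gap >> 1) term rounds the scaled offset to nearest.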
+ const int gap = high - low;
+ const int offset = high - cpi->kf_boost;
+ const int qdiff = kf_high_motion_minq[q] - kf_low_motion_minq[q];
+ const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
+
+ cpi->active_best_quality = kf_low_motion_minq[q] + adjustment;
+ }
+
+ // Allow somewhat lower kf minq with small image formats.
+ if ((cm->width * cm->height) <= (352 * 288)) {
+ q_adj_factor -= 0.25;
+ }
+
+ // Make a further adjustment based on the kf zero motion measure.
+ q_adj_factor += 0.05 - (0.001 * (double)cpi->kf_zeromotion_pct);
+
+ // Convert the adjustment factor to a qindex delta
+ // on active_best_quality.
+ q_val = vp9_convert_qindex_to_q(cpi->active_best_quality);
+ cpi->active_best_quality +=
+ compute_qdelta(cpi, q_val, (q_val * q_adj_factor));
+ }
+#else
+ double current_q;
+ // Force the KF quantizer to be 30% of the active_worst_quality.
+ current_q = vp9_convert_qindex_to_q(cpi->active_worst_quality);
+ cpi->active_best_quality = cpi->active_worst_quality
+ + compute_qdelta(cpi, current_q, current_q * 0.3);
+#endif
+ } else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) {
+ int high = 2000;
+ int low = 400;
+
+ // Use the lower of cpi->active_worst_quality and recent
+ // average Q as basis for GF/ARF Q limit unless last frame was
+ // a key frame.
+ if (cpi->frames_since_key > 1 &&
+ cpi->avg_frame_qindex < cpi->active_worst_quality) {
+ q = cpi->avg_frame_qindex;
+ }
+    // For constrained quality don't allow Q less than the cq level
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY &&
+ q < cpi->cq_target_quality) {
+ q = cpi->cq_target_quality;
+ }
+ if (cpi->gfu_boost > high) {
+ cpi->active_best_quality = gf_low_motion_minq[q];
+ } else if (cpi->gfu_boost < low) {
+ cpi->active_best_quality = gf_high_motion_minq[q];
+ } else {
+ const int gap = high - low;
+ const int offset = high - cpi->gfu_boost;
+ const int qdiff = gf_high_motion_minq[q] - gf_low_motion_minq[q];
+ const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
+
+ cpi->active_best_quality = gf_low_motion_minq[q] + adjustment;
+ }
+
+    // Constrained quality uses a slightly lower active best quality.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
+ cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
+
+ // TODO(debargha): Refine the logic below
+ if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
+ if (!cpi->refresh_alt_ref_frame) {
+ cpi->active_best_quality = cpi->cq_target_quality;
+ } else {
+ if (cpi->frames_since_key > 1) {
+ if (cpi->gfu_boost > high) {
+ cpi->active_best_quality = cpi->cq_target_quality * 6 / 16;
+ } else if (cpi->gfu_boost < low) {
+ cpi->active_best_quality = cpi->cq_target_quality * 11 / 16;
+ } else {
+ const int gap = high - low;
+ const int offset = high - cpi->gfu_boost;
+ const int qdiff = cpi->cq_target_quality * 5 / 16;
+ const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
+ cpi->active_best_quality = cpi->cq_target_quality * 6 / 16
+ + adjustment;
+ }
+ }
+ }
+ }
+ } else {
+ if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
+ cpi->active_best_quality = cpi->cq_target_quality;
+ } else {
+#ifdef ONE_SHOT_Q_ESTIMATE
+#ifdef STRICT_ONE_SHOT_Q
+ cpi->active_best_quality = q;
+#else
+ cpi->active_best_quality = inter_minq[q];
+#endif
+#else
+ cpi->active_best_quality = inter_minq[q];
+#endif
+
+ // For the constant/constrained quality mode we don't want
+ // q to fall below the cq level.
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (cpi->active_best_quality < cpi->cq_target_quality)) {
+ // If we are strongly undershooting the target rate in the last
+ // frames then use the user passed in cq value not the auto
+ // cq value.
+ if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth)
+ cpi->active_best_quality = cpi->oxcf.cq_level;
+ else
+ cpi->active_best_quality = cpi->cq_target_quality;
+ }
+ }
+ }
+
+ // Clip the active best and worst quality values to limits
+ if (cpi->active_worst_quality > cpi->worst_quality)
+ cpi->active_worst_quality = cpi->worst_quality;
+
+ if (cpi->active_best_quality < cpi->best_quality)
+ cpi->active_best_quality = cpi->best_quality;
+
+ if (cpi->active_best_quality > cpi->worst_quality)
+ cpi->active_best_quality = cpi->worst_quality;
+
+ if (cpi->active_worst_quality < cpi->active_best_quality)
+ cpi->active_worst_quality = cpi->active_best_quality;
+
+ // Special case code to try and match quality with forced key frames
+ if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
+ q = cpi->active_best_quality;
+ } else if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
+ q = cpi->last_boosted_qindex;
+ } else {
+ // Determine initial Q to try
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+ }
+
+ vp9_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
+ &frame_over_shoot_limit);
+
+#if CONFIG_MULTIPLE_ARF
+ // Force the quantizer determined by the coding order pattern.
+ if (cpi->multi_arf_enabled && (cm->frame_type != KEY_FRAME) &&
+ cpi->oxcf.end_usage != USAGE_CONSTANT_QUALITY) {
+ double new_q;
+ double current_q = vp9_convert_qindex_to_q(cpi->active_worst_quality);
+ int level = cpi->this_frame_weight;
+ assert(level >= 0);
+
+    // Set quantizer steps at 20% increments (the 0.2 factor below).
+ new_q = current_q * (1.0 - (0.2 * (cpi->max_arf_level - level)));
+ q = cpi->active_worst_quality + compute_qdelta(cpi, current_q, new_q);
+
+ bottom_index = q;
+ top_index = q;
+ q_low = q;
+ q_high = q;
+
+ printf("frame:%d q:%d\n", cm->current_video_frame, q);
+ } else {
+#endif
+ // Limit Q range for the adaptive loop.
+ bottom_index = cpi->active_best_quality;
+ top_index = cpi->active_worst_quality;
+ q_low = cpi->active_best_quality;
+ q_high = cpi->active_worst_quality;
+#if CONFIG_MULTIPLE_ARF
+ }
+#endif
+ loop_count = 0;
+ vp9_zero(cpi->rd_tx_select_threshes);
+
+ if (cm->frame_type != KEY_FRAME) {
+ cm->mcomp_filter_type = DEFAULT_INTERP_FILTER;
+ /* TODO: Decide this more intelligently */
+ xd->allow_high_precision_mv = q < HIGH_PRECISION_MV_QTHRESH;
+ set_mvcost(&cpi->mb);
+ }
+
+#if CONFIG_VP9_POSTPROC
+
+ if (cpi->oxcf.noise_sensitivity > 0) {
+ int l = 0;
+
+ switch (cpi->oxcf.noise_sensitivity) {
+ case 1:
+ l = 20;
+ break;
+ case 2:
+ l = 40;
+ break;
+ case 3:
+ l = 60;
+ break;
+ case 4:
+ case 5:
+ l = 100;
+ break;
+ case 6:
+ l = 150;
+ break;
+ }
+
+ vp9_denoise(cpi->Source, cpi->Source, l);
+ }
+
+#endif
+
+#ifdef OUTPUT_YUV_SRC
+ vp9_write_yuv_frame(cpi->Source);
+#endif
+
+ do {
+ vp9_clear_system_state(); // __asm emms;
+
+ vp9_set_quantizer(cpi, q);
+
+ if (loop_count == 0) {
+
+ // Set up entropy depending on frame type.
+ if (cm->frame_type == KEY_FRAME) {
+ /* Choose which entropy context to use. When using a forward reference
+ * frame, it immediately follows the keyframe, and thus benefits from
+ * using the same entropy context established by the keyframe.
+ * Otherwise, use the default context 0.
+ */
+ cm->frame_context_idx = cpi->oxcf.play_alternate;
+ vp9_setup_key_frame(cpi);
+ } else {
+ /* Choose which entropy context to use. Currently there are only two
+ * contexts used, one for normal frames and one for alt ref frames.
+ */
+ cpi->common.frame_context_idx = cpi->refresh_alt_ref_frame;
+ vp9_setup_inter_frame(cpi);
+ }
+ }
+
+ // transform / motion compensation build reconstruction frame
+
+ vp9_encode_frame(cpi);
+
+ // Update the skip mb flag probabilities based on the distribution
+ // seen in the last encoder iteration.
+ // update_base_skip_probs(cpi);
+
+ vp9_clear_system_state(); // __asm emms;
+
+ // Dummy pack of the bitstream using up to date stats to get an
+ // accurate estimate of output frame size to determine if we need
+ // to recode.
+ vp9_save_coding_context(cpi);
+ cpi->dummy_packing = 1;
+ vp9_pack_bitstream(cpi, dest, size);
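+    // *size is in bytes, so << 3 converts the packed-size estimate to
+    // bits for rate control.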
+ cpi->projected_frame_size = (*size) << 3;
+ vp9_restore_coding_context(cpi);
+
+ if (frame_over_shoot_limit == 0)
+ frame_over_shoot_limit = 1;
+ active_worst_qchanged = 0;
+
+ // Special case handling for forced key frames
+ if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
+ loop = 0;
+ } else {
+ if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
+ int last_q = q;
+ int kf_err = vp9_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx]);
+
+ int high_err_target = cpi->ambient_err;
+ int low_err_target = cpi->ambient_err >> 1;
+
+ // Prevent possible divide by zero error below for perfect KF
+ kf_err += !kf_err;
+
+ // The key frame is not good enough or we can afford
+ // to make it better without undue risk of popping.
+ if ((kf_err > high_err_target &&
+ cpi->projected_frame_size <= frame_over_shoot_limit) ||
+ (kf_err > low_err_target &&
+ cpi->projected_frame_size <= frame_under_shoot_limit)) {
+ // Lower q_high
+ q_high = q > q_low ? q - 1 : q_low;
+
+ // Adjust Q
+ q = (q * high_err_target) / kf_err;
+ q = MIN(q, (q_high + q_low) >> 1);
+ } else if (kf_err < low_err_target &&
+ cpi->projected_frame_size >= frame_under_shoot_limit) {
+ // The key frame is much better than the previous frame
+ // Raise q_low
+ q_low = q < q_high ? q + 1 : q_high;
+
+ // Adjust Q
+ q = (q * low_err_target) / kf_err;
+ q = MIN(q, (q_high + q_low + 1) >> 1);
+ }
+
+ // Clamp Q to upper and lower limits:
+ q = clamp(q, q_low, q_high);
+
+ loop = q != last_q;
+ } else if (recode_loop_test(
+ cpi, frame_over_shoot_limit, frame_under_shoot_limit,
+ q, top_index, bottom_index)) {
+ // Is the projected frame size out of range and are we allowed
+ // to attempt to recode.
+ int last_q = q;
+ int retries = 0;
+
+ // Frame size out of permitted range:
+ // Update correction factor & compute new Q to try...
+
+ // Frame is too large
+ if (cpi->projected_frame_size > cpi->this_frame_target) {
+          // Raise q_low to at least the current value
+ q_low = q < q_high ? q + 1 : q_high;
+
+ if (undershoot_seen || loop_count > 1) {
+ // Update rate_correction_factor unless
+ // cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 1);
+
+ q = (q_high + q_low + 1) / 2;
+ } else {
+ // Update rate_correction_factor unless
+ // cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 0);
+
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+
+ while (q < q_low && retries < 10) {
+ vp9_update_rate_correction_factors(cpi, 0);
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+ retries++;
+ }
+ }
+
+ overshoot_seen = 1;
+ } else {
+ // Frame is too small
+ q_high = q > q_low ? q - 1 : q_low;
+
+ if (overshoot_seen || loop_count > 1) {
+ // Update rate_correction_factor unless
+ // cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 1);
+
+ q = (q_high + q_low) / 2;
+ } else {
+ // Update rate_correction_factor unless
+ // cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 0);
+
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+
+ // Special case reset for qlow for constrained quality.
+ // This should only trigger where there is very substantial
+ // undershoot on a frame and the auto cq level is above
+          // the user passed in value.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY && q < q_low) {
+ q_low = q;
+ }
+
+ while (q > q_high && retries < 10) {
+ vp9_update_rate_correction_factors(cpi, 0);
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+ retries++;
+ }
+ }
+
+ undershoot_seen = 1;
+ }
+
+ // Clamp Q to upper and lower limits:
+ q = clamp(q, q_low, q_high);
+
+ loop = q != last_q;
+ } else {
+ loop = 0;
+ }
+ }
+
+ if (cpi->is_src_frame_alt_ref)
+ loop = 0;
+
+ if (loop) {
+ loop_count++;
+
+#if CONFIG_INTERNAL_STATS
+ cpi->tot_recode_hits++;
+#endif
+ }
+ } while (loop);
+
+  // Special case code to reduce pulsing when key frames are forced at a
+  // fixed interval. Note the reconstruction error if it is the frame
+  // before the forced key frame.
+ if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
+ cpi->ambient_err = vp9_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx]);
+ }
+
+ if (cm->frame_type == KEY_FRAME)
+ cpi->refresh_last_frame = 1;
+
+ cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
+
+#if WRITE_RECON_BUFFER
+ if (cm->show_frame)
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame);
+ else
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 1000);
+#endif
+
+ // Pick the loop filter level for the frame.
+ loopfilter_frame(cpi, cm);
+
+#if WRITE_RECON_BUFFER
+ if (cm->show_frame)
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 2000);
+ else
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 3000);
+#endif
+
+ // build the bitstream
+ cpi->dummy_packing = 0;
+ vp9_pack_bitstream(cpi, dest, size);
+
+ if (cm->seg.update_map)
+ update_reference_segmentation_map(cpi);
+
+ release_scaled_references(cpi);
+ update_reference_frames(cpi);
+
+ for (t = TX_4X4; t <= TX_32X32; t++)
+ full_to_model_counts(cpi->common.counts.coef[t],
+ cpi->coef_counts[t]);
+ if (!cpi->common.error_resilient_mode &&
+ !cpi->common.frame_parallel_decoding_mode) {
+ vp9_adapt_coef_probs(&cpi->common);
+ }
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ FRAME_COUNTS *counts = &cpi->common.counts;
+
+ vp9_copy(counts->y_mode, cpi->y_mode_count);
+ vp9_copy(counts->uv_mode, cpi->y_uv_mode_count);
+ vp9_copy(counts->partition, cpi->partition_count);
+ vp9_copy(counts->intra_inter, cpi->intra_inter_count);
+ vp9_copy(counts->comp_inter, cpi->comp_inter_count);
+ vp9_copy(counts->single_ref, cpi->single_ref_count);
+ vp9_copy(counts->comp_ref, cpi->comp_ref_count);
+ counts->mv = cpi->NMVcount;
+ if (!cpi->common.error_resilient_mode &&
+ !cpi->common.frame_parallel_decoding_mode) {
+ vp9_adapt_mode_probs(&cpi->common);
+ vp9_adapt_mv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
+ }
+ }
+
+#ifdef ENTROPY_STATS
+ vp9_update_mode_context_stats(cpi);
+#endif
+
+ /* Move storing frame_type out of the above loop since it is also
+ * needed in motion search besides loopfilter */
+ cm->last_frame_type = cm->frame_type;
+
+ // Update rate control heuristics
+ cpi->total_byte_count += (*size);
+ cpi->projected_frame_size = (*size) << 3;
+
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 2);
+
+ cpi->last_q[cm->frame_type] = cm->base_qindex;
+
+  // Keep record of last boosted (KF/GF/ARF) Q value.
+ // If the current frame is coded at a lower Q then we also update it.
+ // If all mbs in this group are skipped only update if the Q value is
+ // better than that already stored.
+ // This is used to help set quality in forced key frames to reduce popping
+ if ((cm->base_qindex < cpi->last_boosted_qindex) ||
+ ((cpi->static_mb_pct < 100) &&
+ ((cm->frame_type == KEY_FRAME) ||
+ cpi->refresh_alt_ref_frame ||
+ (cpi->refresh_golden_frame && !cpi->is_src_frame_alt_ref)))) {
+ cpi->last_boosted_qindex = cm->base_qindex;
+ }
+
+ if (cm->frame_type == KEY_FRAME) {
+ vp9_adjust_key_frame_context(cpi);
+ }
+
+ // Keep a record of ambient average Q.
+ if (cm->frame_type != KEY_FRAME)
+    cpi->avg_frame_qindex =
+        (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
+
+  // Keep a record from which we can calculate the average Q excluding
+  // GF updates and key frames
+ if (cm->frame_type != KEY_FRAME &&
+ !cpi->refresh_golden_frame &&
+ !cpi->refresh_alt_ref_frame) {
+ cpi->ni_frames++;
+ cpi->tot_q += vp9_convert_qindex_to_q(q);
+ cpi->avg_q = cpi->tot_q / (double)cpi->ni_frames;
+
+ // Calculate the average Q for normal inter frames (not key or GFU frames).
+ cpi->ni_tot_qi += q;
+ cpi->ni_av_qi = cpi->ni_tot_qi / cpi->ni_frames;
+ }
+
+ // Update the buffer level variable.
+ // Non-viewable frames are a special case and are treated as pure overhead.
+ if (!cm->show_frame)
+ cpi->bits_off_target -= cpi->projected_frame_size;
+ else
+    cpi->bits_off_target +=
+        cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
+
+ // Clip the buffer level at the maximum buffer size
+ if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
+ cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
+
+  // Rolling monitors of whether we are over- or under-spending, used to
+  // help regulate min and max Q in two pass.
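+  // Each is an exponential moving average, e.g. the short-term monitors
+  // use new = (3 * old + sample + 2) / 4 (3/4 decay; the +2 rounds to
+  // nearest), while the long-term ones use a 31/32 decay.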
+ if (cm->frame_type != KEY_FRAME) {
+ cpi->rolling_target_bits =
+ ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
+ cpi->rolling_actual_bits =
+ ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
+ cpi->long_rolling_target_bits =
+ ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
+ cpi->long_rolling_actual_bits =
+ ((cpi->long_rolling_actual_bits * 31) +
+ cpi->projected_frame_size + 16) / 32;
+ }
+
+ // Actual bits spent
+ cpi->total_actual_bits += cpi->projected_frame_size;
+
+ // Debug stats
+ cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size);
+
+ cpi->buffer_level = cpi->bits_off_target;
+
+ // Update bits left to the kf and gf groups to account for overshoot or undershoot on these frames
+ if (cm->frame_type == KEY_FRAME) {
+ cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
+
+ cpi->twopass.kf_group_bits = MAX(cpi->twopass.kf_group_bits, 0);
+ } else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) {
+ cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
+
+ cpi->twopass.gf_group_bits = MAX(cpi->twopass.gf_group_bits, 0);
+ }
+
+ // Update the skip mb flag probabilities based on the distribution seen
+ // in this frame.
+ // update_base_skip_probs(cpi);
+
+#if CONFIG_INTERNAL_STATS
+ {
+ FILE *f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
+ int recon_err;
+
+ vp9_clear_system_state(); // __asm emms;
+
+ recon_err = vp9_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx]);
+
+ if (cpi->twopass.total_left_stats.coded_error != 0.0)
+ fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d %10d"
+ "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f"
+ "%6d %6d %5d %5d %5d %8.2f %10d %10.3f"
+ "%10.3f %8d %10d %10d %10d\n",
+ cpi->common.current_video_frame, cpi->this_frame_target,
+ cpi->projected_frame_size, 0, //loop_size_estimate,
+ (cpi->projected_frame_size - cpi->this_frame_target),
+ (int)cpi->total_target_vs_actual,
+ (int)(cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
+ (int)cpi->total_actual_bits,
+ cm->base_qindex,
+ vp9_convert_qindex_to_q(cm->base_qindex),
+ (double)vp9_dc_quant(cm->base_qindex, 0) / 4.0,
+ vp9_convert_qindex_to_q(cpi->active_best_quality),
+ vp9_convert_qindex_to_q(cpi->active_worst_quality),
+ cpi->avg_q,
+ vp9_convert_qindex_to_q(cpi->ni_av_qi),
+ vp9_convert_qindex_to_q(cpi->cq_target_quality),
+ cpi->refresh_last_frame,
+ cpi->refresh_golden_frame, cpi->refresh_alt_ref_frame,
+ cm->frame_type, cpi->gfu_boost,
+ cpi->twopass.est_max_qcorrection_factor,
+ (int)cpi->twopass.bits_left,
+ cpi->twopass.total_left_stats.coded_error,
+ (double)cpi->twopass.bits_left /
+ cpi->twopass.total_left_stats.coded_error,
+ cpi->tot_recode_hits, recon_err, cpi->kf_boost,
+ cpi->kf_zeromotion_pct);
+ else
+ fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d %10d"
+ "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f"
+ "%5d %5d %5d %8d %8d %8.2f %10d %10.3f"
+ "%8d %10d %10d %10d\n",
+ cpi->common.current_video_frame,
+ cpi->this_frame_target, cpi->projected_frame_size,
+ 0, //loop_size_estimate,
+ (cpi->projected_frame_size - cpi->this_frame_target),
+ (int)cpi->total_target_vs_actual,
+ (int)(cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
+ (int)cpi->total_actual_bits,
+ cm->base_qindex,
+ vp9_convert_qindex_to_q(cm->base_qindex),
+ (double)vp9_dc_quant(cm->base_qindex, 0) / 4.0,
+ vp9_convert_qindex_to_q(cpi->active_best_quality),
+ vp9_convert_qindex_to_q(cpi->active_worst_quality),
+ cpi->avg_q,
+ vp9_convert_qindex_to_q(cpi->ni_av_qi),
+ vp9_convert_qindex_to_q(cpi->cq_target_quality),
+ cpi->refresh_last_frame,
+ cpi->refresh_golden_frame, cpi->refresh_alt_ref_frame,
+ cm->frame_type, cpi->gfu_boost,
+ cpi->twopass.est_max_qcorrection_factor,
+ (int)cpi->twopass.bits_left,
+ cpi->twopass.total_left_stats.coded_error,
+ cpi->tot_recode_hits, recon_err, cpi->kf_boost,
+ cpi->kf_zeromotion_pct);
+
+ fclose(f);
+
+ if (0) {
+ FILE *fmodes = fopen("Modes.stt", "a");
+ int i;
+
+ fprintf(fmodes, "%6d:%1d:%1d:%1d ",
+ cpi->common.current_video_frame,
+ cm->frame_type, cpi->refresh_golden_frame,
+ cpi->refresh_alt_ref_frame);
+
+ for (i = 0; i < MAX_MODES; i++)
+ fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]);
+
+ fprintf(fmodes, "\n");
+
+ fclose(fmodes);
+ }
+ }
+
+#endif
+
+#if 0
+ // Debug stats for segment feature experiments.
+ print_seg_map(cpi);
+#endif
+
+  // If this was a KF or GF, note the Q.
+ if ((cm->frame_type == KEY_FRAME)
+ || cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
+ cm->last_kf_gf_q = cm->base_qindex;
+
+ if (cpi->refresh_golden_frame == 1)
+ cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
+ else
+    cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
+
+ if (cpi->refresh_alt_ref_frame == 1)
+ cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
+ else
+    cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
+
+
+ if (cpi->refresh_last_frame & cpi->refresh_golden_frame)
+ cpi->gold_is_last = 1;
+ else if (cpi->refresh_last_frame ^ cpi->refresh_golden_frame)
+ cpi->gold_is_last = 0;
+
+ if (cpi->refresh_last_frame & cpi->refresh_alt_ref_frame)
+ cpi->alt_is_last = 1;
+ else if (cpi->refresh_last_frame ^ cpi->refresh_alt_ref_frame)
+ cpi->alt_is_last = 0;
+
+ if (cpi->refresh_alt_ref_frame & cpi->refresh_golden_frame)
+ cpi->gold_is_alt = 1;
+ else if (cpi->refresh_alt_ref_frame ^ cpi->refresh_golden_frame)
+ cpi->gold_is_alt = 0;
+
+ cpi->ref_frame_flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;
+
+ if (cpi->gold_is_last)
+ cpi->ref_frame_flags &= ~VP9_GOLD_FLAG;
+
+ if (cpi->alt_is_last)
+ cpi->ref_frame_flags &= ~VP9_ALT_FLAG;
+
+ if (cpi->gold_is_alt)
+ cpi->ref_frame_flags &= ~VP9_ALT_FLAG;
+
+ if (cpi->oxcf.play_alternate && cpi->refresh_alt_ref_frame
+ && (cm->frame_type != KEY_FRAME))
+ // Update the alternate reference frame stats as appropriate.
+ update_alt_ref_frame_stats(cpi);
+ else
+ // Update the Golden frame stats as appropriate.
+ update_golden_frame_stats(cpi);
+
+ if (cm->frame_type == KEY_FRAME) {
+ // Tell the caller that the frame was coded as a key frame
+ *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
+
+#if CONFIG_MULTIPLE_ARF
+ // Reset the sequence number.
+ if (cpi->multi_arf_enabled) {
+ cpi->sequence_number = 0;
+ cpi->frame_coding_order_period = cpi->new_frame_coding_order_period;
+ cpi->new_frame_coding_order_period = -1;
+ }
+#endif
+
+    // As this frame is a key frame, the next defaults to an inter frame.
+ cm->frame_type = INTER_FRAME;
+ } else {
+    *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
+
+#if CONFIG_MULTIPLE_ARF
+ /* Increment position in the coded frame sequence. */
+ if (cpi->multi_arf_enabled) {
+ ++cpi->sequence_number;
+ if (cpi->sequence_number >= cpi->frame_coding_order_period) {
+ cpi->sequence_number = 0;
+ cpi->frame_coding_order_period = cpi->new_frame_coding_order_period;
+ cpi->new_frame_coding_order_period = -1;
+ }
+ cpi->this_frame_weight = cpi->arf_weight[cpi->sequence_number];
+ assert(cpi->this_frame_weight >= 0);
+ }
+#endif
+ }
+
+  // Clear the one-shot update flags for the segmentation map and
+  // mode/ref loop filter deltas.
+ cm->seg.update_map = 0;
+ cm->seg.update_data = 0;
+ cm->lf.mode_ref_delta_update = 0;
+
+ // keep track of the last coded dimensions
+ cm->last_width = cm->width;
+ cm->last_height = cm->height;
+
+ // reset to normal state now that we are done.
+ cm->last_show_frame = cm->show_frame;
+ if (cm->show_frame) {
+ // current mip will be the prev_mip for the next frame
+ MODE_INFO *temp = cm->prev_mip;
+ MODE_INFO **temp2 = cm->prev_mi_grid_base;
+ cm->prev_mip = cm->mip;
+ cm->mip = temp;
+ cm->prev_mi_grid_base = cm->mi_grid_base;
+ cm->mi_grid_base = temp2;
+
+ // update the upper left visible macroblock ptrs
+ cm->mi = cm->mip + cm->mode_info_stride + 1;
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1;
+
+      // Don't increment frame counters if this was an altref buffer
+      // update, not a real frame.
+ ++cm->current_video_frame;
+ ++cpi->frames_since_key;
+ }
+ // restore prev_mi
+ cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
+ cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1;
+
+#if 0
+ {
+ char filename[512];
+ FILE *recon_file;
+ sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
+ recon_file = fopen(filename, "wb");
+ fwrite(cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]].buffer_alloc,
+ cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]].frame_size,
+ 1, recon_file);
+ fclose(recon_file);
+ }
+#endif
+#ifdef OUTPUT_YUV_REC
+ vp9_write_yuv_rec_frame(cm);
+#endif
+
+}
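
The gold_is_last / alt_is_last / gold_is_alt updates near the end of the function above exploit that the refresh flags are 0/1 values: a & b fires when both buffers were refreshed from this frame (they now alias), a ^ b fires when exactly one was (any aliasing is broken), and when neither fires the flag is left alone. A standalone sketch of that truth table (hypothetical helper, not part of the patch):

    #include <stdio.h>

    /* For 0/1 flags: both refreshed -> alias; exactly one refreshed ->
       no longer aliased; neither -> keep the previous answer. */
    static int update_alias(int alias, int refresh_a, int refresh_b) {
      if (refresh_a & refresh_b)
        return 1;
      else if (refresh_a ^ refresh_b)
        return 0;
      return alias;
    }

    int main(void) {
      printf("%d\n", update_alias(0, 1, 1));  /* 1: refreshed together */
      printf("%d\n", update_alias(1, 1, 0));  /* 0: diverged */
      printf("%d\n", update_alias(1, 0, 0));  /* 1: untouched, still aliased */
      return 0;
    }
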
+
+static void Pass2Encode(VP9_COMP *cpi, unsigned long *size,
+ unsigned char *dest, unsigned int *frame_flags) {
+
+ cpi->enable_encode_breakout = 1;
+
+ if (!cpi->refresh_alt_ref_frame)
+ vp9_second_pass(cpi);
+
+ encode_frame_to_data_rate(cpi, size, dest, frame_flags);
+ // vp9_print_modes_and_motion_vectors(&cpi->common, "encode.stt");
+#ifdef DISABLE_RC_LONG_TERM_MEM
+ cpi->twopass.bits_left -= cpi->this_frame_target;
+#else
+ cpi->twopass.bits_left -= 8 * *size;
+#endif
+
+ if (!cpi->refresh_alt_ref_frame) {
+ double lower_bounds_min_rate = FRAME_OVERHEAD_BITS * cpi->oxcf.framerate;
+ double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
+ * cpi->oxcf.two_pass_vbrmin_section / 100);
+
+ if (two_pass_min_rate < lower_bounds_min_rate)
+ two_pass_min_rate = lower_bounds_min_rate;
+
+ cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->oxcf.framerate);
+ }
+}
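
Pass2Encode credits back a per-frame share of the guaranteed minimum section rate, floored by a fixed per-frame overhead. A standalone sketch of that computation (the FRAME_OVERHEAD_BITS value here is assumed for illustration, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAME_OVERHEAD_BITS 200  /* assumed value, for illustration */

    static int64_t min_rate_credit(double framerate, double target_bandwidth,
                                   double vbrmin_section_pct) {
      const double lower_bound = FRAME_OVERHEAD_BITS * framerate;
      double min_rate = target_bandwidth * vbrmin_section_pct / 100.0;
      if (min_rate < lower_bound)
        min_rate = lower_bound;
      /* Per-frame share credited back to twopass.bits_left. */
      return (int64_t)(min_rate / framerate);
    }

    int main(void) {
      /* 30 fps, 500 kbps, 1% minimum section rate: the overhead floor
         (200 * 30 = 6000 bps) beats 5000 bps, so 200 bits per frame. */
      printf("%lld\n", (long long)min_rate_credit(30.0, 500000.0, 1.0));
      return 0;
    }
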
+
+static void check_initial_width(VP9_COMP *cpi, YV12_BUFFER_CONFIG *sd) {
+ VP9_COMMON *cm = &cpi->common;
+ if (!cpi->initial_width) {
+ // TODO(jkoleszar): Support 1/4 subsampling?
+ cm->subsampling_x = (sd != NULL) && sd->uv_width < sd->y_width;
+ cm->subsampling_y = (sd != NULL) && sd->uv_height < sd->y_height;
+ alloc_raw_frame_buffers(cpi);
+
+ cpi->initial_width = cm->width;
+ cpi->initial_height = cm->height;
+ }
+}
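
The subsampling flags above are inferred purely from the raw frame's geometry: a chroma plane narrower or shorter than luma implies subsampling in that direction. A self-contained check (hypothetical struct standing in for YV12_BUFFER_CONFIG):

    #include <stdio.h>

    struct frame_dims { int y_width, y_height, uv_width, uv_height; };

    int main(void) {
      const struct frame_dims sd = {352, 288, 176, 144};  /* 4:2:0 */
      const int ss_x = sd.uv_width < sd.y_width;
      const int ss_y = sd.uv_height < sd.y_height;
      printf("subsampling_x=%d subsampling_y=%d\n", ss_x, ss_y);  /* 1 1 */
      return 0;
    }
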
+
+
+int vp9_receive_raw_frame(VP9_PTR ptr, unsigned int frame_flags,
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time) {
+ VP9_COMP *cpi = (VP9_COMP *) ptr;
+ struct vpx_usec_timer timer;
+ int res = 0;
+
+ check_initial_width(cpi, sd);
+ vpx_usec_timer_start(&timer);
+ if (vp9_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
+ cpi->active_map_enabled ? cpi->active_map : NULL))
+ res = -1;
+ vpx_usec_timer_mark(&timer);
+ cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
+
+ return res;
+}
+
+
+static int frame_is_reference(const VP9_COMP *cpi) {
+ const VP9_COMMON *cm = &cpi->common;
+
+ return cm->frame_type == KEY_FRAME ||
+ cpi->refresh_last_frame ||
+ cpi->refresh_golden_frame ||
+ cpi->refresh_alt_ref_frame ||
+ cm->refresh_frame_context ||
+ cm->lf.mode_ref_delta_update ||
+ cm->seg.update_map ||
+ cm->seg.update_data;
+}
+
+#if CONFIG_MULTIPLE_ARF
+int is_next_frame_arf(VP9_COMP *cpi) {
+ // Negative entry in frame_coding_order indicates an ARF at this position.
+ return cpi->frame_coding_order[cpi->sequence_number + 1] < 0 ? 1 : 0;
+}
+#endif
+
+int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
+ unsigned long *size, unsigned char *dest,
+ int64_t *time_stamp, int64_t *time_end, int flush) {
+ VP9_COMP *cpi = (VP9_COMP *) ptr;
+ VP9_COMMON *cm = &cpi->common;
+ struct vpx_usec_timer cmptimer;
+ YV12_BUFFER_CONFIG *force_src_buffer = NULL;
+ int i;
+ // FILE *fp_out = fopen("enc_frame_type.txt", "a");
+
+ if (!cpi)
+ return -1;
+
+ vpx_usec_timer_start(&cmptimer);
+
+ cpi->source = NULL;
+
+ cpi->mb.e_mbd.allow_high_precision_mv = ALTREF_HIGH_PRECISION_MV;
+ set_mvcost(&cpi->mb);
+
+  // Should we code an alternate reference frame?
+ if (cpi->oxcf.play_alternate && cpi->source_alt_ref_pending) {
+ int frames_to_arf;
+
+#if CONFIG_MULTIPLE_ARF
+ assert(!cpi->multi_arf_enabled ||
+ cpi->frame_coding_order[cpi->sequence_number] < 0);
+
+ if (cpi->multi_arf_enabled && (cpi->pass == 2))
+ frames_to_arf = (-cpi->frame_coding_order[cpi->sequence_number])
+ - cpi->next_frame_in_order;
+ else
+#endif
+ frames_to_arf = cpi->frames_till_gf_update_due;
+
+ assert(frames_to_arf < cpi->twopass.frames_to_key);
+
+ if ((cpi->source = vp9_lookahead_peek(cpi->lookahead, frames_to_arf))) {
+#if CONFIG_MULTIPLE_ARF
+ cpi->alt_ref_source[cpi->arf_buffered] = cpi->source;
+#else
+ cpi->alt_ref_source = cpi->source;
+#endif
+
+ if (cpi->oxcf.arnr_max_frames > 0) {
+ // Produce the filtered ARF frame.
+ // TODO(agrange) merge these two functions.
+ configure_arnr_filter(cpi, cm->current_video_frame + frames_to_arf,
+ cpi->gfu_boost);
+ vp9_temporal_filter_prepare(cpi, frames_to_arf);
+ vp9_extend_frame_borders(&cpi->alt_ref_buffer,
+ cm->subsampling_x, cm->subsampling_y);
+ force_src_buffer = &cpi->alt_ref_buffer;
+ }
+
+ cm->show_frame = 0;
+ cm->intra_only = 0;
+ cpi->refresh_alt_ref_frame = 1;
+ cpi->refresh_golden_frame = 0;
+ cpi->refresh_last_frame = 0;
+ cpi->is_src_frame_alt_ref = 0;
+
+ // TODO(agrange) This needs to vary depending on where the next ARF is.
+ cpi->frames_till_alt_ref_frame = frames_to_arf;
+
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled)
+#endif
+      cpi->source_alt_ref_pending = 0;  // Clear the pending alt ref flag.
+ }
+ }
+
+ if (!cpi->source) {
+#if CONFIG_MULTIPLE_ARF
+ int i;
+#endif
+ if ((cpi->source = vp9_lookahead_pop(cpi->lookahead, flush))) {
+ cm->show_frame = 1;
+
+#if CONFIG_MULTIPLE_ARF
+      // Is this frame the ARF overlay?
+ cpi->is_src_frame_alt_ref = 0;
+ for (i = 0; i < cpi->arf_buffered; ++i) {
+ if (cpi->source == cpi->alt_ref_source[i]) {
+ cpi->is_src_frame_alt_ref = 1;
+ cpi->refresh_golden_frame = 1;
+ break;
+ }
+ }
+#else
+ cpi->is_src_frame_alt_ref = cpi->alt_ref_source
+ && (cpi->source == cpi->alt_ref_source);
+#endif
+ if (cpi->is_src_frame_alt_ref) {
+ // Current frame is an ARF overlay frame.
+#if CONFIG_MULTIPLE_ARF
+ cpi->alt_ref_source[i] = NULL;
+#else
+ cpi->alt_ref_source = NULL;
+#endif
+ // Don't refresh the last buffer for an ARF overlay frame. It will
+ // become the GF so preserve last as an alternative prediction option.
+ cpi->refresh_last_frame = 0;
+ }
+#if CONFIG_MULTIPLE_ARF
+ ++cpi->next_frame_in_order;
+#endif
+ }
+ }
+
+ if (cpi->source) {
+ cpi->un_scaled_source = cpi->Source = force_src_buffer ? force_src_buffer
+ : &cpi->source->img;
+ *time_stamp = cpi->source->ts_start;
+ *time_end = cpi->source->ts_end;
+ *frame_flags = cpi->source->flags;
+
+ // fprintf(fp_out, " Frame:%d", cm->current_video_frame);
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ // fprintf(fp_out, " seq_no:%d this_frame_weight:%d",
+ // cpi->sequence_number, cpi->this_frame_weight);
+ } else {
+ // fprintf(fp_out, "\n");
+ }
+#else
+ // fprintf(fp_out, "\n");
+#endif
+
+#if CONFIG_MULTIPLE_ARF
+ if ((cm->frame_type != KEY_FRAME) && (cpi->pass == 2))
+ cpi->source_alt_ref_pending = is_next_frame_arf(cpi);
+#endif
+ } else {
+ *size = 0;
+ if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
+ vp9_end_first_pass(cpi); /* get last stats packet */
+ cpi->twopass.first_pass_done = 1;
+ }
+
+ // fclose(fp_out);
+ return -1;
+ }
+
+ if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
+ cpi->first_time_stamp_ever = cpi->source->ts_start;
+ cpi->last_end_time_stamp_seen = cpi->source->ts_start;
+ }
+
+ // adjust frame rates based on timestamps given
+ if (!cpi->refresh_alt_ref_frame) {
+ int64_t this_duration;
+ int step = 0;
+
+ if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
+ this_duration = cpi->source->ts_end - cpi->source->ts_start;
+ step = 1;
+ } else {
+ int64_t last_duration = cpi->last_end_time_stamp_seen
+ - cpi->last_time_stamp_seen;
+
+ this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
+
+ // do a step update if the duration changes by 10%
+ if (last_duration)
+ step = (int)((this_duration - last_duration) * 10 / last_duration);
+ }
+
+ if (this_duration) {
+ if (step) {
+ vp9_new_framerate(cpi, 10000000.0 / this_duration);
+ } else {
+ // Average this frame's rate into the last second's average
+ // frame rate. If we haven't seen 1 second yet, then average
+ // over the whole interval seen.
+ const double interval = MIN((double)(cpi->source->ts_end
+ - cpi->first_time_stamp_ever), 10000000.0);
+ double avg_duration = 10000000.0 / cpi->oxcf.framerate;
+ avg_duration *= (interval - avg_duration + this_duration);
+ avg_duration /= interval;
+
+ vp9_new_framerate(cpi, 10000000.0 / avg_duration);
+ }
+ }
+
+ cpi->last_time_stamp_seen = cpi->source->ts_start;
+ cpi->last_end_time_stamp_seen = cpi->source->ts_end;
+ }
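
Timestamps here are in units of 1/10,000,000 of a second, and the else-branch folds the new frame's duration into an average taken over at most the last second of input. A standalone sketch of one smoothing step (hypothetical names, mirroring the arithmetic above):

    #include <stdio.h>

    #define TICKS_PER_SEC 10000000.0

    static double smooth_framerate(double cur_framerate, double interval,
                                   double this_duration) {
      double avg_duration = TICKS_PER_SEC / cur_framerate;
      if (interval > TICKS_PER_SEC)
        interval = TICKS_PER_SEC;  /* the MIN() above */
      avg_duration *= interval - avg_duration + this_duration;
      avg_duration /= interval;
      return TICKS_PER_SEC / avg_duration;
    }

    int main(void) {
      /* A nominal 30 fps stream sees one frame with a 25 fps duration;
         the average moves only slightly towards 25 (~29.80 fps). */
      printf("%.3f fps\n",
             smooth_framerate(30.0, TICKS_PER_SEC, TICKS_PER_SEC / 25.0));
      return 0;
    }
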
+
+  // Start with a zero-size frame.
+ *size = 0;
+
+ // Clear down mmx registers
+ vp9_clear_system_state(); // __asm emms;
+
+ /* find a free buffer for the new frame, releasing the reference previously
+ * held.
+ */
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+ cm->new_fb_idx = get_free_fb(cm);
+
+#if CONFIG_MULTIPLE_ARF
+ /* Set up the correct ARF frame. */
+ if (cpi->refresh_alt_ref_frame) {
+ ++cpi->arf_buffered;
+ }
+ if (cpi->multi_arf_enabled && (cm->frame_type != KEY_FRAME) &&
+ (cpi->pass == 2)) {
+ cpi->alt_fb_idx = cpi->arf_buffer_idx[cpi->sequence_number];
+ }
+#endif
+
+ /* Get the mapping of L/G/A to the reference buffer pool */
+ cm->active_ref_idx[0] = cm->ref_frame_map[cpi->lst_fb_idx];
+ cm->active_ref_idx[1] = cm->ref_frame_map[cpi->gld_fb_idx];
+ cm->active_ref_idx[2] = cm->ref_frame_map[cpi->alt_fb_idx];
+
+#if 0 // CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ fprintf(fp_out, " idx(%d, %d, %d, %d) active(%d, %d, %d)",
+ cpi->lst_fb_idx, cpi->gld_fb_idx, cpi->alt_fb_idx, cm->new_fb_idx,
+ cm->active_ref_idx[0], cm->active_ref_idx[1], cm->active_ref_idx[2]);
+ if (cpi->refresh_alt_ref_frame)
+ fprintf(fp_out, " type:ARF");
+ if (cpi->is_src_frame_alt_ref)
+ fprintf(fp_out, " type:OVERLAY[%d]", cpi->alt_fb_idx);
+ fprintf(fp_out, "\n");
+ }
+#endif
+
+ cm->frame_type = INTER_FRAME;
+ cm->frame_flags = *frame_flags;
+
+ // Reset the frame pointers to the current frame size
+ vp9_realloc_frame_buffer(&cm->yv12_fb[cm->new_fb_idx],
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS);
+
+ // Calculate scaling factors for each of the 3 available references
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
+ vp9_setup_scale_factors(cm, i);
+
+ vp9_setup_interp_filters(&cpi->mb.e_mbd, DEFAULT_INTERP_FILTER, cm);
+
+ if (cpi->pass == 1) {
+ Pass1Encode(cpi, size, dest, frame_flags);
+ } else if (cpi->pass == 2) {
+ Pass2Encode(cpi, size, dest, frame_flags);
+ } else {
+ encode_frame_to_data_rate(cpi, size, dest, frame_flags);
+ }
+
+ if (cm->refresh_frame_context)
+ cm->frame_contexts[cm->frame_context_idx] = cm->fc;
+
+ if (*size > 0) {
+    // If it's a dropped frame, honor the requests on subsequent frames.
+ cpi->droppable = !frame_is_reference(cpi);
+
+ // return to normal state
+ cm->reset_frame_context = 0;
+ cm->refresh_frame_context = 1;
+ cpi->refresh_alt_ref_frame = 0;
+ cpi->refresh_golden_frame = 0;
+ cpi->refresh_last_frame = 1;
+ cm->frame_type = INTER_FRAME;
+ }
+
+ vpx_usec_timer_mark(&cmptimer);
+ cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
+
+ if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame)
+ generate_psnr_packet(cpi);
+
+#if CONFIG_INTERNAL_STATS
+
+ if (cpi->pass != 1) {
+ cpi->bytes += *size;
+
+ if (cm->show_frame) {
+
+ cpi->count++;
+
+ if (cpi->b_calculate_psnr) {
+ double ye, ue, ve;
+ double frame_psnr;
+ YV12_BUFFER_CONFIG *orig = cpi->Source;
+ YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+ YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
+ int y_samples = orig->y_height * orig->y_width;
+ int uv_samples = orig->uv_height * orig->uv_width;
+ int t_samples = y_samples + 2 * uv_samples;
+ double sq_error;
+
+ ye = (double)calc_plane_error(orig->y_buffer, orig->y_stride,
+ recon->y_buffer, recon->y_stride,
+ orig->y_crop_width, orig->y_crop_height);
+
+ ue = (double)calc_plane_error(orig->u_buffer, orig->uv_stride,
+ recon->u_buffer, recon->uv_stride,
+ orig->uv_crop_width, orig->uv_crop_height);
+
+ ve = (double)calc_plane_error(orig->v_buffer, orig->uv_stride,
+ recon->v_buffer, recon->uv_stride,
+ orig->uv_crop_width, orig->uv_crop_height);
+
+ sq_error = ye + ue + ve;
+
+ frame_psnr = vp9_mse2psnr(t_samples, 255.0, sq_error);
+
+ cpi->total_y += vp9_mse2psnr(y_samples, 255.0, ye);
+ cpi->total_u += vp9_mse2psnr(uv_samples, 255.0, ue);
+ cpi->total_v += vp9_mse2psnr(uv_samples, 255.0, ve);
+ cpi->total_sq_error += sq_error;
+ cpi->total += frame_psnr;
+ {
+ double frame_psnr2, frame_ssim2 = 0;
+ double weight = 0;
+#if CONFIG_VP9_POSTPROC
+ vp9_deblock(cm->frame_to_show, &cm->post_proc_buffer,
+ cm->lf.filter_level * 10 / 6);
+#endif
+ vp9_clear_system_state();
+
+ ye = (double)calc_plane_error(orig->y_buffer, orig->y_stride,
+ pp->y_buffer, pp->y_stride,
+ orig->y_crop_width, orig->y_crop_height);
+
+ ue = (double)calc_plane_error(orig->u_buffer, orig->uv_stride,
+ pp->u_buffer, pp->uv_stride,
+ orig->uv_crop_width, orig->uv_crop_height);
+
+ ve = (double)calc_plane_error(orig->v_buffer, orig->uv_stride,
+ pp->v_buffer, pp->uv_stride,
+ orig->uv_crop_width, orig->uv_crop_height);
+
+ sq_error = ye + ue + ve;
+
+ frame_psnr2 = vp9_mse2psnr(t_samples, 255.0, sq_error);
+
+ cpi->totalp_y += vp9_mse2psnr(y_samples, 255.0, ye);
+ cpi->totalp_u += vp9_mse2psnr(uv_samples, 255.0, ue);
+ cpi->totalp_v += vp9_mse2psnr(uv_samples, 255.0, ve);
+ cpi->total_sq_error2 += sq_error;
+ cpi->totalp += frame_psnr2;
+
+ frame_ssim2 = vp9_calc_ssim(cpi->Source,
+ recon, 1, &weight);
+
+ cpi->summed_quality += frame_ssim2 * weight;
+ cpi->summed_weights += weight;
+
+ frame_ssim2 = vp9_calc_ssim(cpi->Source,
+ &cm->post_proc_buffer, 1, &weight);
+
+ cpi->summedp_quality += frame_ssim2 * weight;
+ cpi->summedp_weights += weight;
+#if 0
+ {
+ FILE *f = fopen("q_used.stt", "a");
+                  fprintf(f, "%5d : Y%7.3f:U%7.3f:V%7.3f:F%7.3f:S%7.3f\n",
+                          cpi->common.current_video_frame, ye, ue, ve,
+                          frame_psnr2, frame_ssim2);
+ fclose(f);
+ }
+#endif
+ }
+ }
+
+ if (cpi->b_calculate_ssimg) {
+ double y, u, v, frame_all;
+ frame_all = vp9_calc_ssimg(cpi->Source, cm->frame_to_show,
+ &y, &u, &v);
+ cpi->total_ssimg_y += y;
+ cpi->total_ssimg_u += u;
+ cpi->total_ssimg_v += v;
+ cpi->total_ssimg_all += frame_all;
+ }
+ }
+ }
+
+#endif
+ // fclose(fp_out);
+ return 0;
+}
+
+int vp9_get_preview_raw_frame(VP9_PTR comp, YV12_BUFFER_CONFIG *dest,
+ vp9_ppflags_t *flags) {
+ VP9_COMP *cpi = (VP9_COMP *) comp;
+
+ if (!cpi->common.show_frame)
+ return -1;
+ else {
+ int ret;
+#if CONFIG_VP9_POSTPROC
+ ret = vp9_post_proc_frame(&cpi->common, dest, flags);
+#else
+
+ if (cpi->common.frame_to_show) {
+ *dest = *cpi->common.frame_to_show;
+ dest->y_width = cpi->common.width;
+ dest->y_height = cpi->common.height;
+ dest->uv_height = cpi->common.height / 2;
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+#endif // !CONFIG_VP9_POSTPROC
+ vp9_clear_system_state();
+ return ret;
+ }
+}
+
+int vp9_set_roimap(VP9_PTR comp, unsigned char *map, unsigned int rows,
+ unsigned int cols, int delta_q[MAX_SEGMENTS],
+ int delta_lf[MAX_SEGMENTS],
+ unsigned int threshold[MAX_SEGMENTS]) {
+ VP9_COMP *cpi = (VP9_COMP *) comp;
+ signed char feature_data[SEG_LVL_MAX][MAX_SEGMENTS];
+ struct segmentation *seg = &cpi->common.seg;
+ int i;
+
+ if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
+ return -1;
+
+ if (!map) {
+ vp9_disable_segmentation((VP9_PTR)cpi);
+ return 0;
+ }
+
+ // Set the segmentation Map
+ vp9_set_segmentation_map((VP9_PTR)cpi, map);
+
+ // Activate segmentation.
+ vp9_enable_segmentation((VP9_PTR)cpi);
+
+ // Set up the quant, LF and breakout threshold segment data
+ for (i = 0; i < MAX_SEGMENTS; i++) {
+ feature_data[SEG_LVL_ALT_Q][i] = delta_q[i];
+ feature_data[SEG_LVL_ALT_LF][i] = delta_lf[i];
+ cpi->segment_encode_breakout[i] = threshold[i];
+ }
+
+ // Enable the loop and quant changes in the feature mask
+ for (i = 0; i < MAX_SEGMENTS; i++) {
+ if (delta_q[i])
+ vp9_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
+ else
+ vp9_disable_segfeature(seg, i, SEG_LVL_ALT_Q);
+
+ if (delta_lf[i])
+ vp9_enable_segfeature(seg, i, SEG_LVL_ALT_LF);
+ else
+ vp9_disable_segfeature(seg, i, SEG_LVL_ALT_LF);
+ }
+
+ // Initialize the feature data structure
+ // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
+ vp9_set_segment_data((VP9_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
+
+ return 0;
+}
+
+int vp9_set_active_map(VP9_PTR comp, unsigned char *map,
+ unsigned int rows, unsigned int cols) {
+ VP9_COMP *cpi = (VP9_COMP *) comp;
+
+ if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
+ if (map) {
+ vpx_memcpy(cpi->active_map, map, rows * cols);
+ cpi->active_map_enabled = 1;
+ } else {
+ cpi->active_map_enabled = 0;
+ }
+
+ return 0;
+ } else {
+ // cpi->active_map_enabled = 0;
+ return -1;
+ }
+}
+
+int vp9_set_internal_size(VP9_PTR comp,
+ VPX_SCALING horiz_mode, VPX_SCALING vert_mode) {
+ VP9_COMP *cpi = (VP9_COMP *) comp;
+ VP9_COMMON *cm = &cpi->common;
+ int hr = 0, hs = 0, vr = 0, vs = 0;
+
+ if (horiz_mode > ONETWO || vert_mode > ONETWO)
+ return -1;
+
+ Scale2Ratio(horiz_mode, &hr, &hs);
+ Scale2Ratio(vert_mode, &vr, &vs);
+
+  // Always round up to the next whole number.
+ cm->width = (hs - 1 + cpi->oxcf.width * hr) / hs;
+ cm->height = (vs - 1 + cpi->oxcf.height * vr) / vs;
+
+ assert(cm->width <= cpi->initial_width);
+ assert(cm->height <= cpi->initial_height);
+ update_frame_size(cpi);
+ return 0;
+}
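
The "(den - 1 + num) / den" form above is integer ceiling division, so odd dimensions never lose a pixel when scaled down. A quick check:

    #include <stdio.h>

    static int scale_dim(int dim, int num, int den) {
      return (den - 1 + dim * num) / den;  /* rounds up */
    }

    int main(void) {
      printf("%d\n", scale_dim(353, 1, 2));  /* 177, not 176 */
      printf("%d\n", scale_dim(352, 1, 2));  /* 176, exact */
      return 0;
    }
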
+
+int vp9_set_size_literal(VP9_PTR comp, unsigned int width,
+ unsigned int height) {
+ VP9_COMP *cpi = (VP9_COMP *)comp;
+ VP9_COMMON *cm = &cpi->common;
+
+ check_initial_width(cpi, NULL);
+
+ if (width) {
+ cm->width = width;
+ if (cm->width * 5 < cpi->initial_width) {
+ cm->width = cpi->initial_width / 5 + 1;
+ printf("Warning: Desired width too small, changed to %d \n", cm->width);
+ }
+ if (cm->width > cpi->initial_width) {
+ cm->width = cpi->initial_width;
+ printf("Warning: Desired width too large, changed to %d \n", cm->width);
+ }
+ }
+
+ if (height) {
+ cm->height = height;
+ if (cm->height * 5 < cpi->initial_height) {
+ cm->height = cpi->initial_height / 5 + 1;
+ printf("Warning: Desired height too small, changed to %d \n", cm->height);
+ }
+ if (cm->height > cpi->initial_height) {
+ cm->height = cpi->initial_height;
+ printf("Warning: Desired height too large, changed to %d \n", cm->height);
+ }
+ }
+
+ assert(cm->width <= cpi->initial_width);
+ assert(cm->height <= cpi->initial_height);
+ update_frame_size(cpi);
+ return 0;
+}
+
+int vp9_switch_layer(VP9_PTR comp, int layer) {
+ VP9_COMP *cpi = (VP9_COMP *)comp;
+
+ if (cpi->use_svc) {
+ cpi->current_layer = layer;
+
+ // Use buffer i for layer i LST
+ cpi->lst_fb_idx = layer;
+
+ // Use buffer i-1 for layer i Alt (Inter-layer prediction)
+ if (layer != 0) cpi->alt_fb_idx = layer - 1;
+
+ // Use the rest for Golden
+ if (layer < 2 * cpi->number_spatial_layers - NUM_REF_FRAMES)
+ cpi->gld_fb_idx = cpi->lst_fb_idx;
+ else
+ cpi->gld_fb_idx = 2 * cpi->number_spatial_layers - 1 - layer;
+
+ printf("Switching to layer %d:\n", layer);
+ printf("Using references: LST/GLD/ALT [%d|%d|%d]\n", cpi->lst_fb_idx,
+ cpi->gld_fb_idx, cpi->alt_fb_idx);
+ } else {
+ printf("Switching layer not supported. Enable SVC first \n");
+ }
+ return 0;
+}
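
A standalone walk through the layer-to-buffer mapping above for three spatial layers, with NUM_REF_FRAMES assumed to be 8 as in VP9 (hypothetical harness; layer 0 keeps whatever ALT index was set before, initialised to 0 here):

    #include <stdio.h>

    #define NUM_REF_FRAMES 8  /* assumed, as in VP9 */

    int main(void) {
      const int num_layers = 3;
      int alt = 0;
      for (int layer = 0; layer < num_layers; ++layer) {
        const int lst = layer;             /* buffer i is layer i's LAST */
        if (layer != 0)
          alt = layer - 1;                 /* layer below feeds ALT */
        const int gld = layer < 2 * num_layers - NUM_REF_FRAMES
                            ? lst
                            : 2 * num_layers - 1 - layer;
        printf("layer %d: LST/GLD/ALT [%d|%d|%d]\n", layer, lst, gld, alt);
      }
      return 0;  /* prints [0|5|0], [1|4|0], [2|3|1] */
    }
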
+
+void vp9_set_svc(VP9_PTR comp, int use_svc) {
+ VP9_COMP *cpi = (VP9_COMP *)comp;
+ cpi->use_svc = use_svc;
+ if (cpi->use_svc) printf("Enabled SVC encoder \n");
+ return;
+}
+
+int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
+ int i, j;
+ int total = 0;
+
+ uint8_t *src = source->y_buffer;
+ uint8_t *dst = dest->y_buffer;
+
+  // Loop through the Y plane raw and reconstruction data, summing the
+  // squared differences.
+ for (i = 0; i < source->y_height; i += 16) {
+ for (j = 0; j < source->y_width; j += 16) {
+ unsigned int sse;
+ total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ &sse);
+ }
+
+ src += 16 * source->y_stride;
+ dst += 16 * dest->y_stride;
+ }
+
+ return total;
+}
+
+
+int vp9_get_quantizer(VP9_PTR c) {
+ return ((VP9_COMP *)c)->common.base_qindex;
+}
diff --git a/libvpx/vp9/encoder/vp9_onyx_int.h b/libvpx/vp9/encoder/vp9_onyx_int.h
new file mode 100644
index 0000000..3e5796f
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_onyx_int.h
@@ -0,0 +1,722 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_ONYX_INT_H_
+#define VP9_ENCODER_VP9_ONYX_INT_H_
+
+#include <stdio.h>
+#include "./vpx_config.h"
+#include "vp9/common/vp9_onyx.h"
+#include "vp9/encoder/vp9_treewriter.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vpx_ports/mem.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/encoder/vp9_lookahead.h"
+
+// Experimental rate control switches
+#if CONFIG_ONESHOTQ
+#define ONE_SHOT_Q_ESTIMATE 0
+#define STRICT_ONE_SHOT_Q 0
+#define DISABLE_RC_LONG_TERM_MEM 0
+#endif
+
+// #define MODE_TEST_HIT_STATS
+
+// #define SPEEDSTATS 1
+#if CONFIG_MULTIPLE_ARF
+// Set MIN_GF_INTERVAL to 1 for the full decomposition.
+#define MIN_GF_INTERVAL 2
+#else
+#define MIN_GF_INTERVAL 4
+#endif
+#define DEFAULT_GF_INTERVAL 7
+
+#define KEY_FRAME_CONTEXT 5
+
+#define MAX_MODES 36
+
+#define MIN_THRESHMULT 32
+#define MAX_THRESHMULT 512
+
+#define GF_ZEROMV_ZBIN_BOOST 0
+#define LF_ZEROMV_ZBIN_BOOST 0
+#define MV_ZBIN_BOOST 0
+#define SPLIT_MV_ZBIN_BOOST 0
+#define INTRA_ZBIN_BOOST 0
+
+typedef struct {
+ nmv_context nmvc;
+ int nmvjointcost[MV_JOINTS];
+ int nmvcosts[2][MV_VALS];
+ int nmvcosts_hp[2][MV_VALS];
+
+ vp9_prob segment_pred_probs[PREDICTION_PROBS];
+ vp9_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
+ vp9_prob comp_inter_prob[COMP_INTER_CONTEXTS];
+ vp9_prob single_ref_prob[REF_CONTEXTS][2];
+ vp9_prob comp_ref_prob[REF_CONTEXTS];
+
+ unsigned char *last_frame_seg_map_copy;
+
+ // 0 = Intra, Last, GF, ARF
+ signed char last_ref_lf_deltas[MAX_REF_LF_DELTAS];
+ // 0 = ZERO_MV, MV
+ signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS];
+
+ vp9_coeff_probs_model coef_probs[TX_SIZES][BLOCK_TYPES];
+
+ vp9_prob y_mode_prob[4][INTRA_MODES - 1];
+ vp9_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
+ vp9_prob partition_prob[2][NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
+
+ vp9_prob switchable_interp_prob[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS - 1];
+
+ int inter_mode_counts[INTER_MODE_CONTEXTS][INTER_MODES - 1][2];
+ vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
+
+ struct tx_probs tx_probs;
+ vp9_prob mbskip_probs[MBSKIP_CONTEXTS];
+} CODING_CONTEXT;
+
+typedef struct {
+ double frame;
+ double intra_error;
+ double coded_error;
+ double sr_coded_error;
+ double ssim_weighted_pred_err;
+ double pcnt_inter;
+ double pcnt_motion;
+ double pcnt_second_ref;
+ double pcnt_neutral;
+ double MVr;
+ double mvr_abs;
+ double MVc;
+ double mvc_abs;
+ double MVrv;
+ double MVcv;
+ double mv_in_out_count;
+ double new_mv_count;
+ double duration;
+ double count;
+} FIRSTPASS_STATS;
+
+typedef struct {
+ int frames_so_far;
+ double frame_intra_error;
+ double frame_coded_error;
+ double frame_pcnt_inter;
+ double frame_pcnt_motion;
+ double frame_mvr;
+ double frame_mvr_abs;
+ double frame_mvc;
+ double frame_mvc_abs;
+} ONEPASS_FRAMESTATS;
+
+typedef struct {
+ struct {
+ int err;
+ union {
+ int_mv mv;
+ MB_PREDICTION_MODE mode;
+ } m;
+ } ref[MAX_REF_FRAMES];
+} MBGRAPH_MB_STATS;
+
+typedef struct {
+ MBGRAPH_MB_STATS *mb_stats;
+} MBGRAPH_FRAME_STATS;
+
+// This enumerator type needs to be kept aligned with the mode order in
+// const MODE_DEFINITION vp9_mode_order[MAX_MODES] used in the rd code.
+typedef enum {
+ THR_NEARESTMV,
+ THR_NEARESTA,
+ THR_NEARESTG,
+
+ THR_DC,
+
+ THR_NEWMV,
+ THR_NEWA,
+ THR_NEWG,
+
+ THR_NEARMV,
+ THR_NEARA,
+ THR_COMP_NEARESTLA,
+ THR_COMP_NEARESTGA,
+
+ THR_TM,
+
+ THR_COMP_NEARLA,
+ THR_COMP_NEWLA,
+ THR_NEARG,
+ THR_COMP_NEARGA,
+ THR_COMP_NEWGA,
+
+ THR_SPLITMV,
+ THR_SPLITG,
+ THR_SPLITA,
+ THR_COMP_SPLITLA,
+ THR_COMP_SPLITGA,
+
+ THR_ZEROMV,
+ THR_ZEROG,
+ THR_ZEROA,
+ THR_COMP_ZEROLA,
+ THR_COMP_ZEROGA,
+
+ THR_B_PRED,
+ THR_H_PRED,
+ THR_V_PRED,
+ THR_D135_PRED,
+ THR_D207_PRED,
+ THR_D153_PRED,
+ THR_D63_PRED,
+ THR_D117_PRED,
+ THR_D45_PRED,
+} THR_MODES;
+
+typedef enum {
+ DIAMOND = 0,
+ NSTEP = 1,
+ HEX = 2,
+ BIGDIA = 3,
+ SQUARE = 4
+} SEARCH_METHODS;
+
+typedef enum {
+ USE_FULL_RD = 0,
+ USE_LARGESTINTRA,
+ USE_LARGESTINTRA_MODELINTER,
+ USE_LARGESTALL
+} TX_SIZE_SEARCH_METHOD;
+
+typedef enum {
+ // Values should be powers of 2 so that they can be selected as bits of
+ // an integer flags field
+
+ // terminate search early based on distortion so far compared to
+ // qp step, distortion in the neighborhood of the frame, etc.
+ FLAG_EARLY_TERMINATE = 1,
+
+ // skips comp inter modes if the best so far is an intra mode
+ FLAG_SKIP_COMP_BESTINTRA = 2,
+
+ // skips comp inter modes if the best single intermode so far does
+ // not have the same reference as one of the two references being
+ // tested
+ FLAG_SKIP_COMP_REFMISMATCH = 4,
+
+ // skips oblique intra modes if the best so far is an inter mode
+ FLAG_SKIP_INTRA_BESTINTER = 8,
+
+ // skips oblique intra modes at angles 27, 63, 117, 153 if the best
+ // intra so far is not one of the neighboring directions
+ FLAG_SKIP_INTRA_DIRMISMATCH = 16,
+
+ // skips intra modes other than DC_PRED if the source variance
+ // is small
+ FLAG_SKIP_INTRA_LOWVAR = 32,
+} MODE_SEARCH_SKIP_LOGIC;
+
+typedef enum {
+ SUBPEL_ITERATIVE = 0,
+ SUBPEL_TREE = 1,
+ // Other methods to come
+} SUBPEL_SEARCH_METHODS;
+
+#define ALL_INTRA_MODES 0x3FF
+#define INTRA_DC_ONLY 0x01
+#define INTRA_DC_TM ((1 << TM_PRED) | (1 << DC_PRED))
+#define INTRA_DC_TM_H_V (INTRA_DC_TM | (1 << V_PRED) | (1 << H_PRED))
+
+typedef struct {
+ int RD;
+ SEARCH_METHODS search_method;
+ int auto_filter;
+ int recode_loop;
+ SUBPEL_SEARCH_METHODS subpel_search_method;
+ int subpel_iters_per_step;
+ int thresh_mult[MAX_MODES];
+ int max_step_search_steps;
+ int reduce_first_step_size;
+ int auto_mv_step_size;
+ int optimize_coefficients;
+ int static_segmentation;
+ int comp_inter_joint_search_thresh;
+ int adaptive_rd_thresh;
+ int skip_encode_sb;
+ int skip_encode_frame;
+ int use_lastframe_partitioning;
+ TX_SIZE_SEARCH_METHOD tx_size_search_method;
+ int use_lp32x32fdct;
+ int use_avoid_tested_higherror;
+ int skip_lots_of_modes;
+ int partition_by_variance;
+ int use_one_partition_size_always;
+ int less_rectangular_check;
+ int use_square_partition_only;
+ int mode_skip_start;
+ int reference_masking;
+ BLOCK_SIZE always_this_block_size;
+ int auto_min_max_partition_size;
+ int auto_min_max_partition_interval;
+ int auto_min_max_partition_count;
+ BLOCK_SIZE min_partition_size;
+ BLOCK_SIZE max_partition_size;
+ int adjust_partitioning_from_last_frame;
+ int last_partitioning_redo_frequency;
+ int disable_splitmv;
+ int using_small_partition_info;
+ // TODO(jingning): combine the related motion search speed features
+ int adaptive_motion_search;
+
+ // Implements various heuristics to skip searching modes
+ // The heuristics selected are based on flags
+ // defined in the MODE_SEARCH_SKIP_HEURISTICS enum
+ unsigned int mode_search_skip_flags;
+ // A source variance threshold below which the split mode is disabled
+ unsigned int disable_split_var_thresh;
+ // A source variance threshold below which filter search is disabled
+ // Choose a very large value (UINT_MAX) to use 8-tap always
+ unsigned int disable_filter_search_var_thresh;
+ int intra_y_mode_mask;
+ int intra_uv_mode_mask;
+ int use_rd_breakout;
+ int use_uv_intra_rd_estimate;
+ int use_fast_lpf_pick;
+ int use_fast_coef_updates; // 0: 2-loop, 1: 1-loop, 2: 1-loop reduced
+} SPEED_FEATURES;
+
+typedef struct VP9_COMP {
+ DECLARE_ALIGNED(16, int16_t, y_quant[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, y_quant_shift[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, y_zbin[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, y_round[QINDEX_RANGE][8]);
+
+ DECLARE_ALIGNED(16, int16_t, uv_quant[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, uv_quant_shift[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, uv_zbin[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);
+
+#if CONFIG_ALPHA
+ DECLARE_ALIGNED(16, int16_t, a_quant[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, a_quant_shift[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, a_zbin[QINDEX_RANGE][8]);
+ DECLARE_ALIGNED(16, int16_t, a_round[QINDEX_RANGE][8]);
+#endif
+
+ MACROBLOCK mb;
+ VP9_COMMON common;
+ VP9_CONFIG oxcf;
+
+ struct lookahead_ctx *lookahead;
+ struct lookahead_entry *source;
+#if CONFIG_MULTIPLE_ARF
+ struct lookahead_entry *alt_ref_source[NUM_REF_FRAMES];
+#else
+ struct lookahead_entry *alt_ref_source;
+#endif
+
+ YV12_BUFFER_CONFIG *Source;
+ YV12_BUFFER_CONFIG *un_scaled_source;
+ YV12_BUFFER_CONFIG scaled_source;
+
+ unsigned int frames_till_alt_ref_frame;
+  // Frame in src_buffers has been identified to be encoded as an alt ref.
+  int source_alt_ref_pending;
+  // An alt ref frame has been encoded and is usable.
+  int source_alt_ref_active;
+
+  // Source of the frame to encode is an exact copy of an alt ref frame.
+  int is_src_frame_alt_ref;
+
+  int gold_is_last;  // golden same as last frame (short-circuit gold search)
+  int alt_is_last;   // alt ref same as last (short-circuit altref search)
+  int gold_is_alt;   // don't do both alt and gold search (just do gold)
+
+ int scaled_ref_idx[3];
+ int lst_fb_idx;
+ int gld_fb_idx;
+ int alt_fb_idx;
+
+ int current_layer;
+ int use_svc;
+
+#if CONFIG_MULTIPLE_ARF
+ int alt_ref_fb_idx[NUM_REF_FRAMES - 3];
+#endif
+ int refresh_last_frame;
+ int refresh_golden_frame;
+ int refresh_alt_ref_frame;
+ YV12_BUFFER_CONFIG last_frame_uf;
+
+ TOKENEXTRA *tok;
+ unsigned int tok_count[4][1 << 6];
+
+
+ unsigned int frames_since_key;
+ unsigned int key_frame_frequency;
+ unsigned int this_key_frame_forced;
+ unsigned int next_key_frame_forced;
+#if CONFIG_MULTIPLE_ARF
+ // Position within a frame coding order (including any additional ARF frames).
+ unsigned int sequence_number;
+ // Next frame in naturally occurring order that has not yet been coded.
+ int next_frame_in_order;
+#endif
+
+ // Ambient reconstruction err target for force key frames
+ int ambient_err;
+
+ unsigned int mode_check_freq[MAX_MODES];
+ unsigned int mode_test_hit_counts[MAX_MODES];
+ unsigned int mode_chosen_counts[MAX_MODES];
+ int64_t mode_skip_mask;
+ int ref_frame_mask;
+ int set_ref_frame_mask;
+
+ int rd_threshes[BLOCK_SIZES][MAX_MODES];
+ int rd_thresh_freq_fact[BLOCK_SIZES][MAX_MODES];
+
+ int64_t rd_comp_pred_diff[NB_PREDICTION_TYPES];
+ // FIXME(rbultje) int64_t?
+ int rd_prediction_type_threshes[4][NB_PREDICTION_TYPES];
+ unsigned int intra_inter_count[INTRA_INTER_CONTEXTS][2];
+ unsigned int comp_inter_count[COMP_INTER_CONTEXTS][2];
+ unsigned int single_ref_count[REF_CONTEXTS][2][2];
+ unsigned int comp_ref_count[REF_CONTEXTS][2];
+
+ int64_t rd_tx_select_diff[TX_MODES];
+ // FIXME(rbultje) can this overflow?
+ int rd_tx_select_threshes[4][TX_MODES];
+
+ int64_t rd_filter_diff[SWITCHABLE_FILTERS + 1];
+ int64_t rd_filter_threshes[4][SWITCHABLE_FILTERS + 1];
+ int64_t rd_filter_cache[SWITCHABLE_FILTERS + 1];
+
+ int RDMULT;
+ int RDDIV;
+
+ CODING_CONTEXT coding_context;
+
+ // Rate targetting variables
+ int this_frame_target;
+ int projected_frame_size;
+ int last_q[2]; // Separate values for Intra/Inter
+ int last_boosted_qindex; // Last boosted GF/KF/ARF q
+
+ double rate_correction_factor;
+ double key_frame_rate_correction_factor;
+ double gf_rate_correction_factor;
+
+ unsigned int frames_since_golden;
+ int frames_till_gf_update_due; // Count down till next GF
+
+  // Total bits overspent because of GF boost (cumulative).
+  int gf_overspend_bits;
+
+  // Used in the few frames following a GF to recover the extra bits
+  // spent in that GF.
+  int non_gf_bitrate_adjustment;
+
+  // Extra bits spent on key frames that need to be recovered on inter frames.
+  int kf_overspend_bits;
+  // Current number of bits to try and recover on each inter frame.
+  int kf_bitrate_adjustment;
+ int max_gf_interval;
+ int baseline_gf_interval;
+ int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames
+ int active_arnr_strength; // <= cpi->oxcf.arnr_max_strength
+
+ int64_t key_frame_count;
+ int prior_key_frame_distance[KEY_FRAME_CONTEXT];
+ int per_frame_bandwidth; // Current section per frame bandwidth target
+ int av_per_frame_bandwidth; // Average frame size target for clip
+ int min_frame_bandwidth; // Minimum allocation that should be used for any frame
+ int inter_frame_target;
+ double output_framerate;
+ int64_t last_time_stamp_seen;
+ int64_t last_end_time_stamp_seen;
+ int64_t first_time_stamp_ever;
+
+ int ni_av_qi;
+ int ni_tot_qi;
+ int ni_frames;
+ int avg_frame_qindex;
+ double tot_q;
+ double avg_q;
+
+ int zbin_mode_boost;
+ int zbin_mode_boost_enabled;
+
+ int64_t total_byte_count;
+
+ int buffered_mode;
+
+ int buffer_level;
+ int bits_off_target;
+
+ int rolling_target_bits;
+ int rolling_actual_bits;
+
+ int long_rolling_target_bits;
+ int long_rolling_actual_bits;
+
+ int64_t total_actual_bits;
+ int total_target_vs_actual; // debug stats
+
+ int worst_quality;
+ int active_worst_quality;
+ int best_quality;
+ int active_best_quality;
+
+ int cq_target_quality;
+
+ int y_mode_count[4][INTRA_MODES];
+ int y_uv_mode_count[INTRA_MODES][INTRA_MODES];
+ unsigned int partition_count[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
+
+ nmv_context_counts NMVcount;
+
+ vp9_coeff_count coef_counts[TX_SIZES][BLOCK_TYPES];
+ vp9_coeff_probs_model frame_coef_probs[TX_SIZES][BLOCK_TYPES];
+ vp9_coeff_stats frame_branch_ct[TX_SIZES][BLOCK_TYPES];
+
+ int gfu_boost;
+ int last_boost;
+ int kf_boost;
+ int kf_zeromotion_pct;
+ int gf_zeromotion_pct;
+
+ int64_t target_bandwidth;
+ struct vpx_codec_pkt_list *output_pkt_list;
+
+#if 0
+ // Experimental code for lagged and one pass
+ ONEPASS_FRAMESTATS one_pass_frame_stats[MAX_LAG_BUFFERS];
+ int one_pass_frame_index;
+#endif
+ MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
+ int mbgraph_n_frames; // number of frames filled in the above
+ int static_mb_pct; // % forced skip mbs by segmentation
+ int seg0_progress, seg0_idx, seg0_cnt;
+
+ int decimation_factor;
+ int decimation_count;
+
+ // for real time encoding
+ int avg_encode_time; // microsecond
+ int avg_pick_mode_time; // microsecond
+ int speed;
+  unsigned int cpu_freq; // MHz
+ int compressor_speed;
+
+ int interquantizer;
+ int goldfreq;
+ int auto_worst_q;
+ int cpu_used;
+ int pass;
+
+ vp9_prob last_skip_false_probs[3][MBSKIP_CONTEXTS];
+ int last_skip_probs_q[3];
+
+ int ref_frame_flags;
+
+ SPEED_FEATURES sf;
+ int error_bins[1024];
+
+ unsigned int max_mv_magnitude;
+ int mv_step_param;
+
+  // Data used in real-time conferencing mode to help decide whether to
+  // update the GF.
+ int inter_zz_count;
+ int gf_bad_count;
+ int gf_update_recommended;
+
+ unsigned char *segmentation_map;
+
+  // Segment threshold for encode breakout.
+ int segment_encode_breakout[MAX_SEGMENTS];
+
+ unsigned char *active_map;
+ unsigned int active_map_enabled;
+
+ fractional_mv_step_fp *find_fractional_mv_step;
+ fractional_mv_step_comp_fp *find_fractional_mv_step_comp;
+ vp9_full_search_fn_t full_search_sad;
+ vp9_refining_search_fn_t refining_search_sad;
+ vp9_diamond_search_fn_t diamond_search_sad;
+ vp9_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
+ uint64_t time_receive_data;
+ uint64_t time_compress_data;
+ uint64_t time_pick_lpf;
+ uint64_t time_encode_sb_row;
+
+ struct twopass_rc {
+ unsigned int section_intra_rating;
+ unsigned int next_iiratio;
+ unsigned int this_iiratio;
+ FIRSTPASS_STATS total_stats;
+ FIRSTPASS_STATS this_frame_stats;
+ FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start;
+ FIRSTPASS_STATS total_left_stats;
+ int first_pass_done;
+ int64_t bits_left;
+ int64_t clip_bits_total;
+ double avg_iiratio;
+ double modified_error_total;
+ double modified_error_used;
+ double modified_error_left;
+ double kf_intra_err_min;
+ double gf_intra_err_min;
+ int frames_to_key;
+ int maxq_max_limit;
+ int maxq_min_limit;
+ int static_scene_max_gf_interval;
+ int kf_bits;
+ // Remaining error from uncoded frames in a gf group. Two pass use only
+ int64_t gf_group_error_left;
+
+ // Projected total bits available for a key frame group of frames
+ int64_t kf_group_bits;
+
+ // Error score of frames still to be coded in kf group
+ int64_t kf_group_error_left;
+
+ // Projected Bits available for a group of frames including 1 GF or ARF
+ int64_t gf_group_bits;
+ // Bits for the golden frame or ARF - 2 pass only
+ int gf_bits;
+ int alt_extra_bits;
+
+ int sr_update_lag;
+ double est_max_qcorrection_factor;
+ } twopass;
+
+ YV12_BUFFER_CONFIG alt_ref_buffer;
+ YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS];
+ int fixed_divide[512];
+
+#if CONFIG_INTERNAL_STATS
+ int count;
+ double total_y;
+ double total_u;
+ double total_v;
+ double total;
+ double total_sq_error;
+ double totalp_y;
+ double totalp_u;
+ double totalp_v;
+ double totalp;
+ double total_sq_error2;
+ int bytes;
+ double summed_quality;
+ double summed_weights;
+ double summedp_quality;
+ double summedp_weights;
+ unsigned int tot_recode_hits;
+
+
+ double total_ssimg_y;
+ double total_ssimg_u;
+ double total_ssimg_v;
+ double total_ssimg_all;
+
+ int b_calculate_ssimg;
+#endif
+ int b_calculate_psnr;
+
+ // Per MB activity measurement
+ unsigned int activity_avg;
+ unsigned int *mb_activity_map;
+ int *mb_norm_activity_map;
+ int output_partition;
+
+ /* force next frame to intra when kf_auto says so */
+ int force_next_frame_intra;
+
+ int droppable;
+
+ int dummy_packing; /* flag to indicate if packing is dummy */
+
+ unsigned int switchable_interp_count[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS];
+
+ unsigned int txfm_stepdown_count[TX_SIZES];
+
+ int initial_width;
+ int initial_height;
+
+ int number_spatial_layers;
+ int enable_encode_breakout; // Default value is 1. From first pass stats,
+ // encode_breakout may be disabled.
+
+#if CONFIG_MULTIPLE_ARF
+ // ARF tracking variables.
+ int multi_arf_enabled;
+ unsigned int frame_coding_order_period;
+ unsigned int new_frame_coding_order_period;
+ int frame_coding_order[MAX_LAG_BUFFERS * 2];
+ int arf_buffer_idx[MAX_LAG_BUFFERS * 3 / 2];
+ int arf_weight[MAX_LAG_BUFFERS];
+ int arf_buffered;
+ int this_frame_weight;
+ int max_arf_level;
+#endif
+
+#ifdef ENTROPY_STATS
+ int64_t mv_ref_stats[INTER_MODE_CONTEXTS][INTER_MODES - 1][2];
+#endif
+
+
+#ifdef MODE_TEST_HIT_STATS
+ // Debug / test stats
+ int64_t mode_test_hits[BLOCK_SIZES];
+#endif
+} VP9_COMP;
+
+static int get_ref_frame_idx(VP9_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
+ if (ref_frame == LAST_FRAME) {
+ return cpi->lst_fb_idx;
+ } else if (ref_frame == GOLDEN_FRAME) {
+ return cpi->gld_fb_idx;
+ } else {
+ return cpi->alt_fb_idx;
+ }
+}
+
+static int get_scale_ref_frame_idx(VP9_COMP *cpi,
+ MV_REFERENCE_FRAME ref_frame) {
+ if (ref_frame == LAST_FRAME) {
+ return 0;
+ } else if (ref_frame == GOLDEN_FRAME) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+void vp9_encode_frame(VP9_COMP *cpi);
+
+void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
+ unsigned long *size);
+
+void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x);
+
+void vp9_set_speed_features(VP9_COMP *cpi);
+
+extern int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest);
+
+extern void vp9_alloc_compressor_data(VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_ONYX_INT_H_
diff --git a/libvpx/vp9/encoder/vp9_picklpf.c b/libvpx/vp9/encoder/vp9_picklpf.c
new file mode 100644
index 0000000..239fd6b
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_picklpf.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_picklpf.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "./vpx_scale_rtcd.h"
+
+void vp9_yv12_copy_partial_frame_c(YV12_BUFFER_CONFIG *src_ybc,
+ YV12_BUFFER_CONFIG *dst_ybc, int fraction) {
+ const int height = src_ybc->y_height;
+ const int stride = src_ybc->y_stride;
+ const int offset = stride * ((height >> 5) * 16 - 8);
+ const int lines_to_copy = MAX(height >> (fraction + 4), 1) << 4;
+
+ assert(src_ybc->y_stride == dst_ybc->y_stride);
+ vpx_memcpy(dst_ybc->y_buffer + offset, src_ybc->y_buffer + offset,
+ stride * (lines_to_copy + 16));
+}
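
vp9_yv12_copy_partial_frame_c copies only a horizontal band around the middle of the frame: the start row sits 8 rows above a 16-aligned midpoint, and fraction controls the band height. A worked check of the arithmetic for an assumed 288-high, 352-stride frame with fraction = 3:

    #include <stdio.h>

    int main(void) {
      const int height = 288, stride = 352, fraction = 3;
      const int start_row = (height >> 5) * 16 - 8;  /* 136 */
      int lines_to_copy = height >> (fraction + 4);  /* 2 */
      if (lines_to_copy < 1)
        lines_to_copy = 1;
      lines_to_copy <<= 4;                           /* 32 */
      printf("rows %d..%d, %d bytes copied\n", start_row,
             start_row + lines_to_copy + 16,
             stride * (lines_to_copy + 16));  /* rows 136..184, 16896 bytes */
      return 0;
    }
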
+
+static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, int Fraction) {
+ int i, j;
+ int Total = 0;
+ int srcoffset, dstoffset;
+ uint8_t *src = source->y_buffer;
+ uint8_t *dst = dest->y_buffer;
+
+ int linestocopy = (source->y_height >> (Fraction + 4));
+
+ if (linestocopy < 1)
+ linestocopy = 1;
+
+ linestocopy <<= 4;
+
+
+ srcoffset = source->y_stride * (dest->y_height >> 5) * 16;
+ dstoffset = dest->y_stride * (dest->y_height >> 5) * 16;
+
+ src += srcoffset;
+ dst += dstoffset;
+
+  // Loop through the Y plane raw and reconstruction data, summing the
+  // squared differences.
+ for (i = 0; i < linestocopy; i += 16) {
+ for (j = 0; j < source->y_width; j += 16) {
+ unsigned int sse;
+ Total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ &sse);
+ }
+
+ src += 16 * source->y_stride;
+ dst += 16 * dest->y_stride;
+ }
+
+ return Total;
+}
+
+// Enforce a minimum filter level based upon baseline Q
+static int get_min_filter_level(VP9_COMP *cpi, int base_qindex) {
+ int min_filter_level;
+ /*int q = (int) vp9_convert_qindex_to_q(base_qindex);
+
+ if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame && !cpi->common.refresh_alt_ref_frame)
+ min_filter_level = 0;
+ else
+ {
+ if (q <= 10)
+ min_filter_level = 0;
+ else if (q <= 64)
+ min_filter_level = 1;
+ else
+ min_filter_level = (q >> 6);
+ }
+ */
+ min_filter_level = 0;
+
+ return min_filter_level;
+}
+
+// Enforce a maximum filter level based upon baseline Q
+static int get_max_filter_level(VP9_COMP *cpi, int base_qindex) {
+  // PGW August 2006: The highest filter values are almost always a bad idea.
+
+  // jbb chg: 20100118 - not so any more with this overquant stuff; allow
+  // high values when lots of intra is coming in.
+ int max_filter_level = MAX_LOOP_FILTER;// * 3 / 4;
+ (void)base_qindex;
+
+ if (cpi->twopass.section_intra_rating > 8)
+ max_filter_level = MAX_LOOP_FILTER * 3 / 4;
+
+ return max_filter_level;
+}
+
+
+// Stub function for now; alt LF is not used.
+void vp9_set_alt_lf_level(VP9_COMP *cpi, int filt_val) {
+}
+
+void vp9_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, int partial) {
+ VP9_COMMON *const cm = &cpi->common;
+ struct loopfilter *const lf = &cm->lf;
+
+ int best_err = 0;
+ int filt_err = 0;
+ const int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
+ const int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
+
+ int filter_step;
+ int filt_high = 0;
+ // Start search at previous frame filter level
+ int filt_mid = lf->filter_level;
+ int filt_low = 0;
+ int filt_best;
+ int filt_direction = 0;
+
+ int Bias = 0; // Bias against raising loop filter and in favour of lowering it
+
+ // Make a copy of the unfiltered / processed recon buffer
+ vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+
+ lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0
+ : cpi->oxcf.Sharpness;
+
+  // Start the search at the previous frame filter level unless it is
+  // now out of range.
+ filt_mid = clamp(lf->filter_level, min_filter_level, max_filter_level);
+
+ // Define the initial step size
+ filter_step = filt_mid < 16 ? 4 : filt_mid / 4;
+
+ // Get baseline error score
+ vp9_set_alt_lf_level(cpi, filt_mid);
+ vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, filt_mid, 1, partial);
+
+ best_err = vp9_calc_ss_err(sd, cm->frame_to_show);
+ filt_best = filt_mid;
+
+ // Re-instate the unfiltered frame
+ vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+
+ while (filter_step > 0) {
+    // PGW change 12/12/06 for small images.
+    Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
+
+    // jbb chg: 20100118 - in sections with lots of new material coming in,
+    // don't bias as much towards a low filter value.
+ if (cpi->twopass.section_intra_rating < 20)
+ Bias = Bias * cpi->twopass.section_intra_rating / 20;
+
+ // yx, bias less for large block size
+ if (cpi->common.tx_mode != ONLY_4X4)
+ Bias >>= 1;
+
+    filt_high = MIN(filt_mid + filter_step, max_filter_level);
+    filt_low = MAX(filt_mid - filter_step, min_filter_level);
+
+ if ((filt_direction <= 0) && (filt_low != filt_mid)) {
+ // Get Low filter error score
+ vp9_set_alt_lf_level(cpi, filt_low);
+ vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, filt_low, 1, partial);
+
+ filt_err = vp9_calc_ss_err(sd, cm->frame_to_show);
+
+ // Re-instate the unfiltered frame
+ vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+
+      // If the value is close to the best so far then bias towards a
+      // lower loop filter value.
+ if ((filt_err - Bias) < best_err) {
+ // Was it actually better than the previous best?
+ if (filt_err < best_err)
+ best_err = filt_err;
+
+ filt_best = filt_low;
+ }
+ }
+
+ // Now look at filt_high
+ if ((filt_direction >= 0) && (filt_high != filt_mid)) {
+ vp9_set_alt_lf_level(cpi, filt_high);
+ vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, filt_high, 1, partial);
+
+ filt_err = vp9_calc_ss_err(sd, cm->frame_to_show);
+
+ // Re-instate the unfiltered frame
+ vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+
+ // Was it better than the previous best?
+ if (filt_err < (best_err - Bias)) {
+ best_err = filt_err;
+ filt_best = filt_high;
+ }
+ }
+
+    // Halve the step distance if the best filter value was the same
+    // as last time.
+ if (filt_best == filt_mid) {
+ filter_step = filter_step / 2;
+ filt_direction = 0;
+ } else {
+ filt_direction = (filt_best < filt_mid) ? -1 : 1;
+ filt_mid = filt_best;
+ }
+ }
+
+ lf->filter_level = filt_best;
+}
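
The search above is a biased hill climb: probe one filter_step below and above the current level, fold Bias in so near-ties go to the lower level, and halve the step whenever the midpoint survives. A stripped-down sketch of the same convergence pattern over a toy error curve (no Bias term; err_at is a hypothetical stand-in for vp9_calc_ss_err):

    #include <stdio.h>

    static int err_at(int level) {  /* toy error curve, minimum at 37 */
      return (level - 37) * (level - 37) + 500;
    }

    int main(void) {
      int mid = 24, best = mid, best_err = err_at(mid);
      int step = mid < 16 ? 4 : mid / 4;
      while (step > 0) {
        const int low = mid - step < 0 ? 0 : mid - step;
        const int high = mid + step > 63 ? 63 : mid + step;
        if (low != mid && err_at(low) < best_err) {
          best_err = err_at(low);
          best = low;
        }
        if (high != mid && err_at(high) < best_err) {
          best_err = err_at(high);
          best = high;
        }
        if (best == mid)
          step /= 2;   /* midpoint won: refine */
        else
          mid = best;  /* move towards the better neighbour */
      }
      printf("best level = %d\n", best);  /* converges to 37 */
      return 0;
    }
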
+
diff --git a/libvpx/vp9/encoder/vp9_picklpf.h b/libvpx/vp9/encoder/vp9_picklpf.h
new file mode 100644
index 0000000..9de4cf8
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_picklpf.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_PICKLPF_H_
+#define VP9_ENCODER_VP9_PICKLPF_H_
+
+struct yv12_buffer_config;
+struct VP9_COMP;
+
+void vp9_set_alt_lf_level(struct VP9_COMP *cpi, int filt_val);
+
+void vp9_pick_filter_level(struct yv12_buffer_config *sd,
+ struct VP9_COMP *cpi, int partial);
+#endif // VP9_ENCODER_VP9_PICKLPF_H_
diff --git a/libvpx/vp9/encoder/vp9_psnr.c b/libvpx/vp9/encoder/vp9_psnr.c
new file mode 100644
index 0000000..9439434
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_psnr.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_scale/yv12config.h"
+#include "math.h"
+
+#define MAX_PSNR 100
+
+double vp9_mse2psnr(double samples, double peak, double mse) {
+ double psnr;
+
+ if (mse > 0.0)
+ psnr = 10.0 * log10(peak * peak * samples / mse);
+ else
+    psnr = MAX_PSNR;  // Limit to prevent division by zero.
+
+ if (psnr > MAX_PSNR)
+ psnr = MAX_PSNR;
+
+ return psnr;
+}
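
A quick sanity check of the mapping above: the third argument is the total squared error, so peak * peak * samples / mse is peak squared over the per-sample MSE, and an 8-bit per-sample MSE of 1.0 lands at about 48.13 dB. A standalone re-derivation of the same formula (not the library function itself):

    #include <math.h>
    #include <stdio.h>

    static double mse2psnr(double samples, double peak, double sse) {
      double psnr = sse > 0.0 ? 10.0 * log10(peak * peak * samples / sse)
                              : 100.0;
      return psnr > 100.0 ? 100.0 : psnr;  /* MAX_PSNR clamp */
    }

    int main(void) {
      printf("%.2f dB\n", mse2psnr(1000.0, 255.0, 1000.0));  /* ~48.13 */
      printf("%.2f dB\n", mse2psnr(1000.0, 255.0, 0.0));     /* 100.00 */
      return 0;
    }
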
diff --git a/libvpx/vp9/encoder/vp9_psnr.h b/libvpx/vp9/encoder/vp9_psnr.h
new file mode 100644
index 0000000..15dd836
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_psnr.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_PSNR_H_
+#define VP9_ENCODER_VP9_PSNR_H_
+
+double vp9_mse2psnr(double samples, double peak, double mse);
+
+#endif // VP9_ENCODER_VP9_PSNR_H_
diff --git a/libvpx/vp9/encoder/vp9_quantize.c b/libvpx/vp9/encoder/vp9_quantize.c
new file mode 100644
index 0000000..6c8b2a0
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_quantize.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_quant_common.h"
+
+#include "vp9/common/vp9_seg_common.h"
+
+#ifdef ENC_DEBUG
+extern int enc_debug;
+#endif
+
+void vp9_quantize_b_c(int16_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+ int16_t *zbin_ptr, int16_t *round_ptr, int16_t *quant_ptr,
+ int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, int16_t *dequant_ptr,
+ int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
+ int i, rc, eob;
+ int zbins[2], nzbins[2], zbin;
+ int x, y, z, sz;
+ int zero_flag = n_coeffs;
+
+ vpx_memset(qcoeff_ptr, 0, n_coeffs*sizeof(int16_t));
+ vpx_memset(dqcoeff_ptr, 0, n_coeffs*sizeof(int16_t));
+
+ eob = -1;
+
+ // Base ZBIN
+ zbins[0] = zbin_ptr[0] + zbin_oq_value;
+ zbins[1] = zbin_ptr[1] + zbin_oq_value;
+ nzbins[0] = zbins[0] * -1;
+ nzbins[1] = zbins[1] * -1;
+
+ if (!skip_block) {
+ // Pre-scan pass
+ for (i = n_coeffs - 1; i >= 0; i--) {
+ rc = scan[i];
+ z = coeff_ptr[rc];
+
+ if (z < zbins[rc != 0] && z > nzbins[rc != 0]) {
+ zero_flag--;
+ } else {
+ break;
+ }
+ }
+
+ // Quantization pass: All coefficients with index >= zero_flag are
+ // skippable. Note: zero_flag can be zero.
+ for (i = 0; i < zero_flag; i++) {
+ rc = scan[i];
+ z = coeff_ptr[rc];
+
+ zbin = (zbins[rc != 0]);
+
+ sz = (z >> 31); // sign of z
+ x = (z ^ sz) - sz;
+
+ if (x >= zbin) {
+ x += (round_ptr[rc != 0]);
+ x = clamp(x, INT16_MIN, INT16_MAX);
+ y = (((int)(((int)(x * quant_ptr[rc != 0]) >> 16) + x)) *
+ quant_shift_ptr[rc != 0]) >> 16; // quantize (x)
+ x = (y ^ sz) - sz; // get the sign back
+ qcoeff_ptr[rc] = x; // write to destination
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc != 0]; // dequantized value
+
+ if (y) {
+ eob = i; // last nonzero coeffs
+ }
+ }
+ }
+ }
+ *eob_ptr = eob + 1;
+}
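
The sign handling in the loop above leans on two's-complement identities: sz = z >> 31 is 0 for non-negative z and -1 otherwise, so (z ^ sz) - sz gives |z|, and re-applying the same transform to the quantized magnitude restores the sign. A standalone check:

    #include <stdio.h>

    int main(void) {
      const int samples[4] = {57, -57, 0, -1};
      for (int i = 0; i < 4; ++i) {
        const int z = samples[i];
        const int sz = z >> 31;       /* arithmetic shift: 0 or -1 */
        const int x = (z ^ sz) - sz;  /* |z| */
        printf("z=%4d  |z|=%3d  restored=%4d\n", z, x, (x ^ sz) - sz);
      }
      return 0;
    }
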
+
+void vp9_quantize_b_32x32_c(int16_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block,
+ int16_t *zbin_ptr, int16_t *round_ptr,
+ int16_t *quant_ptr, int16_t *quant_shift_ptr,
+ int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
+ int16_t *dequant_ptr, int zbin_oq_value,
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
+ int i, rc, eob;
+ int zbins[2], nzbins[2];
+ int x, y, z, sz;
+ int idx = 0;
+ int idx_arr[1024];
+
+ vpx_memset(qcoeff_ptr, 0, n_coeffs*sizeof(int16_t));
+ vpx_memset(dqcoeff_ptr, 0, n_coeffs*sizeof(int16_t));
+
+ eob = -1;
+
+ // Base ZBIN
+ zbins[0] = ROUND_POWER_OF_TWO(zbin_ptr[0] + zbin_oq_value, 1);
+ zbins[1] = ROUND_POWER_OF_TWO(zbin_ptr[1] + zbin_oq_value, 1);
+ nzbins[0] = zbins[0] * -1;
+ nzbins[1] = zbins[1] * -1;
+
+ if (!skip_block) {
+ // Pre-scan pass
+ for (i = 0; i < n_coeffs; i++) {
+ rc = scan[i];
+ z = coeff_ptr[rc];
+
+ // If the coefficient is out of the base ZBIN range, keep it for
+ // quantization.
+ if (z >= zbins[rc != 0] || z <= nzbins[rc != 0])
+ idx_arr[idx++] = i;
+ }
+
+ // Quantization pass: only process the coefficients selected in
+ // pre-scan pass. Note: idx can be zero.
+ for (i = 0; i < idx; i++) {
+ rc = scan[idx_arr[i]];
+
+ z = coeff_ptr[rc];
+ sz = (z >> 31); // sign of z
+ x = (z ^ sz) - sz; // x = abs(z)
+
+ x += ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
+ x = clamp(x, INT16_MIN, INT16_MAX);
+ y = ((((x * quant_ptr[rc != 0]) >> 16) + x) *
+ quant_shift_ptr[rc != 0]) >> 15; // quantize (x)
+
+ x = (y ^ sz) - sz; // get the sign back
+ qcoeff_ptr[rc] = x; // write to destination
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc != 0] / 2; // dequantized value
+
+ if (y)
+ eob = idx_arr[i]; // last nonzero coeffs
+ }
+ }
+ *eob_ptr = eob + 1;
+}
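+
+// A note on the 32x32 path above (a reading of the code, not normative):
+// it halves zbin and round via ROUND_POWER_OF_TWO(v, 1), shifts the
+// quantized value by 15 rather than 16, and halves the dequantized result,
+// so the whole pipeline behaves as if the quantizer step were half that
+// used for the smaller block sizes.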
+
+struct plane_block_idx {
+ int plane;
+ int block;
+};
+
+// TODO(jkoleszar): returning a struct so it can be used in a const context,
+// expect to refactor this further later.
+static INLINE struct plane_block_idx plane_block_idx(int y_blocks,
+ int b_idx) {
+ const int v_offset = y_blocks * 5 / 4;
+ struct plane_block_idx res;
+
+ if (b_idx < y_blocks) {
+ res.plane = 0;
+ res.block = b_idx;
+ } else if (b_idx < v_offset) {
+ res.plane = 1;
+ res.block = b_idx - y_blocks;
+ } else {
+ assert(b_idx < y_blocks * 3 / 2);
+ res.plane = 2;
+ res.block = b_idx - v_offset;
+ }
+ return res;
+}
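+
+// For illustration (example values only): with y_blocks = 16, i.e. a 16x16
+// luma area of 4x4 blocks at 4:2:0, v_offset = 20, so b_idx 0..15 map to
+// plane 0 (Y), 16..19 to plane 1 (U) and 20..23 to plane 2 (V).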
+
+void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx);
+ const int16_t *scan = get_scan_4x4(tx_type);
+ const int16_t *iscan = get_iscan_4x4(tx_type);
+
+ vp9_quantize_b(BLOCK_OFFSET(mb->plane[pb_idx.plane].coeff, pb_idx.block),
+ 16, mb->skip_block,
+ mb->plane[pb_idx.plane].zbin,
+ mb->plane[pb_idx.plane].round,
+ mb->plane[pb_idx.plane].quant,
+ mb->plane[pb_idx.plane].quant_shift,
+ BLOCK_OFFSET(xd->plane[pb_idx.plane].qcoeff, pb_idx.block),
+ BLOCK_OFFSET(xd->plane[pb_idx.plane].dqcoeff, pb_idx.block),
+ xd->plane[pb_idx.plane].dequant,
+ mb->plane[pb_idx.plane].zbin_extra,
+ &xd->plane[pb_idx.plane].eobs[pb_idx.block],
+ scan, iscan);
+}
+
+static void invert_quant(int16_t *quant, int16_t *shift, int d) {
+ unsigned t;
+ int l;
+ t = d;
+ for (l = 0; t > 1; l++)
+ t >>= 1;
+ t = 1 + (1 << (16 + l)) / d;
+ *quant = (int16_t)(t - (1 << 16));
+ *shift = 1 << (16 - l);
+}
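+
+// A rough worked example of the fixed-point reciprocal (values chosen for
+// illustration): for d = 12 the loop yields l = 3 and
+// t = 1 + (1 << 19) / 12 = 43691, so *quant = -21845 and *shift = 8192.
+// The quantizer then computes (((x * quant) >> 16) + x) * shift >> 16,
+// which is approximately x * 43691 / (1 << 19), i.e. x / 12.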
+
+void vp9_init_quantizer(VP9_COMP *cpi) {
+ int i, q;
+ VP9_COMMON *const cm = &cpi->common;
+
+ for (q = 0; q < QINDEX_RANGE; q++) {
+ const int qzbin_factor = q == 0 ? 64 : (vp9_dc_quant(q, 0) < 148 ? 84 : 80);
+ const int qrounding_factor = q == 0 ? 64 : 48;
+
+ // y
+ for (i = 0; i < 2; ++i) {
+ const int quant = i == 0 ? vp9_dc_quant(q, cm->y_dc_delta_q)
+ : vp9_ac_quant(q, 0);
+ invert_quant(&cpi->y_quant[q][i], &cpi->y_quant_shift[q][i], quant);
+ cpi->y_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
+ cpi->y_round[q][i] = (qrounding_factor * quant) >> 7;
+ cm->y_dequant[q][i] = quant;
+ }
+
+ // uv
+ for (i = 0; i < 2; ++i) {
+ const int quant = i == 0 ? vp9_dc_quant(q, cm->uv_dc_delta_q)
+ : vp9_ac_quant(q, cm->uv_ac_delta_q);
+ invert_quant(&cpi->uv_quant[q][i], &cpi->uv_quant_shift[q][i], quant);
+ cpi->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
+ cpi->uv_round[q][i] = (qrounding_factor * quant) >> 7;
+ cm->uv_dequant[q][i] = quant;
+ }
+
+#if CONFIG_ALPHA
+ // alpha
+ for (i = 0; i < 2; ++i) {
+ const int quant = i == 0 ? vp9_dc_quant(q, cm->a_dc_delta_q)
+ : vp9_ac_quant(q, cm->a_ac_delta_q);
+ invert_quant(&cpi->a_quant[q][i], &cpi->a_quant_shift[q][i], quant);
+ cpi->a_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
+ cpi->a_round[q][i] = (qrounding_factor * quant) >> 7;
+ cm->a_dequant[q][i] = quant;
+ }
+#endif
+
+ for (i = 2; i < 8; i++) {
+ cpi->y_quant[q][i] = cpi->y_quant[q][1];
+ cpi->y_quant_shift[q][i] = cpi->y_quant_shift[q][1];
+ cpi->y_zbin[q][i] = cpi->y_zbin[q][1];
+ cpi->y_round[q][i] = cpi->y_round[q][1];
+ cm->y_dequant[q][i] = cm->y_dequant[q][1];
+
+ cpi->uv_quant[q][i] = cpi->uv_quant[q][1];
+ cpi->uv_quant_shift[q][i] = cpi->uv_quant_shift[q][1];
+ cpi->uv_zbin[q][i] = cpi->uv_zbin[q][1];
+ cpi->uv_round[q][i] = cpi->uv_round[q][1];
+ cm->uv_dequant[q][i] = cm->uv_dequant[q][1];
+
+#if CONFIG_ALPHA
+ cpi->a_quant[q][i] = cpi->a_quant[q][1];
+ cpi->a_quant_shift[q][i] = cpi->a_quant_shift[q][1];
+ cpi->a_zbin[q][i] = cpi->a_zbin[q][1];
+ cpi->a_round[q][i] = cpi->a_round[q][1];
+ cm->a_dequant[q][i] = cm->a_dequant[q][1];
+#endif
+ }
+ }
+}
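+
+// Entries 0 and 1 of each table hold the DC and AC values; entries 2..7
+// replicate the AC value, presumably so that SIMD quantizers can load a
+// full 8-wide vector of per-coefficient parameters without special-casing
+// the DC position.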
+
+void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
+ int i;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int zbin_extra;
+ int segment_id = xd->this_mi->mbmi.segment_id;
+ const int qindex = vp9_get_qindex(&cpi->common.seg, segment_id,
+ cpi->common.base_qindex);
+
+ // Y
+ zbin_extra = (cpi->common.y_dequant[qindex][1] *
+ (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7;
+
+ x->plane[0].quant = cpi->y_quant[qindex];
+ x->plane[0].quant_shift = cpi->y_quant_shift[qindex];
+ x->plane[0].zbin = cpi->y_zbin[qindex];
+ x->plane[0].round = cpi->y_round[qindex];
+ x->plane[0].zbin_extra = (int16_t)zbin_extra;
+ x->e_mbd.plane[0].dequant = cpi->common.y_dequant[qindex];
+
+ // UV
+ zbin_extra = (cpi->common.uv_dequant[qindex][1] *
+ (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7;
+
+ for (i = 1; i < 3; i++) {
+ x->plane[i].quant = cpi->uv_quant[qindex];
+ x->plane[i].quant_shift = cpi->uv_quant_shift[qindex];
+ x->plane[i].zbin = cpi->uv_zbin[qindex];
+ x->plane[i].round = cpi->uv_round[qindex];
+ x->plane[i].zbin_extra = (int16_t)zbin_extra;
+ x->e_mbd.plane[i].dequant = cpi->common.uv_dequant[qindex];
+ }
+
+#if CONFIG_ALPHA
+ x->plane[3].quant = cpi->a_quant[qindex];
+ x->plane[3].quant_shift = cpi->a_quant_shift[qindex];
+ x->plane[3].zbin = cpi->a_zbin[qindex];
+ x->plane[3].round = cpi->a_round[qindex];
+ x->plane[3].zbin_extra = (int16_t)zbin_extra;
+ x->e_mbd.plane[3].dequant = cpi->common.a_dequant[qindex];
+#endif
+
+ x->skip_block = vp9_segfeature_active(&cpi->common.seg, segment_id,
+ SEG_LVL_SKIP);
+
+ /* save this macroblock QIndex for vp9_update_zbin_extra() */
+ x->e_mbd.q_index = qindex;
+}
+
+void vp9_update_zbin_extra(VP9_COMP *cpi, MACROBLOCK *x) {
+ const int qindex = x->e_mbd.q_index;
+ const int y_zbin_extra = (cpi->common.y_dequant[qindex][1] *
+ (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7;
+ const int uv_zbin_extra = (cpi->common.uv_dequant[qindex][1] *
+ (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7;
+
+ x->plane[0].zbin_extra = (int16_t)y_zbin_extra;
+ x->plane[1].zbin_extra = (int16_t)uv_zbin_extra;
+ x->plane[2].zbin_extra = (int16_t)uv_zbin_extra;
+}
+
+void vp9_frame_init_quantizer(VP9_COMP *cpi) {
+ // Clear Zbin mode boost for default case
+ cpi->zbin_mode_boost = 0;
+
+ // MB level quantizer setup
+ vp9_mb_init_quantizer(cpi, &cpi->mb);
+}
+
+void vp9_set_quantizer(struct VP9_COMP *cpi, int Q) {
+ VP9_COMMON *cm = &cpi->common;
+
+ cm->base_qindex = Q;
+
+  // If any of the delta_q values change, the update flag will
+  // have to be set.
+ cm->y_dc_delta_q = 0;
+ cm->uv_dc_delta_q = 0;
+ cm->uv_ac_delta_q = 0;
+
+  // The quantizer has to be reinitialized if any delta_q changes.
+  // As there are none here for now, this code is inactive.
+ // if(update)
+ // vp9_init_quantizer(cpi);
+}
diff --git a/libvpx/vp9/encoder/vp9_quantize.h b/libvpx/vp9/encoder/vp9_quantize.h
new file mode 100644
index 0000000..3229eaa
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_quantize.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_QUANTIZE_H_
+#define VP9_ENCODER_VP9_QUANTIZE_H_
+
+#include "vp9/encoder/vp9_block.h"
+
+#define prototype_quantize_block(sym) \
+ void (sym)(MACROBLOCK *mb, int b_idx)
+
+#define prototype_quantize_block_pair(sym) \
+ void (sym)(MACROBLOCK *mb, int b_idx1, int b_idx2)
+
+#define prototype_quantize_mb(sym) \
+ void (sym)(MACROBLOCK *x)
+
+void vp9_regular_quantize_b_4x4_pair(MACROBLOCK *mb, int b_idx1, int b_idx2,
+ int y_blocks);
+void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
+void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
+struct VP9_COMP;
+
+extern void vp9_set_quantizer(struct VP9_COMP *cpi, int Q);
+
+extern void vp9_frame_init_quantizer(struct VP9_COMP *cpi);
+
+extern void vp9_update_zbin_extra(struct VP9_COMP *cpi, MACROBLOCK *x);
+
+extern void vp9_mb_init_quantizer(struct VP9_COMP *cpi, MACROBLOCK *x);
+
+extern void vp9_init_quantizer(struct VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_QUANTIZE_H_
diff --git a/libvpx/vp9/encoder/vp9_ratectrl.c b/libvpx/vp9/encoder/vp9_ratectrl.c
new file mode 100644
index 0000000..2d12ba9
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_ratectrl.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <limits.h>
+#include <assert.h>
+#include <math.h>
+
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_seg_common.h"
+
+#define MIN_BPB_FACTOR 0.005
+#define MAX_BPB_FACTOR 50
+
+// Bits Per MB at different Q (Multiplied by 512)
+#define BPER_MB_NORMBITS 9
+
+static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] =
+ { 1, 2, 3, 4, 5 };
+
+// These functions use formulaic calculations to make playing with the
+// quantizer tables easier. If necessary they can be replaced by lookup
+// tables if and when things settle down in the experimental bitstream.
+double vp9_convert_qindex_to_q(int qindex) {
+ // Convert the index to a real Q value (scaled down to match old Q values)
+ return vp9_ac_quant(qindex, 0) / 4.0;
+}
+
+int vp9_gfboost_qadjust(int qindex) {
+ const double q = vp9_convert_qindex_to_q(qindex);
+ return (int)((0.00000828 * q * q * q) +
+ (-0.0055 * q * q) +
+ (1.32 * q) + 79.3);
+}
+
+static int kfboost_qadjust(int qindex) {
+ const double q = vp9_convert_qindex_to_q(qindex);
+ return (int)((0.00000973 * q * q * q) +
+ (-0.00613 * q * q) +
+ (1.316 * q) + 121.2);
+}
+
+int vp9_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+ double correction_factor) {
+ const double q = vp9_convert_qindex_to_q(qindex);
+ int enumerator = frame_type == KEY_FRAME ? 4000000 : 2500000;
+
+  // q-based adjustment to the baseline enumerator
+ enumerator += (int)(enumerator * q) >> 12;
+ return (int)(0.5 + (enumerator * correction_factor / q));
+}
+
+void vp9_save_coding_context(VP9_COMP *cpi) {
+ CODING_CONTEXT *const cc = &cpi->coding_context;
+ VP9_COMMON *cm = &cpi->common;
+
+ // Stores a snapshot of key state variables which can subsequently be
+ // restored with a call to vp9_restore_coding_context. These functions are
+ // intended for use in a re-code loop in vp9_compress_frame where the
+ // quantizer value is adjusted between loop iterations.
+
+ cc->nmvc = cm->fc.nmvc;
+ vp9_copy(cc->nmvjointcost, cpi->mb.nmvjointcost);
+ vp9_copy(cc->nmvcosts, cpi->mb.nmvcosts);
+ vp9_copy(cc->nmvcosts_hp, cpi->mb.nmvcosts_hp);
+
+ vp9_copy(cc->inter_mode_probs, cm->fc.inter_mode_probs);
+
+ vp9_copy(cc->y_mode_prob, cm->fc.y_mode_prob);
+ vp9_copy(cc->uv_mode_prob, cm->fc.uv_mode_prob);
+ vp9_copy(cc->partition_prob, cm->fc.partition_prob);
+
+ vp9_copy(cc->segment_pred_probs, cm->seg.pred_probs);
+
+ vp9_copy(cc->intra_inter_prob, cm->fc.intra_inter_prob);
+ vp9_copy(cc->comp_inter_prob, cm->fc.comp_inter_prob);
+ vp9_copy(cc->single_ref_prob, cm->fc.single_ref_prob);
+ vp9_copy(cc->comp_ref_prob, cm->fc.comp_ref_prob);
+
+ vpx_memcpy(cpi->coding_context.last_frame_seg_map_copy,
+ cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
+
+ vp9_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
+ vp9_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
+
+ vp9_copy(cc->coef_probs, cm->fc.coef_probs);
+ vp9_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
+ cc->tx_probs = cm->fc.tx_probs;
+ vp9_copy(cc->mbskip_probs, cm->fc.mbskip_probs);
+}
+
+void vp9_restore_coding_context(VP9_COMP *cpi) {
+ CODING_CONTEXT *const cc = &cpi->coding_context;
+ VP9_COMMON *cm = &cpi->common;
+
+ // Restore key state variables to the snapshot state stored in the
+ // previous call to vp9_save_coding_context.
+
+ cm->fc.nmvc = cc->nmvc;
+ vp9_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
+ vp9_copy(cpi->mb.nmvcosts, cc->nmvcosts);
+ vp9_copy(cpi->mb.nmvcosts_hp, cc->nmvcosts_hp);
+
+ vp9_copy(cm->fc.inter_mode_probs, cc->inter_mode_probs);
+
+ vp9_copy(cm->fc.y_mode_prob, cc->y_mode_prob);
+ vp9_copy(cm->fc.uv_mode_prob, cc->uv_mode_prob);
+ vp9_copy(cm->fc.partition_prob, cc->partition_prob);
+
+ vp9_copy(cm->seg.pred_probs, cc->segment_pred_probs);
+
+ vp9_copy(cm->fc.intra_inter_prob, cc->intra_inter_prob);
+ vp9_copy(cm->fc.comp_inter_prob, cc->comp_inter_prob);
+ vp9_copy(cm->fc.single_ref_prob, cc->single_ref_prob);
+ vp9_copy(cm->fc.comp_ref_prob, cc->comp_ref_prob);
+
+ vpx_memcpy(cm->last_frame_seg_map,
+ cpi->coding_context.last_frame_seg_map_copy,
+ (cm->mi_rows * cm->mi_cols));
+
+ vp9_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
+ vp9_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
+
+ vp9_copy(cm->fc.coef_probs, cc->coef_probs);
+ vp9_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
+ cm->fc.tx_probs = cc->tx_probs;
+ vp9_copy(cm->fc.mbskip_probs, cc->mbskip_probs);
+}
+
+void vp9_setup_key_frame(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ vp9_setup_past_independence(cm);
+
+ // interval before next GF
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ /* All buffers are implicitly updated on key frames. */
+ cpi->refresh_golden_frame = 1;
+ cpi->refresh_alt_ref_frame = 1;
+}
+
+void vp9_setup_inter_frame(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ if (cm->error_resilient_mode || cm->intra_only)
+ vp9_setup_past_independence(cm);
+
+ assert(cm->frame_context_idx < NUM_FRAME_CONTEXTS);
+ cm->fc = cm->frame_contexts[cm->frame_context_idx];
+}
+
+static int estimate_bits_at_q(int frame_kind, int q, int mbs,
+ double correction_factor) {
+ const int bpm = (int)(vp9_bits_per_mb(frame_kind, q, correction_factor));
+
+ // Attempt to retain reasonable accuracy without overflow. The cutoff is
+ // chosen such that the maximum product of Bpm and MBs fits 31 bits. The
+ // largest Bpm takes 20 bits.
+ return (mbs > (1 << 11)) ? (bpm >> BPER_MB_NORMBITS) * mbs
+ : (bpm * mbs) >> BPER_MB_NORMBITS;
+}
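+
+// For illustration (frame size hypothetical): a 1920x1088 frame has
+// 120 * 68 = 8160 macroblocks, which exceeds 1 << 11, so the product is
+// formed as (bpm >> BPER_MB_NORMBITS) * mbs, keeping the result within
+// 31 bits even when bpm is near its ~20-bit maximum.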
+
+
+static void calc_iframe_target_size(VP9_COMP *cpi) {
+ // boost defaults to half second
+ int target;
+
+ // Clear down mmx registers to allow floating point in what follows
+ vp9_clear_system_state(); // __asm emms;
+
+ // New Two pass RC
+ target = cpi->per_frame_bandwidth;
+
+ if (cpi->oxcf.rc_max_intra_bitrate_pct) {
+ int max_rate = cpi->per_frame_bandwidth
+ * cpi->oxcf.rc_max_intra_bitrate_pct / 100;
+
+ if (target > max_rate)
+ target = max_rate;
+ }
+
+ cpi->this_frame_target = target;
+}
+
+
+// Do the best we can to define the parameters for the next GF based
+// on what information we have available.
+//
+// In this experimental code only two-pass encoding is supported,
+// so we just use the interval determined in the two-pass code.
+static void calc_gf_params(VP9_COMP *cpi) {
+ // Set the gf interval
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+}
+
+
+static void calc_pframe_target_size(VP9_COMP *cpi) {
+ const int min_frame_target = MAX(cpi->min_frame_bandwidth,
+ cpi->av_per_frame_bandwidth >> 5);
+ if (cpi->refresh_alt_ref_frame) {
+ // Special alt reference frame case
+ // Per frame bit target for the alt ref frame
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ } else {
+    // Normal frames (gf and inter)
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ }
+
+  // Sanity check that the total sum of adjustments is not above the maximum
+  // allowed. That is, having allowed for the KF and GF penalties, we have
+  // not pushed the current inter-frame target too low. If the adjustment we
+  // apply here cannot recover all the extra bits spent on the KF or GF, the
+  // remainder will have to be recovered over a longer time span via other
+  // buffer / rate control mechanisms.
+ if (cpi->this_frame_target < min_frame_target)
+ cpi->this_frame_target = min_frame_target;
+
+ if (!cpi->refresh_alt_ref_frame)
+ // Note the baseline target data rate for this inter frame.
+ cpi->inter_frame_target = cpi->this_frame_target;
+
+ // Adjust target frame size for Golden Frames:
+ if (cpi->frames_till_gf_update_due == 0) {
+ const int q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME]
+ : cpi->oxcf.fixed_q;
+
+ cpi->refresh_golden_frame = 1;
+
+ calc_gf_params(cpi);
+
+    // If we are using an alternate ref instead of a gf then do not apply the
+    // boost; it will instead be applied to the altref update.
+    // Jim's modified boost
+ if (!cpi->source_alt_ref_active) {
+ if (cpi->oxcf.fixed_q < 0) {
+ // The spend on the GF is defined in the two pass code
+ // for two pass encodes
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ } else {
+ cpi->this_frame_target =
+ (estimate_bits_at_q(1, q, cpi->common.MBs, 1.0)
+ * cpi->last_boost) / 100;
+ }
+ } else {
+ // If there is an active ARF at this location use the minimum
+ // bits on this frame even if it is a constructed arf.
+      // The active maximum quantizer ensures that an appropriate
+ // number of bits will be spent if needed for constructed ARFs.
+ cpi->this_frame_target = 0;
+ }
+ }
+}
+
+
+void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) {
+ const int q = cpi->common.base_qindex;
+ int correction_factor = 100;
+ double rate_correction_factor;
+ double adjustment_limit;
+
+ int projected_size_based_on_q = 0;
+
+ // Clear down mmx registers to allow floating point in what follows
+ vp9_clear_system_state(); // __asm emms;
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ rate_correction_factor = cpi->key_frame_rate_correction_factor;
+ } else {
+ if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame)
+ rate_correction_factor = cpi->gf_rate_correction_factor;
+ else
+ rate_correction_factor = cpi->rate_correction_factor;
+ }
+
+ // Work out how big we would have expected the frame to be at this Q given
+ // the current correction factor.
+ // Stay in double to avoid int overflow when values are large
+ projected_size_based_on_q = estimate_bits_at_q(cpi->common.frame_type, q,
+ cpi->common.MBs,
+ rate_correction_factor);
+
+ // Work out a size correction factor.
+  // if (cpi->this_frame_target > 0)
+  //   correction_factor =
+  //       (100 * cpi->projected_frame_size) / cpi->this_frame_target;
+  if (projected_size_based_on_q > 0)
+    correction_factor =
+        (100 * cpi->projected_frame_size) / projected_size_based_on_q;
+
+  // A more heavily damped adjustment is used if we have been oscillating
+  // either side of the target.
+ switch (damp_var) {
+ case 0:
+ adjustment_limit = 0.75;
+ break;
+ case 1:
+ adjustment_limit = 0.375;
+ break;
+ case 2:
+ default:
+ adjustment_limit = 0.25;
+ break;
+ }
+
+ // if ( (correction_factor > 102) && (Q < cpi->active_worst_quality) )
+ if (correction_factor > 102) {
+ // We are not already at the worst allowable quality
+ correction_factor = (int)(100.5 + ((correction_factor - 100) * adjustment_limit));
+ rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
+
+ // Keep rate_correction_factor within limits
+ if (rate_correction_factor > MAX_BPB_FACTOR)
+ rate_correction_factor = MAX_BPB_FACTOR;
+ }
+ // else if ( (correction_factor < 99) && (Q > cpi->active_best_quality) )
+ else if (correction_factor < 99) {
+ // We are not already at the best allowable quality
+ correction_factor = (int)(100.5 - ((100 - correction_factor) * adjustment_limit));
+ rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
+
+ // Keep rate_correction_factor within limits
+ if (rate_correction_factor < MIN_BPB_FACTOR)
+ rate_correction_factor = MIN_BPB_FACTOR;
+ }
+
+ if (cpi->common.frame_type == KEY_FRAME)
+ cpi->key_frame_rate_correction_factor = rate_correction_factor;
+ else {
+ if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame)
+ cpi->gf_rate_correction_factor = rate_correction_factor;
+ else
+ cpi->rate_correction_factor = rate_correction_factor;
+ }
+}
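+
+// Taken together, the above is a damped multiplicative feedback loop: the
+// ratio of actual to predicted frame size nudges the per-frame-type
+// correction factor up or down by at most adjustment_limit of the error,
+// and the factor is clamped to [MIN_BPB_FACTOR, MAX_BPB_FACTOR].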
+
+
+int vp9_regulate_q(VP9_COMP *cpi, int target_bits_per_frame) {
+ int q = cpi->active_worst_quality;
+
+ int i;
+ int last_error = INT_MAX;
+ int target_bits_per_mb;
+ int bits_per_mb_at_this_q;
+ double correction_factor;
+
+ // Select the appropriate correction factor based upon type of frame.
+ if (cpi->common.frame_type == KEY_FRAME)
+ correction_factor = cpi->key_frame_rate_correction_factor;
+ else {
+ if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame)
+ correction_factor = cpi->gf_rate_correction_factor;
+ else
+ correction_factor = cpi->rate_correction_factor;
+ }
+
+  // Calculate the required scaling factor based on the target frame size and
+  // the size of the frame produced using the previous Q.
+  if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS))
+    // Case where we would overflow int
+    target_bits_per_mb =
+        (target_bits_per_frame / cpi->common.MBs) << BPER_MB_NORMBITS;
+  else
+    target_bits_per_mb =
+        (target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs;
+
+ i = cpi->active_best_quality;
+
+ do {
+ bits_per_mb_at_this_q = (int)vp9_bits_per_mb(cpi->common.frame_type, i,
+ correction_factor);
+
+ if (bits_per_mb_at_this_q <= target_bits_per_mb) {
+ if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error)
+ q = i;
+ else
+ q = i - 1;
+
+ break;
+ } else {
+ last_error = bits_per_mb_at_this_q - target_bits_per_mb;
+ }
+ } while (++i <= cpi->active_worst_quality);
+
+ return q;
+}
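+
+// In effect this is a linear search upward from active_best_quality: Q is
+// raised until the correction-scaled bits-per-MB estimate falls to the
+// target, and the closer of the two bracketing values is returned (or
+// active_worst_quality if the target is never reached).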
+
+
+static int estimate_keyframe_frequency(VP9_COMP *cpi) {
+ int i;
+
+ // Average key frame frequency
+ int av_key_frame_frequency = 0;
+
+ /* First key frame at start of sequence is a special case. We have no
+ * frequency data.
+ */
+ if (cpi->key_frame_count == 1) {
+ /* Assume a default of 1 kf every 2 seconds, or the max kf interval,
+ * whichever is smaller.
+ */
+ int key_freq = cpi->oxcf.key_freq > 0 ? cpi->oxcf.key_freq : 1;
+ av_key_frame_frequency = (int)cpi->output_framerate * 2;
+
+ if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
+ av_key_frame_frequency = cpi->oxcf.key_freq;
+
+ cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1]
+ = av_key_frame_frequency;
+ } else {
+ unsigned int total_weight = 0;
+ int last_kf_interval =
+ (cpi->frames_since_key > 0) ? cpi->frames_since_key : 1;
+
+ /* reset keyframe context and calculate weighted average of last
+ * KEY_FRAME_CONTEXT keyframes
+ */
+ for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
+ if (i < KEY_FRAME_CONTEXT - 1)
+ cpi->prior_key_frame_distance[i]
+ = cpi->prior_key_frame_distance[i + 1];
+ else
+ cpi->prior_key_frame_distance[i] = last_kf_interval;
+
+ av_key_frame_frequency += prior_key_frame_weight[i]
+ * cpi->prior_key_frame_distance[i];
+ total_weight += prior_key_frame_weight[i];
+ }
+
+ av_key_frame_frequency /= total_weight;
+ }
+ return av_key_frame_frequency;
+}
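+
+// For illustration (intervals hypothetical): with the last five key-frame
+// distances {30, 30, 30, 30, 60}, newest last, the weighted average is
+// (1*30 + 2*30 + 3*30 + 4*30 + 5*60) / 15 = 40 frames, so recent intervals
+// dominate the estimate.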
+
+
+void vp9_adjust_key_frame_context(VP9_COMP *cpi) {
+ // Clear down mmx registers to allow floating point in what follows
+ vp9_clear_system_state();
+
+ cpi->frames_since_key = 0;
+ cpi->key_frame_count++;
+}
+
+
+void vp9_compute_frame_size_bounds(VP9_COMP *cpi, int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit) {
+ // Set-up bounds on acceptable frame size:
+ if (cpi->oxcf.fixed_q >= 0) {
+ // Fixed Q scenario: frame size never outranges target (there is no target!)
+ *frame_under_shoot_limit = 0;
+ *frame_over_shoot_limit = INT_MAX;
+ } else {
+ if (cpi->common.frame_type == KEY_FRAME) {
+ *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
+ } else {
+ if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) {
+ *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
+ } else {
+        // Strong overshoot limit for constrained quality
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 2 / 8;
+ } else {
+ *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 5 / 8;
+ }
+ }
+ }
+
+ // For very small rate targets where the fractional adjustment
+    // (e.g. * 7/8) may be tiny, make sure there is at least a minimum
+ // range.
+ *frame_over_shoot_limit += 200;
+ *frame_under_shoot_limit -= 200;
+ if (*frame_under_shoot_limit < 0)
+ *frame_under_shoot_limit = 0;
+ }
+}
+
+
+// return of 0 means drop frame
+int vp9_pick_frame_size(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ if (cm->frame_type == KEY_FRAME)
+ calc_iframe_target_size(cpi);
+ else
+ calc_pframe_target_size(cpi);
+
+ return 1;
+}
diff --git a/libvpx/vp9/encoder/vp9_ratectrl.h b/libvpx/vp9/encoder/vp9_ratectrl.h
new file mode 100644
index 0000000..4733176
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_ratectrl.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_RATECTRL_H_
+#define VP9_ENCODER_VP9_RATECTRL_H_
+
+#include "vp9/encoder/vp9_onyx_int.h"
+
+#define FRAME_OVERHEAD_BITS 200
+
+void vp9_save_coding_context(VP9_COMP *cpi);
+void vp9_restore_coding_context(VP9_COMP *cpi);
+
+void vp9_setup_key_frame(VP9_COMP *cpi);
+void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var);
+int vp9_regulate_q(VP9_COMP *cpi, int target_bits_per_frame);
+void vp9_adjust_key_frame_context(VP9_COMP *cpi);
+void vp9_compute_frame_size_bounds(VP9_COMP *cpi,
+ int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit);
+
+// return of 0 means drop frame
+int vp9_pick_frame_size(VP9_COMP *cpi);
+
+double vp9_convert_qindex_to_q(int qindex);
+int vp9_gfboost_qadjust(int qindex);
+extern int vp9_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+ double correction_factor);
+void vp9_setup_inter_frame(VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_RATECTRL_H_
diff --git a/libvpx/vp9/encoder/vp9_rdopt.c b/libvpx/vp9/encoder/vp9_rdopt.c
new file mode 100644
index 0000000..df00334
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_rdopt.c
@@ -0,0 +1,4050 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <math.h>
+#include <limits.h>
+#include <assert.h>
+
+#include "vp9/common/vp9_pragmas.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "vp9/encoder/vp9_treewriter.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_modecosts.h"
+#include "vp9/encoder/vp9_encodeintra.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/common/vp9_common.h"
+
+#define INVALID_MV 0x80008000
+
+/* Factor to weigh the rate for switchable interp filters */
+#define SWITCHABLE_INTERP_RATE_FACTOR 1
+
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+
+#define LAST_FRAME_MODE_MASK 0xFFDADCD60
+#define GOLDEN_FRAME_MODE_MASK 0xFFB5A3BB0
+#define ALT_REF_MODE_MASK 0xFF8C648D0
+
+const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
+ {RD_NEARESTMV, LAST_FRAME, NONE},
+ {RD_NEARESTMV, ALTREF_FRAME, NONE},
+ {RD_NEARESTMV, GOLDEN_FRAME, NONE},
+
+ {RD_DC_PRED, INTRA_FRAME, NONE},
+
+ {RD_NEWMV, LAST_FRAME, NONE},
+ {RD_NEWMV, ALTREF_FRAME, NONE},
+ {RD_NEWMV, GOLDEN_FRAME, NONE},
+
+ {RD_NEARMV, LAST_FRAME, NONE},
+ {RD_NEARMV, ALTREF_FRAME, NONE},
+ {RD_NEARESTMV, LAST_FRAME, ALTREF_FRAME},
+ {RD_NEARESTMV, GOLDEN_FRAME, ALTREF_FRAME},
+
+ {RD_TM_PRED, INTRA_FRAME, NONE},
+
+ {RD_NEARMV, LAST_FRAME, ALTREF_FRAME},
+ {RD_NEWMV, LAST_FRAME, ALTREF_FRAME},
+ {RD_NEARMV, GOLDEN_FRAME, NONE},
+ {RD_NEARMV, GOLDEN_FRAME, ALTREF_FRAME},
+ {RD_NEWMV, GOLDEN_FRAME, ALTREF_FRAME},
+
+ {RD_SPLITMV, LAST_FRAME, NONE},
+ {RD_SPLITMV, GOLDEN_FRAME, NONE},
+ {RD_SPLITMV, ALTREF_FRAME, NONE},
+ {RD_SPLITMV, LAST_FRAME, ALTREF_FRAME},
+ {RD_SPLITMV, GOLDEN_FRAME, ALTREF_FRAME},
+
+ {RD_ZEROMV, LAST_FRAME, NONE},
+ {RD_ZEROMV, GOLDEN_FRAME, NONE},
+ {RD_ZEROMV, ALTREF_FRAME, NONE},
+ {RD_ZEROMV, LAST_FRAME, ALTREF_FRAME},
+ {RD_ZEROMV, GOLDEN_FRAME, ALTREF_FRAME},
+
+ {RD_I4X4_PRED, INTRA_FRAME, NONE},
+ {RD_H_PRED, INTRA_FRAME, NONE},
+ {RD_V_PRED, INTRA_FRAME, NONE},
+ {RD_D135_PRED, INTRA_FRAME, NONE},
+ {RD_D207_PRED, INTRA_FRAME, NONE},
+ {RD_D153_PRED, INTRA_FRAME, NONE},
+ {RD_D63_PRED, INTRA_FRAME, NONE},
+ {RD_D117_PRED, INTRA_FRAME, NONE},
+ {RD_D45_PRED, INTRA_FRAME, NONE},
+};
+
+// The baseline rd thresholds for breaking out of the rd loop for
+// certain modes are assumed to be based on 8x8 blocks.
+// This table is used to correct for blocks size.
+// The factors here are << 2 (2 = x0.5, 32 = x8 etc).
+static int rd_thresh_block_size_factor[BLOCK_SIZES] =
+ {2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32};
+
+#define MAX_RD_THRESH_FACT 64
+#define RD_THRESH_INC 1
+
+static void fill_token_costs(vp9_coeff_cost *c,
+ vp9_coeff_probs_model (*p)[BLOCK_TYPES]) {
+ int i, j, k, l;
+ TX_SIZE t;
+ for (t = TX_4X4; t <= TX_32X32; t++)
+ for (i = 0; i < BLOCK_TYPES; i++)
+ for (j = 0; j < REF_TYPES; j++)
+ for (k = 0; k < COEF_BANDS; k++)
+ for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
+ vp9_prob probs[ENTROPY_NODES];
+ vp9_model_to_full_probs(p[t][i][j][k][l], probs);
+ vp9_cost_tokens((int *)c[t][i][j][k][0][l], probs,
+ vp9_coef_tree);
+ vp9_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
+ vp9_coef_tree);
+ assert(c[t][i][j][k][0][l][DCT_EOB_TOKEN] ==
+ c[t][i][j][k][1][l][DCT_EOB_TOKEN]);
+ }
+}
+
+static const int rd_iifactor[32] = {
+ 4, 4, 3, 2, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+// 3* dc_qlookup[Q]*dc_qlookup[Q];
+
+/* values are now correlated to quantizer */
+static int sad_per_bit16lut[QINDEX_RANGE];
+static int sad_per_bit4lut[QINDEX_RANGE];
+
+void vp9_init_me_luts() {
+ int i;
+
+ // Initialize the sad lut tables using a formulaic calculation for now
+ // This is to make it easier to resolve the impact of experimental changes
+ // to the quantizer tables.
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ sad_per_bit16lut[i] =
+ (int)((0.0418 * vp9_convert_qindex_to_q(i)) + 2.4107);
+ sad_per_bit4lut[i] = (int)(0.063 * vp9_convert_qindex_to_q(i) + 2.742);
+ }
+}
+
+static int compute_rd_mult(int qindex) {
+ const int q = vp9_dc_quant(qindex, 0);
+ return (11 * q * q) >> 2;
+}
+
+static MB_PREDICTION_MODE rd_mode_to_mode(RD_PREDICTION_MODE rd_mode) {
+ if (rd_mode == RD_SPLITMV || rd_mode == RD_I4X4_PRED) {
+ assert(!"Invalid rd_mode");
+ return MB_MODE_COUNT;
+ }
+ assert((int)rd_mode < (int)MB_MODE_COUNT);
+ return (MB_PREDICTION_MODE)rd_mode;
+}
+
+void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex) {
+ cpi->mb.sadperbit16 = sad_per_bit16lut[qindex];
+ cpi->mb.sadperbit4 = sad_per_bit4lut[qindex];
+}
+
+
+void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex) {
+ int q, i, bsize;
+
+ vp9_clear_system_state(); // __asm emms;
+
+ // Further tests required to see if optimum is different
+ // for key frames, golden frames and arf frames.
+ // if (cpi->common.refresh_golden_frame ||
+ // cpi->common.refresh_alt_ref_frame)
+ qindex = clamp(qindex, 0, MAXQ);
+
+ cpi->RDMULT = compute_rd_mult(qindex);
+ if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
+ if (cpi->twopass.next_iiratio > 31)
+ cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
+ else
+ cpi->RDMULT +=
+ (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
+ }
+ cpi->mb.errorperbit = cpi->RDMULT >> 6;
+ cpi->mb.errorperbit += (cpi->mb.errorperbit == 0);
+
+ vp9_set_speed_features(cpi);
+
+ q = (int)pow(vp9_dc_quant(qindex, 0) >> 2, 1.25);
+ q <<= 2;
+ if (q < 8)
+ q = 8;
+
+ if (cpi->RDMULT > 1000) {
+ cpi->RDDIV = 1;
+ cpi->RDMULT /= 100;
+
+ for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) {
+ for (i = 0; i < MAX_MODES; ++i) {
+        // Thresholds here seem unnecessarily harsh but are fine given the
+        // actual range of values used for cpi->sf.thresh_mult[]
+ int thresh_max = INT_MAX / (q * rd_thresh_block_size_factor[bsize]);
+
+ // *4 relates to the scaling of rd_thresh_block_size_factor[]
+ if ((int64_t)cpi->sf.thresh_mult[i] < thresh_max) {
+ cpi->rd_threshes[bsize][i] =
+ cpi->sf.thresh_mult[i] * q *
+ rd_thresh_block_size_factor[bsize] / (4 * 100);
+ } else {
+ cpi->rd_threshes[bsize][i] = INT_MAX;
+ }
+ }
+ }
+ } else {
+ cpi->RDDIV = 100;
+
+ for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) {
+ for (i = 0; i < MAX_MODES; i++) {
+        // Thresholds here seem unnecessarily harsh but are fine given the
+        // actual range of values used for cpi->sf.thresh_mult[]
+ int thresh_max = INT_MAX / (q * rd_thresh_block_size_factor[bsize]);
+
+ if (cpi->sf.thresh_mult[i] < thresh_max) {
+ cpi->rd_threshes[bsize][i] =
+ cpi->sf.thresh_mult[i] * q *
+ rd_thresh_block_size_factor[bsize] / 4;
+ } else {
+ cpi->rd_threshes[bsize][i] = INT_MAX;
+ }
+ }
+ }
+ }
+
+ fill_token_costs(cpi->mb.token_costs, cpi->common.fc.coef_probs);
+
+ for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
+ vp9_cost_tokens(cpi->mb.partition_cost[i],
+ cpi->common.fc.partition_prob[cpi->common.frame_type][i],
+ vp9_partition_tree);
+
+  /* rough estimate for costing */
+ vp9_init_mode_costs(cpi);
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ vp9_build_nmv_cost_table(
+ cpi->mb.nmvjointcost,
+ cpi->mb.e_mbd.allow_high_precision_mv ?
+ cpi->mb.nmvcost_hp : cpi->mb.nmvcost,
+ &cpi->common.fc.nmvc,
+ cpi->mb.e_mbd.allow_high_precision_mv, 1, 1);
+
+ for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
+ MB_PREDICTION_MODE m;
+
+ for (m = NEARESTMV; m < MB_MODE_COUNT; m++)
+ cpi->mb.inter_mode_cost[i][m - NEARESTMV] =
+ cost_token(vp9_inter_mode_tree,
+ cpi->common.fc.inter_mode_probs[i],
+ vp9_inter_mode_encodings - NEARESTMV + m);
+ }
+ }
+}
+
+static INLINE void linear_interpolate2(double x, int ntab, int inv_step,
+ const double *tab1, const double *tab2,
+ double *v1, double *v2) {
+ double y = x * inv_step;
+ int d = (int) y;
+ if (d >= ntab - 1) {
+ *v1 = tab1[ntab - 1];
+ *v2 = tab2[ntab - 1];
+ } else {
+ double a = y - d;
+ *v1 = tab1[d] * (1 - a) + tab1[d + 1] * a;
+ *v2 = tab2[d] * (1 - a) + tab2[d + 1] * a;
+ }
+}
+
+static void model_rd_norm(double x, double *R, double *D) {
+ static const int inv_tab_step = 8;
+ static const int tab_size = 120;
+ // NOTE: The tables below must be of the same size
+ //
+ // Normalized rate
+  // This table models the rate for a Laplacian source with given variance
+  // when quantized with a uniform quantizer with given stepsize. The
+  // closed-form expression is:
+ // Rn(x) = H(sqrt(r)) + sqrt(r)*[1 + H(r)/(1 - r)],
+ // where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance),
+ // and H(x) is the binary entropy function.
+ static const double rate_tab[] = {
+ 64.00, 4.944, 3.949, 3.372, 2.966, 2.655, 2.403, 2.194,
+ 2.014, 1.858, 1.720, 1.596, 1.485, 1.384, 1.291, 1.206,
+ 1.127, 1.054, 0.986, 0.923, 0.863, 0.808, 0.756, 0.708,
+ 0.662, 0.619, 0.579, 0.541, 0.506, 0.473, 0.442, 0.412,
+ 0.385, 0.359, 0.335, 0.313, 0.291, 0.272, 0.253, 0.236,
+ 0.220, 0.204, 0.190, 0.177, 0.165, 0.153, 0.142, 0.132,
+ 0.123, 0.114, 0.106, 0.099, 0.091, 0.085, 0.079, 0.073,
+ 0.068, 0.063, 0.058, 0.054, 0.050, 0.047, 0.043, 0.040,
+ 0.037, 0.034, 0.032, 0.029, 0.027, 0.025, 0.023, 0.022,
+ 0.020, 0.019, 0.017, 0.016, 0.015, 0.014, 0.013, 0.012,
+ 0.011, 0.010, 0.009, 0.008, 0.008, 0.007, 0.007, 0.006,
+ 0.006, 0.005, 0.005, 0.005, 0.004, 0.004, 0.004, 0.003,
+ 0.003, 0.003, 0.003, 0.002, 0.002, 0.002, 0.002, 0.002,
+ 0.002, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
+ 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.000,
+ };
+ // Normalized distortion
+  // This table models the normalized distortion for a Laplacian source with
+  // given variance when quantized with a uniform quantizer with given
+  // stepsize. The closed-form expression is:
+ // Dn(x) = 1 - 1/sqrt(2) * x / sinh(x/sqrt(2))
+ // where x = qpstep / sqrt(variance)
+ // Note the actual distortion is Dn * variance.
+ static const double dist_tab[] = {
+ 0.000, 0.001, 0.005, 0.012, 0.021, 0.032, 0.045, 0.061,
+ 0.079, 0.098, 0.119, 0.142, 0.166, 0.190, 0.216, 0.242,
+ 0.269, 0.296, 0.324, 0.351, 0.378, 0.405, 0.432, 0.458,
+ 0.484, 0.509, 0.534, 0.557, 0.580, 0.603, 0.624, 0.645,
+ 0.664, 0.683, 0.702, 0.719, 0.735, 0.751, 0.766, 0.780,
+ 0.794, 0.807, 0.819, 0.830, 0.841, 0.851, 0.861, 0.870,
+ 0.878, 0.886, 0.894, 0.901, 0.907, 0.913, 0.919, 0.925,
+ 0.930, 0.935, 0.939, 0.943, 0.947, 0.951, 0.954, 0.957,
+ 0.960, 0.963, 0.966, 0.968, 0.971, 0.973, 0.975, 0.976,
+ 0.978, 0.980, 0.981, 0.982, 0.984, 0.985, 0.986, 0.987,
+ 0.988, 0.989, 0.990, 0.990, 0.991, 0.992, 0.992, 0.993,
+ 0.993, 0.994, 0.994, 0.995, 0.995, 0.996, 0.996, 0.996,
+ 0.996, 0.997, 0.997, 0.997, 0.997, 0.998, 0.998, 0.998,
+ 0.998, 0.998, 0.998, 0.999, 0.999, 0.999, 0.999, 0.999,
+ 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 1.000,
+ };
+ /*
+  assert(sizeof(rate_tab) == tab_size * sizeof(rate_tab[0]));
+  assert(sizeof(dist_tab) == tab_size * sizeof(dist_tab[0]));
+ assert(sizeof(rate_tab) == sizeof(dist_tab));
+ */
+ assert(x >= 0.0);
+ linear_interpolate2(x, tab_size, inv_tab_step,
+ rate_tab, dist_tab, R, D);
+}
+
+static void model_rd_from_var_lapndz(int var, int n, int qstep,
+ int *rate, int64_t *dist) {
+ // This function models the rate and distortion for a Laplacian
+ // source with given variance when quantized with a uniform quantizer
+ // with given stepsize. The closed form expressions are in:
+ // Hang and Chen, "Source Model for transform video coder and its
+ // application - Part I: Fundamental Theory", IEEE Trans. Circ.
+ // Sys. for Video Tech., April 1997.
+ vp9_clear_system_state();
+ if (var == 0 || n == 0) {
+ *rate = 0;
+ *dist = 0;
+ } else {
+ double D, R;
+ double s2 = (double) var / n;
+ double x = qstep / sqrt(s2);
+ model_rd_norm(x, &R, &D);
+ *rate = ((n << 8) * R + 0.5);
+ *dist = (var * D + 0.5);
+ }
+ vp9_clear_system_state();
+}
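+
+// A rough worked example (numbers chosen for illustration): var = 6400
+// over n = 64 samples gives s2 = 100; with qstep = 40, x = 4.0 and the
+// interpolation lands exactly on table index 32, so R ~= 0.385 and
+// D ~= 0.664, giving *rate = (64 << 8) * 0.385 ~= 6308 and
+// *dist ~= 0.664 * 6400 ~= 4250.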
+
+static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize,
+ MACROBLOCK *x, MACROBLOCKD *xd,
+ int *out_rate_sum, int64_t *out_dist_sum) {
+ // Note our transform coeffs are 8 times an orthogonal transform.
+ // Hence quantizer step is also 8 times. To get effective quantizer
+ // we need to divide by 8 before sending to modeling function.
+ int i, rate_sum = 0, dist_sum = 0;
+
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ struct macroblock_plane *const p = &x->plane[i];
+ struct macroblockd_plane *const pd = &xd->plane[i];
+ const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
+ unsigned int sse;
+ int rate;
+ int64_t dist;
+ (void) cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride,
+ pd->dst.buf, pd->dst.stride, &sse);
+ // sse works better than var, since there is no dc prediction used
+ model_rd_from_var_lapndz(sse, 1 << num_pels_log2_lookup[bs],
+ pd->dequant[1] >> 3, &rate, &dist);
+
+ rate_sum += rate;
+ dist_sum += dist;
+ }
+
+ *out_rate_sum = rate_sum;
+ *out_dist_sum = dist_sum << 4;
+}
+
+static void model_rd_for_sb_y_tx(VP9_COMP *cpi, BLOCK_SIZE bsize,
+ TX_SIZE tx_size,
+ MACROBLOCK *x, MACROBLOCKD *xd,
+ int *out_rate_sum, int64_t *out_dist_sum,
+ int *out_skip) {
+ int j, k;
+ BLOCK_SIZE bs;
+ struct macroblock_plane *const p = &x->plane[0];
+ struct macroblockd_plane *const pd = &xd->plane[0];
+ const int width = 4 << num_4x4_blocks_wide_lookup[bsize];
+ const int height = 4 << num_4x4_blocks_high_lookup[bsize];
+ int rate_sum = 0;
+ int64_t dist_sum = 0;
+ const int t = 4 << tx_size;
+
+ if (tx_size == TX_4X4) {
+ bs = BLOCK_4X4;
+ } else if (tx_size == TX_8X8) {
+ bs = BLOCK_8X8;
+ } else if (tx_size == TX_16X16) {
+ bs = BLOCK_16X16;
+ } else if (tx_size == TX_32X32) {
+ bs = BLOCK_32X32;
+ } else {
+ assert(0);
+ }
+
+ *out_skip = 1;
+ for (j = 0; j < height; j += t) {
+ for (k = 0; k < width; k += t) {
+ int rate;
+ int64_t dist;
+ unsigned int sse;
+ cpi->fn_ptr[bs].vf(&p->src.buf[j * p->src.stride + k], p->src.stride,
+ &pd->dst.buf[j * pd->dst.stride + k], pd->dst.stride,
+ &sse);
+ // sse works better than var, since there is no dc prediction used
+ model_rd_from_var_lapndz(sse, t * t, pd->dequant[1] >> 3, &rate, &dist);
+ rate_sum += rate;
+ dist_sum += dist;
+ *out_skip &= (rate < 1024);
+ }
+ }
+
+ *out_rate_sum = rate_sum;
+ *out_dist_sum = dist_sum << 4;
+}
+
+int64_t vp9_block_error_c(int16_t *coeff, int16_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz) {
+ int i;
+ int64_t error = 0, sqcoeff = 0;
+
+ for (i = 0; i < block_size; i++) {
+ int this_diff = coeff[i] - dqcoeff[i];
+ error += (unsigned)this_diff * this_diff;
+ sqcoeff += (unsigned) coeff[i] * coeff[i];
+ }
+
+ *ssz = sqcoeff;
+ return error;
+}
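+
+// The (unsigned) casts above keep the squares well defined: the difference
+// of two int16_t values can reach +/-65535, whose square (just under
+// 1 << 32) would overflow a signed 32-bit int but still fits in unsigned.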
+
+/* The trailing '0' is a terminator which is used inside cost_coeffs() to
+ * decide whether to include cost of a trailing EOB node or not (i.e. we
+ * can skip this if the last coefficient in this transform block, e.g. the
 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
 * is non-zero). */
+static const int16_t band_counts[TX_SIZES][8] = {
+ { 1, 2, 3, 4, 3, 16 - 13, 0 },
+ { 1, 2, 3, 4, 11, 64 - 21, 0 },
+ { 1, 2, 3, 4, 11, 256 - 21, 0 },
+ { 1, 2, 3, 4, 11, 1024 - 21, 0 },
+};
+
+static INLINE int cost_coeffs(MACROBLOCK *mb,
+ int plane, int block,
+ ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
+ TX_SIZE tx_size,
+ const int16_t *scan, const int16_t *nb) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ struct macroblockd_plane *pd = &xd->plane[plane];
+ const PLANE_TYPE type = pd->plane_type;
+ const int16_t *band_count = &band_counts[tx_size][1];
+ const int eob = pd->eobs[block];
+ const int16_t *const qcoeff_ptr = BLOCK_OFFSET(pd->qcoeff, block);
+ const int ref = mbmi->ref_frame[0] != INTRA_FRAME;
+ unsigned int (*token_costs)[2][PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS] =
+ mb->token_costs[tx_size][type][ref];
+ const ENTROPY_CONTEXT above_ec = !!*A, left_ec = !!*L;
+ uint8_t token_cache[1024];
+ int pt = combine_entropy_contexts(above_ec, left_ec);
+ int c, cost;
+
+ // Check for consistency of tx_size with mode info
+ assert(type == PLANE_TYPE_Y_WITH_DC ? mbmi->tx_size == tx_size
+ : get_uv_tx_size(mbmi) == tx_size);
+
+ if (eob == 0) {
+ // single eob token
+ cost = token_costs[0][0][pt][DCT_EOB_TOKEN];
+ c = 0;
+ } else {
+ int band_left = *band_count++;
+
+ // dc token
+ int v = qcoeff_ptr[0];
+ int prev_t = vp9_dct_value_tokens_ptr[v].token;
+ cost = (*token_costs)[0][pt][prev_t] + vp9_dct_value_cost_ptr[v];
+ token_cache[0] = vp9_pt_energy_class[prev_t];
+ ++token_costs;
+
+ // ac tokens
+ for (c = 1; c < eob; c++) {
+ const int rc = scan[c];
+ int t;
+
+ v = qcoeff_ptr[rc];
+ t = vp9_dct_value_tokens_ptr[v].token;
+ pt = get_coef_context(nb, token_cache, c);
+ cost += (*token_costs)[!prev_t][pt][t] + vp9_dct_value_cost_ptr[v];
+ token_cache[rc] = vp9_pt_energy_class[t];
+ prev_t = t;
+ if (!--band_left) {
+ band_left = *band_count++;
+ ++token_costs;
+ }
+ }
+
+ // eob token
+ if (band_left) {
+ pt = get_coef_context(nb, token_cache, c);
+ cost += (*token_costs)[0][pt][DCT_EOB_TOKEN];
+ }
+ }
+
+  // Record in the contexts whether the block has any nonzero coefficients.
+ *A = *L = (c > 0);
+
+ return cost;
+}
+
+struct rdcost_block_args {
+ MACROBLOCK *x;
+ ENTROPY_CONTEXT t_above[16];
+ ENTROPY_CONTEXT t_left[16];
+ TX_SIZE tx_size;
+ int bw;
+ int bh;
+ int rate;
+ int64_t dist;
+ int64_t sse;
+ int64_t best_rd;
+ int skip;
+ const int16_t *scan, *nb;
+};
+
+static void dist_block(int plane, int block, TX_SIZE tx_size, void *arg) {
+ const int ss_txfrm_size = tx_size << 1;
+ struct rdcost_block_args* args = arg;
+ MACROBLOCK* const x = args->x;
+ MACROBLOCKD* const xd = &x->e_mbd;
+ struct macroblock_plane *const p = &x->plane[plane];
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ int64_t this_sse;
+ int shift = args->tx_size == TX_32X32 ? 0 : 2;
+ int16_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+ int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+ args->dist += vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
+ &this_sse) >> shift;
+ args->sse += this_sse >> shift;
+
+ if (x->skip_encode &&
+ xd->this_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
+ // TODO(jingning): tune the model to better capture the distortion.
+ int64_t p = (pd->dequant[1] * pd->dequant[1] *
+ (1 << ss_txfrm_size)) >> shift;
+ args->dist += p;
+ args->sse += p;
+ }
+}
+
+static void rate_block(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg) {
+ struct rdcost_block_args* args = arg;
+
+ int x_idx, y_idx;
+ txfrm_block_to_raster_xy(plane_bsize, args->tx_size, block, &x_idx, &y_idx);
+
+ args->rate += cost_coeffs(args->x, plane, block,
+ args->t_above + x_idx,
+ args->t_left + y_idx, args->tx_size,
+ args->scan, args->nb);
+}
+
+static void block_yrd_txfm(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg) {
+ struct rdcost_block_args *args = arg;
+ MACROBLOCK *const x = args->x;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct encode_b_args encode_args = {x, NULL};
+ int64_t rd1, rd2, rd;
+
+ if (args->skip)
+ return;
+ rd1 = RDCOST(x->rdmult, x->rddiv, args->rate, args->dist);
+ rd2 = RDCOST(x->rdmult, x->rddiv, 0, args->sse);
+ rd = MIN(rd1, rd2);
+ if (rd > args->best_rd) {
+ args->skip = 1;
+ args->rate = INT_MAX;
+ args->dist = INT64_MAX;
+ args->sse = INT64_MAX;
+ return;
+ }
+
+ if (!is_inter_block(&xd->this_mi->mbmi))
+ vp9_encode_block_intra(plane, block, plane_bsize, tx_size, &encode_args);
+ else
+ vp9_xform_quant(plane, block, plane_bsize, tx_size, &encode_args);
+
+ dist_block(plane, block, tx_size, args);
+ rate_block(plane, block, plane_bsize, tx_size, args);
+}
+
+static void txfm_rd_in_plane(MACROBLOCK *x,
+ int *rate, int64_t *distortion,
+ int *skippable, int64_t *sse,
+ int64_t ref_best_rd, int plane,
+ BLOCK_SIZE bsize, TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bs];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bs];
+ int i;
+ struct rdcost_block_args args = { x, { 0 }, { 0 }, tx_size,
+ num_4x4_blocks_wide, num_4x4_blocks_high,
+ 0, 0, 0, ref_best_rd, 0 };
+ if (plane == 0)
+ xd->this_mi->mbmi.tx_size = tx_size;
+
+ switch (tx_size) {
+ case TX_4X4:
+ vpx_memcpy(&args.t_above, pd->above_context,
+ sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide);
+ vpx_memcpy(&args.t_left, pd->left_context,
+ sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high);
+ get_scan_nb_4x4(get_tx_type_4x4(pd->plane_type, xd, 0),
+ &args.scan, &args.nb);
+ break;
+ case TX_8X8:
+ for (i = 0; i < num_4x4_blocks_wide; i += 2)
+ args.t_above[i] = !!*(uint16_t *)&pd->above_context[i];
+ for (i = 0; i < num_4x4_blocks_high; i += 2)
+ args.t_left[i] = !!*(uint16_t *)&pd->left_context[i];
+ get_scan_nb_8x8(get_tx_type_8x8(pd->plane_type, xd),
+ &args.scan, &args.nb);
+ break;
+ case TX_16X16:
+ for (i = 0; i < num_4x4_blocks_wide; i += 4)
+ args.t_above[i] = !!*(uint32_t *)&pd->above_context[i];
+ for (i = 0; i < num_4x4_blocks_high; i += 4)
+ args.t_left[i] = !!*(uint32_t *)&pd->left_context[i];
+ get_scan_nb_16x16(get_tx_type_16x16(pd->plane_type, xd),
+ &args.scan, &args.nb);
+ break;
+ case TX_32X32:
+ for (i = 0; i < num_4x4_blocks_wide; i += 8)
+ args.t_above[i] = !!*(uint64_t *)&pd->above_context[i];
+ for (i = 0; i < num_4x4_blocks_high; i += 8)
+ args.t_left[i] = !!*(uint64_t *)&pd->left_context[i];
+ args.scan = vp9_default_scan_32x32;
+ args.nb = vp9_default_scan_32x32_neighbors;
+ break;
+ default:
+ assert(0);
+ }
+
+ foreach_transformed_block_in_plane(xd, bsize, plane, block_yrd_txfm, &args);
+ *distortion = args.dist;
+ *rate = args.rate;
+ *sse = args.sse;
+ *skippable = vp9_is_skippable_in_plane(xd, bsize, plane) && (!args.skip);
+}
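+
+// The casts in the switch above (e.g. !!*(uint16_t *)&pd->above_context[i]
+// for TX_8X8) read the 2, 4 or 8 one-byte 4x4 entropy contexts covered by
+// one transform block as a single 16/32/64-bit load and collapse them to a
+// 0/1 flag.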
+
+static void choose_largest_txfm_size(VP9_COMP *cpi, MACROBLOCK *x,
+ int *rate, int64_t *distortion,
+ int *skip, int64_t *sse,
+ int64_t ref_best_rd,
+ BLOCK_SIZE bs) {
+ const TX_SIZE max_txfm_size = max_txsize_lookup[bs];
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ if (max_txfm_size == TX_32X32 &&
+ (cm->tx_mode == ALLOW_32X32 ||
+ cm->tx_mode == TX_MODE_SELECT)) {
+ mbmi->tx_size = TX_32X32;
+ } else if (max_txfm_size >= TX_16X16 &&
+ (cm->tx_mode == ALLOW_16X16 ||
+ cm->tx_mode == ALLOW_32X32 ||
+ cm->tx_mode == TX_MODE_SELECT)) {
+ mbmi->tx_size = TX_16X16;
+ } else if (cm->tx_mode != ONLY_4X4) {
+ mbmi->tx_size = TX_8X8;
+ } else {
+ mbmi->tx_size = TX_4X4;
+ }
+ txfm_rd_in_plane(x, rate, distortion, skip,
+ &sse[mbmi->tx_size], ref_best_rd, 0, bs,
+ mbmi->tx_size);
+ cpi->txfm_stepdown_count[0]++;
+}
+
+static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
+ int (*r)[2], int *rate,
+ int64_t *d, int64_t *distortion,
+ int *s, int *skip,
+ int64_t tx_cache[TX_MODES],
+ BLOCK_SIZE bs) {
+ const TX_SIZE max_tx_size = max_txsize_lookup[bs];
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd);
+ int64_t rd[TX_SIZES][2];
+ int n, m;
+ int s0, s1;
+
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs, xd->this_mi);
+
+ for (n = TX_4X4; n <= max_tx_size; n++) {
+ r[n][1] = r[n][0];
+ if (r[n][0] == INT_MAX)
+ continue;
+ for (m = 0; m <= n - (n == max_tx_size); m++) {
+ if (m == n)
+ r[n][1] += vp9_cost_zero(tx_probs[m]);
+ else
+ r[n][1] += vp9_cost_one(tx_probs[m]);
+ }
+ }
+
+ assert(skip_prob > 0);
+ s0 = vp9_cost_bit(skip_prob, 0);
+ s1 = vp9_cost_bit(skip_prob, 1);
+
+ for (n = TX_4X4; n <= max_tx_size; n++) {
+ if (d[n] == INT64_MAX) {
+ rd[n][0] = rd[n][1] = INT64_MAX;
+ continue;
+ }
+ if (s[n]) {
+ rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, d[n]);
+ } else {
+ rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
+ rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
+ }
+ }
+
+ if (max_tx_size == TX_32X32 &&
+ (cm->tx_mode == ALLOW_32X32 ||
+ (cm->tx_mode == TX_MODE_SELECT &&
+ rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
+ rd[TX_32X32][1] < rd[TX_4X4][1]))) {
+ mbmi->tx_size = TX_32X32;
+ } else if (max_tx_size >= TX_16X16 &&
+ (cm->tx_mode == ALLOW_16X16 ||
+ cm->tx_mode == ALLOW_32X32 ||
+ (cm->tx_mode == TX_MODE_SELECT &&
+ rd[TX_16X16][1] < rd[TX_8X8][1] &&
+ rd[TX_16X16][1] < rd[TX_4X4][1]))) {
+ mbmi->tx_size = TX_16X16;
+ } else if (cm->tx_mode == ALLOW_8X8 ||
+ cm->tx_mode == ALLOW_16X16 ||
+ cm->tx_mode == ALLOW_32X32 ||
+ (cm->tx_mode == TX_MODE_SELECT && rd[TX_8X8][1] < rd[TX_4X4][1])) {
+ mbmi->tx_size = TX_8X8;
+ } else {
+ mbmi->tx_size = TX_4X4;
+ }
+
+ *distortion = d[mbmi->tx_size];
+ *rate = r[mbmi->tx_size][cm->tx_mode == TX_MODE_SELECT];
+ *skip = s[mbmi->tx_size];
+
+ tx_cache[ONLY_4X4] = rd[TX_4X4][0];
+ tx_cache[ALLOW_8X8] = rd[TX_8X8][0];
+ tx_cache[ALLOW_16X16] = rd[MIN(max_tx_size, TX_16X16)][0];
+ tx_cache[ALLOW_32X32] = rd[MIN(max_tx_size, TX_32X32)][0];
+ if (max_tx_size == TX_32X32 &&
+ rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
+ rd[TX_32X32][1] < rd[TX_4X4][1])
+ tx_cache[TX_MODE_SELECT] = rd[TX_32X32][1];
+ else if (max_tx_size >= TX_16X16 &&
+ rd[TX_16X16][1] < rd[TX_8X8][1] && rd[TX_16X16][1] < rd[TX_4X4][1])
+ tx_cache[TX_MODE_SELECT] = rd[TX_16X16][1];
+ else
+ tx_cache[TX_MODE_SELECT] = rd[TX_4X4][1] < rd[TX_8X8][1] ?
+ rd[TX_4X4][1] : rd[TX_8X8][1];
+
+ if (max_tx_size == TX_32X32 &&
+ rd[TX_32X32][1] < rd[TX_16X16][1] &&
+ rd[TX_32X32][1] < rd[TX_8X8][1] &&
+ rd[TX_32X32][1] < rd[TX_4X4][1]) {
+ cpi->txfm_stepdown_count[0]++;
+ } else if (max_tx_size >= TX_16X16 &&
+ rd[TX_16X16][1] < rd[TX_8X8][1] &&
+ rd[TX_16X16][1] < rd[TX_4X4][1]) {
+ cpi->txfm_stepdown_count[max_tx_size - TX_16X16]++;
+ } else if (rd[TX_8X8][1] < rd[TX_4X4][1]) {
+ cpi->txfm_stepdown_count[max_tx_size - TX_8X8]++;
+ } else {
+ cpi->txfm_stepdown_count[max_tx_size - TX_4X4]++;
+ }
+}
+
+static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x,
+ int (*r)[2], int *rate,
+ int64_t *d, int64_t *distortion,
+ int *s, int *skip, int64_t *sse,
+ int64_t ref_best_rd,
+ BLOCK_SIZE bs) {
+ const TX_SIZE max_txfm_size = max_txsize_lookup[bs];
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd);
+ int64_t rd[TX_SIZES][2];
+ int n, m;
+ int s0, s1;
+ double scale_rd[TX_SIZES] = {1.73, 1.44, 1.20, 1.00};
+ // double scale_r[TX_SIZES] = {2.82, 2.00, 1.41, 1.00};
+
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs, xd->this_mi);
+
+ // for (n = TX_4X4; n <= max_txfm_size; n++)
+ // r[n][0] = (r[n][0] * scale_r[n]);
+
+ for (n = TX_4X4; n <= max_txfm_size; n++) {
+ r[n][1] = r[n][0];
+ for (m = 0; m <= n - (n == max_txfm_size); m++) {
+ if (m == n)
+ r[n][1] += vp9_cost_zero(tx_probs[m]);
+ else
+ r[n][1] += vp9_cost_one(tx_probs[m]);
+ }
+ }
+
+ assert(skip_prob > 0);
+ s0 = vp9_cost_bit(skip_prob, 0);
+ s1 = vp9_cost_bit(skip_prob, 1);
+
+ for (n = TX_4X4; n <= max_txfm_size; n++) {
+ if (s[n]) {
+ rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, d[n]);
+ } else {
+ rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
+ rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
+ }
+ }
+ for (n = TX_4X4; n <= max_txfm_size; n++) {
+ rd[n][0] = (scale_rd[n] * rd[n][0]);
+ rd[n][1] = (scale_rd[n] * rd[n][1]);
+ }
+
+ if (max_txfm_size == TX_32X32 &&
+ (cm->tx_mode == ALLOW_32X32 ||
+ (cm->tx_mode == TX_MODE_SELECT &&
+ rd[TX_32X32][1] <= rd[TX_16X16][1] &&
+ rd[TX_32X32][1] <= rd[TX_8X8][1] &&
+ rd[TX_32X32][1] <= rd[TX_4X4][1]))) {
+ mbmi->tx_size = TX_32X32;
+ } else if (max_txfm_size >= TX_16X16 &&
+ (cm->tx_mode == ALLOW_16X16 ||
+ cm->tx_mode == ALLOW_32X32 ||
+ (cm->tx_mode == TX_MODE_SELECT &&
+ rd[TX_16X16][1] <= rd[TX_8X8][1] &&
+ rd[TX_16X16][1] <= rd[TX_4X4][1]))) {
+ mbmi->tx_size = TX_16X16;
+ } else if (cm->tx_mode == ALLOW_8X8 ||
+ cm->tx_mode == ALLOW_16X16 ||
+ cm->tx_mode == ALLOW_32X32 ||
+ (cm->tx_mode == TX_MODE_SELECT &&
+ rd[TX_8X8][1] <= rd[TX_4X4][1])) {
+ mbmi->tx_size = TX_8X8;
+ } else {
+ mbmi->tx_size = TX_4X4;
+ }
+
+ // Actually encode using the chosen mode if a model was used, but do not
+ // update the r, d costs
+ txfm_rd_in_plane(x, rate, distortion, skip, &sse[mbmi->tx_size],
+ ref_best_rd, 0, bs, mbmi->tx_size);
+
+ if (max_txfm_size == TX_32X32 &&
+ rd[TX_32X32][1] <= rd[TX_16X16][1] &&
+ rd[TX_32X32][1] <= rd[TX_8X8][1] &&
+ rd[TX_32X32][1] <= rd[TX_4X4][1]) {
+ cpi->txfm_stepdown_count[0]++;
+ } else if (max_txfm_size >= TX_16X16 &&
+ rd[TX_16X16][1] <= rd[TX_8X8][1] &&
+ rd[TX_16X16][1] <= rd[TX_4X4][1]) {
+ cpi->txfm_stepdown_count[max_txfm_size - TX_16X16]++;
+ } else if (rd[TX_8X8][1] <= rd[TX_4X4][1]) {
+ cpi->txfm_stepdown_count[max_txfm_size - TX_8X8]++;
+ } else {
+ cpi->txfm_stepdown_count[max_txfm_size - TX_4X4]++;
+ }
+}
+
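+// Rate-distortion search over the available luma transform sizes for this
+// block; the strategy (full RD, largest-only, or model-based) depends on the
+// active speed features.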
+static void super_block_yrd(VP9_COMP *cpi,
+ MACROBLOCK *x, int *rate, int64_t *distortion,
+ int *skip, int64_t *psse, BLOCK_SIZE bs,
+ int64_t txfm_cache[TX_MODES],
+ int64_t ref_best_rd) {
+ int r[TX_SIZES][2], s[TX_SIZES];
+ int64_t d[TX_SIZES], sse[TX_SIZES];
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+
+ assert(bs == mbmi->sb_type);
+ if (mbmi->ref_frame[0] > INTRA_FRAME)
+ vp9_subtract_sby(x, bs);
+
+ if (cpi->sf.tx_size_search_method == USE_LARGESTALL ||
+ (cpi->sf.tx_size_search_method != USE_FULL_RD &&
+ mbmi->ref_frame[0] == INTRA_FRAME)) {
+ vpx_memset(txfm_cache, 0, TX_MODES * sizeof(int64_t));
+ choose_largest_txfm_size(cpi, x, rate, distortion, skip, sse,
+ ref_best_rd, bs);
+ if (psse)
+ *psse = sse[mbmi->tx_size];
+ return;
+ }
+
+ if (cpi->sf.tx_size_search_method == USE_LARGESTINTRA_MODELINTER &&
+ mbmi->ref_frame[0] > INTRA_FRAME) {
+ if (bs >= BLOCK_32X32)
+ model_rd_for_sb_y_tx(cpi, bs, TX_32X32, x, xd,
+ &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32]);
+ if (bs >= BLOCK_16X16)
+ model_rd_for_sb_y_tx(cpi, bs, TX_16X16, x, xd,
+ &r[TX_16X16][0], &d[TX_16X16], &s[TX_16X16]);
+
+ model_rd_for_sb_y_tx(cpi, bs, TX_8X8, x, xd,
+ &r[TX_8X8][0], &d[TX_8X8], &s[TX_8X8]);
+
+ model_rd_for_sb_y_tx(cpi, bs, TX_4X4, x, xd,
+ &r[TX_4X4][0], &d[TX_4X4], &s[TX_4X4]);
+
+ choose_txfm_size_from_modelrd(cpi, x, r, rate, d, distortion, s,
+ skip, sse, ref_best_rd, bs);
+ } else {
+ if (bs >= BLOCK_32X32)
+ txfm_rd_in_plane(x, &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32],
+ &sse[TX_32X32], ref_best_rd, 0, bs, TX_32X32);
+ if (bs >= BLOCK_16X16)
+ txfm_rd_in_plane(x, &r[TX_16X16][0], &d[TX_16X16], &s[TX_16X16],
+ &sse[TX_16X16], ref_best_rd, 0, bs, TX_16X16);
+ txfm_rd_in_plane(x, &r[TX_8X8][0], &d[TX_8X8], &s[TX_8X8],
+ &sse[TX_8X8], ref_best_rd, 0, bs, TX_8X8);
+ txfm_rd_in_plane(x, &r[TX_4X4][0], &d[TX_4X4], &s[TX_4X4],
+ &sse[TX_4X4], ref_best_rd, 0, bs, TX_4X4);
+ choose_txfm_size_from_rd(cpi, x, r, rate, d, distortion, s,
+ skip, txfm_cache, bs);
+ }
+ if (psse)
+ *psse = sse[mbmi->tx_size];
+}
+
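+// Returns 1 if an oblique intra mode can be skipped because the best mode so
+// far is not one of its neighboring directional modes.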
+static int conditional_skipintra(MB_PREDICTION_MODE mode,
+ MB_PREDICTION_MODE best_intra_mode) {
+ if (mode == D117_PRED &&
+ best_intra_mode != V_PRED &&
+ best_intra_mode != D135_PRED)
+ return 1;
+ if (mode == D63_PRED &&
+ best_intra_mode != V_PRED &&
+ best_intra_mode != D45_PRED)
+ return 1;
+ if (mode == D207_PRED &&
+ best_intra_mode != H_PRED &&
+ best_intra_mode != D45_PRED)
+ return 1;
+ if (mode == D153_PRED &&
+ best_intra_mode != H_PRED &&
+ best_intra_mode != D135_PRED)
+ return 1;
+ return 0;
+}
+
+static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
+ MB_PREDICTION_MODE *best_mode,
+ int *bmode_costs,
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
+ int *bestrate, int *bestratey,
+ int64_t *bestdistortion,
+ BLOCK_SIZE bsize, int64_t rd_thresh) {
+ MB_PREDICTION_MODE mode;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int64_t best_rd = rd_thresh;
+ int rate = 0;
+ int64_t distortion;
+ struct macroblock_plane *p = &x->plane[0];
+ struct macroblockd_plane *pd = &xd->plane[0];
+ const int src_stride = p->src.stride;
+ const int dst_stride = pd->dst.stride;
+ uint8_t *src_init = raster_block_offset_uint8(BLOCK_8X8, ib,
+ p->src.buf, src_stride);
+ uint8_t *dst_init = raster_block_offset_uint8(BLOCK_8X8, ib,
+ pd->dst.buf, dst_stride);
+ int16_t *src_diff, *coeff;
+
+ ENTROPY_CONTEXT ta[2], tempa[2];
+ ENTROPY_CONTEXT tl[2], templ[2];
+ TX_TYPE tx_type = DCT_DCT;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ int idx, idy, block;
+ uint8_t best_dst[8 * 8];
+
+ assert(ib < 4);
+
+ vpx_memcpy(ta, a, sizeof(ta));
+ vpx_memcpy(tl, l, sizeof(tl));
+ xd->this_mi->mbmi.tx_size = TX_4X4;
+
+ for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
+ int64_t this_rd;
+ int ratey = 0;
+
+ if (!(cpi->sf.intra_y_mode_mask & (1 << mode)))
+ continue;
+
+ // Only do the oblique modes if the best so far is
+ // one of the neighboring directional modes
+ if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
+ if (conditional_skipintra(mode, *best_mode))
+ continue;
+ }
+
+ rate = bmode_costs[mode];
+ distortion = 0;
+
+ vpx_memcpy(tempa, ta, sizeof(ta));
+ vpx_memcpy(templ, tl, sizeof(tl));
+
+ for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
+ for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
+ int64_t ssz;
+ const int16_t *scan;
+ uint8_t *src = src_init + idx * 4 + idy * 4 * src_stride;
+ uint8_t *dst = dst_init + idx * 4 + idy * 4 * dst_stride;
+
+ block = ib + idy * 2 + idx;
+ xd->this_mi->bmi[block].as_mode = mode;
+ src_diff = raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+ coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
+ vp9_predict_intra_block(xd, block, 1,
+ TX_4X4, mode,
+ x->skip_encode ? src : dst,
+ x->skip_encode ? src_stride : dst_stride,
+ dst, dst_stride);
+ vp9_subtract_block(4, 4, src_diff, 8,
+ src, src_stride,
+ dst, dst_stride);
+
+ tx_type = get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC, xd, block);
+ if (tx_type != DCT_DCT) {
+ vp9_short_fht4x4(src_diff, coeff, 8, tx_type);
+ x->quantize_b_4x4(x, block, tx_type, 16);
+ } else {
+ x->fwd_txm4x4(src_diff, coeff, 16);
+ x->quantize_b_4x4(x, block, tx_type, 16);
+ }
+
+ scan = get_scan_4x4(get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC, xd, block));
+ ratey += cost_coeffs(x, 0, block,
+ tempa + idx, templ + idy, TX_4X4, scan,
+ vp9_get_coef_neighbors_handle(scan));
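+ // The >> 2 below compensates for the scaling applied by the forward
+ // transform.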
+ distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
+ 16, &ssz) >> 2;
+ if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
+ goto next;
+
+ if (tx_type != DCT_DCT)
+ vp9_short_iht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block),
+ dst, pd->dst.stride, tx_type);
+ else
+ xd->inv_txm4x4_add(BLOCK_OFFSET(pd->dqcoeff, block),
+ dst, pd->dst.stride);
+ }
+ }
+
+ rate += ratey;
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+
+ if (this_rd < best_rd) {
+ *bestrate = rate;
+ *bestratey = ratey;
+ *bestdistortion = distortion;
+ best_rd = this_rd;
+ *best_mode = mode;
+ vpx_memcpy(a, tempa, sizeof(tempa));
+ vpx_memcpy(l, templ, sizeof(templ));
+ for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
+ vpx_memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
+ num_4x4_blocks_wide * 4);
+ }
+ next:
+ {}
+ }
+
+ if (best_rd >= rd_thresh || x->skip_encode)
+ return best_rd;
+
+ for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
+ vpx_memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
+ num_4x4_blocks_wide * 4);
+
+ return best_rd;
+}
+
+static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
+ MACROBLOCK * const mb,
+ int * const rate,
+ int * const rate_y,
+ int64_t * const distortion,
+ int64_t best_rd) {
+ int i, j;
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ MODE_INFO *const mic = xd->this_mi;
+ const MODE_INFO *above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO *left_mi = xd->mi_8x8[-1];
+ const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ int idx, idy;
+ int cost = 0;
+ int64_t total_distortion = 0;
+ int tot_rate_y = 0;
+ int64_t total_rd = 0;
+ ENTROPY_CONTEXT t_above[4], t_left[4];
+ int *bmode_costs;
+
+ vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
+ vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
+
+ bmode_costs = mb->mbmode_cost;
+
+ // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ MB_PREDICTION_MODE best_mode = DC_PRED;
+ int r = INT_MAX, ry = INT_MAX;
+ int64_t d = INT64_MAX, this_rd = INT64_MAX;
+ i = idy * 2 + idx;
+ if (cpi->common.frame_type == KEY_FRAME) {
+ const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, i);
+ const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
+ left_block_mode(mic, left_mi, i) :
+ DC_PRED;
+
+ bmode_costs = mb->y_mode_costs[A][L];
+ }
+
+ this_rd = rd_pick_intra4x4block(cpi, mb, i, &best_mode, bmode_costs,
+ t_above + idx, t_left + idy, &r, &ry, &d,
+ bsize, best_rd - total_rd);
+ if (this_rd >= best_rd - total_rd)
+ return INT64_MAX;
+
+ total_rd += this_rd;
+ cost += r;
+ total_distortion += d;
+ tot_rate_y += ry;
+
+ mic->bmi[i].as_mode = best_mode;
+ for (j = 1; j < num_4x4_blocks_high; ++j)
+ mic->bmi[i + j * 2].as_mode = best_mode;
+ for (j = 1; j < num_4x4_blocks_wide; ++j)
+ mic->bmi[i + j].as_mode = best_mode;
+
+ if (total_rd >= best_rd)
+ return INT64_MAX;
+ }
+ }
+
+ *rate = cost;
+ *rate_y = tot_rate_y;
+ *distortion = total_distortion;
+ mic->mbmi.mode = mic->bmi[3].as_mode;
+
+ return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
+}
+
+static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize,
+ int64_t tx_cache[TX_MODES],
+ int64_t best_rd) {
+ MB_PREDICTION_MODE mode;
+ MB_PREDICTION_MODE mode_selected = DC_PRED;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *const mic = xd->this_mi;
+ int this_rate, this_rate_tokenonly, s;
+ int64_t this_distortion, this_rd;
+ TX_SIZE best_tx = TX_4X4;
+ int i;
+ int *bmode_costs = x->mbmode_cost;
+
+ if (cpi->sf.tx_size_search_method == USE_FULL_RD)
+ for (i = 0; i < TX_MODES; i++)
+ tx_cache[i] = INT64_MAX;
+
+ /* Y Search for intra prediction mode */
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ int64_t local_tx_cache[TX_MODES];
+ MODE_INFO *above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ MODE_INFO *left_mi = xd->mi_8x8[-1];
+
+ if (!(cpi->sf.intra_y_mode_mask & (1 << mode)))
+ continue;
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, 0);
+ const MB_PREDICTION_MODE L = xd->left_available ?
+ left_block_mode(mic, left_mi, 0) : DC_PRED;
+
+ bmode_costs = x->y_mode_costs[A][L];
+ }
+ mic->mbmi.mode = mode;
+
+ super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
+ bsize, local_tx_cache, best_rd);
+
+ if (this_rate_tokenonly == INT_MAX)
+ continue;
+
+ this_rate = this_rate_tokenonly + bmode_costs[mode];
+ this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+
+ if (this_rd < best_rd) {
+ mode_selected = mode;
+ best_rd = this_rd;
+ best_tx = mic->mbmi.tx_size;
+ *rate = this_rate;
+ *rate_tokenonly = this_rate_tokenonly;
+ *distortion = this_distortion;
+ *skippable = s;
+ }
+
+ if (cpi->sf.tx_size_search_method == USE_FULL_RD && this_rd < INT64_MAX) {
+ for (i = 0; i < TX_MODES && local_tx_cache[i] < INT64_MAX; i++) {
+ const int64_t adj_rd = this_rd + local_tx_cache[i] -
+ local_tx_cache[cpi->common.tx_mode];
+ if (adj_rd < tx_cache[i]) {
+ tx_cache[i] = adj_rd;
+ }
+ }
+ }
+ }
+
+ mic->mbmi.mode = mode_selected;
+ mic->mbmi.tx_size = best_tx;
+
+ return best_rd;
+}
+
+static void super_block_uvrd(VP9_COMMON *const cm, MACROBLOCK *x,
+ int *rate, int64_t *distortion, int *skippable,
+ int64_t *sse, BLOCK_SIZE bsize,
+ int64_t ref_best_rd) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ TX_SIZE uv_txfm_size = get_uv_tx_size(mbmi);
+ int plane;
+ int pnrate = 0, pnskip = 1;
+ int64_t pndist = 0, pnsse = 0;
+
+ if (ref_best_rd < 0)
+ goto term;
+
+ if (is_inter_block(mbmi))
+ vp9_subtract_sbuv(x, bsize);
+
+ *rate = 0;
+ *distortion = 0;
+ *sse = 0;
+ *skippable = 1;
+
+ for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
+ txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse,
+ ref_best_rd, plane, bsize, uv_txfm_size);
+ if (pnrate == INT_MAX)
+ goto term;
+ *rate += pnrate;
+ *distortion += pndist;
+ *sse += pnsse;
+ *skippable &= pnskip;
+ }
+ return;
+
+ term:
+ *rate = INT_MAX;
+ *distortion = INT64_MAX;
+ *sse = INT64_MAX;
+ *skippable = 0;
+ return;
+}
+
+static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize) {
+ MB_PREDICTION_MODE mode;
+ MB_PREDICTION_MODE mode_selected = DC_PRED;
+ int64_t best_rd = INT64_MAX, this_rd;
+ int this_rate_tokenonly, this_rate, s;
+ int64_t this_distortion, this_sse;
+
+ // int mode_mask = (bsize <= BLOCK_8X8)
+ // ? ALL_INTRA_MODES : cpi->sf.intra_uv_mode_mask;
+
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ // if (!(mode_mask & (1 << mode)))
+ if (!(cpi->sf.intra_uv_mode_mask & (1 << mode)))
+ continue;
+
+ x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode;
+
+ super_block_uvrd(&cpi->common, x, &this_rate_tokenonly,
+ &this_distortion, &s, &this_sse, bsize, best_rd);
+ if (this_rate_tokenonly == INT_MAX)
+ continue;
+ this_rate = this_rate_tokenonly +
+ x->intra_uv_mode_cost[cpi->common.frame_type][mode];
+ this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+
+ if (this_rd < best_rd) {
+ mode_selected = mode;
+ best_rd = this_rd;
+ *rate = this_rate;
+ *rate_tokenonly = this_rate_tokenonly;
+ *distortion = this_distortion;
+ *skippable = s;
+ }
+ }
+
+ x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode_selected;
+
+ return best_rd;
+}
+
+static int64_t rd_sbuv_dcpred(VP9_COMP *cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize) {
+ int64_t this_rd;
+ int64_t this_sse;
+
+ x->e_mbd.mi_8x8[0]->mbmi.uv_mode = DC_PRED;
+ super_block_uvrd(&cpi->common, x, rate_tokenonly,
+ distortion, skippable, &this_sse, bsize, INT64_MAX);
+ *rate = *rate_tokenonly +
+ x->intra_uv_mode_cost[cpi->common.frame_type][DC_PRED];
+ this_rd = RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
+
+ return this_rd;
+}
+
+static void choose_intra_uv_mode(VP9_COMP *cpi, BLOCK_SIZE bsize,
+ int *rate_uv, int *rate_uv_tokenonly,
+ int64_t *dist_uv, int *skip_uv,
+ MB_PREDICTION_MODE *mode_uv) {
+ MACROBLOCK *const x = &cpi->mb;
+
+ // Use an estimated rd for uv_intra based on DC_PRED if the
+ // appropriate speed flag is set.
+ if (cpi->sf.use_uv_intra_rd_estimate) {
+ rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
+ bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
+ // Else do a proper rd search for each possible transform size that may
+ // be considered in the main rd loop.
+ } else {
+ rd_pick_intra_sbuv_mode(cpi, x,
+ rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
+ bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
+ }
+ *mode_uv = x->e_mbd.mi_8x8[0]->mbmi.uv_mode;
+}
+
+static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode,
+ int mode_context) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ const int segment_id = xd->this_mi->mbmi.segment_id;
+
+ // Don't account for mode here if segment skip is enabled.
+ if (!vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
+ assert(is_inter_mode(mode));
+ return x->inter_mode_cost[mode_context][mode - NEARESTMV];
+ } else {
+ return 0;
+ }
+}
+
+void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
+ x->e_mbd.mi_8x8[0]->mbmi.mode = mb;
+ x->e_mbd.mi_8x8[0]->mbmi.mv[0].as_int = mv->as_int;
+}
+
+static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize,
+ int_mv *frame_mv,
+ int mi_row, int mi_col,
+ int_mv single_newmv[MAX_REF_FRAMES],
+ int *rate_mv);
+static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize,
+ int mi_row, int mi_col,
+ int_mv *tmp_mv, int *rate_mv);
+
+static int labels2mode(MACROBLOCK *x, int i,
+ MB_PREDICTION_MODE this_mode,
+ int_mv *this_mv, int_mv *this_second_mv,
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
+ int_mv seg_mvs[MAX_REF_FRAMES],
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
+ int *mvjcost, int *mvcost[2], VP9_COMP *cpi) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *const mic = xd->this_mi;
+ MB_MODE_INFO *mbmi = &mic->mbmi;
+ int cost = 0, thismvcost = 0;
+ int idx, idy;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
+
+ /* We have to be careful retrieving previously-encoded motion vectors.
+ Ones from this macroblock have to be pulled from the BLOCKD array
+ as they have not yet made it to the bmi array in our MB_MODE_INFO. */
+ MB_PREDICTION_MODE m;
+
+ // The only time we should do costing for a new motion vector or mode
+ // is when we are on a new label (jbb May 08, 2007).
+ switch (m = this_mode) {
+ case NEWMV:
+ this_mv->as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
+ thismvcost = vp9_mv_bit_cost(this_mv, best_ref_mv, mvjcost, mvcost,
+ 102);
+ if (mbmi->ref_frame[1] > 0) {
+ this_second_mv->as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
+ thismvcost += vp9_mv_bit_cost(this_second_mv, second_best_ref_mv,
+ mvjcost, mvcost, 102);
+ }
+ break;
+ case NEARESTMV:
+ this_mv->as_int = frame_mv[NEARESTMV][mbmi->ref_frame[0]].as_int;
+ if (mbmi->ref_frame[1] > 0)
+ this_second_mv->as_int =
+ frame_mv[NEARESTMV][mbmi->ref_frame[1]].as_int;
+ break;
+ case NEARMV:
+ this_mv->as_int = frame_mv[NEARMV][mbmi->ref_frame[0]].as_int;
+ if (mbmi->ref_frame[1] > 0)
+ this_second_mv->as_int =
+ frame_mv[NEARMV][mbmi->ref_frame[1]].as_int;
+ break;
+ case ZEROMV:
+ this_mv->as_int = 0;
+ if (mbmi->ref_frame[1] > 0)
+ this_second_mv->as_int = 0;
+ break;
+ default:
+ break;
+ }
+
+ cost = cost_mv_ref(cpi, this_mode,
+ mbmi->mode_context[mbmi->ref_frame[0]]);
+
+ mic->bmi[i].as_mv[0].as_int = this_mv->as_int;
+ if (mbmi->ref_frame[1] > 0)
+ mic->bmi[i].as_mv[1].as_int = this_second_mv->as_int;
+
+ x->partition_info->bmi[i].mode = m;
+ for (idy = 0; idy < num_4x4_blocks_high; ++idy)
+ for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
+ vpx_memcpy(&mic->bmi[i + idy * 2 + idx],
+ &mic->bmi[i], sizeof(mic->bmi[i]));
+
+ cost += thismvcost;
+ return cost;
+}
+
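+// Builds the inter predictor for sub-block i, codes the residual with 4x4
+// transforms, and returns the RD cost, or INT64_MAX once it exceeds best_yrd.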
+static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
+ MACROBLOCK *x,
+ int64_t best_yrd,
+ int i,
+ int *labelyrate,
+ int64_t *distortion, int64_t *sse,
+ ENTROPY_CONTEXT *ta,
+ ENTROPY_CONTEXT *tl) {
+ int k;
+ MACROBLOCKD *xd = &x->e_mbd;
+ struct macroblockd_plane *const pd = &xd->plane[0];
+ MODE_INFO *const mi = xd->this_mi;
+ const BLOCK_SIZE bsize = mi->mbmi.sb_type;
+ const int width = plane_block_width(bsize, pd);
+ const int height = plane_block_height(bsize, pd);
+ int idx, idy;
+ const int src_stride = x->plane[0].src.stride;
+ uint8_t* const src = raster_block_offset_uint8(BLOCK_8X8, i,
+ x->plane[0].src.buf,
+ src_stride);
+ int16_t* src_diff = raster_block_offset_int16(BLOCK_8X8, i,
+ x->plane[0].src_diff);
+ int16_t* coeff = BLOCK_OFFSET(x->plane[0].coeff, i);
+ uint8_t* const dst = raster_block_offset_uint8(BLOCK_8X8, i,
+ pd->dst.buf, pd->dst.stride);
+ int64_t thisdistortion = 0, thissse = 0;
+ int thisrate = 0;
+ int ref, second_ref = has_second_ref(&mi->mbmi);
+
+ for (ref = 0; ref < 1 + second_ref; ++ref) {
+ const uint8_t *pre = raster_block_offset_uint8(BLOCK_8X8, i,
+ pd->pre[ref].buf, pd->pre[ref].stride);
+ vp9_build_inter_predictor(pre, pd->pre[ref].stride,
+ dst, pd->dst.stride,
+ &mi->bmi[i].as_mv[ref].as_mv,
+ &xd->scale_factor[ref],
+ width, height, ref, &xd->subpix, MV_PRECISION_Q3);
+ }
+
+ vp9_subtract_block(height, width, src_diff, 8, src, src_stride,
+ dst, pd->dst.stride);
+
+ k = i;
+ for (idy = 0; idy < height / 4; ++idy) {
+ for (idx = 0; idx < width / 4; ++idx) {
+ int64_t ssz, rd, rd1, rd2;
+
+ k += (idy * 2 + idx);
+ src_diff = raster_block_offset_int16(BLOCK_8X8, k,
+ x->plane[0].src_diff);
+ coeff = BLOCK_OFFSET(x->plane[0].coeff, k);
+ x->fwd_txm4x4(src_diff, coeff, 16);
+ x->quantize_b_4x4(x, k, DCT_DCT, 16);
+ thisdistortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
+ 16, &ssz);
+ thissse += ssz;
+ thisrate += cost_coeffs(x, 0, k,
+ ta + (k & 1),
+ tl + (k >> 1), TX_4X4,
+ vp9_default_scan_4x4,
+ vp9_default_scan_4x4_neighbors);
+ rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
+ rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
+ rd = MIN(rd1, rd2);
+ if (rd >= best_yrd)
+ return INT64_MAX;
+ }
+ }
+ *distortion = thisdistortion >> 2;
+ *labelyrate = thisrate;
+ *sse = thissse >> 2;
+
+ return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
+}
+
+typedef struct {
+ int eobs;
+ int brate;
+ int byrate;
+ int64_t bdist;
+ int64_t bsse;
+ int64_t brdcost;
+ int_mv mvs[2];
+ ENTROPY_CONTEXT ta[2];
+ ENTROPY_CONTEXT tl[2];
+} SEG_RDSTAT;
+
+typedef struct {
+ int_mv *ref_mv, *second_ref_mv;
+ int_mv mvp;
+
+ int64_t segment_rd;
+ int r;
+ int64_t d;
+ int64_t sse;
+ int segment_yrate;
+ MB_PREDICTION_MODE modes[4];
+ SEG_RDSTAT rdstat[4][INTER_MODES];
+ int mvthresh;
+} BEST_SEG_INFO;
+
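+// Returns nonzero if the full-pel position of mv falls outside the current
+// motion search bounds.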
+static INLINE int mv_check_bounds(MACROBLOCK *x, int_mv *mv) {
+ int r = 0;
+ r |= (mv->as_mv.row >> 3) < x->mv_row_min;
+ r |= (mv->as_mv.row >> 3) > x->mv_row_max;
+ r |= (mv->as_mv.col >> 3) < x->mv_col_min;
+ r |= (mv->as_mv.col >> 3) > x->mv_col_max;
+ return r;
+}
+
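+// Points the source and prediction buffers at sub-block i of the 8x8 block.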
+static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
+ MB_MODE_INFO *const mbmi = &x->e_mbd.mi_8x8[0]->mbmi;
+ struct macroblock_plane *const p = &x->plane[0];
+ struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
+
+ p->src.buf = raster_block_offset_uint8(BLOCK_8X8, i, p->src.buf,
+ p->src.stride);
+ assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
+ pd->pre[0].buf = raster_block_offset_uint8(BLOCK_8X8, i, pd->pre[0].buf,
+ pd->pre[0].stride);
+ if (mbmi->ref_frame[1])
+ pd->pre[1].buf = raster_block_offset_uint8(BLOCK_8X8, i, pd->pre[1].buf,
+ pd->pre[1].stride);
+}
+
+static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
+ struct buf_2d orig_pre[2]) {
+ MB_MODE_INFO *mbmi = &x->e_mbd.mi_8x8[0]->mbmi;
+ x->plane[0].src = orig_src;
+ x->e_mbd.plane[0].pre[0] = orig_pre[0];
+ if (mbmi->ref_frame[1])
+ x->e_mbd.plane[0].pre[1] = orig_pre[1];
+}
+
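+// For each 4x4/4x8/8x4 sub-block, evaluates the inter modes (including a
+// new-MV search) under the given interpolation filter and accumulates the
+// segment rate/distortion totals into bsi_buf[filter_idx].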
+static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
+ BEST_SEG_INFO *bsi_buf, int filter_idx,
+ int_mv seg_mvs[4][MAX_REF_FRAMES],
+ int mi_row, int mi_col) {
+ int i, j, br = 0, idx, idy;
+ int64_t bd = 0, block_sse = 0;
+ MB_PREDICTION_MODE this_mode;
+ MODE_INFO *mi = x->e_mbd.mi_8x8[0];
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ const int label_count = 4;
+ int64_t this_segment_rd = 0;
+ int label_mv_thresh;
+ int segmentyrate = 0;
+ const BLOCK_SIZE bsize = mbmi->sb_type;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ vp9_variance_fn_ptr_t *v_fn_ptr;
+ ENTROPY_CONTEXT t_above[2], t_left[2];
+ BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
+ int mode_idx;
+ int subpelmv = 1, have_ref = 0;
+
+ vpx_memcpy(t_above, x->e_mbd.plane[0].above_context, sizeof(t_above));
+ vpx_memcpy(t_left, x->e_mbd.plane[0].left_context, sizeof(t_left));
+
+ v_fn_ptr = &cpi->fn_ptr[bsize];
+
+ // 64 makes this threshold really big, effectively making it so that we
+ // very rarely check mvs on segments. Setting this to 1 would make the mv
+ // threshold roughly equal to what it is for macroblocks.
+ label_mv_thresh = 1 * bsi->mvthresh / label_count;
+
+ // Segmentation method overheads
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ // TODO(jingning,rbultje): rewrite the rate-distortion optimization
+ // loop for 4x4/4x8/8x4 block coding; to be replaced with a new rd loop.
+ int_mv mode_mv[MB_MODE_COUNT], second_mode_mv[MB_MODE_COUNT];
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
+ MB_PREDICTION_MODE mode_selected = ZEROMV;
+ int64_t best_rd = INT64_MAX;
+ i = idy * 2 + idx;
+
+ frame_mv[ZEROMV][mbmi->ref_frame[0]].as_int = 0;
+ frame_mv[ZEROMV][mbmi->ref_frame[1]].as_int = 0;
+ vp9_append_sub8x8_mvs_for_idx(&cpi->common, &x->e_mbd,
+ &frame_mv[NEARESTMV][mbmi->ref_frame[0]],
+ &frame_mv[NEARMV][mbmi->ref_frame[0]],
+ i, 0, mi_row, mi_col);
+ if (mbmi->ref_frame[1] > 0)
+ vp9_append_sub8x8_mvs_for_idx(&cpi->common, &x->e_mbd,
+ &frame_mv[NEARESTMV][mbmi->ref_frame[1]],
+ &frame_mv[NEARMV][mbmi->ref_frame[1]],
+ i, 1, mi_row, mi_col);
+
+ // search for the best motion vector on this segment
+ for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
+ const struct buf_2d orig_src = x->plane[0].src;
+ struct buf_2d orig_pre[2];
+
+ mode_idx = inter_mode_offset(this_mode);
+ bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
+
+ // if we're near/nearest and mv == 0,0, compare to zeromv
+ if ((this_mode == NEARMV || this_mode == NEARESTMV ||
+ this_mode == ZEROMV) &&
+ frame_mv[this_mode][mbmi->ref_frame[0]].as_int == 0 &&
+ (mbmi->ref_frame[1] <= 0 ||
+ frame_mv[this_mode][mbmi->ref_frame[1]].as_int == 0)) {
+ int rfc = mbmi->mode_context[mbmi->ref_frame[0]];
+ int c1 = cost_mv_ref(cpi, NEARMV, rfc);
+ int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
+ int c3 = cost_mv_ref(cpi, ZEROMV, rfc);
+
+ if (this_mode == NEARMV) {
+ if (c1 > c3)
+ continue;
+ } else if (this_mode == NEARESTMV) {
+ if (c2 > c3)
+ continue;
+ } else {
+ assert(this_mode == ZEROMV);
+ if (mbmi->ref_frame[1] <= 0) {
+ if ((c3 >= c2 &&
+ frame_mv[NEARESTMV][mbmi->ref_frame[0]].as_int == 0) ||
+ (c3 >= c1 &&
+ frame_mv[NEARMV][mbmi->ref_frame[0]].as_int == 0))
+ continue;
+ } else {
+ if ((c3 >= c2 &&
+ frame_mv[NEARESTMV][mbmi->ref_frame[0]].as_int == 0 &&
+ frame_mv[NEARESTMV][mbmi->ref_frame[1]].as_int == 0) ||
+ (c3 >= c1 &&
+ frame_mv[NEARMV][mbmi->ref_frame[0]].as_int == 0 &&
+ frame_mv[NEARMV][mbmi->ref_frame[1]].as_int == 0))
+ continue;
+ }
+ }
+ }
+
+ vpx_memcpy(orig_pre, x->e_mbd.plane[0].pre, sizeof(orig_pre));
+ vpx_memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
+ sizeof(bsi->rdstat[i][mode_idx].ta));
+ vpx_memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
+ sizeof(bsi->rdstat[i][mode_idx].tl));
+
+ // motion search for newmv (single predictor case only)
+ if (mbmi->ref_frame[1] <= 0 && this_mode == NEWMV &&
+ seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV) {
+ int step_param = 0;
+ int further_steps;
+ int thissme, bestsme = INT_MAX;
+ int sadpb = x->sadperbit4;
+ int_mv mvp_full;
+ int max_mv;
+
+ /* Is the best so far sufficiently good that we can't justify doing
+ * a new motion search? */
+ if (best_rd < label_mv_thresh)
+ break;
+
+ if (cpi->compressor_speed) {
+ // use previous block's result as next block's MV predictor.
+ if (i > 0) {
+ bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
+ if (i == 2)
+ bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
+ }
+ }
+ if (i == 0)
+ max_mv = x->max_mv_context[mbmi->ref_frame[0]];
+ else
+ max_mv = MAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
+
+ if (cpi->sf.auto_mv_step_size && cpi->common.show_frame) {
+ // Take a weighted average of the step_params based on the last frame's
+ // max mv magnitude and the best ref mvs of the current block for
+ // the given reference.
+ step_param = (vp9_init_search_range(cpi, max_mv) +
+ cpi->mv_step_param) >> 1;
+ } else {
+ step_param = cpi->mv_step_param;
+ }
+
+ mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3;
+ mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;
+
+ if (cpi->sf.adaptive_motion_search && cpi->common.show_frame) {
+ mvp_full.as_mv.row = x->pred_mv[mbmi->ref_frame[0]].as_mv.row >> 3;
+ mvp_full.as_mv.col = x->pred_mv[mbmi->ref_frame[0]].as_mv.col >> 3;
+ step_param = MAX(step_param, 8);
+ }
+
+ further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
+ // adjust src pointer for this block
+ mi_buf_shift(x, i);
+ if (cpi->sf.search_method == HEX) {
+ bestsme = vp9_hex_search(x, &mvp_full,
+ step_param,
+ sadpb, 1, v_fn_ptr, 1,
+ bsi->ref_mv, &mode_mv[NEWMV]);
+ } else if (cpi->sf.search_method == SQUARE) {
+ bestsme = vp9_square_search(x, &mvp_full,
+ step_param,
+ sadpb, 1, v_fn_ptr, 1,
+ bsi->ref_mv, &mode_mv[NEWMV]);
+ } else if (cpi->sf.search_method == BIGDIA) {
+ bestsme = vp9_bigdia_search(x, &mvp_full,
+ step_param,
+ sadpb, 1, v_fn_ptr, 1,
+ bsi->ref_mv, &mode_mv[NEWMV]);
+ } else {
+ bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param,
+ sadpb, further_steps, 0, v_fn_ptr,
+ bsi->ref_mv, &mode_mv[NEWMV]);
+ }
+
+ // Should we do a full search (best quality only)?
+ if (cpi->compressor_speed == 0) {
+ /* Check if mvp_full is within the range. */
+ clamp_mv(&mvp_full.as_mv, x->mv_col_min, x->mv_col_max,
+ x->mv_row_min, x->mv_row_max);
+
+ thissme = cpi->full_search_sad(x, &mvp_full,
+ sadpb, 16, v_fn_ptr,
+ x->nmvjointcost, x->mvcost,
+ bsi->ref_mv, i);
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEWMV].as_int = mi->bmi[i].as_mv[0].as_int;
+ } else {
+ /* The full search result is actually worse, so reinstate the
+ * previous best vector. */
+ mi->bmi[i].as_mv[0].as_int = mode_mv[NEWMV].as_int;
+ }
+ }
+
+ if (bestsme < INT_MAX) {
+ int distortion;
+ unsigned int sse;
+ cpi->find_fractional_mv_step(x, &mode_mv[NEWMV],
+ bsi->ref_mv, x->errorperbit, v_fn_ptr,
+ 0, cpi->sf.subpel_iters_per_step,
+ x->nmvjointcost, x->mvcost,
+ &distortion, &sse);
+
+ // save motion search result for use in compound prediction
+ seg_mvs[i][mbmi->ref_frame[0]].as_int = mode_mv[NEWMV].as_int;
+ }
+
+ if (cpi->sf.adaptive_motion_search)
+ x->pred_mv[mbmi->ref_frame[0]].as_int = mode_mv[NEWMV].as_int;
+
+ // restore src pointers
+ mi_buf_restore(x, orig_src, orig_pre);
+ }
+
+ if (mbmi->ref_frame[1] > 0 && this_mode == NEWMV &&
+ mbmi->interp_filter == EIGHTTAP) {
+ if (seg_mvs[i][mbmi->ref_frame[1]].as_int == INVALID_MV ||
+ seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV)
+ continue;
+
+ // adjust src pointers
+ mi_buf_shift(x, i);
+ if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
+ int rate_mv;
+ joint_motion_search(cpi, x, bsize, frame_mv[this_mode],
+ mi_row, mi_col, seg_mvs[i],
+ &rate_mv);
+ seg_mvs[i][mbmi->ref_frame[0]].as_int =
+ frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
+ seg_mvs[i][mbmi->ref_frame[1]].as_int =
+ frame_mv[this_mode][mbmi->ref_frame[1]].as_int;
+ }
+ // restore src pointers
+ mi_buf_restore(x, orig_src, orig_pre);
+ }
+
+ bsi->rdstat[i][mode_idx].brate =
+ labels2mode(x, i, this_mode, &mode_mv[this_mode],
+ &second_mode_mv[this_mode], frame_mv, seg_mvs[i],
+ bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost,
+ x->mvcost, cpi);
+
+ bsi->rdstat[i][mode_idx].mvs[0].as_int = mode_mv[this_mode].as_int;
+ if (num_4x4_blocks_wide > 1)
+ bsi->rdstat[i + 1][mode_idx].mvs[0].as_int =
+ mode_mv[this_mode].as_int;
+ if (num_4x4_blocks_high > 1)
+ bsi->rdstat[i + 2][mode_idx].mvs[0].as_int =
+ mode_mv[this_mode].as_int;
+ if (mbmi->ref_frame[1] > 0) {
+ bsi->rdstat[i][mode_idx].mvs[1].as_int =
+ second_mode_mv[this_mode].as_int;
+ if (num_4x4_blocks_wide > 1)
+ bsi->rdstat[i + 1][mode_idx].mvs[1].as_int =
+ second_mode_mv[this_mode].as_int;
+ if (num_4x4_blocks_high > 1)
+ bsi->rdstat[i + 2][mode_idx].mvs[1].as_int =
+ second_mode_mv[this_mode].as_int;
+ }
+
+ // Trap vectors that reach beyond the UMV borders
+ if (mv_check_bounds(x, &mode_mv[this_mode]))
+ continue;
+ if (mbmi->ref_frame[1] > 0 &&
+ mv_check_bounds(x, &second_mode_mv[this_mode]))
+ continue;
+
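+ // When re-running the search for a later interpolation filter, RD stats
+ // already computed for the same full-pel MV under an earlier filter can be
+ // reused, since integer-pel prediction does not depend on the filter.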
+ if (filter_idx > 0) {
+ BEST_SEG_INFO *ref_bsi = bsi_buf;
+ subpelmv = (mode_mv[this_mode].as_mv.row & 0x0f) ||
+ (mode_mv[this_mode].as_mv.col & 0x0f);
+ have_ref = mode_mv[this_mode].as_int ==
+ ref_bsi->rdstat[i][mode_idx].mvs[0].as_int;
+ if (mbmi->ref_frame[1] > 0) {
+ subpelmv |= (second_mode_mv[this_mode].as_mv.row & 0x0f) ||
+ (second_mode_mv[this_mode].as_mv.col & 0x0f);
+ have_ref &= second_mode_mv[this_mode].as_int ==
+ ref_bsi->rdstat[i][mode_idx].mvs[1].as_int;
+ }
+
+ if (filter_idx > 1 && !subpelmv && !have_ref) {
+ ref_bsi = bsi_buf + 1;
+ have_ref = mode_mv[this_mode].as_int ==
+ ref_bsi->rdstat[i][mode_idx].mvs[0].as_int;
+ if (mbmi->ref_frame[1] > 0) {
+ have_ref &= second_mode_mv[this_mode].as_int ==
+ ref_bsi->rdstat[i][mode_idx].mvs[1].as_int;
+ }
+ }
+
+ if (!subpelmv && have_ref &&
+ ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
+ vpx_memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
+ sizeof(SEG_RDSTAT));
+ if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
+ mode_selected = this_mode;
+ best_rd = bsi->rdstat[i][mode_idx].brdcost;
+ }
+ continue;
+ }
+ }
+
+ bsi->rdstat[i][mode_idx].brdcost =
+ encode_inter_mb_segment(cpi, x,
+ bsi->segment_rd - this_segment_rd, i,
+ &bsi->rdstat[i][mode_idx].byrate,
+ &bsi->rdstat[i][mode_idx].bdist,
+ &bsi->rdstat[i][mode_idx].bsse,
+ bsi->rdstat[i][mode_idx].ta,
+ bsi->rdstat[i][mode_idx].tl);
+ if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
+ bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, x->rddiv,
+ bsi->rdstat[i][mode_idx].brate, 0);
+ bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
+ bsi->rdstat[i][mode_idx].eobs = x->e_mbd.plane[0].eobs[i];
+ }
+
+ if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
+ mode_selected = this_mode;
+ best_rd = bsi->rdstat[i][mode_idx].brdcost;
+ }
+ } /*for each 4x4 mode*/
+
+ if (best_rd == INT64_MAX) {
+ int iy, midx;
+ for (iy = i + 1; iy < 4; ++iy)
+ for (midx = 0; midx < INTER_MODES; ++midx)
+ bsi->rdstat[iy][midx].brdcost = INT64_MAX;
+ bsi->segment_rd = INT64_MAX;
+ return;
+ }
+
+ mode_idx = inter_mode_offset(mode_selected);
+ vpx_memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
+ vpx_memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
+
+ labels2mode(x, i, mode_selected, &mode_mv[mode_selected],
+ &second_mode_mv[mode_selected], frame_mv, seg_mvs[i],
+ bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost,
+ x->mvcost, cpi);
+
+ br += bsi->rdstat[i][mode_idx].brate;
+ bd += bsi->rdstat[i][mode_idx].bdist;
+ block_sse += bsi->rdstat[i][mode_idx].bsse;
+ segmentyrate += bsi->rdstat[i][mode_idx].byrate;
+ this_segment_rd += bsi->rdstat[i][mode_idx].brdcost;
+
+ if (this_segment_rd > bsi->segment_rd) {
+ int iy, midx;
+ for (iy = i + 1; iy < 4; ++iy)
+ for (midx = 0; midx < INTER_MODES; ++midx)
+ bsi->rdstat[iy][midx].brdcost = INT64_MAX;
+ bsi->segment_rd = INT64_MAX;
+ return;
+ }
+
+ for (j = 1; j < num_4x4_blocks_high; ++j)
+ vpx_memcpy(&x->partition_info->bmi[i + j * 2],
+ &x->partition_info->bmi[i],
+ sizeof(x->partition_info->bmi[i]));
+ for (j = 1; j < num_4x4_blocks_wide; ++j)
+ vpx_memcpy(&x->partition_info->bmi[i + j],
+ &x->partition_info->bmi[i],
+ sizeof(x->partition_info->bmi[i]));
+ }
+ } /* for each label */
+
+ bsi->r = br;
+ bsi->d = bd;
+ bsi->segment_yrate = segmentyrate;
+ bsi->segment_rd = this_segment_rd;
+ bsi->sse = block_sse;
+
+ // update the coding decisions
+ for (i = 0; i < 4; ++i)
+ bsi->modes[i] = x->partition_info->bmi[i].mode;
+}
+
+static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
+ int64_t best_rd,
+ int *returntotrate,
+ int *returnyrate,
+ int64_t *returndistortion,
+ int *skippable, int64_t *psse,
+ int mvthresh,
+ int_mv seg_mvs[4][MAX_REF_FRAMES],
+ BEST_SEG_INFO *bsi_buf,
+ int filter_idx,
+ int mi_row, int mi_col) {
+ int i;
+ BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
+ MACROBLOCKD *xd = &x->e_mbd;
+ MODE_INFO *mi = xd->this_mi;
+ MB_MODE_INFO *mbmi = &mi->mbmi;
+ int mode_idx;
+
+ vp9_zero(*bsi);
+
+ bsi->segment_rd = best_rd;
+ bsi->ref_mv = best_ref_mv;
+ bsi->second_ref_mv = second_best_ref_mv;
+ bsi->mvp.as_int = best_ref_mv->as_int;
+ bsi->mvthresh = mvthresh;
+
+ for (i = 0; i < 4; i++)
+ bsi->modes[i] = ZEROMV;
+
+ rd_check_segment_txsize(cpi, x, bsi_buf, filter_idx, seg_mvs, mi_row, mi_col);
+
+ if (bsi->segment_rd > best_rd)
+ return INT64_MAX;
+ /* set it to the best */
+ for (i = 0; i < 4; i++) {
+ mode_idx = inter_mode_offset(bsi->modes[i]);
+ mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
+ if (mbmi->ref_frame[1] > 0)
+ mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
+ xd->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs;
+ x->partition_info->bmi[i].mode = bsi->modes[i];
+ }
+
+ /*
+ * used to set mbmi->mv.as_int
+ */
+ *returntotrate = bsi->r;
+ *returndistortion = bsi->d;
+ *returnyrate = bsi->segment_yrate;
+ *skippable = vp9_is_skippable_in_plane(&x->e_mbd, BLOCK_8X8, 0);
+ *psse = bsi->sse;
+ mbmi->mode = bsi->modes[3];
+
+ return bsi->segment_rd;
+}
+
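+// Tries each candidate reference MV at full-pel accuracy and records the
+// index of the one with the lowest SAD, along with the largest MV magnitude
+// seen, to seed and bound the subsequent motion search.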
+static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
+ uint8_t *ref_y_buffer, int ref_y_stride,
+ int ref_frame, BLOCK_SIZE block_size ) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ int_mv this_mv;
+ int i;
+ int zero_seen = 0;
+ int best_index = 0;
+ int best_sad = INT_MAX;
+ int this_sad = INT_MAX;
+ unsigned int max_mv = 0;
+
+ uint8_t *src_y_ptr = x->plane[0].src.buf;
+ uint8_t *ref_y_ptr;
+ int row_offset, col_offset;
+ int num_mv_refs = MAX_MV_REF_CANDIDATES +
+ (cpi->sf.adaptive_motion_search &&
+ cpi->common.show_frame &&
+ block_size < cpi->sf.max_partition_size);
+
+ // Get the sad for each candidate reference mv
+ for (i = 0; i < num_mv_refs; i++) {
+ this_mv.as_int = (i < MAX_MV_REF_CANDIDATES) ?
+ mbmi->ref_mvs[ref_frame][i].as_int : x->pred_mv[ref_frame].as_int;
+
+ max_mv = MAX(max_mv,
+ MAX(abs(this_mv.as_mv.row), abs(this_mv.as_mv.col)) >> 3);
+ // The list is at an end if we see 0 for a second time.
+ if (!this_mv.as_int && zero_seen)
+ break;
+ zero_seen = zero_seen || !this_mv.as_int;
+
+ row_offset = this_mv.as_mv.row >> 3;
+ col_offset = this_mv.as_mv.col >> 3;
+ ref_y_ptr = ref_y_buffer + (ref_y_stride * row_offset) + col_offset;
+
+ // Find sad for current vector.
+ this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride,
+ ref_y_ptr, ref_y_stride,
+ 0x7fffffff);
+
+ // Note if it is the best so far.
+ if (this_sad < best_sad) {
+ best_sad = this_sad;
+ best_index = i;
+ }
+ }
+
+ // Note the index of the mv that worked best in the reference list.
+ x->mv_best_ref_index[ref_frame] = best_index;
+ x->max_mv_context[ref_frame] = max_mv;
+}
+
+static void estimate_ref_frame_costs(VP9_COMP *cpi, int segment_id,
+ unsigned int *ref_costs_single,
+ unsigned int *ref_costs_comp,
+ vp9_prob *comp_mode_p) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
+ SEG_LVL_REF_FRAME);
+ if (seg_ref_active) {
+ vpx_memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
+ vpx_memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
+ *comp_mode_p = 128;
+ } else {
+ vp9_prob intra_inter_p = vp9_get_pred_prob_intra_inter(cm, xd);
+ vp9_prob comp_inter_p = 128;
+
+ if (cm->comp_pred_mode == HYBRID_PREDICTION) {
+ comp_inter_p = vp9_get_pred_prob_comp_inter_inter(cm, xd);
+ *comp_mode_p = comp_inter_p;
+ } else {
+ *comp_mode_p = 128;
+ }
+
+ ref_costs_single[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
+
+ if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) {
+ vp9_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
+ vp9_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);
+ unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
+
+ if (cm->comp_pred_mode == HYBRID_PREDICTION)
+ base_cost += vp9_cost_bit(comp_inter_p, 0);
+
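+ // Single-reference coding tree: the first bit separates LAST from
+ // {GOLDEN, ALTREF}; the second separates GOLDEN from ALTREF.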
+ ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
+ ref_costs_single[ALTREF_FRAME] = base_cost;
+ ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
+ ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
+ ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
+ ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
+ ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
+ } else {
+ ref_costs_single[LAST_FRAME] = 512;
+ ref_costs_single[GOLDEN_FRAME] = 512;
+ ref_costs_single[ALTREF_FRAME] = 512;
+ }
+ if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) {
+ vp9_prob ref_comp_p = vp9_get_pred_prob_comp_ref_p(cm, xd);
+ unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
+
+ if (cm->comp_pred_mode == HYBRID_PREDICTION)
+ base_cost += vp9_cost_bit(comp_inter_p, 1);
+
+ ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0);
+ ref_costs_comp[GOLDEN_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 1);
+ } else {
+ ref_costs_comp[LAST_FRAME] = 512;
+ ref_costs_comp[GOLDEN_FRAME] = 512;
+ }
+ }
+}
+
+static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
+ int mode_index,
+ PARTITION_INFO *partition,
+ int_mv *ref_mv,
+ int_mv *second_ref_mv,
+ int64_t comp_pred_diff[NB_PREDICTION_TYPES],
+ int64_t tx_size_diff[TX_MODES],
+ int64_t best_filter_diff[SWITCHABLE_FILTERS + 1]) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ // Take a snapshot of the coding context so it can be
+ // restored if we decide to encode this way
+ ctx->skip = x->skip;
+ ctx->best_mode_index = mode_index;
+ ctx->mic = *xd->this_mi;
+
+ if (partition)
+ ctx->partition_info = *partition;
+
+ ctx->best_ref_mv.as_int = ref_mv->as_int;
+ ctx->second_best_ref_mv.as_int = second_ref_mv->as_int;
+
+ ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_PREDICTION_ONLY];
+ ctx->comp_pred_diff = (int)comp_pred_diff[COMP_PREDICTION_ONLY];
+ ctx->hybrid_pred_diff = (int)comp_pred_diff[HYBRID_PREDICTION];
+
+ // FIXME(rbultje) does this memcpy the whole array? I believe sizeof()
+ // doesn't actually work this way
+ memcpy(ctx->tx_rd_diff, tx_size_diff, sizeof(ctx->tx_rd_diff));
+ memcpy(ctx->best_filter_diff, best_filter_diff,
+ sizeof(*best_filter_diff) * (SWITCHABLE_FILTERS + 1));
+}
+
+static void setup_pred_block(const MACROBLOCKD *xd,
+ struct buf_2d dst[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src,
+ int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
+ int i;
+
+ dst[0].buf = src->y_buffer;
+ dst[0].stride = src->y_stride;
+ dst[1].buf = src->u_buffer;
+ dst[2].buf = src->v_buffer;
+ dst[1].stride = dst[2].stride = src->uv_stride;
+#if CONFIG_ALPHA
+ dst[3].buf = src->alpha_buffer;
+ dst[3].stride = src->alpha_stride;
+#endif
+
+ // TODO(jkoleszar): Make scale factors per-plane data
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ setup_pred_plane(dst + i, dst[i].buf, dst[i].stride, mi_row, mi_col,
+ i ? scale_uv : scale,
+ xd->plane[i].subsampling_x, xd->plane[i].subsampling_y);
+ }
+}
+
+static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
+ int idx, MV_REFERENCE_FRAME frame_type,
+ BLOCK_SIZE block_size,
+ int mi_row, int mi_col,
+ int_mv frame_nearest_mv[MAX_REF_FRAMES],
+ int_mv frame_near_mv[MAX_REF_FRAMES],
+ struct buf_2d yv12_mb[4][MAX_MB_PLANE],
+ struct scale_factors scale[MAX_REF_FRAMES]) {
+ VP9_COMMON *cm = &cpi->common;
+ YV12_BUFFER_CONFIG *yv12 = &cm->yv12_fb[cpi->common.ref_frame_map[idx]];
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+
+ // set up scaling factors
+ scale[frame_type] = cpi->common.active_ref_scale[frame_type - 1];
+
+ scale[frame_type].x_offset_q4 =
+ ROUND_POWER_OF_TWO(mi_col * MI_SIZE * scale[frame_type].x_scale_fp,
+ REF_SCALE_SHIFT) & 0xf;
+ scale[frame_type].y_offset_q4 =
+ ROUND_POWER_OF_TWO(mi_row * MI_SIZE * scale[frame_type].y_scale_fp,
+ REF_SCALE_SHIFT) & 0xf;
+
+ // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
+ // use the UV scaling factors.
+ setup_pred_block(xd, yv12_mb[frame_type], yv12, mi_row, mi_col,
+ &scale[frame_type], &scale[frame_type]);
+
+ // Gets an initial list of candidate vectors from neighbours and orders them
+ vp9_find_mv_refs(&cpi->common, xd, xd->this_mi,
+ xd->last_mi,
+ frame_type,
+ mbmi->ref_mvs[frame_type], mi_row, mi_col);
+
+ // Candidate refinement carried out at encoder and decoder
+ vp9_find_best_ref_mvs(xd,
+ mbmi->ref_mvs[frame_type],
+ &frame_nearest_mv[frame_type],
+ &frame_near_mv[frame_type]);
+
+ // Further refinement that is encode side only to test the top few candidates
+ // in full and choose the best as the centre point for subsequent searches.
+ // The current implementation doesn't support scaling.
+ if (!vp9_is_scaled(&scale[frame_type]))
+ mv_pred(cpi, x, yv12_mb[frame_type][0].buf, yv12->y_stride,
+ frame_type, block_size);
+}
+
+static YV12_BUFFER_CONFIG *get_scaled_ref_frame(VP9_COMP *cpi, int ref_frame) {
+ YV12_BUFFER_CONFIG *scaled_ref_frame = NULL;
+ int fb = get_ref_frame_idx(cpi, ref_frame);
+ int fb_scale = get_scale_ref_frame_idx(cpi, ref_frame);
+ if (cpi->scaled_ref_idx[fb_scale] != cpi->common.ref_frame_map[fb])
+ scaled_ref_frame = &cpi->common.yv12_fb[cpi->scaled_ref_idx[fb_scale]];
+ return scaled_ref_frame;
+}
+
+static INLINE int get_switchable_rate(const MACROBLOCK *x) {
+ const MACROBLOCKD *const xd = &x->e_mbd;
+ const MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ const int ctx = vp9_get_pred_context_switchable_interp(xd);
+ return SWITCHABLE_INTERP_RATE_FACTOR *
+ x->switchable_interp_costs[ctx][mbmi->interp_filter];
+}
+
+static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize,
+ int mi_row, int mi_col,
+ int_mv *tmp_mv, int *rate_mv) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ VP9_COMMON *cm = &cpi->common;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
+ int bestsme = INT_MAX;
+ int further_steps, step_param;
+ int sadpb = x->sadperbit16;
+ int_mv mvp_full;
+ int ref = mbmi->ref_frame[0];
+ int_mv ref_mv = mbmi->ref_mvs[ref][0];
+ const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]);
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+
+ YV12_BUFFER_CONFIG *scaled_ref_frame = get_scaled_ref_frame(cpi, ref);
+
+ if (scaled_ref_frame) {
+ int i;
+ // Swap out the reference frame for a version that's been scaled to
+ // match the resolution of the current frame, allowing the existing
+ // motion search code to be used without additional modifications.
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ backup_yv12[i] = xd->plane[i].pre[0];
+
+ setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
+ }
+
+ vp9_clamp_mv_min_max(x, &ref_mv.as_mv);
+
+ // Adjust search parameters based on small partitions' result.
+ if (x->fast_ms) {
+ // && abs(mvp_full.as_mv.row - x->pred_mv.as_mv.row) < 24 &&
+ // abs(mvp_full.as_mv.col - x->pred_mv.as_mv.col) < 24) {
+ // adjust search range
+ step_param = 6;
+ if (x->fast_ms > 1)
+ step_param = 8;
+
+ // Get prediction MV.
+ mvp_full.as_int = x->pred_mv[ref].as_int;
+
+ // Adjust MV sign if needed.
+ if (cm->ref_frame_sign_bias[ref]) {
+ mvp_full.as_mv.col *= -1;
+ mvp_full.as_mv.row *= -1;
+ }
+ } else {
+ // Work out the size of the first step in the mv step search.
+ // 0 here means the maximum-length first step; 1 means MAX >> 1, etc.
+ if (cpi->sf.auto_mv_step_size && cpi->common.show_frame) {
+ // Take a weighted average of the step_params based on the last frame's
+ // max mv magnitude and that based on the best ref mvs of the current
+ // block for the given reference.
+ step_param = (vp9_init_search_range(cpi, x->max_mv_context[ref]) +
+ cpi->mv_step_param) >> 1;
+ } else {
+ step_param = cpi->mv_step_param;
+ }
+ }
+
+ if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64 &&
+ cpi->common.show_frame) {
+ int boffset = 2 * (b_width_log2(BLOCK_64X64) - MIN(b_height_log2(bsize),
+ b_width_log2(bsize)));
+ step_param = MAX(step_param, boffset);
+ }
+
+ mvp_full.as_int = x->mv_best_ref_index[ref] < MAX_MV_REF_CANDIDATES ?
+ mbmi->ref_mvs[ref][x->mv_best_ref_index[ref]].as_int :
+ x->pred_mv[ref].as_int;
+
+ mvp_full.as_mv.col >>= 3;
+ mvp_full.as_mv.row >>= 3;
+
+ // Further step/diamond searches as necessary
+ further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
+
+ if (cpi->sf.search_method == HEX) {
+ bestsme = vp9_hex_search(x, &mvp_full,
+ step_param,
+ sadpb, 1,
+ &cpi->fn_ptr[block_size], 1,
+ &ref_mv, tmp_mv);
+ } else if (cpi->sf.search_method == SQUARE) {
+ bestsme = vp9_square_search(x, &mvp_full,
+ step_param,
+ sadpb, 1,
+ &cpi->fn_ptr[block_size], 1,
+ &ref_mv, tmp_mv);
+ } else if (cpi->sf.search_method == BIGDIA) {
+ bestsme = vp9_bigdia_search(x, &mvp_full,
+ step_param,
+ sadpb, 1,
+ &cpi->fn_ptr[block_size], 1,
+ &ref_mv, tmp_mv);
+ } else {
+ bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param,
+ sadpb, further_steps, 1,
+ &cpi->fn_ptr[block_size],
+ &ref_mv, tmp_mv);
+ }
+
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ if (bestsme < INT_MAX) {
+ int dis; /* TODO: use dis in distortion calculation later. */
+ unsigned int sse;
+ cpi->find_fractional_mv_step(x, tmp_mv, &ref_mv,
+ x->errorperbit,
+ &cpi->fn_ptr[block_size],
+ 0, cpi->sf.subpel_iters_per_step,
+ x->nmvjointcost, x->mvcost,
+ &dis, &sse);
+ }
+ *rate_mv = vp9_mv_bit_cost(tmp_mv, &ref_mv,
+ x->nmvjointcost, x->mvcost,
+ 96);
+
+ if (cpi->sf.adaptive_motion_search && cpi->common.show_frame)
+ x->pred_mv[ref].as_int = tmp_mv->as_int;
+
+ if (scaled_ref_frame) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ xd->plane[i].pre[0] = backup_yv12[i];
+ }
+}
+
+static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize,
+ int_mv *frame_mv,
+ int mi_row, int mi_col,
+ int_mv single_newmv[MAX_REF_FRAMES],
+ int *rate_mv) {
+ int pw = 4 << b_width_log2(bsize), ph = 4 << b_height_log2(bsize);
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ int refs[2] = { mbmi->ref_frame[0],
+ (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
+ int_mv ref_mv[2];
+ const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]);
+ int ite;
+ // Prediction buffer from second frame.
+ uint8_t *second_pred = vpx_memalign(16, pw * ph * sizeof(uint8_t));
+
+ // Do joint motion search in compound mode to get more accurate mv.
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
+ struct buf_2d backup_second_yv12[MAX_MB_PLANE] = {{0}};
+ struct buf_2d scaled_first_yv12;
+ int last_besterr[2] = {INT_MAX, INT_MAX};
+ YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL};
+ scaled_ref_frame[0] = get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
+ scaled_ref_frame[1] = get_scaled_ref_frame(cpi, mbmi->ref_frame[1]);
+
+ ref_mv[0] = mbmi->ref_mvs[refs[0]][0];
+ ref_mv[1] = mbmi->ref_mvs[refs[1]][0];
+
+ if (scaled_ref_frame[0]) {
+ int i;
+ // Swap out the reference frame for a version that's been scaled to
+ // match the resolution of the current frame, allowing the existing
+ // motion search code to be used without additional modifications.
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ backup_yv12[i] = xd->plane[i].pre[0];
+ setup_pre_planes(xd, 0, scaled_ref_frame[0], mi_row, mi_col, NULL);
+ }
+
+ if (scaled_ref_frame[1]) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ backup_second_yv12[i] = xd->plane[i].pre[1];
+
+ setup_pre_planes(xd, 0, scaled_ref_frame[1], mi_row, mi_col, NULL);
+ }
+
+ xd->scale_factor[0].set_scaled_offsets(&xd->scale_factor[0],
+ mi_row, mi_col);
+ xd->scale_factor[1].set_scaled_offsets(&xd->scale_factor[1],
+ mi_row, mi_col);
+ scaled_first_yv12 = xd->plane[0].pre[0];
+
+ // Initialize mv using single prediction mode result.
+ frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
+ frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
+
+ // Allow joint search multiple times iteratively for each ref frame,
+ // and break out of the search loop if it can't find a better mv.
+ for (ite = 0; ite < 4; ite++) {
+ struct buf_2d ref_yv12[2];
+ int bestsme = INT_MAX;
+ int sadpb = x->sadperbit16;
+ int_mv tmp_mv;
+ int search_range = 3;
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+ int id = ite % 2;
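+ // Alternate which reference's MV is refined on each iteration while the
+ // other is held fixed.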
+
+ // Initialized here because of a compiler problem in Visual Studio.
+ ref_yv12[0] = xd->plane[0].pre[0];
+ ref_yv12[1] = xd->plane[0].pre[1];
+
+ // Get pred block from second frame.
+ vp9_build_inter_predictor(ref_yv12[!id].buf,
+ ref_yv12[!id].stride,
+ second_pred, pw,
+ &frame_mv[refs[!id]].as_mv,
+ &xd->scale_factor[!id],
+ pw, ph, 0,
+ &xd->subpix, MV_PRECISION_Q3);
+
+ // Compound motion search on first ref frame.
+ if (id)
+ xd->plane[0].pre[0] = ref_yv12[id];
+ vp9_clamp_mv_min_max(x, &ref_mv[id].as_mv);
+
+ // Use mv result from single mode as mvp.
+ tmp_mv.as_int = frame_mv[refs[id]].as_int;
+
+ tmp_mv.as_mv.col >>= 3;
+ tmp_mv.as_mv.row >>= 3;
+
+ // Small-range full-pixel motion search
+ bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb,
+ search_range,
+ &cpi->fn_ptr[block_size],
+ x->nmvjointcost, x->mvcost,
+ &ref_mv[id], second_pred,
+ pw, ph);
+
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ if (bestsme < INT_MAX) {
+ int dis; /* TODO: use dis in distortion calculation later. */
+ unsigned int sse;
+
+ bestsme = cpi->find_fractional_mv_step_comp(
+ x, &tmp_mv,
+ &ref_mv[id],
+ x->errorperbit,
+ &cpi->fn_ptr[block_size],
+ 0, cpi->sf.subpel_iters_per_step,
+ x->nmvjointcost, x->mvcost,
+ &dis, &sse, second_pred,
+ pw, ph);
+ }
+
+ if (id)
+ xd->plane[0].pre[0] = scaled_first_yv12;
+
+ if (bestsme < last_besterr[id]) {
+ frame_mv[refs[id]].as_int = tmp_mv.as_int;
+ last_besterr[id] = bestsme;
+ } else {
+ break;
+ }
+ }
+
+ // restore the predictor
+ if (scaled_ref_frame[0]) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ xd->plane[i].pre[0] = backup_yv12[i];
+ }
+
+ if (scaled_ref_frame[1]) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ xd->plane[i].pre[1] = backup_second_yv12[i];
+ }
+ *rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]],
+ &mbmi->ref_mvs[refs[0]][0],
+ x->nmvjointcost, x->mvcost, 96);
+ *rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]],
+ &mbmi->ref_mvs[refs[1]][0],
+ x->nmvjointcost, x->mvcost, 96);
+
+ vpx_free(second_pred);
+}
+
+static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize,
+ int64_t txfm_cache[],
+ int *rate2, int64_t *distortion,
+ int *skippable,
+ int *rate_y, int64_t *distortion_y,
+ int *rate_uv, int64_t *distortion_uv,
+ int *mode_excluded, int *disable_skip,
+ INTERPOLATIONFILTERTYPE *best_filter,
+ int_mv (*mode_mv)[MAX_REF_FRAMES],
+ int mi_row, int mi_col,
+ int_mv single_newmv[MAX_REF_FRAMES],
+ int64_t *psse,
+ const int64_t ref_best_rd) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ const int is_comp_pred = (mbmi->ref_frame[1] > 0);
+ const int num_refs = is_comp_pred ? 2 : 1;
+ const int this_mode = mbmi->mode;
+ int_mv *frame_mv = mode_mv[this_mode];
+ int i;
+ int refs[2] = { mbmi->ref_frame[0],
+ (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
+ int_mv cur_mv[2];
+ int64_t this_rd = 0;
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf, MAX_MB_PLANE * 64 * 64);
+ int pred_exists = 0;
+ int intpel_mv;
+ int64_t rd, best_rd = INT64_MAX;
+ int best_needs_copy = 0;
+ uint8_t *orig_dst[MAX_MB_PLANE];
+ int orig_dst_stride[MAX_MB_PLANE];
+ int rs = 0;
+
+ if (this_mode == NEWMV) {
+ int rate_mv;
+ if (is_comp_pred) {
+ // Initialize mv using single prediction mode result.
+ frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
+ frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
+
+ if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
+ joint_motion_search(cpi, x, bsize, frame_mv,
+ mi_row, mi_col, single_newmv, &rate_mv);
+ } else {
+ rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]],
+ &mbmi->ref_mvs[refs[0]][0],
+ x->nmvjointcost, x->mvcost, 96);
+ rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]],
+ &mbmi->ref_mvs[refs[1]][0],
+ x->nmvjointcost, x->mvcost, 96);
+ }
+ if (frame_mv[refs[0]].as_int == INVALID_MV ||
+ frame_mv[refs[1]].as_int == INVALID_MV)
+ return INT64_MAX;
+ *rate2 += rate_mv;
+ } else {
+ int_mv tmp_mv;
+ single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
+ *rate2 += rate_mv;
+ frame_mv[refs[0]].as_int =
+ xd->this_mi->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+ single_newmv[refs[0]].as_int = tmp_mv.as_int;
+ }
+ }
+
+ // if we're near/nearest and mv == 0,0, compare to zeromv
+ if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
+ frame_mv[refs[0]].as_int == 0 &&
+ !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) &&
+ (num_refs == 1 || frame_mv[refs[1]].as_int == 0)) {
+ int rfc = mbmi->mode_context[mbmi->ref_frame[0]];
+ int c1 = cost_mv_ref(cpi, NEARMV, rfc);
+ int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
+ int c3 = cost_mv_ref(cpi, ZEROMV, rfc);
+
+ if (this_mode == NEARMV) {
+ if (c1 > c3)
+ return INT64_MAX;
+ } else if (this_mode == NEARESTMV) {
+ if (c2 > c3)
+ return INT64_MAX;
+ } else {
+ assert(this_mode == ZEROMV);
+ if (num_refs == 1) {
+ if ((c3 >= c2 &&
+ mode_mv[NEARESTMV][mbmi->ref_frame[0]].as_int == 0) ||
+ (c3 >= c1 &&
+ mode_mv[NEARMV][mbmi->ref_frame[0]].as_int == 0))
+ return INT64_MAX;
+ } else {
+ if ((c3 >= c2 &&
+ mode_mv[NEARESTMV][mbmi->ref_frame[0]].as_int == 0 &&
+ mode_mv[NEARESTMV][mbmi->ref_frame[1]].as_int == 0) ||
+ (c3 >= c1 &&
+ mode_mv[NEARMV][mbmi->ref_frame[0]].as_int == 0 &&
+ mode_mv[NEARMV][mbmi->ref_frame[1]].as_int == 0))
+ return INT64_MAX;
+ }
+ }
+ }
+
+ for (i = 0; i < num_refs; ++i) {
+ cur_mv[i] = frame_mv[refs[i]];
+ // Clip "next_nearest" so that it does not extend to far out of image
+ if (this_mode != NEWMV)
+ clamp_mv2(&cur_mv[i].as_mv, xd);
+
+ if (mv_check_bounds(x, &cur_mv[i]))
+ return INT64_MAX;
+ mbmi->mv[i].as_int = cur_mv[i].as_int;
+ }
+
+ // do first prediction into the destination buffer. Do the next
+ // prediction into a temporary buffer. Then keep track of which one
+ // of these currently holds the best predictor, and use the other
+ // one for future predictions. In the end, copy from tmp_buf to
+ // dst if necessary.
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ orig_dst[i] = xd->plane[i].dst.buf;
+ orig_dst_stride[i] = xd->plane[i].dst.stride;
+ }
+
+  /* We don't include the cost of the second reference here, because there
+   * are only three options: Last/Golden, ARF/Last or Golden/ARF; in other
+   * words, if you present them in that order, the second one is always
+   * known once the first is. */
+ *rate2 += cost_mv_ref(cpi, this_mode,
+ mbmi->mode_context[mbmi->ref_frame[0]]);
+
+ if (!(*mode_excluded)) {
+ if (is_comp_pred) {
+ *mode_excluded = (cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY);
+ } else {
+ *mode_excluded = (cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY);
+ }
+ }
+
+ pred_exists = 0;
+ // Are all MVs integer pel for Y and UV
+ intpel_mv = (mbmi->mv[0].as_mv.row & 15) == 0 &&
+ (mbmi->mv[0].as_mv.col & 15) == 0;
+ if (is_comp_pred)
+ intpel_mv &= (mbmi->mv[1].as_mv.row & 15) == 0 &&
+ (mbmi->mv[1].as_mv.col & 15) == 0;
+ // Search for best switchable filter by checking the variance of
+ // pred error irrespective of whether the filter will be used
+ if (cm->mcomp_filter_type != BILINEAR) {
+ *best_filter = EIGHTTAP;
+ if (x->source_variance <
+ cpi->sf.disable_filter_search_var_thresh) {
+ *best_filter = EIGHTTAP;
+ vp9_zero(cpi->rd_filter_cache);
+ } else {
+ int i, newbest;
+ int tmp_rate_sum = 0;
+ int64_t tmp_dist_sum = 0;
+
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX;
+ for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
+ int j;
+ int64_t rs_rd;
+ mbmi->interp_filter = i;
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
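+        // rs is the rate cost of signalling this filter; rs_rd is that rate
+        // converted to an rd cost with zero distortion.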
+ rs = get_switchable_rate(x);
+ rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
+
+ if (i > 0 && intpel_mv) {
+ cpi->rd_filter_cache[i] = RDCOST(x->rdmult, x->rddiv,
+ tmp_rate_sum, tmp_dist_sum);
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] =
+ MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS],
+ cpi->rd_filter_cache[i] + rs_rd);
+ rd = cpi->rd_filter_cache[i];
+ if (cm->mcomp_filter_type == SWITCHABLE)
+ rd += rs_rd;
+ } else {
+ int rate_sum = 0;
+ int64_t dist_sum = 0;
+ if ((cm->mcomp_filter_type == SWITCHABLE &&
+ (!i || best_needs_copy)) ||
+ (cm->mcomp_filter_type != SWITCHABLE &&
+ (cm->mcomp_filter_type == mbmi->interp_filter ||
+ (i == 0 && intpel_mv)))) {
+ for (j = 0; j < MAX_MB_PLANE; j++) {
+ xd->plane[j].dst.buf = orig_dst[j];
+ xd->plane[j].dst.stride = orig_dst_stride[j];
+ }
+ } else {
+ for (j = 0; j < MAX_MB_PLANE; j++) {
+ xd->plane[j].dst.buf = tmp_buf + j * 64 * 64;
+ xd->plane[j].dst.stride = 64;
+ }
+ }
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum);
+ cpi->rd_filter_cache[i] = RDCOST(x->rdmult, x->rddiv,
+ rate_sum, dist_sum);
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] =
+ MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS],
+ cpi->rd_filter_cache[i] + rs_rd);
+ rd = cpi->rd_filter_cache[i];
+ if (cm->mcomp_filter_type == SWITCHABLE)
+ rd += rs_rd;
+ if (i == 0 && intpel_mv) {
+ tmp_rate_sum = rate_sum;
+ tmp_dist_sum = dist_sum;
+ }
+ }
+ if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
+ if (rd / 2 > ref_best_rd) {
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = orig_dst[i];
+ xd->plane[i].dst.stride = orig_dst_stride[i];
+ }
+ return INT64_MAX;
+ }
+ }
+ newbest = i == 0 || rd < best_rd;
+
+ if (newbest) {
+ best_rd = rd;
+ *best_filter = mbmi->interp_filter;
+ if (cm->mcomp_filter_type == SWITCHABLE && i && !intpel_mv)
+ best_needs_copy = !best_needs_copy;
+ }
+
+ if ((cm->mcomp_filter_type == SWITCHABLE && newbest) ||
+ (cm->mcomp_filter_type != SWITCHABLE &&
+ cm->mcomp_filter_type == mbmi->interp_filter)) {
+ pred_exists = 1;
+ }
+ }
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = orig_dst[i];
+ xd->plane[i].dst.stride = orig_dst_stride[i];
+ }
+ }
+ }
+ // Set the appropriate filter
+ mbmi->interp_filter = cm->mcomp_filter_type != SWITCHABLE ?
+ cm->mcomp_filter_type : *best_filter;
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
+ rs = cm->mcomp_filter_type == SWITCHABLE ? get_switchable_rate(x) : 0;
+
+ if (pred_exists) {
+ if (best_needs_copy) {
+ // again temporarily set the buffers to local memory to prevent a memcpy
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = tmp_buf + i * 64 * 64;
+ xd->plane[i].dst.stride = 64;
+ }
+ }
+ } else {
+    // Handles the special case when a filter that is not in the
+    // switchable list (e.g. bilinear, 6-tap) is indicated at the frame level
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ }
+
+ if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
+ int tmp_rate;
+ int64_t tmp_dist;
+ model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist);
+ rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
+ // if current pred_error modeled rd is substantially more than the best
+ // so far, do not bother doing full rd
+ if (rd / 2 > ref_best_rd) {
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = orig_dst[i];
+ xd->plane[i].dst.stride = orig_dst_stride[i];
+ }
+ return INT64_MAX;
+ }
+ }
+
+ if (cpi->common.mcomp_filter_type == SWITCHABLE)
+ *rate2 += get_switchable_rate(x);
+
+ if (!is_comp_pred && cpi->enable_encode_breakout) {
+ if (cpi->active_map_enabled && x->active_ptr[0] == 0)
+ x->skip = 1;
+ else if (x->encode_breakout) {
+ const BLOCK_SIZE y_size = get_plane_block_size(bsize, &xd->plane[0]);
+ const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
+ unsigned int var, sse;
+ // Skipping threshold for ac.
+ unsigned int thresh_ac;
+ // The encode_breakout input
+ unsigned int encode_breakout = x->encode_breakout << 4;
+ int max_thresh = 36000;
+
+      // Use an extremely low threshold for static frames to limit skipping.
+ if (cpi->enable_encode_breakout == 2)
+ max_thresh = 128;
+
+ // Calculate threshold according to dequant value.
+ thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) / 9;
+
+ // Use encode_breakout input if it is bigger than internal threshold.
+ if (thresh_ac < encode_breakout)
+ thresh_ac = encode_breakout;
+
+ // Set a maximum for threshold to avoid big PSNR loss in low bitrate case.
+ if (thresh_ac > max_thresh)
+ thresh_ac = max_thresh;
+
+ var = cpi->fn_ptr[y_size].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, &sse);
+
+ // Adjust threshold according to partition size.
+ thresh_ac >>= 8 - (b_width_log2_lookup[bsize] +
+ b_height_log2_lookup[bsize]);
+
+ // Y skipping condition checking
+ if (sse < thresh_ac || sse == 0) {
+ // Skipping threshold for dc
+ unsigned int thresh_dc;
+
+ thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
+
+ // dc skipping checking
+ if ((sse - var) < thresh_dc || sse == var) {
+ unsigned int sse_u, sse_v;
+ unsigned int var_u, var_v;
+
+ var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf,
+ x->plane[1].src.stride,
+ xd->plane[1].dst.buf,
+ xd->plane[1].dst.stride, &sse_u);
+
+ // U skipping condition checking
+ if ((sse_u * 4 < thresh_ac || sse_u == 0) &&
+ (sse_u - var_u < thresh_dc || sse_u == var_u)) {
+ var_v = cpi->fn_ptr[uv_size].vf(x->plane[2].src.buf,
+ x->plane[2].src.stride,
+ xd->plane[2].dst.buf,
+ xd->plane[2].dst.stride, &sse_v);
+
+ // V skipping condition checking
+ if ((sse_v * 4 < thresh_ac || sse_v == 0) &&
+ (sse_v - var_v < thresh_dc || sse_v == var_v)) {
+ x->skip = 1;
+
+ // The cost of skip bit needs to be added.
+ *rate2 += vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), 1);
+
+ // Scaling factor for SSE from spatial domain to frequency domain
+ // is 16. Adjust distortion accordingly.
+ *distortion_uv = (sse_u + sse_v) << 4;
+ *distortion = (sse << 4) + *distortion_uv;
+
+ *disable_skip = 1;
+ this_rd = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (!x->skip) {
+ int skippable_y, skippable_uv;
+ int64_t sseuv = INT64_MAX;
+ int64_t rdcosty = INT64_MAX;
+
+ // Y cost and distortion
+ super_block_yrd(cpi, x, rate_y, distortion_y, &skippable_y, psse,
+ bsize, txfm_cache, ref_best_rd);
+
+ if (*rate_y == INT_MAX) {
+ *rate2 = INT_MAX;
+ *distortion = INT64_MAX;
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = orig_dst[i];
+ xd->plane[i].dst.stride = orig_dst_stride[i];
+ }
+ return INT64_MAX;
+ }
+
+ *rate2 += *rate_y;
+ *distortion += *distortion_y;
+
+ rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
+ rdcosty = MIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
+
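+    // The UV search gets whatever rd budget remains after the Y cost.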
+ super_block_uvrd(cm, x, rate_uv, distortion_uv, &skippable_uv, &sseuv,
+ bsize, ref_best_rd - rdcosty);
+ if (*rate_uv == INT_MAX) {
+ *rate2 = INT_MAX;
+ *distortion = INT64_MAX;
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = orig_dst[i];
+ xd->plane[i].dst.stride = orig_dst_stride[i];
+ }
+ return INT64_MAX;
+ }
+
+ *psse += sseuv;
+ *rate2 += *rate_uv;
+ *distortion += *distortion_uv;
+ *skippable = skippable_y && skippable_uv;
+ }
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = orig_dst[i];
+ xd->plane[i].dst.stride = orig_dst_stride[i];
+ }
+
+ return this_rd; // if 0, this will be re-calculated by caller
+}
+
+void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
+ int *returnrate, int64_t *returndist,
+ BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
+ int y_skip = 0, uv_skip = 0;
+ int64_t dist_y = 0, dist_uv = 0, tx_cache[TX_MODES] = { 0 };
+ x->skip_encode = 0;
+ ctx->skip = 0;
+ xd->this_mi->mbmi.ref_frame[0] = INTRA_FRAME;
+ if (bsize >= BLOCK_8X8) {
+ if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
+ &dist_y, &y_skip, bsize, tx_cache,
+ best_rd) >= best_rd) {
+ *returnrate = INT_MAX;
+ return;
+ }
+ rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
+ &dist_uv, &uv_skip, bsize);
+ } else {
+ y_skip = 0;
+ if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
+ &dist_y, best_rd) >= best_rd) {
+ *returnrate = INT_MAX;
+ return;
+ }
+ rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
+ &dist_uv, &uv_skip, BLOCK_8X8);
+ }
+
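+  // When both planes can be skipped, back out the token-only rates and pay
+  // for the skip flag instead.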
+ if (y_skip && uv_skip) {
+ *returnrate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
+ vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), 1);
+ *returndist = dist_y + dist_uv;
+ vp9_zero(ctx->tx_rd_diff);
+ } else {
+ int i;
+ *returnrate = rate_y + rate_uv +
+ vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), 0);
+ *returndist = dist_y + dist_uv;
+ if (cpi->sf.tx_size_search_method == USE_FULL_RD)
+ for (i = 0; i < TX_MODES; i++)
+ ctx->tx_rd_diff[i] = tx_cache[i] - tx_cache[cm->tx_mode];
+ }
+
+ ctx->mic = *xd->this_mi;
+}
+
+int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
+ int mi_row, int mi_col,
+ int *returnrate,
+ int64_t *returndistortion,
+ BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ const struct segmentation *seg = &cm->seg;
+ const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]);
+ RD_PREDICTION_MODE this_mode;
+ MV_REFERENCE_FRAME ref_frame, second_ref_frame;
+ unsigned char segment_id = mbmi->segment_id;
+ int comp_pred, i;
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
+ struct buf_2d yv12_mb[4][MAX_MB_PLANE];
+ int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
+ static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
+ VP9_ALT_FLAG };
+ int idx_list[4] = {0,
+ cpi->lst_fb_idx,
+ cpi->gld_fb_idx,
+ cpi->alt_fb_idx};
+ int64_t best_rd = best_rd_so_far;
+ int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise
+ int64_t best_tx_rd[TX_MODES];
+ int64_t best_tx_diff[TX_MODES];
+ int64_t best_pred_diff[NB_PREDICTION_TYPES];
+ int64_t best_pred_rd[NB_PREDICTION_TYPES];
+ int64_t best_filter_rd[SWITCHABLE_FILTERS + 1];
+ int64_t best_filter_diff[SWITCHABLE_FILTERS + 1];
+ MB_MODE_INFO best_mbmode = { 0 };
+ int j;
+ int mode_index, best_mode_index = 0;
+ unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
+ vp9_prob comp_mode_p;
+ int64_t best_intra_rd = INT64_MAX;
+ int64_t best_inter_rd = INT64_MAX;
+ MB_PREDICTION_MODE best_intra_mode = DC_PRED;
+ // MB_PREDICTION_MODE best_inter_mode = ZEROMV;
+ MV_REFERENCE_FRAME best_inter_ref_frame = LAST_FRAME;
+ INTERPOLATIONFILTERTYPE tmp_best_filter = SWITCHABLE;
+ int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
+ int64_t dist_uv[TX_SIZES];
+ int skip_uv[TX_SIZES];
+ MB_PREDICTION_MODE mode_uv[TX_SIZES];
+ struct scale_factors scale_factor[4];
+ unsigned int ref_frame_mask = 0;
+ unsigned int mode_mask = 0;
+ int64_t mode_distortions[MB_MODE_COUNT] = {-1};
+ int64_t frame_distortions[MAX_REF_FRAMES] = {-1};
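+  // Rate penalty applied to intra modes, scaled with the dc quantizer.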
+ int intra_cost_penalty = 20 * vp9_dc_quant(cpi->common.base_qindex,
+ cpi->common.y_dc_delta_q);
+ int_mv seg_mvs[4][MAX_REF_FRAMES];
+ union b_mode_info best_bmodes[4];
+ PARTITION_INFO best_partition;
+ const int bws = num_8x8_blocks_wide_lookup[bsize] / 2;
+ const int bhs = num_8x8_blocks_high_lookup[bsize] / 2;
+ int best_skip2 = 0;
+
+ x->skip_encode = cpi->sf.skip_encode_frame && xd->q_index < QIDX_SKIP_THRESH;
+
+ for (i = 0; i < 4; i++) {
+ int j;
+ for (j = 0; j < MAX_REF_FRAMES; j++)
+ seg_mvs[i][j].as_int = INVALID_MV;
+ }
+  // A set flag means the error for that mode/frame was much higher than
+  // that of its neighbors.
+ ctx->frames_with_high_error = 0;
+ ctx->modes_with_high_error = 0;
+
+ estimate_ref_frame_costs(cpi, segment_id, ref_costs_single, ref_costs_comp,
+ &comp_mode_p);
+
+ for (i = 0; i < NB_PREDICTION_TYPES; ++i)
+ best_pred_rd[i] = INT64_MAX;
+ for (i = 0; i < TX_MODES; i++)
+ best_tx_rd[i] = INT64_MAX;
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++)
+ best_filter_rd[i] = INT64_MAX;
+ for (i = 0; i < TX_SIZES; i++)
+ rate_uv_intra[i] = INT_MAX;
+
+ *returnrate = INT_MAX;
+
+  // Build masks of the reference frames and modes flagged as high-error by
+  // the smaller block-size searches; they are inverted below so that the
+  // surviving candidates are set to 1.
+ if (cpi->sf.use_avoid_tested_higherror) {
+ switch (block_size) {
+ case BLOCK_64X64:
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ ref_frame_mask |= x->mb_context[i][j].frames_with_high_error;
+ mode_mask |= x->mb_context[i][j].modes_with_high_error;
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ ref_frame_mask |= x->sb32_context[i].frames_with_high_error;
+ mode_mask |= x->sb32_context[i].modes_with_high_error;
+ }
+ break;
+ case BLOCK_32X32:
+ for (i = 0; i < 4; i++) {
+ ref_frame_mask |=
+ x->mb_context[xd->sb_index][i].frames_with_high_error;
+ mode_mask |= x->mb_context[xd->sb_index][i].modes_with_high_error;
+ }
+ break;
+ default:
+        // Until we handle all block sizes, leave everything enabled.
+ ref_frame_mask = 0;
+ mode_mask = 0;
+ break;
+ }
+ ref_frame_mask = ~ref_frame_mask;
+ mode_mask = ~mode_mask;
+ }
+
+ for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
+ if (cpi->ref_frame_flags & flag_list[ref_frame]) {
+ setup_buffer_inter(cpi, x, idx_list[ref_frame], ref_frame, block_size,
+ mi_row, mi_col, frame_mv[NEARESTMV], frame_mv[NEARMV],
+ yv12_mb, scale_factor);
+ }
+ frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
+ frame_mv[ZEROMV][ref_frame].as_int = 0;
+ }
+
+ for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
+ int mode_excluded = 0;
+ int64_t this_rd = INT64_MAX;
+ int disable_skip = 0;
+ int compmode_cost = 0;
+ int rate2 = 0, rate_y = 0, rate_uv = 0;
+ int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
+ int skippable = 0;
+ int64_t tx_cache[TX_MODES];
+ int i;
+ int this_skip2 = 0;
+    int64_t total_sse = INT64_MAX;
+ int early_term = 0;
+
+ for (i = 0; i < TX_MODES; ++i)
+ tx_cache[i] = INT64_MAX;
+
+ x->skip = 0;
+ this_mode = vp9_mode_order[mode_index].mode;
+ ref_frame = vp9_mode_order[mode_index].ref_frame;
+ second_ref_frame = vp9_mode_order[mode_index].second_ref_frame;
+
+ // Look at the reference frame of the best mode so far and set the
+ // skip mask to look at a subset of the remaining modes.
+ if (mode_index > cpi->sf.mode_skip_start) {
+ if (mode_index == (cpi->sf.mode_skip_start + 1)) {
+ switch (vp9_mode_order[best_mode_index].ref_frame) {
+ case INTRA_FRAME:
+ cpi->mode_skip_mask = 0;
+ break;
+ case LAST_FRAME:
+ cpi->mode_skip_mask = LAST_FRAME_MODE_MASK;
+ break;
+ case GOLDEN_FRAME:
+ cpi->mode_skip_mask = GOLDEN_FRAME_MODE_MASK;
+ break;
+ case ALTREF_FRAME:
+ cpi->mode_skip_mask = ALT_REF_MODE_MASK;
+ break;
+ case NONE:
+ case MAX_REF_FRAMES:
+ assert(!"Invalid Reference frame");
+ }
+ }
+ if (cpi->mode_skip_mask & (1 << mode_index))
+ continue;
+ }
+
+ // Skip if the current reference frame has been masked off
+ if (cpi->sf.reference_masking && !cpi->set_ref_frame_mask &&
+ (cpi->ref_frame_mask & (1 << ref_frame)))
+ continue;
+
+ // Test best rd so far against threshold for trying this mode.
+ if ((best_rd < ((cpi->rd_threshes[bsize][mode_index] *
+ cpi->rd_thresh_freq_fact[bsize][mode_index]) >> 5)) ||
+ cpi->rd_threshes[bsize][mode_index] == INT_MAX)
+ continue;
+
+ // Do not allow compound prediction if the segment level reference
+ // frame feature is in use as in this case there can only be one reference.
+ if ((second_ref_frame > INTRA_FRAME) &&
+ vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
+ continue;
+
+ // Skip some checking based on small partitions' result.
+ if (x->fast_ms > 1 && !ref_frame)
+ continue;
+ if (x->fast_ms > 2 && ref_frame != x->subblock_ref)
+ continue;
+
+ if (cpi->sf.use_avoid_tested_higherror && bsize >= BLOCK_8X8) {
+ if (!(ref_frame_mask & (1 << ref_frame))) {
+ continue;
+ }
+ if (!(mode_mask & (1 << this_mode))) {
+ continue;
+ }
+ if (second_ref_frame != NONE
+ && !(ref_frame_mask & (1 << second_ref_frame))) {
+ continue;
+ }
+ }
+
+ mbmi->ref_frame[0] = ref_frame;
+ mbmi->ref_frame[1] = second_ref_frame;
+
+ if (!(ref_frame == INTRA_FRAME
+ || (cpi->ref_frame_flags & flag_list[ref_frame]))) {
+ continue;
+ }
+ if (!(second_ref_frame == NONE
+ || (cpi->ref_frame_flags & flag_list[second_ref_frame]))) {
+ continue;
+ }
+
+ comp_pred = second_ref_frame > INTRA_FRAME;
+ if (comp_pred) {
+ if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA)
+ if (vp9_mode_order[best_mode_index].ref_frame == INTRA_FRAME)
+ continue;
+ if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_REFMISMATCH)
+ if (ref_frame != best_inter_ref_frame &&
+ second_ref_frame != best_inter_ref_frame)
+ continue;
+ }
+ // TODO(jingning, jkoleszar): scaling reference frame not supported for
+ // SPLITMV.
+ if (ref_frame > 0 &&
+ vp9_is_scaled(&scale_factor[ref_frame]) &&
+ this_mode == RD_SPLITMV)
+ continue;
+
+ if (second_ref_frame > 0 &&
+ vp9_is_scaled(&scale_factor[second_ref_frame]) &&
+ this_mode == RD_SPLITMV)
+ continue;
+
+ if (bsize >= BLOCK_8X8 &&
+ (this_mode == RD_I4X4_PRED || this_mode == RD_SPLITMV))
+ continue;
+
+ if (bsize < BLOCK_8X8 &&
+ !(this_mode == RD_I4X4_PRED || this_mode == RD_SPLITMV))
+ continue;
+
+ set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor);
+ mbmi->uv_mode = DC_PRED;
+
+ // Evaluate all sub-pel filters irrespective of whether we can use
+ // them for this frame.
+ mbmi->interp_filter = cm->mcomp_filter_type;
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
+
+ if (comp_pred) {
+ if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
+ continue;
+ set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor);
+
+ mode_excluded = mode_excluded
+ ? mode_excluded
+ : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
+ } else {
+ if (ref_frame != INTRA_FRAME && second_ref_frame != INTRA_FRAME) {
+ mode_excluded =
+ mode_excluded ?
+ mode_excluded : cm->comp_pred_mode == COMP_PREDICTION_ONLY;
+ }
+ }
+
+ // Select prediction reference frames.
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
+ if (comp_pred)
+ xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
+ }
+
+    // If the segment reference frame feature is enabled, do nothing if the
+    // current ref frame is not allowed.
+ if (vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
+ vp9_get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) !=
+ (int)ref_frame) {
+ continue;
+      // If the segment skip feature is enabled, do nothing if the current
+      // mode is not allowed.
+ } else if (vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) &&
+ (this_mode != RD_ZEROMV && ref_frame != INTRA_FRAME)) {
+ continue;
+ // Disable this drop out case if the ref frame
+ // segment level feature is enabled for this segment. This is to
+ // prevent the possibility that we end up unable to pick any mode.
+ } else if (!vp9_segfeature_active(seg, segment_id,
+ SEG_LVL_REF_FRAME)) {
+ // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
+ // unless ARNR filtering is enabled in which case we want
+ // an unfiltered alternative. We allow near/nearest as well
+ // because they may result in zero-zero MVs but be cheaper.
+ if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
+ if ((this_mode != RD_ZEROMV &&
+ !(this_mode == RD_NEARMV &&
+ frame_mv[RD_NEARMV][ALTREF_FRAME].as_int == 0) &&
+ !(this_mode == RD_NEARESTMV &&
+ frame_mv[RD_NEARESTMV][ALTREF_FRAME].as_int == 0)) ||
+ ref_frame != ALTREF_FRAME) {
+ continue;
+ }
+ }
+ }
+    // TODO(JBB): This is to make up for the fact that we don't have sad
+    // functions that work when the block size reads outside the umv. We
+    // should fix this either by making the motion search just work on a
+    // representative block in the boundary (first) and then implement a
+    // function that does sads when inside the border.
+ if (((mi_row + bhs) > cm->mi_rows || (mi_col + bws) > cm->mi_cols) &&
+ this_mode == RD_NEWMV) {
+ continue;
+ }
+
+#ifdef MODE_TEST_HIT_STATS
+ // TEST/DEBUG CODE
+    // Keep a record of the number of test hits at each size
+ cpi->mode_test_hits[bsize]++;
+#endif
+
+ if (this_mode == RD_I4X4_PRED) {
+ int rate;
+
+ /*
+ if ((cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
+ (vp9_mode_order[best_mode_index].ref_frame > INTRA_FRAME))
+ continue;
+ */
+
+ // RD_I4X4_PRED is only considered for block sizes less than 8x8.
+ mbmi->tx_size = TX_4X4;
+ if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y,
+ &distortion_y, best_rd) >= best_rd)
+ continue;
+ rate2 += rate;
+ rate2 += intra_cost_penalty;
+ distortion2 += distortion_y;
+
+ if (rate_uv_intra[TX_4X4] == INT_MAX) {
+ choose_intra_uv_mode(cpi, bsize, &rate_uv_intra[TX_4X4],
+ &rate_uv_tokenonly[TX_4X4],
+ &dist_uv[TX_4X4], &skip_uv[TX_4X4],
+ &mode_uv[TX_4X4]);
+ }
+ rate2 += rate_uv_intra[TX_4X4];
+ rate_uv = rate_uv_tokenonly[TX_4X4];
+ distortion2 += dist_uv[TX_4X4];
+ distortion_uv = dist_uv[TX_4X4];
+ mbmi->uv_mode = mode_uv[TX_4X4];
+ tx_cache[ONLY_4X4] = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+ for (i = 0; i < TX_MODES; ++i)
+ tx_cache[i] = tx_cache[ONLY_4X4];
+ } else if (ref_frame == INTRA_FRAME) {
+ TX_SIZE uv_tx;
+ // Disable intra modes other than DC_PRED for blocks with low variance
+ // Threshold for intra skipping based on source variance
+ // TODO(debargha): Specialize the threshold for super block sizes
+ static const int skip_intra_var_thresh[BLOCK_SIZES] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ };
+ if ((cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
+ this_mode != RD_DC_PRED &&
+ x->source_variance < skip_intra_var_thresh[mbmi->sb_type])
+ continue;
+ // Only search the oblique modes if the best so far is
+ // one of the neighboring directional modes
+ if ((cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
+ (this_mode >= RD_D45_PRED && this_mode <= RD_TM_PRED)) {
+ if (vp9_mode_order[best_mode_index].ref_frame > INTRA_FRAME)
+ continue;
+ }
+ mbmi->mode = rd_mode_to_mode(this_mode);
+ if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
+ if (conditional_skipintra(mbmi->mode, best_intra_mode))
+ continue;
+ }
+
+ super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL,
+ bsize, tx_cache, best_rd);
+
+ if (rate_y == INT_MAX)
+ continue;
+
+ uv_tx = MIN(mbmi->tx_size, max_uv_txsize_lookup[bsize]);
+ if (rate_uv_intra[uv_tx] == INT_MAX) {
+ choose_intra_uv_mode(cpi, bsize, &rate_uv_intra[uv_tx],
+ &rate_uv_tokenonly[uv_tx],
+ &dist_uv[uv_tx], &skip_uv[uv_tx],
+ &mode_uv[uv_tx]);
+ }
+
+ rate_uv = rate_uv_tokenonly[uv_tx];
+ distortion_uv = dist_uv[uv_tx];
+ skippable = skippable && skip_uv[uv_tx];
+ mbmi->uv_mode = mode_uv[uv_tx];
+
+ rate2 = rate_y + x->mbmode_cost[mbmi->mode] + rate_uv_intra[uv_tx];
+ if (this_mode != RD_DC_PRED && this_mode != RD_TM_PRED)
+ rate2 += intra_cost_penalty;
+ distortion2 = distortion_y + distortion_uv;
+ } else if (this_mode == RD_SPLITMV) {
+ const int is_comp_pred = second_ref_frame > 0;
+ int rate;
+ int64_t distortion;
+ int64_t this_rd_thresh;
+ int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
+ int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
+      int64_t tmp_best_distortion = INT64_MAX, tmp_best_sse, uv_sse;
+ int tmp_best_skippable = 0;
+ int switchable_filter_index;
+ int_mv *second_ref = is_comp_pred ?
+ &mbmi->ref_mvs[second_ref_frame][0] : NULL;
+ union b_mode_info tmp_best_bmodes[16];
+ MB_MODE_INFO tmp_best_mbmode;
+ PARTITION_INFO tmp_best_partition;
+ BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
+ int pred_exists = 0;
+ int uv_skippable;
+ if (is_comp_pred) {
+ if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA)
+ if (vp9_mode_order[best_mode_index].ref_frame == INTRA_FRAME)
+ continue;
+ if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_REFMISMATCH)
+ if (ref_frame != best_inter_ref_frame &&
+ second_ref_frame != best_inter_ref_frame)
+ continue;
+ }
+
+ this_rd_thresh = (ref_frame == LAST_FRAME) ?
+ cpi->rd_threshes[bsize][THR_NEWMV] :
+ cpi->rd_threshes[bsize][THR_NEWA];
+ this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
+ cpi->rd_threshes[bsize][THR_NEWG] : this_rd_thresh;
+ xd->this_mi->mbmi.tx_size = TX_4X4;
+
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX;
+ if (cm->mcomp_filter_type != BILINEAR) {
+ tmp_best_filter = EIGHTTAP;
+ if (x->source_variance <
+ cpi->sf.disable_filter_search_var_thresh) {
+ tmp_best_filter = EIGHTTAP;
+ vp9_zero(cpi->rd_filter_cache);
+ } else {
+ for (switchable_filter_index = 0;
+ switchable_filter_index < SWITCHABLE_FILTERS;
+ ++switchable_filter_index) {
+ int newbest, rs;
+ int64_t rs_rd;
+ mbmi->interp_filter = switchable_filter_index;
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
+
+ tmp_rd = rd_pick_best_mbsegmentation(cpi, x,
+ &mbmi->ref_mvs[ref_frame][0],
+ second_ref,
+ best_yrd,
+ &rate, &rate_y, &distortion,
+ &skippable, &total_sse,
+ (int)this_rd_thresh, seg_mvs,
+ bsi, switchable_filter_index,
+ mi_row, mi_col);
+
+ if (tmp_rd == INT64_MAX)
+ continue;
+ cpi->rd_filter_cache[switchable_filter_index] = tmp_rd;
+ rs = get_switchable_rate(x);
+ rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] =
+ MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS],
+ tmp_rd + rs_rd);
+ if (cm->mcomp_filter_type == SWITCHABLE)
+ tmp_rd += rs_rd;
+
+ newbest = (tmp_rd < tmp_best_rd);
+ if (newbest) {
+ tmp_best_filter = mbmi->interp_filter;
+ tmp_best_rd = tmp_rd;
+ }
+ if ((newbest && cm->mcomp_filter_type == SWITCHABLE) ||
+ (mbmi->interp_filter == cm->mcomp_filter_type &&
+ cm->mcomp_filter_type != SWITCHABLE)) {
+ tmp_best_rdu = tmp_rd;
+ tmp_best_rate = rate;
+ tmp_best_ratey = rate_y;
+ tmp_best_distortion = distortion;
+ tmp_best_sse = total_sse;
+ tmp_best_skippable = skippable;
+ tmp_best_mbmode = *mbmi;
+ tmp_best_partition = *x->partition_info;
+ for (i = 0; i < 4; i++)
+ tmp_best_bmodes[i] = xd->this_mi->bmi[i];
+ pred_exists = 1;
+ if (switchable_filter_index == 0 &&
+ cpi->sf.use_rd_breakout &&
+ best_rd < INT64_MAX) {
+ if (tmp_best_rdu / 2 > best_rd) {
+ // skip searching the other filters if the first is
+ // already substantially larger than the best so far
+ tmp_best_filter = mbmi->interp_filter;
+ tmp_best_rdu = INT64_MAX;
+ break;
+ }
+ }
+ }
+ } // switchable_filter_index loop
+ }
+ }
+
+ if (tmp_best_rdu == INT64_MAX)
+ continue;
+
+ mbmi->interp_filter = (cm->mcomp_filter_type == SWITCHABLE ?
+ tmp_best_filter : cm->mcomp_filter_type);
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
+ if (!pred_exists) {
+ // Handles the special case when a filter that is not in the
+ // switchable list (bilinear, 6-tap) is indicated at the frame level
+ tmp_rd = rd_pick_best_mbsegmentation(cpi, x,
+ &mbmi->ref_mvs[ref_frame][0],
+ second_ref,
+ best_yrd,
+ &rate, &rate_y, &distortion,
+ &skippable, &total_sse,
+ (int)this_rd_thresh, seg_mvs,
+ bsi, 0,
+ mi_row, mi_col);
+ if (tmp_rd == INT64_MAX)
+ continue;
+ } else {
+ if (cpi->common.mcomp_filter_type == SWITCHABLE) {
+ int rs = get_switchable_rate(x);
+ tmp_best_rdu -= RDCOST(x->rdmult, x->rddiv, rs, 0);
+ }
+ tmp_rd = tmp_best_rdu;
+ total_sse = tmp_best_sse;
+ rate = tmp_best_rate;
+ rate_y = tmp_best_ratey;
+ distortion = tmp_best_distortion;
+ skippable = tmp_best_skippable;
+ *mbmi = tmp_best_mbmode;
+ *x->partition_info = tmp_best_partition;
+ for (i = 0; i < 4; i++)
+ xd->this_mi->bmi[i] = tmp_best_bmodes[i];
+ }
+
+ rate2 += rate;
+ distortion2 += distortion;
+
+ if (cpi->common.mcomp_filter_type == SWITCHABLE)
+ rate2 += get_switchable_rate(x);
+
+ if (!mode_excluded) {
+ if (is_comp_pred)
+ mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
+ else
+ mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
+ }
+ compmode_cost = vp9_cost_bit(comp_mode_p, is_comp_pred);
+
+ tmp_best_rdu = best_rd -
+ MIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
+ RDCOST(x->rdmult, x->rddiv, 0, total_sse));
+
+ if (tmp_best_rdu > 0) {
+        // If even the 'Y' rd value of split is higher than the best so far,
+        // don't bother looking at UV.
+ vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
+ BLOCK_8X8);
+ super_block_uvrd(cm, x, &rate_uv, &distortion_uv, &uv_skippable,
+ &uv_sse, BLOCK_8X8, tmp_best_rdu);
+ if (rate_uv == INT_MAX)
+ continue;
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ skippable = skippable && uv_skippable;
+ total_sse += uv_sse;
+
+ tx_cache[ONLY_4X4] = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+ for (i = 0; i < TX_MODES; ++i)
+ tx_cache[i] = tx_cache[ONLY_4X4];
+ }
+ } else {
+ mbmi->mode = rd_mode_to_mode(this_mode);
+ compmode_cost = vp9_cost_bit(comp_mode_p, second_ref_frame > INTRA_FRAME);
+ this_rd = handle_inter_mode(cpi, x, bsize,
+ tx_cache,
+ &rate2, &distortion2, &skippable,
+ &rate_y, &distortion_y,
+ &rate_uv, &distortion_uv,
+ &mode_excluded, &disable_skip,
+ &tmp_best_filter, frame_mv,
+ mi_row, mi_col,
+ single_newmv, &total_sse, best_rd);
+ if (this_rd == INT64_MAX)
+ continue;
+ }
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ rate2 += compmode_cost;
+ }
+
+ // Estimate the reference frame signaling cost and add it
+ // to the rolling cost variable.
+ if (second_ref_frame > INTRA_FRAME) {
+ rate2 += ref_costs_comp[ref_frame];
+ } else {
+ rate2 += ref_costs_single[ref_frame];
+ }
+
+ if (!disable_skip) {
+      // Test for the condition where the skip block will be activated
+      // because there are no non-zero coefficients, and make any
+      // necessary adjustment for rate. Ignore if skip is coded at the
+      // segment level, as the cost won't have been added in.
+      // Is MB-level skip allowed (i.e. not coded at segment level)?
+ const int mb_skip_allowed = !vp9_segfeature_active(seg, segment_id,
+ SEG_LVL_SKIP);
+
+ if (skippable && bsize >= BLOCK_8X8) {
+ // Back out the coefficient coding costs
+ rate2 -= (rate_y + rate_uv);
+ // for best yrd calculation
+ rate_uv = 0;
+
+ if (mb_skip_allowed) {
+ int prob_skip_cost;
+
+ // Cost the skip mb case
+ vp9_prob skip_prob =
+ vp9_get_pred_prob_mbskip(cm, xd);
+
+ if (skip_prob) {
+ prob_skip_cost = vp9_cost_bit(skip_prob, 1);
+ rate2 += prob_skip_cost;
+ }
+ }
+ } else if (mb_skip_allowed && ref_frame != INTRA_FRAME && !xd->lossless) {
+ if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
+ RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
+ // Add in the cost of the no skip flag.
+ int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd),
+ 0);
+ rate2 += prob_skip_cost;
+ } else {
+ // FIXME(rbultje) make this work for splitmv also
+ int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd),
+ 1);
+ rate2 += prob_skip_cost;
+ distortion2 = total_sse;
+ assert(total_sse >= 0);
+ rate2 -= (rate_y + rate_uv);
+ rate_y = 0;
+ rate_uv = 0;
+ this_skip2 = 1;
+ }
+ } else if (mb_skip_allowed) {
+ // Add in the cost of the no skip flag.
+ int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd),
+ 0);
+ rate2 += prob_skip_cost;
+ }
+
+ // Calculate the final RD estimate for this mode.
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+ }
+
+ // Keep record of best intra rd
+ if (xd->this_mi->mbmi.ref_frame[0] == INTRA_FRAME &&
+ is_intra_mode(xd->this_mi->mbmi.mode) &&
+ this_rd < best_intra_rd) {
+ best_intra_rd = this_rd;
+ best_intra_mode = xd->this_mi->mbmi.mode;
+ }
+ // Keep record of best inter rd with single reference
+ if (xd->this_mi->mbmi.ref_frame[0] > INTRA_FRAME &&
+ xd->this_mi->mbmi.ref_frame[1] == NONE &&
+ !mode_excluded &&
+ this_rd < best_inter_rd) {
+ best_inter_rd = this_rd;
+ best_inter_ref_frame = ref_frame;
+ // best_inter_mode = xd->this_mi->mbmi.mode;
+ }
+
+ if (!disable_skip && ref_frame == INTRA_FRAME) {
+ for (i = 0; i < NB_PREDICTION_TYPES; ++i)
+ best_pred_rd[i] = MIN(best_pred_rd[i], this_rd);
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++)
+ best_filter_rd[i] = MIN(best_filter_rd[i], this_rd);
+ }
+
+ if (this_mode != RD_I4X4_PRED && this_mode != RD_SPLITMV) {
+ // Store the respective mode distortions for later use.
+ if (mode_distortions[this_mode] == -1
+ || distortion2 < mode_distortions[this_mode]) {
+ mode_distortions[this_mode] = distortion2;
+ }
+ if (frame_distortions[ref_frame] == -1
+ || distortion2 < frame_distortions[ref_frame]) {
+ frame_distortions[ref_frame] = distortion2;
+ }
+ }
+
+    // Did this mode help, i.e. is it the new best mode?
+ if (this_rd < best_rd || x->skip) {
+ if (!mode_excluded) {
+ // Note index of best mode so far
+ best_mode_index = mode_index;
+
+ if (ref_frame == INTRA_FRAME) {
+ /* required for left and above block mv */
+ mbmi->mv[0].as_int = 0;
+ }
+
+ *returnrate = rate2;
+ *returndistortion = distortion2;
+ best_rd = this_rd;
+ best_yrd = best_rd -
+ RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
+ best_mbmode = *mbmi;
+ best_skip2 = this_skip2;
+ best_partition = *x->partition_info;
+
+ if (this_mode == RD_I4X4_PRED || this_mode == RD_SPLITMV)
+ for (i = 0; i < 4; i++)
+ best_bmodes[i] = xd->this_mi->bmi[i];
+
+ // TODO(debargha): enhance this test with a better distortion prediction
+ // based on qp, activity mask and history
+ if (cpi->sf.mode_search_skip_flags & FLAG_EARLY_TERMINATE) {
+ const int qstep = xd->plane[0].dequant[1];
+ // TODO(debargha): Enhance this by specializing for each mode_index
+ int scale = 4;
+ if (x->source_variance < UINT_MAX) {
+ const int var_adjust = (x->source_variance < 16);
+ scale -= var_adjust;
+ }
+ if (ref_frame > INTRA_FRAME &&
+ distortion2 * scale < qstep * qstep) {
+ early_term = 1;
+ }
+ }
+ }
+ }
+
+ /* keep record of best compound/single-only prediction */
+ if (!disable_skip && ref_frame != INTRA_FRAME) {
+ int single_rd, hybrid_rd, single_rate, hybrid_rate;
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ single_rate = rate2 - compmode_cost;
+ hybrid_rate = rate2;
+ } else {
+ single_rate = rate2;
+ hybrid_rate = rate2 + compmode_cost;
+ }
+
+ single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
+ hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
+
+ if (second_ref_frame <= INTRA_FRAME &&
+ single_rd < best_pred_rd[SINGLE_PREDICTION_ONLY]) {
+ best_pred_rd[SINGLE_PREDICTION_ONLY] = single_rd;
+ } else if (second_ref_frame > INTRA_FRAME &&
+ single_rd < best_pred_rd[COMP_PREDICTION_ONLY]) {
+ best_pred_rd[COMP_PREDICTION_ONLY] = single_rd;
+ }
+ if (hybrid_rd < best_pred_rd[HYBRID_PREDICTION])
+ best_pred_rd[HYBRID_PREDICTION] = hybrid_rd;
+ }
+
+ /* keep record of best filter type */
+ if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
+ cm->mcomp_filter_type != BILINEAR) {
+ int64_t ref = cpi->rd_filter_cache[cm->mcomp_filter_type == SWITCHABLE ?
+ SWITCHABLE_FILTERS : cm->mcomp_filter_type];
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++) {
+ int64_t adj_rd;
+ // In cases of poor prediction, filter_cache[] can contain really big
+ // values, which actually are bigger than this_rd itself. This can
+ // cause negative best_filter_rd[] values, which is obviously silly.
+ // Therefore, if filter_cache < ref, we do an adjusted calculation.
+ if (cpi->rd_filter_cache[i] >= ref)
+ adj_rd = this_rd + cpi->rd_filter_cache[i] - ref;
+ else // FIXME(rbultje) do this for comppred also
+ adj_rd = this_rd - (ref - cpi->rd_filter_cache[i]) * this_rd / ref;
+ best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd);
+ }
+ }
+
+ /* keep record of best txfm size */
+ if (bsize < BLOCK_32X32) {
+ if (bsize < BLOCK_16X16) {
+ if (this_mode == RD_SPLITMV || this_mode == RD_I4X4_PRED)
+ tx_cache[ALLOW_8X8] = tx_cache[ONLY_4X4];
+ tx_cache[ALLOW_16X16] = tx_cache[ALLOW_8X8];
+ }
+ tx_cache[ALLOW_32X32] = tx_cache[ALLOW_16X16];
+ }
+ if (!mode_excluded && this_rd != INT64_MAX) {
+ for (i = 0; i < TX_MODES && tx_cache[i] < INT64_MAX; i++) {
+ int64_t adj_rd = INT64_MAX;
+ if (this_mode != RD_I4X4_PRED) {
+ adj_rd = this_rd + tx_cache[i] - tx_cache[cm->tx_mode];
+ } else {
+ adj_rd = this_rd;
+ }
+
+ if (adj_rd < best_tx_rd[i])
+ best_tx_rd[i] = adj_rd;
+ }
+ }
+
+ if (early_term)
+ break;
+
+ if (x->skip && !comp_pred)
+ break;
+ }
+
+ if (best_rd >= best_rd_so_far)
+ return INT64_MAX;
+
+ // If we used an estimate for the uv intra rd in the loop above...
+ if (cpi->sf.use_uv_intra_rd_estimate) {
+ // Do Intra UV best rd mode selection if best mode choice above was intra.
+ if (vp9_mode_order[best_mode_index].ref_frame == INTRA_FRAME) {
+ TX_SIZE uv_tx_size = get_uv_tx_size(mbmi);
+ rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_intra[uv_tx_size],
+ &rate_uv_tokenonly[uv_tx_size],
+ &dist_uv[uv_tx_size],
+ &skip_uv[uv_tx_size],
+ bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
+ }
+ }
+
+ // If we are using reference masking and the set mask flag is set then
+ // create the reference frame mask.
+ if (cpi->sf.reference_masking && cpi->set_ref_frame_mask)
+ cpi->ref_frame_mask = ~(1 << vp9_mode_order[best_mode_index].ref_frame);
+
+  // Flag all modes that have a distortion more than twice the best we found
+  // at this level.
+ for (mode_index = 0; mode_index < MB_MODE_COUNT; ++mode_index) {
+ if (mode_index == NEARESTMV || mode_index == NEARMV || mode_index == NEWMV)
+ continue;
+
+ if (mode_distortions[mode_index] > 2 * *returndistortion) {
+ ctx->modes_with_high_error |= (1 << mode_index);
+ }
+ }
+
+  // Flag all ref frames that have a distortion more than twice the best we
+  // found at this level.
+ for (ref_frame = INTRA_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
+ if (frame_distortions[ref_frame] > 2 * *returndistortion) {
+ ctx->frames_with_high_error |= (1 << ref_frame);
+ }
+ }
+
+ if (best_rd == INT64_MAX && bsize < BLOCK_8X8) {
+ *returnrate = INT_MAX;
+ *returndistortion = INT_MAX;
+ return best_rd;
+ }
+
+ assert((cm->mcomp_filter_type == SWITCHABLE) ||
+ (cm->mcomp_filter_type == best_mbmode.interp_filter) ||
+ (best_mbmode.ref_frame[0] == INTRA_FRAME));
+
+ // Updating rd_thresh_freq_fact[] here means that the different
+ // partition/block sizes are handled independently based on the best
+ // choice for the current partition. It may well be better to keep a scaled
+ // best rd so far value and update rd_thresh_freq_fact based on the mode/size
+ // combination that wins out.
+ if (cpi->sf.adaptive_rd_thresh) {
+ for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
+ if (mode_index == best_mode_index) {
+ cpi->rd_thresh_freq_fact[bsize][mode_index] -=
+ (cpi->rd_thresh_freq_fact[bsize][mode_index] >> 3);
+ } else {
+ cpi->rd_thresh_freq_fact[bsize][mode_index] += RD_THRESH_INC;
+ if (cpi->rd_thresh_freq_fact[bsize][mode_index] >
+ (cpi->sf.adaptive_rd_thresh * MAX_RD_THRESH_FACT)) {
+ cpi->rd_thresh_freq_fact[bsize][mode_index] =
+ cpi->sf.adaptive_rd_thresh * MAX_RD_THRESH_FACT;
+ }
+ }
+ }
+ }
+
+ // macroblock modes
+ *mbmi = best_mbmode;
+ x->skip |= best_skip2;
+ if (best_mbmode.ref_frame[0] == INTRA_FRAME &&
+ best_mbmode.sb_type < BLOCK_8X8) {
+ for (i = 0; i < 4; i++)
+ xd->this_mi->bmi[i].as_mode = best_bmodes[i].as_mode;
+ }
+
+ if (best_mbmode.ref_frame[0] != INTRA_FRAME &&
+ best_mbmode.sb_type < BLOCK_8X8) {
+ for (i = 0; i < 4; i++)
+ xd->this_mi->bmi[i].as_mv[0].as_int =
+ best_bmodes[i].as_mv[0].as_int;
+
+ if (mbmi->ref_frame[1] > 0)
+ for (i = 0; i < 4; i++)
+ xd->this_mi->bmi[i].as_mv[1].as_int =
+ best_bmodes[i].as_mv[1].as_int;
+
+ *x->partition_info = best_partition;
+
+ mbmi->mv[0].as_int = xd->this_mi->bmi[3].as_mv[0].as_int;
+ mbmi->mv[1].as_int = xd->this_mi->bmi[3].as_mv[1].as_int;
+ }
+
+ for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
+ if (best_pred_rd[i] == INT64_MAX)
+ best_pred_diff[i] = INT_MIN;
+ else
+ best_pred_diff[i] = best_rd - best_pred_rd[i];
+ }
+
+ if (!x->skip) {
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++) {
+ if (best_filter_rd[i] == INT64_MAX)
+ best_filter_diff[i] = 0;
+ else
+ best_filter_diff[i] = best_rd - best_filter_rd[i];
+ }
+ if (cm->mcomp_filter_type == SWITCHABLE)
+ assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
+ } else {
+ vpx_memset(best_filter_diff, 0, sizeof(best_filter_diff));
+ }
+
+ if (!x->skip) {
+ for (i = 0; i < TX_MODES; i++) {
+ if (best_tx_rd[i] == INT64_MAX)
+ best_tx_diff[i] = 0;
+ else
+ best_tx_diff[i] = best_rd - best_tx_rd[i];
+ }
+ } else {
+ vpx_memset(best_tx_diff, 0, sizeof(best_tx_diff));
+ }
+
+ set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
+ scale_factor);
+ store_coding_context(x, ctx, best_mode_index,
+ &best_partition,
+ &mbmi->ref_mvs[mbmi->ref_frame[0]][0],
+ &mbmi->ref_mvs[mbmi->ref_frame[1] < 0 ? 0 :
+ mbmi->ref_frame[1]][0],
+ best_pred_diff, best_tx_diff, best_filter_diff);
+
+ return best_rd;
+}
diff --git a/libvpx/vp9/encoder/vp9_rdopt.h b/libvpx/vp9/encoder/vp9_rdopt.h
new file mode 100644
index 0000000..eba7df9
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_rdopt.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_RDOPT_H_
+#define VP9_ENCODER_VP9_RDOPT_H_
+
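+// RDCOST folds rate and distortion into one cost: the rate R is weighted by
+// the multiplier RM in 1/256th units (the +128 rounds the shift), and the
+// distortion D is weighted by DM.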
+#define RDCOST(RM,DM,R,D) ( ((128+((int64_t)R)*(RM)) >> 8) + ((int64_t)DM)*(D) )
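+// q indexes below this threshold allow the skip-encode speed feature.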
+#define QIDX_SKIP_THRESH 115
+
+void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex);
+
+void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex);
+
+void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
+ int *r, int64_t *d, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd);
+
+int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
+ int mi_row, int mi_col,
+ int *r, int64_t *d, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd);
+
+void vp9_init_me_luts(void);
+
+void vp9_set_mbmode_and_mvs(MACROBLOCK *x,
+ MB_PREDICTION_MODE mb, int_mv *mv);
+
+#endif // VP9_ENCODER_VP9_RDOPT_H_
diff --git a/libvpx/vp9/encoder/vp9_sad_c.c b/libvpx/vp9/encoder/vp9_sad_c.c
new file mode 100644
index 0000000..42ddb21
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_sad_c.c
@@ -0,0 +1,615 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdlib.h>
+#include "vp9/common/vp9_sadmxn.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "./vp9_rtcd.h"
+
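+// Generates the plain-C reference SAD functions for an m x n block: a plain
+// SAD and an _avg variant that first averages the reference with a second
+// predictor (compound prediction) before computing the SAD.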
+#define sad_mxn_func(m, n) \
+unsigned int vp9_sad##m##x##n##_c(const uint8_t *src_ptr, \
+ int src_stride, \
+ const uint8_t *ref_ptr, \
+ int ref_stride, \
+ unsigned int max_sad) { \
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, m, n); \
+} \
+unsigned int vp9_sad##m##x##n##_avg_c(const uint8_t *src_ptr, \
+ int src_stride, \
+ const uint8_t *ref_ptr, \
+ int ref_stride, \
+ const uint8_t *second_pred, \
+ unsigned int max_sad) { \
+ uint8_t comp_pred[m * n]; \
+ comp_avg_pred(comp_pred, second_pred, m, n, ref_ptr, ref_stride); \
+ return sad_mx_n_c(src_ptr, src_stride, comp_pred, m, m, n); \
+}
+
+sad_mxn_func(64, 64)
+sad_mxn_func(64, 32)
+sad_mxn_func(32, 64)
+sad_mxn_func(32, 32)
+sad_mxn_func(32, 16)
+sad_mxn_func(16, 32)
+sad_mxn_func(16, 16)
+sad_mxn_func(16, 8)
+sad_mxn_func(8, 16)
+sad_mxn_func(8, 8)
+sad_mxn_func(8, 4)
+sad_mxn_func(4, 8)
+sad_mxn_func(4, 4)
+
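+// The x4d variants compute the SAD against four distinct reference blocks in
+// a single call; the x3/x8 variants compute it at 3 or 8 consecutive
+// one-pixel offsets of the same reference pointer.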
+void vp9_sad64x32x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad64x32(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad64x32(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad64x32(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad64x32(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad32x64x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x64(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad32x64(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad32x64(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad32x64(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad32x16x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x16(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad32x16(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad32x16(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad32x16(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad16x32x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x32(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x32(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x32(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad16x32(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad64x64x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad64x64(src_ptr, src_stride, ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad64x64(src_ptr, src_stride, ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad64x64(src_ptr, src_stride, ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad32x32x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad64x64x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad32x32x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad16x16x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad16x16x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad16x8x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad16x8x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad8x8x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x8x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad8x16x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x16x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad4x4x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad4x4x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad64x64x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad32x32x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad16x16x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad16x8x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x8x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x16x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x4x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x4x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad4x8x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad4x8x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad4x4x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
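
Editor's note: the x3 and x8 wrappers above re-evaluate one block SAD at successive horizontal offsets of the reference pointer, while the x4d variants take four independent candidate pointers; passing 0x7fffffff as the cap effectively disables early termination. A minimal sketch of the per-block kernel these wrappers delegate to, assuming the vp9_sadNxM helpers behave as a capped sum of absolute differences (the real kernels are generated through the rtcd mechanism and are usually SIMD; the name sad_nxm and this exact signature are illustrative only):

    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch only: the cap semantics and signature are assumptions. */
    static unsigned int sad_nxm(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                int width, int height,
                                unsigned int max_sad) {
      unsigned int sad = 0;
      int r, c;
      for (r = 0; r < height; r++) {
        for (c = 0; c < width; c++)
          sad += abs(src[c] - ref[c]);   /* accumulate |src - ref| */
        if (sad > max_sad)               /* caller's cap: abandon early */
          break;
        src += src_stride;
        ref += ref_stride;
      }
      return sad;
    }
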
diff --git a/libvpx/vp9/encoder/vp9_segmentation.c b/libvpx/vp9/encoder/vp9_segmentation.c
new file mode 100644
index 0000000..10655e8
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_segmentation.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <limits.h>
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_tile_common.h"
+
+void vp9_enable_segmentation(VP9_PTR ptr) {
+ VP9_COMP *cpi = (VP9_COMP *)ptr;
+ struct segmentation *const seg = &cpi->common.seg;
+
+ seg->enabled = 1;
+ seg->update_map = 1;
+ seg->update_data = 1;
+}
+
+void vp9_disable_segmentation(VP9_PTR ptr) {
+ VP9_COMP *cpi = (VP9_COMP *)ptr;
+ struct segmentation *const seg = &cpi->common.seg;
+ seg->enabled = 0;
+}
+
+void vp9_set_segmentation_map(VP9_PTR ptr,
+ unsigned char *segmentation_map) {
+ VP9_COMP *cpi = (VP9_COMP *)ptr;
+ struct segmentation *const seg = &cpi->common.seg;
+
+ // Copy in the new segmentation map
+ vpx_memcpy(cpi->segmentation_map, segmentation_map,
+ (cpi->common.mi_rows * cpi->common.mi_cols));
+
+ // Signal that the map should be updated.
+ seg->update_map = 1;
+ seg->update_data = 1;
+}
+
+void vp9_set_segment_data(VP9_PTR ptr,
+ signed char *feature_data,
+ unsigned char abs_delta) {
+ VP9_COMP *cpi = (VP9_COMP *)ptr;
+ struct segmentation *const seg = &cpi->common.seg;
+
+ seg->abs_delta = abs_delta;
+
+ vpx_memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
+
+ // TBD ?? Set the feature mask
+ // vpx_memcpy(cpi->mb.e_mbd.segment_feature_mask, 0,
+ // sizeof(cpi->mb.e_mbd.segment_feature_mask));
+}
+
+// Based on a set of segment counts, calculate a probability tree
+static void calc_segtree_probs(int *segcounts, vp9_prob *segment_tree_probs) {
+ // Work out probabilities of each segment
+ const int c01 = segcounts[0] + segcounts[1];
+ const int c23 = segcounts[2] + segcounts[3];
+ const int c45 = segcounts[4] + segcounts[5];
+ const int c67 = segcounts[6] + segcounts[7];
+
+ segment_tree_probs[0] = get_binary_prob(c01 + c23, c45 + c67);
+ segment_tree_probs[1] = get_binary_prob(c01, c23);
+ segment_tree_probs[2] = get_binary_prob(c45, c67);
+ segment_tree_probs[3] = get_binary_prob(segcounts[0], segcounts[1]);
+ segment_tree_probs[4] = get_binary_prob(segcounts[2], segcounts[3]);
+ segment_tree_probs[5] = get_binary_prob(segcounts[4], segcounts[5]);
+ segment_tree_probs[6] = get_binary_prob(segcounts[6], segcounts[7]);
+}
+
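
Editor's note: calc_segtree_probs fills the seven internal nodes of a balanced binary tree over the eight segment ids, each node probability derived from the counts in its left versus right subtree. get_binary_prob is defined elsewhere in the codebase; a sketch under the assumption that it maps a count pair to an 8-bit zero-branch probability clamped to [1, 255] (the helper name below is hypothetical):

    /* Assumed behavior: p(zero branch) ~ 256 * n0 / (n0 + n1). */
    static unsigned char binary_prob_sketch(unsigned int n0, unsigned int n1) {
      const unsigned int den = n0 + n1;
      unsigned int p = den ? (256 * n0 + den / 2) / den : 128;
      if (p < 1) p = 1;          /* never fully certain... */
      if (p > 255) p = 255;      /* ...in either direction */
      return (unsigned char)p;
    }
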
+// Based on a set of segment counts and probabilities, calculate a cost estimate
+static int cost_segmap(int *segcounts, vp9_prob *probs) {
+ const int c01 = segcounts[0] + segcounts[1];
+ const int c23 = segcounts[2] + segcounts[3];
+ const int c45 = segcounts[4] + segcounts[5];
+ const int c67 = segcounts[6] + segcounts[7];
+ const int c0123 = c01 + c23;
+ const int c4567 = c45 + c67;
+
+ // Cost the top node of the tree
+ int cost = c0123 * vp9_cost_zero(probs[0]) +
+ c4567 * vp9_cost_one(probs[0]);
+
+ // Cost subsequent levels
+ if (c0123 > 0) {
+ cost += c01 * vp9_cost_zero(probs[1]) +
+ c23 * vp9_cost_one(probs[1]);
+
+ if (c01 > 0)
+ cost += segcounts[0] * vp9_cost_zero(probs[3]) +
+ segcounts[1] * vp9_cost_one(probs[3]);
+ if (c23 > 0)
+ cost += segcounts[2] * vp9_cost_zero(probs[4]) +
+ segcounts[3] * vp9_cost_one(probs[4]);
+ }
+
+ if (c4567 > 0) {
+ cost += c45 * vp9_cost_zero(probs[2]) +
+ c67 * vp9_cost_one(probs[2]);
+
+ if (c45 > 0)
+ cost += segcounts[4] * vp9_cost_zero(probs[5]) +
+ segcounts[5] * vp9_cost_one(probs[5]);
+ if (c67 > 0)
+ cost += segcounts[6] * vp9_cost_zero(probs[6]) +
+ segcounts[7] * vp9_cost_one(probs[6]);
+ }
+
+ return cost;
+}
+
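
Editor's note: a worked example of the two routines above, assuming get_binary_prob(n0, n1) is roughly 256 * n0 / (n0 + n1) and that vp9_cost_zero/vp9_cost_one return -log2(p/256) and -log2(1 - p/256) scaled by 256. For segcounts = {6, 2, 0, 0, 0, 0, 0, 0}, only c01 = 8 is nonzero, so probs[0] and probs[1] saturate near 255 and those branches are nearly free; the total is dominated by the probs[3] = 192 leaf, costing about 6 * 0.42 + 2 * 2, i.e. roughly 6.5 bits (~1660 in the 1/256-bit units the cost functions appear to use), to separate segment 0 from segment 1.
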
+static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ int *no_pred_segcounts,
+ int (*temporal_predictor_count)[2],
+ int *t_unpred_seg_counts,
+ int bw, int bh, int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ int segment_id;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ segment_id = mi_8x8[0]->mbmi.segment_id;
+
+ set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);
+
+ // Count the number of hits on each segment with no prediction
+ no_pred_segcounts[segment_id]++;
+
+ // Temporal prediction not allowed on key frames
+ if (cm->frame_type != KEY_FRAME) {
+ const BLOCK_SIZE bsize = mi_8x8[0]->mbmi.sb_type;
+ // Test to see if the segment id matches the predicted value.
+ const int pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
+ bsize, mi_row, mi_col);
+ const int pred_flag = pred_segment_id == segment_id;
+ const int pred_context = vp9_get_pred_context_seg_id(xd);
+
+ // Store the prediction status for this mb and update counts
+ // as appropriate
+ vp9_set_pred_flag_seg_id(xd, pred_flag);
+ temporal_predictor_count[pred_context][pred_flag]++;
+
+ if (!pred_flag)
+ // Update the "unpredicted" segment count
+ t_unpred_seg_counts[segment_id]++;
+ }
+}
+
+static void count_segs_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ int *no_pred_segcounts,
+ int (*temporal_predictor_count)[2],
+ int *t_unpred_seg_counts,
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const int mis = cm->mode_info_stride;
+ int bw, bh;
+ const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
+ bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];
+
+ if (bw == bs && bh == bs) {
+ count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, bs, bs, mi_row, mi_col);
+ } else if (bw == bs && bh < bs) {
+ count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
+ count_segs(cpi, mi_8x8 + hbs * mis, no_pred_segcounts,
+ temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
+ mi_row + hbs, mi_col);
+ } else if (bw < bs && bh == bs) {
+ count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
+ count_segs(cpi, mi_8x8 + hbs, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, hbs, bs, mi_row, mi_col + hbs);
+ } else {
+ const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
+ int n;
+
+ assert(bw < bs && bh < bs);
+
+ for (n = 0; n < 4; n++) {
+ const int mi_dc = hbs * (n & 1);
+ const int mi_dr = hbs * (n >> 1);
+
+ count_segs_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc],
+ no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts,
+ mi_row + mi_dr, mi_col + mi_dc, subsize);
+ }
+ }
+}
+
+void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ struct segmentation *seg = &cm->seg;
+
+ int no_pred_cost;
+ int t_pred_cost = INT_MAX;
+
+ int i, tile_col, mi_row, mi_col;
+
+ int temporal_predictor_count[PREDICTION_PROBS][2] = { { 0 } };
+ int no_pred_segcounts[MAX_SEGMENTS] = { 0 };
+ int t_unpred_seg_counts[MAX_SEGMENTS] = { 0 };
+
+ vp9_prob no_pred_tree[SEG_TREE_PROBS];
+ vp9_prob t_pred_tree[SEG_TREE_PROBS];
+ vp9_prob t_nopred_prob[PREDICTION_PROBS];
+
+ const int mis = cm->mode_info_stride;
+ MODE_INFO **mi_ptr, **mi;
+
+ // Set default state for the segment tree probabilities and the
+ // temporal coding probabilities
+ vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
+ vpx_memset(seg->pred_probs, 255, sizeof(seg->pred_probs));
+
+ // First of all generate stats regarding how well the last segment map
+ // predicts this one
+ for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) {
+ vp9_get_tile_col_offsets(cm, tile_col);
+ mi_ptr = cm->mi_grid_visible + cm->cur_tile_mi_col_start;
+ for (mi_row = 0; mi_row < cm->mi_rows;
+ mi_row += 8, mi_ptr += 8 * mis) {
+ mi = mi_ptr;
+ for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
+ mi_col += 8, mi += 8)
+ count_segs_sb(cpi, mi, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, mi_row, mi_col, BLOCK_64X64);
+ }
+ }
+
+ // Work out probability tree for coding segments without prediction
+ // and the cost.
+ calc_segtree_probs(no_pred_segcounts, no_pred_tree);
+ no_pred_cost = cost_segmap(no_pred_segcounts, no_pred_tree);
+
+ // Key frames cannot use temporal prediction
+ if (cm->frame_type != KEY_FRAME) {
+ // Work out probability tree for coding those segments not
+ // predicted using the temporal method and the cost.
+ calc_segtree_probs(t_unpred_seg_counts, t_pred_tree);
+ t_pred_cost = cost_segmap(t_unpred_seg_counts, t_pred_tree);
+
+ // Add in the cost of the signalling for each prediction context
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ const int count0 = temporal_predictor_count[i][0];
+ const int count1 = temporal_predictor_count[i][1];
+
+ t_nopred_prob[i] = get_binary_prob(count0, count1);
+
+ // Add in the predictor signaling cost
+ t_pred_cost += count0 * vp9_cost_zero(t_nopred_prob[i]) +
+ count1 * vp9_cost_one(t_nopred_prob[i]);
+ }
+ }
+
+ // Now choose which coding method to use.
+ if (t_pred_cost < no_pred_cost) {
+ seg->temporal_update = 1;
+ vpx_memcpy(seg->tree_probs, t_pred_tree, sizeof(t_pred_tree));
+ vpx_memcpy(seg->pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
+ } else {
+ seg->temporal_update = 0;
+ vpx_memcpy(seg->tree_probs, no_pred_tree, sizeof(no_pred_tree));
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_segmentation.h b/libvpx/vp9/encoder/vp9_segmentation.h
new file mode 100644
index 0000000..2183771
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_segmentation.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_SEGMENTATION_H_
+#define VP9_ENCODER_VP9_SEGMENTATION_H_
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+
+void vp9_enable_segmentation(VP9_PTR ptr);
+void vp9_disable_segmentation(VP9_PTR ptr);
+
+// Valid values for a segment are 0 to 3
+// Segmentation map is arranged as [Rows][Columns]
+void vp9_set_segmentation_map(VP9_PTR ptr, unsigned char *segmentation_map);
+
+// The values given for each segment can be either deltas (from the default
+// value chosen for the frame) or absolute values.
+//
+// Valid range for abs values is (0-127 for MB_LVL_ALT_Q), (0-63 for
+// SEGMENT_ALT_LF)
+// Valid range for delta values is (+/-127 for MB_LVL_ALT_Q), (+/-63 for
+// SEGMENT_ALT_LF)
+//
+// abs_delta = SEGMENT_DELTADATA (use the deltas); abs_delta = SEGMENT_ABSDATA
+// (use the absolute values given).
+void vp9_set_segment_data(VP9_PTR ptr, signed char *feature_data,
+ unsigned char abs_delta);
+
+void vp9_choose_segmap_coding_method(VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_SEGMENTATION_H_
diff --git a/libvpx/vp9/encoder/vp9_ssim.c b/libvpx/vp9/encoder/vp9_ssim.c
new file mode 100644
index 0000000..c155516
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_ssim.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/encoder/vp9_onyx_int.h"
+
+void vp9_ssim_parms_16x16_c(uint8_t *s, int sp, uint8_t *r,
+ int rp, unsigned long *sum_s, unsigned long *sum_r,
+ unsigned long *sum_sq_s, unsigned long *sum_sq_r,
+ unsigned long *sum_sxr) {
+ int i, j;
+ for (i = 0; i < 16; i++, s += sp, r += rp) {
+ for (j = 0; j < 16; j++) {
+ *sum_s += s[j];
+ *sum_r += r[j];
+ *sum_sq_s += s[j] * s[j];
+ *sum_sq_r += r[j] * r[j];
+ *sum_sxr += s[j] * r[j];
+ }
+ }
+}
+
+void vp9_ssim_parms_8x8_c(uint8_t *s, int sp, uint8_t *r, int rp,
+ unsigned long *sum_s, unsigned long *sum_r,
+ unsigned long *sum_sq_s, unsigned long *sum_sq_r,
+ unsigned long *sum_sxr) {
+ int i, j;
+ for (i = 0; i < 8; i++, s += sp, r += rp) {
+ for (j = 0; j < 8; j++) {
+ *sum_s += s[j];
+ *sum_r += r[j];
+ *sum_sq_s += s[j] * s[j];
+ *sum_sq_r += r[j] * r[j];
+ *sum_sxr += s[j] * r[j];
+ }
+ }
+}
+
+static const int64_t cc1 = 26634;   // 64^2 * (0.01 * 255)^2
+static const int64_t cc2 = 239708;  // 64^2 * (0.03 * 255)^2
+
+static double similarity(unsigned long sum_s, unsigned long sum_r,
+ unsigned long sum_sq_s, unsigned long sum_sq_r,
+ unsigned long sum_sxr, int count) {
+ int64_t ssim_n, ssim_d;
+ int64_t c1, c2;
+
+ // scale the constants by number of pixels
+ c1 = (cc1 * count * count) >> 12;
+ c2 = (cc2 * count * count) >> 12;
+
+ ssim_n = (2 * sum_s * sum_r + c1) * ((int64_t) 2 * count * sum_sxr -
+ (int64_t) 2 * sum_s * sum_r + c2);
+
+ ssim_d = (sum_s * sum_s + sum_r * sum_r + c1) *
+ ((int64_t)count * sum_sq_s - (int64_t)sum_s * sum_s +
+ (int64_t)count * sum_sq_r - (int64_t) sum_r * sum_r + c2);
+
+ return ssim_n * 1.0 / ssim_d;
+}
+
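
Editor's note: similarity() is a fixed-point form of the standard SSIM index. Writing \mu for the block means and \sigma for the (co)variances over count pixels, the quantity it evaluates is

    \mathrm{SSIM}(s, r) =
        \frac{(2\mu_s\mu_r + C_1)\,(2\sigma_{sr} + C_2)}
             {(\mu_s^2 + \mu_r^2 + C_1)\,(\sigma_s^2 + \sigma_r^2 + C_2)},
    \qquad C_1 = (0.01 \cdot 255)^2, \quad C_2 = (0.03 \cdot 255)^2.

Scaling each parenthesized factor by count^2 rewrites the means and covariance in terms of the raw sums the code accumulates (the common count^4 cancels in the ratio), and the 64^2 = 4096 factor baked into cc1 and cc2 is exactly what the >> 12 removes after the count * count multiply.
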
+static double ssim_16x16(uint8_t *s, int sp, uint8_t *r, int rp) {
+ unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
+ vp9_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+ &sum_sxr);
+ return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 256);
+}
+
+static double ssim_8x8(uint8_t *s, int sp, uint8_t *r, int rp) {
+ unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
+ vp9_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+ &sum_sxr);
+ return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
+}
+
+// We are using an 8x8 moving window with the starting location of each window
+// on the 4x4 pixel grid. Such an arrangement allows the windows to overlap
+// block boundaries to penalize blocking artifacts.
+double vp9_ssim2(uint8_t *img1, uint8_t *img2, int stride_img1,
+ int stride_img2, int width, int height) {
+ int i, j;
+ int samples = 0;
+ double ssim_total = 0;
+
+  // sample points start at each 4x4 location
+ for (i = 0; i <= height - 8;
+ i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4) {
+ for (j = 0; j <= width - 8; j += 4) {
+ double v = ssim_8x8(img1 + j, stride_img1, img2 + j, stride_img2);
+ ssim_total += v;
+ samples++;
+ }
+ }
+ ssim_total /= samples;
+ return ssim_total;
+}
+
+double vp9_calc_ssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
+ int lumamask, double *weight) {
+ double a, b, c;
+ double ssimv;
+
+ a = vp9_ssim2(source->y_buffer, dest->y_buffer,
+ source->y_stride, dest->y_stride,
+ source->y_crop_width, source->y_crop_height);
+
+ b = vp9_ssim2(source->u_buffer, dest->u_buffer,
+ source->uv_stride, dest->uv_stride,
+ source->uv_crop_width, source->uv_crop_height);
+
+ c = vp9_ssim2(source->v_buffer, dest->v_buffer,
+ source->uv_stride, dest->uv_stride,
+ source->uv_crop_width, source->uv_crop_height);
+
+ ssimv = a * .8 + .1 * (b + c);
+
+ *weight = 1;
+
+ return ssimv;
+}
+
+double vp9_calc_ssimg(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
+ double *ssim_y, double *ssim_u, double *ssim_v) {
+ double ssim_all = 0;
+ double a, b, c;
+
+ a = vp9_ssim2(source->y_buffer, dest->y_buffer,
+ source->y_stride, dest->y_stride,
+ source->y_crop_width, source->y_crop_height);
+
+ b = vp9_ssim2(source->u_buffer, dest->u_buffer,
+ source->uv_stride, dest->uv_stride,
+ source->uv_crop_width, source->uv_crop_height);
+
+ c = vp9_ssim2(source->v_buffer, dest->v_buffer,
+ source->uv_stride, dest->uv_stride,
+ source->uv_crop_width, source->uv_crop_height);
+ *ssim_y = a;
+ *ssim_u = b;
+ *ssim_v = c;
+ ssim_all = (a * 4 + b + c) / 6;
+
+ return ssim_all;
+}
diff --git a/libvpx/vp9/encoder/vp9_subexp.c b/libvpx/vp9/encoder/vp9_subexp.c
new file mode 100644
index 0000000..667b801
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_subexp.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_entropy.h"
+
+#include "vp9/encoder/vp9_boolhuff.h"
+#include "vp9/encoder/vp9_treewriter.h"
+
+#define vp9_cost_upd ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd)) >> 8)
+#define vp9_cost_upd256 ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd)))
+
+static int update_bits[255];
+
+static int count_uniform(int v, int n) {
+ int l = get_unsigned_bits(n);
+ int m;
+ if (l == 0) return 0;
+ m = (1 << l) - n;
+ if (v < m)
+ return l - 1;
+ else
+ return l;
+}
+
+static int split_index(int i, int n, int modulus) {
+ int max1 = (n - 1 - modulus / 2) / modulus + 1;
+ if (i % modulus == modulus / 2)
+ i = i / modulus;
+ else
+ i = max1 + i - (i + modulus - modulus / 2) / modulus;
+ return i;
+}
+
+static int recenter_nonneg(int v, int m) {
+ if (v > (m << 1))
+ return v;
+ else if (v >= m)
+ return ((v - m) << 1);
+ else
+ return ((m - v) << 1) - 1;
+}
+
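
Editor's note: as a worked example, recenter_nonneg folds values around the old probability m so that small deltas get small indices: with m = 5 the inputs 5, 4, 6, 3, 7 map to 0, 1, 2, 3, 4 respectively, and anything beyond 2m passes through unchanged. remap_prob below then sends that index through map_table which, as its generating comment notes, maps inputs at every 13th position to the smallest codes (0 through 19) so that a small set of coarse deltas stays cheap to signal.
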
+static int remap_prob(int v, int m) {
+ int i;
+ static const int map_table[MAX_PROB - 1] = {
+ // generated by:
+ // map_table[j] = split_index(j, MAX_PROB - 1, MODULUS_PARAM);
+ 20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 2, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 4, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 6, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 7, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 8, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+ 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171,
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 191, 192, 193, 14, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212, 213,
+ 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 17, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 18, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19,
+ };
+ v--;
+ m--;
+ if ((m << 1) <= MAX_PROB)
+ i = recenter_nonneg(v, m) - 1;
+ else
+ i = recenter_nonneg(MAX_PROB - 1 - v, MAX_PROB - 1 - m) - 1;
+
+ i = map_table[i];
+ return i;
+}
+
+static int count_term_subexp(int word, int k, int num_syms) {
+ int count = 0;
+ int i = 0;
+ int mk = 0;
+ while (1) {
+ int b = (i ? k + i - 1 : k);
+ int a = (1 << b);
+ if (num_syms <= mk + 3 * a) {
+ count += count_uniform(word - mk, num_syms - mk);
+ break;
+ } else {
+ int t = (word >= mk + a);
+ count++;
+ if (t) {
+ i = i + 1;
+ mk += a;
+ } else {
+ count += b;
+ break;
+ }
+ }
+ }
+ return count;
+}
+
+static int prob_diff_update_cost(vp9_prob newp, vp9_prob oldp) {
+ int delp = remap_prob(newp, oldp);
+ return update_bits[delp] * 256;
+}
+
+static void encode_uniform(vp9_writer *w, int v, int n) {
+ int l = get_unsigned_bits(n);
+ int m;
+ if (l == 0)
+ return;
+ m = (1 << l) - n;
+ if (v < m) {
+ vp9_write_literal(w, v, l - 1);
+ } else {
+ vp9_write_literal(w, m + ((v - m) >> 1), l - 1);
+ vp9_write_literal(w, (v - m) & 1, 1);
+ }
+}
+
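
Editor's note: encode_uniform writes a value v in [0, n) as a truncated binary code. With l = get_unsigned_bits(n) (assumed here to be the bit width of n), the first m = 2^l - n values get l - 1 bits and the rest get l bits. As a worked example with n = 5: l = 3 and m = 3, so v = 0, 1, 2 are sent as the two-bit codes 00, 01, 10, while v = 3 is sent as 11 followed by a 0 and v = 4 as 11 followed by a 1. count_uniform above returns exactly the corresponding bit counts without writing anything.
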
+static void encode_term_subexp(vp9_writer *w, int word, int k, int num_syms) {
+ int i = 0;
+ int mk = 0;
+ while (1) {
+ int b = (i ? k + i - 1 : k);
+ int a = (1 << b);
+ if (num_syms <= mk + 3 * a) {
+ encode_uniform(w, word - mk, num_syms - mk);
+ break;
+ } else {
+ int t = (word >= mk + a);
+ vp9_write_literal(w, t, 1);
+ if (t) {
+ i = i + 1;
+ mk += a;
+ } else {
+ vp9_write_literal(w, word - mk, b);
+ break;
+ }
+ }
+ }
+}
+
+void vp9_write_prob_diff_update(vp9_writer *w, vp9_prob newp, vp9_prob oldp) {
+ const int delp = remap_prob(newp, oldp);
+ encode_term_subexp(w, delp, SUBEXP_PARAM, 255);
+}
+
+void vp9_compute_update_table() {
+ int i;
+ for (i = 0; i < 254; i++)
+ update_bits[i] = count_term_subexp(i, SUBEXP_PARAM, 255);
+}
+
+int vp9_prob_diff_update_savings_search(const unsigned int *ct,
+ vp9_prob oldp, vp9_prob *bestp,
+ vp9_prob upd) {
+ const int old_b = cost_branch256(ct, oldp);
+ int bestsavings = 0;
+ vp9_prob newp, bestnewp = oldp;
+ const int step = *bestp > oldp ? -1 : 1;
+
+ for (newp = *bestp; newp != oldp; newp += step) {
+ const int new_b = cost_branch256(ct, newp);
+ const int update_b = prob_diff_update_cost(newp, oldp) + vp9_cost_upd256;
+ const int savings = old_b - new_b - update_b;
+ if (savings > bestsavings) {
+ bestsavings = savings;
+ bestnewp = newp;
+ }
+ }
+ *bestp = bestnewp;
+ return bestsavings;
+}
+
+int vp9_prob_diff_update_savings_search_model(const unsigned int *ct,
+ const vp9_prob *oldp,
+ vp9_prob *bestp,
+ vp9_prob upd,
+ int b, int r) {
+ int i, old_b, new_b, update_b, savings, bestsavings, step;
+ int newp;
+ vp9_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
+ vp9_model_to_full_probs(oldp, oldplist);
+ vpx_memcpy(newplist, oldp, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+ for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
+ old_b += cost_branch256(ct + 2 * i, oldplist[i]);
+ old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]);
+
+ bestsavings = 0;
+ bestnewp = oldp[PIVOT_NODE];
+
+ step = (*bestp > oldp[PIVOT_NODE] ? -1 : 1);
+
+ for (newp = *bestp; newp != oldp[PIVOT_NODE]; newp += step) {
+ if (newp < 1 || newp > 255)
+ continue;
+ newplist[PIVOT_NODE] = newp;
+ vp9_model_to_full_probs(newplist, newplist);
+ for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
+ new_b += cost_branch256(ct + 2 * i, newplist[i]);
+ new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
+ update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) +
+ vp9_cost_upd256;
+ savings = old_b - new_b - update_b;
+ if (savings > bestsavings) {
+ bestsavings = savings;
+ bestnewp = newp;
+ }
+ }
+ *bestp = bestnewp;
+ return bestsavings;
+}
+
+void vp9_cond_prob_diff_update(vp9_writer *w, vp9_prob *oldp,
+ vp9_prob upd, unsigned int *ct) {
+ vp9_prob newp = get_binary_prob(ct[0], ct[1]);
+ const int savings = vp9_prob_diff_update_savings_search(ct, *oldp, &newp,
+ upd);
+ assert(newp >= 1);
+ if (savings > 0) {
+ vp9_write(w, 1, upd);
+ vp9_write_prob_diff_update(w, newp, *oldp);
+ *oldp = newp;
+ } else {
+ vp9_write(w, 0, upd);
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_subexp.h b/libvpx/vp9/encoder/vp9_subexp.h
new file mode 100644
index 0000000..7acdaf6
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_subexp.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_SUBEXP_H_
+#define VP9_ENCODER_VP9_SUBEXP_H_
+
+void vp9_compute_update_table();
+
+
+void vp9_write_prob_diff_update(vp9_writer *w,
+ vp9_prob newp, vp9_prob oldp);
+
+void vp9_cond_prob_diff_update(vp9_writer *w, vp9_prob *oldp,
+ vp9_prob upd, unsigned int *ct);
+
+int vp9_prob_diff_update_savings_search(const unsigned int *ct,
+ vp9_prob oldp, vp9_prob *bestp,
+ vp9_prob upd);
+
+
+int vp9_prob_diff_update_savings_search_model(const unsigned int *ct,
+ const vp9_prob *oldp,
+ vp9_prob *bestp,
+ vp9_prob upd,
+ int b, int r);
+
+#endif  // VP9_ENCODER_VP9_SUBEXP_H_
diff --git a/libvpx/vp9/encoder/vp9_temporal_filter.c b/libvpx/vp9/encoder/vp9_temporal_filter.c
new file mode 100644
index 0000000..63826ee
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_temporal_filter.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <limits.h>
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_firstpass.h"
+#include "vp9/encoder/vp9_psnr.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_extend.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/vpx_timer.h"
+
+#define ALT_REF_MC_ENABLED 1 // dis/enable MC in AltRef filtering
+#define ALT_REF_SUBPEL_ENABLED 1 // dis/enable subpel in MC AltRef filtering
+
+static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
+ uint8_t *y_mb_ptr,
+ uint8_t *u_mb_ptr,
+ uint8_t *v_mb_ptr,
+ int stride,
+ int mv_row,
+ int mv_col,
+ uint8_t *pred) {
+ const int which_mv = 0;
+ MV mv = { mv_row, mv_col };
+
+ vp9_build_inter_predictor(y_mb_ptr, stride,
+ &pred[0], 16,
+ &mv,
+ &xd->scale_factor[which_mv],
+ 16, 16,
+ which_mv,
+ &xd->subpix, MV_PRECISION_Q3);
+
+ stride = (stride + 1) >> 1;
+
+ vp9_build_inter_predictor(u_mb_ptr, stride,
+ &pred[256], 8,
+ &mv,
+ &xd->scale_factor[which_mv],
+ 8, 8,
+ which_mv,
+ &xd->subpix, MV_PRECISION_Q4);
+
+ vp9_build_inter_predictor(v_mb_ptr, stride,
+ &pred[320], 8,
+ &mv,
+ &xd->scale_factor[which_mv],
+ 8, 8,
+ which_mv,
+ &xd->subpix, MV_PRECISION_Q4);
+}
+
+void vp9_temporal_filter_apply_c(uint8_t *frame1,
+ unsigned int stride,
+ uint8_t *frame2,
+ unsigned int block_size,
+ int strength,
+ int filter_weight,
+ unsigned int *accumulator,
+ uint16_t *count) {
+ unsigned int i, j, k;
+ int modifier;
+ int byte = 0;
+
+ for (i = 0, k = 0; i < block_size; i++) {
+ for (j = 0; j < block_size; j++, k++) {
+
+ int src_byte = frame1[byte];
+ int pixel_value = *frame2++;
+
+ modifier = src_byte - pixel_value;
+      // This is an integer approximation of:
+      // float coeff = (3.0 * modifier * modifier) / pow(2, strength);
+      // modifier = (int)roundf(coeff > 16 ? 0 : 16 - coeff);
+ modifier *= modifier;
+ modifier *= 3;
+ modifier += 1 << (strength - 1);
+ modifier >>= strength;
+
+ if (modifier > 16)
+ modifier = 16;
+
+ modifier = 16 - modifier;
+ modifier *= filter_weight;
+
+ count[k] += modifier;
+ accumulator[k] += modifier * pixel_value;
+
+ byte++;
+ }
+
+ byte += stride - block_size;
+ }
+}
+
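
Editor's note: as a worked example of the weighting above, take strength = 6 and a pixel difference of 10: modifier = 10^2 * 3 = 300, plus the rounding term 2^5 = 32, gives 332, which shifted right by 6 is 5; that is under the clamp, so the blend weight is 16 - 5 = 11, and with filter_weight = 2 the pixel adds 22 to count[k] and 22 * pixel_value to the accumulator. A difference of 19 or more pushes the modifier past 16, so such pixels are ignored entirely at this strength.
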
+#if ALT_REF_MC_ENABLED
+
+static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
+ uint8_t *arf_frame_buf,
+ uint8_t *frame_ptr_buf,
+ int stride,
+ int error_thresh) {
+ MACROBLOCK *x = &cpi->mb;
+ MACROBLOCKD* const xd = &x->e_mbd;
+ int step_param;
+ int sadpb = x->sadperbit16;
+ int bestsme = INT_MAX;
+
+ int_mv best_ref_mv1;
+ int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
+ int_mv *ref_mv;
+
+ // Save input state
+ struct buf_2d src = x->plane[0].src;
+ struct buf_2d pre = xd->plane[0].pre[0];
+
+ best_ref_mv1.as_int = 0;
+ best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >> 3;
+ best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >> 3;
+
+ // Setup frame pointers
+ x->plane[0].src.buf = arf_frame_buf;
+ x->plane[0].src.stride = stride;
+ xd->plane[0].pre[0].buf = frame_ptr_buf;
+ xd->plane[0].pre[0].stride = stride;
+
+ // Further step/diamond searches as necessary
+ if (cpi->speed < 8)
+ step_param = cpi->sf.reduce_first_step_size + ((cpi->speed > 5) ? 1 : 0);
+ else
+ step_param = cpi->sf.reduce_first_step_size + 2;
+ step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2));
+
+ /*cpi->sf.search_method == HEX*/
+  // TODO: check that the 16x16 vf & sdf are selected here
+ // Ignore mv costing by sending NULL pointer instead of cost arrays
+ ref_mv = &x->e_mbd.mi_8x8[0]->bmi[0].as_mv[0];
+ bestsme = vp9_hex_search(x, &best_ref_mv1_full,
+ step_param, sadpb, 1,
+ &cpi->fn_ptr[BLOCK_16X16],
+ 0, &best_ref_mv1, ref_mv);
+
+#if ALT_REF_SUBPEL_ENABLED
+ // Try sub-pixel MC?
+ // if (bestsme > error_thresh && bestsme < INT_MAX)
+ {
+ int distortion;
+ unsigned int sse;
+ // Ignore mv costing by sending NULL pointer instead of cost array
+ bestsme = cpi->find_fractional_mv_step(x, ref_mv,
+ &best_ref_mv1,
+ x->errorperbit,
+ &cpi->fn_ptr[BLOCK_16X16],
+ 0, cpi->sf.subpel_iters_per_step,
+ NULL, NULL,
+ &distortion, &sse);
+ }
+#endif
+
+ // Restore input state
+ x->plane[0].src = src;
+ xd->plane[0].pre[0] = pre;
+
+ return bestsme;
+}
+#endif
+
+static void temporal_filter_iterate_c(VP9_COMP *cpi,
+ int frame_count,
+ int alt_ref_index,
+ int strength) {
+ int byte;
+ int frame;
+ int mb_col, mb_row;
+ unsigned int filter_weight;
+ int mb_cols = cpi->common.mb_cols;
+ int mb_rows = cpi->common.mb_rows;
+ int mb_y_offset = 0;
+ int mb_uv_offset = 0;
+ DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 + 8 * 8 + 8 * 8);
+ DECLARE_ALIGNED_ARRAY(16, uint16_t, count, 16 * 16 + 8 * 8 + 8 * 8);
+ MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+ YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index];
+ uint8_t *dst1, *dst2;
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 + 8 * 8 + 8 * 8);
+
+ // Save input state
+ uint8_t* input_buffer[MAX_MB_PLANE];
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ input_buffer[i] = mbd->plane[i].pre[0].buf;
+
+ for (mb_row = 0; mb_row < mb_rows; mb_row++) {
+#if ALT_REF_MC_ENABLED
+    // Source frames are extended to 16 pixels. This is different from
+    // L/A/G reference frames, which have a border of 32 (VP9BORDERINPIXELS).
+    // A 6/8 tap filter is used for motion search, which requires 2 pixels
+    // before and 3 pixels after, so the largest Y mv on a border would
+    // be 16 - VP9_INTERP_EXTEND. The UV blocks are half the size of the
+    // Y plane and therefore only extended by 8, so the largest mv a UV
+    // block can support is 8 - VP9_INTERP_EXTEND. A UV mv is half of a
+    // Y mv, and (16 - VP9_INTERP_EXTEND) >> 1 is greater than
+    // 8 - VP9_INTERP_EXTEND.
+    // To keep the mv in play for both Y and UV planes, the max that it
+    // can be on a border is therefore 16 - (2 * VP9_INTERP_EXTEND + 1).
+ cpi->mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VP9_INTERP_EXTEND));
+ cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16)
+ + (17 - 2 * VP9_INTERP_EXTEND);
+#endif
+
+ for (mb_col = 0; mb_col < mb_cols; mb_col++) {
+ int i, j, k;
+ int stride;
+
+ vpx_memset(accumulator, 0, 384 * sizeof(unsigned int));
+ vpx_memset(count, 0, 384 * sizeof(uint16_t));
+
+#if ALT_REF_MC_ENABLED
+ cpi->mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND));
+ cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16)
+ + (17 - 2 * VP9_INTERP_EXTEND);
+#endif
+
+ for (frame = 0; frame < frame_count; frame++) {
+ if (cpi->frames[frame] == NULL)
+ continue;
+
+ mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row = 0;
+ mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col = 0;
+
+ if (frame == alt_ref_index) {
+ filter_weight = 2;
+ } else {
+ int err = 0;
+#if ALT_REF_MC_ENABLED
+#define THRESH_LOW 10000
+#define THRESH_HIGH 20000
+
+ // Find best match in this frame by MC
+ err = temporal_filter_find_matching_mb_c
+ (cpi,
+ cpi->frames[alt_ref_index]->y_buffer + mb_y_offset,
+ cpi->frames[frame]->y_buffer + mb_y_offset,
+ cpi->frames[frame]->y_stride,
+ THRESH_LOW);
+#endif
+        // Assign a higher weight to the matching MB if its error
+        // score is lower. If MC is not applied, the default behavior
+        // is to weight all MBs equally.
+ filter_weight = err < THRESH_LOW
+ ? 2 : err < THRESH_HIGH ? 1 : 0;
+ }
+
+ if (filter_weight != 0) {
+ // Construct the predictors
+ temporal_filter_predictors_mb_c
+ (mbd,
+ cpi->frames[frame]->y_buffer + mb_y_offset,
+ cpi->frames[frame]->u_buffer + mb_uv_offset,
+ cpi->frames[frame]->v_buffer + mb_uv_offset,
+ cpi->frames[frame]->y_stride,
+ mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row,
+ mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col,
+ predictor);
+
+ // Apply the filter (YUV)
+ vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
+ predictor, 16, strength, filter_weight,
+ accumulator, count);
+
+ vp9_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
+ predictor + 256, 8, strength, filter_weight,
+ accumulator + 256, count + 256);
+
+ vp9_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
+ predictor + 320, 8, strength, filter_weight,
+ accumulator + 320, count + 320);
+ }
+ }
+
+ // Normalize filter output to produce AltRef frame
+ dst1 = cpi->alt_ref_buffer.y_buffer;
+ stride = cpi->alt_ref_buffer.y_stride;
+ byte = mb_y_offset;
+ for (i = 0, k = 0; i < 16; i++) {
+ for (j = 0; j < 16; j++, k++) {
+ unsigned int pval = accumulator[k] + (count[k] >> 1);
+ pval *= cpi->fixed_divide[count[k]];
+ pval >>= 19;
+
+ dst1[byte] = (uint8_t)pval;
+
+ // move to next pixel
+ byte++;
+ }
+
+ byte += stride - 16;
+ }
+
+ dst1 = cpi->alt_ref_buffer.u_buffer;
+ dst2 = cpi->alt_ref_buffer.v_buffer;
+ stride = cpi->alt_ref_buffer.uv_stride;
+ byte = mb_uv_offset;
+ for (i = 0, k = 256; i < 8; i++) {
+ for (j = 0; j < 8; j++, k++) {
+ int m = k + 64;
+
+ // U
+ unsigned int pval = accumulator[k] + (count[k] >> 1);
+ pval *= cpi->fixed_divide[count[k]];
+ pval >>= 19;
+ dst1[byte] = (uint8_t)pval;
+
+ // V
+ pval = accumulator[m] + (count[m] >> 1);
+ pval *= cpi->fixed_divide[count[m]];
+ pval >>= 19;
+ dst2[byte] = (uint8_t)pval;
+
+ // move to next pixel
+ byte++;
+ }
+
+ byte += stride - 8;
+ }
+
+ mb_y_offset += 16;
+ mb_uv_offset += 8;
+ }
+
+ mb_y_offset += 16 * (f->y_stride - mb_cols);
+ mb_uv_offset += 8 * (f->uv_stride - mb_cols);
+ }
+
+ // Restore input state
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ mbd->plane[i].pre[0].buf = input_buffer[i];
+}
+
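
Editor's note: the normalization loops above divide each accumulator by its count without an integer divide, via a reciprocal table in Q19. A minimal sketch of that step, assuming cpi->fixed_divide[c] holds (1 << 19) / c (the table setup lives elsewhere in the encoder; normalize_pixel is a hypothetical name):

    #include <stdint.h>

    /* Rounded acc / cnt using an assumed Q19 reciprocal table. */
    static uint8_t normalize_pixel(unsigned int acc, uint16_t cnt,
                                   const unsigned int *fixed_divide) {
      unsigned int pval = acc + (cnt >> 1);  /* add half for rounding */
      pval *= fixed_divide[cnt];             /* multiply by 2^19 / cnt */
      return (uint8_t)(pval >> 19);          /* strip the Q19 scaling */
    }
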
+void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ int frame = 0;
+
+ int frames_to_blur_backward = 0;
+ int frames_to_blur_forward = 0;
+ int frames_to_blur = 0;
+ int start_frame = 0;
+
+ int strength = cpi->active_arnr_strength;
+ int blur_type = cpi->oxcf.arnr_type;
+ int max_frames = cpi->active_arnr_frames;
+
+ const int num_frames_backward = distance;
+ const int num_frames_forward = vp9_lookahead_depth(cpi->lookahead)
+ - (num_frames_backward + 1);
+
+ switch (blur_type) {
+ case 1:
+ // Backward Blur
+ frames_to_blur_backward = num_frames_backward;
+
+ if (frames_to_blur_backward >= max_frames)
+ frames_to_blur_backward = max_frames - 1;
+
+ frames_to_blur = frames_to_blur_backward + 1;
+ break;
+
+ case 2:
+ // Forward Blur
+
+ frames_to_blur_forward = num_frames_forward;
+
+ if (frames_to_blur_forward >= max_frames)
+ frames_to_blur_forward = max_frames - 1;
+
+ frames_to_blur = frames_to_blur_forward + 1;
+ break;
+
+ case 3:
+ default:
+ // Center Blur
+ frames_to_blur_forward = num_frames_forward;
+ frames_to_blur_backward = num_frames_backward;
+
+ if (frames_to_blur_forward > frames_to_blur_backward)
+ frames_to_blur_forward = frames_to_blur_backward;
+
+ if (frames_to_blur_backward > frames_to_blur_forward)
+ frames_to_blur_backward = frames_to_blur_forward;
+
+ // When max_frames is even we have 1 more frame backward than forward
+ if (frames_to_blur_forward > (max_frames - 1) / 2)
+ frames_to_blur_forward = ((max_frames - 1) / 2);
+
+ if (frames_to_blur_backward > (max_frames / 2))
+ frames_to_blur_backward = (max_frames / 2);
+
+ frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1;
+ break;
+ }
+
+ start_frame = distance + frames_to_blur_forward;
+
+#ifdef DEBUGFWG
+ // DEBUG FWG
+ printf("max:%d FBCK:%d FFWD:%d ftb:%d ftbbck:%d ftbfwd:%d sei:%d lasei:%d start:%d"
+, max_frames
+, num_frames_backward
+, num_frames_forward
+, frames_to_blur
+, frames_to_blur_backward
+, frames_to_blur_forward
+, cpi->source_encode_index
+, cpi->last_alt_ref_sei
+, start_frame);
+#endif
+
+  // Set up scaling factors. Scaling each of the arnr frames is not supported.
+ vp9_setup_scale_factors_for_frame(&cpi->mb.e_mbd.scale_factor[0],
+ cm->yv12_fb[cm->new_fb_idx].y_crop_width,
+ cm->yv12_fb[cm->new_fb_idx].y_crop_height,
+ cm->width, cm->height);
+
+  // Set up frame pointers; NULL indicates a frame is not included in the filter
+ vpx_memset(cpi->frames, 0, max_frames * sizeof(YV12_BUFFER_CONFIG *));
+ for (frame = 0; frame < frames_to_blur; frame++) {
+ int which_buffer = start_frame - frame;
+ struct lookahead_entry *buf = vp9_lookahead_peek(cpi->lookahead,
+ which_buffer);
+ cpi->frames[frames_to_blur - 1 - frame] = &buf->img;
+ }
+
+ temporal_filter_iterate_c(cpi, frames_to_blur, frames_to_blur_backward,
+ strength);
+}
+
+void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame,
+ const int group_boost) {
+ int half_gf_int;
+ int frames_after_arf;
+ int frames_bwd = cpi->oxcf.arnr_max_frames - 1;
+ int frames_fwd = cpi->oxcf.arnr_max_frames - 1;
+ int q;
+
+ // Define the arnr filter width for this group of frames:
+ // We only filter frames that lie within a distance of half
+ // the GF interval from the ARF frame. We also have to trap
+ // cases where the filter extends beyond the end of clip.
+ // Note: this_frame->frame has been updated in the loop
+ // so it now points at the ARF frame.
+ half_gf_int = cpi->baseline_gf_interval >> 1;
+ frames_after_arf = (int)(cpi->twopass.total_stats.count - this_frame - 1);
+
+ switch (cpi->oxcf.arnr_type) {
+ case 1: // Backward filter
+ frames_fwd = 0;
+ if (frames_bwd > half_gf_int)
+ frames_bwd = half_gf_int;
+ break;
+
+ case 2: // Forward filter
+ if (frames_fwd > half_gf_int)
+ frames_fwd = half_gf_int;
+ if (frames_fwd > frames_after_arf)
+ frames_fwd = frames_after_arf;
+ frames_bwd = 0;
+ break;
+
+ case 3: // Centered filter
+ default:
+ frames_fwd >>= 1;
+ if (frames_fwd > frames_after_arf)
+ frames_fwd = frames_after_arf;
+ if (frames_fwd > half_gf_int)
+ frames_fwd = half_gf_int;
+
+ frames_bwd = frames_fwd;
+
+      // For an even-length filter there is one more frame backward
+      // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
+ if (frames_bwd < half_gf_int)
+ frames_bwd += (cpi->oxcf.arnr_max_frames + 1) & 0x1;
+ break;
+ }
+
+ cpi->active_arnr_frames = frames_bwd + 1 + frames_fwd;
+
+ // Adjust the strength based on active max q
+ q = ((int)vp9_convert_qindex_to_q(cpi->active_worst_quality) >> 1);
+ if (q > 8) {
+ cpi->active_arnr_strength = cpi->oxcf.arnr_strength;
+ } else {
+ cpi->active_arnr_strength = cpi->oxcf.arnr_strength - (8 - q);
+ if (cpi->active_arnr_strength < 0)
+ cpi->active_arnr_strength = 0;
+ }
+
+ // Adjust number of frames in filter and strength based on gf boost level.
+ if (cpi->active_arnr_frames > (group_boost / 150)) {
+ cpi->active_arnr_frames = (group_boost / 150);
+ cpi->active_arnr_frames += !(cpi->active_arnr_frames & 1);
+ }
+ if (cpi->active_arnr_strength > (group_boost / 300)) {
+ cpi->active_arnr_strength = (group_boost / 300);
+ }
+}
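
Editor's note: two worked examples of the adjustments above. If vp9_convert_qindex_to_q returns 12 for the active worst quality, then q = 6 and the strength drops to arnr_strength - 2 (floored at zero); and with group_boost = 450 the filter length is capped at 450 / 150 = 3 frames (already odd, so unchanged by the parity fix-up) and the strength at 450 / 300 = 1.
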
diff --git a/libvpx/vp9/encoder/vp9_temporal_filter.h b/libvpx/vp9/encoder/vp9_temporal_filter.h
new file mode 100644
index 0000000..c5f3b46
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_temporal_filter.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_TEMPORAL_FILTER_H_
+#define VP9_ENCODER_VP9_TEMPORAL_FILTER_H_
+
+void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance);
+void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame,
+ const int group_boost);
+
+#endif // VP9_ENCODER_VP9_TEMPORAL_FILTER_H_
diff --git a/libvpx/vp9/encoder/vp9_tokenize.c b/libvpx/vp9/encoder/vp9_tokenize.c
new file mode 100644
index 0000000..0c9bf9d
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_tokenize.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_entropy.h"
+
+/* Global event counters used for accumulating statistics across several
+   compressions, then generating vp9_context.c (the initial set of stats). */
+
+#ifdef ENTROPY_STATS
+vp9_coeff_accum context_counters[TX_SIZES][BLOCK_TYPES];
+extern vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES];
+#endif /* ENTROPY_STATS */
+
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+
+static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
+const TOKENVALUE *vp9_dct_value_tokens_ptr;
+static int dct_value_cost[DCT_MAX_VALUE * 2];
+const int *vp9_dct_value_cost_ptr;
+
+static void fill_value_tokens() {
+
+ TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE;
+ const vp9_extra_bit *const e = vp9_extra_bits;
+
+ int i = -DCT_MAX_VALUE;
+ int sign = 1;
+
+ do {
+ if (!i)
+ sign = 0;
+
+ {
+ const int a = sign ? -i : i;
+ int eb = sign;
+
+ if (a > 4) {
+ int j = 4;
+
+ while (++j < 11 && e[j].base_val <= a) {}
+
+ t[i].token = --j;
+ eb |= (a - e[j].base_val) << 1;
+ } else
+ t[i].token = a;
+
+ t[i].extra = eb;
+ }
+
+    // initialize the cost for extra bits for all possible coefficient values.
+ {
+ int cost = 0;
+ const vp9_extra_bit *p = vp9_extra_bits + t[i].token;
+
+ if (p->base_val) {
+ const int extra = t[i].extra;
+ const int length = p->len;
+
+ if (length)
+ cost += treed_cost(p->tree, p->prob, extra >> 1, length);
+
+ cost += vp9_cost_bit(vp9_prob_half, extra & 1); /* sign */
+ dct_value_cost[i + DCT_MAX_VALUE] = cost;
+ }
+
+ }
+
+ } while (++i < DCT_MAX_VALUE);
+
+ vp9_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
+ vp9_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
+}
+
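
Editor's note: a worked example of fill_value_tokens, assuming the standard VP9 extra-bit categories (literal tokens for 0 through 4, then categories with base values 5, 7, 11, 19, 35 and 67): a coefficient of +9 exceeds 4, the scan stops at the category based at 7, so the token is the CAT2 token with extra = (9 - 7) << 1 = 4 and a clear sign bit; -9 gets the same token with extra = 5, the low bit carrying the sign.
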
+struct tokenize_b_args {
+ VP9_COMP *cpi;
+ MACROBLOCKD *xd;
+ TOKENEXTRA **tp;
+ TX_SIZE tx_size;
+};
+
+static void set_entropy_context_b(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg) {
+ struct tokenize_b_args* const args = arg;
+ MACROBLOCKD *const xd = args->xd;
+ struct macroblockd_plane *pd = &xd->plane[plane];
+ int aoff, loff;
+ txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);
+ set_contexts(xd, pd, plane_bsize, tx_size, pd->eobs[block] > 0, aoff, loff);
+}
+
+static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, void *arg) {
+ struct tokenize_b_args* const args = arg;
+ VP9_COMP *cpi = args->cpi;
+ MACROBLOCKD *xd = args->xd;
+ TOKENEXTRA **tp = args->tp;
+ struct macroblockd_plane *pd = &xd->plane[plane];
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ int pt; /* near block/prev token context index */
+ int c = 0, rc = 0;
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ const int eob = pd->eobs[block];
+ const PLANE_TYPE type = pd->plane_type;
+ const int16_t *qcoeff_ptr = BLOCK_OFFSET(pd->qcoeff, block);
+
+ const int segment_id = mbmi->segment_id;
+ const int16_t *scan, *nb;
+ vp9_coeff_count *const counts = cpi->coef_counts[tx_size];
+ vp9_coeff_probs_model *const coef_probs = cpi->common.fc.coef_probs[tx_size];
+ const int ref = is_inter_block(mbmi);
+ uint8_t token_cache[1024];
+ const uint8_t *band_translate;
+ ENTROPY_CONTEXT *A, *L;
+ const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);
+ int aoff, loff;
+ txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);
+
+ A = pd->above_context + aoff;
+ L = pd->left_context + loff;
+
+ assert((!type && !plane) || (type && plane));
+
+ pt = get_entropy_context(xd, tx_size, type, block, A, L,
+ &scan, &band_translate);
+ nb = vp9_get_coef_neighbors_handle(scan);
+ c = 0;
+ do {
+ const int band = get_coef_band(band_translate, c);
+ int token;
+ int v = 0;
+ rc = scan[c];
+ if (c)
+ pt = get_coef_context(nb, token_cache, c);
+ if (c < eob) {
+ v = qcoeff_ptr[rc];
+ assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE);
+
+ t->extra = vp9_dct_value_tokens_ptr[v].extra;
+ token = vp9_dct_value_tokens_ptr[v].token;
+ } else {
+ token = DCT_EOB_TOKEN;
+ }
+
+ t->token = token;
+ t->context_tree = coef_probs[type][ref][band][pt];
+ t->skip_eob_node = (c > 0) && (token_cache[scan[c - 1]] == 0);
+
+ assert(vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);
+
+ ++counts[type][ref][band][pt][token];
+ if (!t->skip_eob_node)
+ ++cpi->common.counts.eob_branch[tx_size][type][ref][band][pt];
+
+ token_cache[rc] = vp9_pt_energy_class[token];
+ ++t;
+ } while (c < eob && ++c < seg_eob);
+
+ *tp = t;
+
+ set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
+}
+
+struct is_skippable_args {
+ MACROBLOCKD *xd;
+ int *skippable;
+};
+
+static void is_skippable(int plane, int block,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ void *argv) {
+ struct is_skippable_args *args = argv;
+ args->skippable[0] &= (!args->xd->plane[plane].eobs[block]);
+}
+
+int vp9_sb_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE bsize) {
+ int result = 1;
+ struct is_skippable_args args = {xd, &result};
+ foreach_transformed_block(xd, bsize, is_skippable, &args);
+ return result;
+}
+
+int vp9_is_skippable_in_plane(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+ int plane) {
+ int result = 1;
+ struct is_skippable_args args = {xd, &result};
+ foreach_transformed_block_in_plane(xd, bsize, plane, is_skippable, &args);
+ return result;
+}
+
+void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
+ BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ TOKENEXTRA *t_backup = *t;
+ const int mb_skip_context = vp9_get_pred_context_mbskip(xd);
+ const int skip_inc = !vp9_segfeature_active(&cm->seg, mbmi->segment_id,
+ SEG_LVL_SKIP);
+ struct tokenize_b_args arg = {cpi, xd, t, mbmi->tx_size};
+
+ mbmi->skip_coeff = vp9_sb_is_skippable(xd, bsize);
+ if (mbmi->skip_coeff) {
+ if (!dry_run)
+ cm->counts.mbskip[mb_skip_context][1] += skip_inc;
+ reset_skip_context(xd, bsize);
+ if (dry_run)
+ *t = t_backup;
+ return;
+ }
+
+ if (!dry_run) {
+ cm->counts.mbskip[mb_skip_context][0] += skip_inc;
+ foreach_transformed_block(xd, bsize, tokenize_b, &arg);
+ } else {
+ foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+ *t = t_backup;
+ }
+}
+
+#ifdef ENTROPY_STATS
+void init_context_counters(void) {
+ FILE *f = fopen("context.bin", "rb");
+ if (!f) {
+ vp9_zero(context_counters);
+ } else {
+ fread(context_counters, sizeof(context_counters), 1, f);
+ fclose(f);
+ }
+
+ f = fopen("treeupdate.bin", "rb");
+ if (!f) {
+ vpx_memset(tree_update_hist, 0, sizeof(tree_update_hist));
+ } else {
+ fread(tree_update_hist, sizeof(tree_update_hist), 1, f);
+ fclose(f);
+ }
+}
+
+static void print_counter(FILE *f, vp9_coeff_accum *context_counters,
+ int block_types, const char *header) {
+ int type, ref, band, pt, t;
+
+ fprintf(f, "static const vp9_coeff_count %s = {\n", header);
+
+#define Comma(X) (X ? "," : "")
+ type = 0;
+ do {
+ ref = 0;
+ fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
+ do {
+      fprintf(f, "%s\n  { /* %s */", Comma(ref), ref ? "Inter" : "Intra");
+ band = 0;
+ do {
+ fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
+ pt = 0;
+ do {
+ fprintf(f, "%s\n {", Comma(pt));
+
+ t = 0;
+ do {
+ const int64_t x = context_counters[type][ref][band][pt][t];
+ const int y = (int) x;
+
+ assert(x == (int64_t) y); /* no overflow handling yet */
+ fprintf(f, "%s %d", Comma(t), y);
+ } while (++t < 1 + MAX_ENTROPY_TOKENS);
+ fprintf(f, "}");
+ } while (++pt < PREV_COEF_CONTEXTS);
+ fprintf(f, "\n }");
+ } while (++band < COEF_BANDS);
+ fprintf(f, "\n }");
+ } while (++ref < REF_TYPES);
+ fprintf(f, "\n }");
+ } while (++type < block_types);
+ fprintf(f, "\n};\n");
+}
+
+static void print_probs(FILE *f, vp9_coeff_accum *context_counters,
+ int block_types, const char *header) {
+ int type, ref, band, pt, t;
+
+ fprintf(f, "static const vp9_coeff_probs %s = {", header);
+
+ type = 0;
+#define Newline(x, spaces) (x ? " " : "\n" spaces)
+ do {
+ fprintf(f, "%s%s{ /* block Type %d */",
+ Comma(type), Newline(type, " "), type);
+ ref = 0;
+ do {
+ fprintf(f, "%s%s{ /* %s */",
+              Comma(ref), Newline(ref, "  "), ref ? "Inter" : "Intra");
+ band = 0;
+ do {
+ fprintf(f, "%s%s{ /* Coeff Band %d */",
+ Comma(band), Newline(band, " "), band);
+ pt = 0;
+ do {
+ unsigned int branch_ct[ENTROPY_NODES][2];
+ unsigned int coef_counts[MAX_ENTROPY_TOKENS + 1];
+ vp9_prob coef_probs[ENTROPY_NODES];
+
+ if (pt >= 3 && band == 0)
+ break;
+ for (t = 0; t < MAX_ENTROPY_TOKENS + 1; ++t)
+ coef_counts[t] = context_counters[type][ref][band][pt][t];
+ vp9_tree_probs_from_distribution(vp9_coef_tree, coef_probs,
+ branch_ct, coef_counts, 0);
+ branch_ct[0][1] = coef_counts[MAX_ENTROPY_TOKENS] - branch_ct[0][0];
+ coef_probs[0] = get_binary_prob(branch_ct[0][0], branch_ct[0][1]);
+ fprintf(f, "%s\n {", Comma(pt));
+
+ t = 0;
+ do {
+ fprintf(f, "%s %3d", Comma(t), coef_probs[t]);
+ } while (++t < ENTROPY_NODES);
+
+ fprintf(f, " }");
+ } while (++pt < PREV_COEF_CONTEXTS);
+ fprintf(f, "\n }");
+ } while (++band < COEF_BANDS);
+ fprintf(f, "\n }");
+ } while (++ref < REF_TYPES);
+ fprintf(f, "\n }");
+ } while (++type < block_types);
+ fprintf(f, "\n};\n");
+}
+
+void print_context_counters() {
+ FILE *f = fopen("vp9_context.c", "w");
+
+ fprintf(f, "#include \"vp9_entropy.h\"\n");
+ fprintf(f, "\n/* *** GENERATED FILE: DO NOT EDIT *** */\n\n");
+
+ /* print counts */
+ print_counter(f, context_counters[TX_4X4], BLOCK_TYPES,
+ "vp9_default_coef_counts_4x4[BLOCK_TYPES]");
+ print_counter(f, context_counters[TX_8X8], BLOCK_TYPES,
+ "vp9_default_coef_counts_8x8[BLOCK_TYPES]");
+ print_counter(f, context_counters[TX_16X16], BLOCK_TYPES,
+ "vp9_default_coef_counts_16x16[BLOCK_TYPES]");
+ print_counter(f, context_counters[TX_32X32], BLOCK_TYPES,
+ "vp9_default_coef_counts_32x32[BLOCK_TYPES]");
+
+ /* print coefficient probabilities */
+ print_probs(f, context_counters[TX_4X4], BLOCK_TYPES,
+ "default_coef_probs_4x4[BLOCK_TYPES]");
+ print_probs(f, context_counters[TX_8X8], BLOCK_TYPES,
+ "default_coef_probs_8x8[BLOCK_TYPES]");
+ print_probs(f, context_counters[TX_16X16], BLOCK_TYPES,
+ "default_coef_probs_16x16[BLOCK_TYPES]");
+ print_probs(f, context_counters[TX_32X32], BLOCK_TYPES,
+ "default_coef_probs_32x32[BLOCK_TYPES]");
+
+ fclose(f);
+
+ f = fopen("context.bin", "wb");
+ fwrite(context_counters, sizeof(context_counters), 1, f);
+ fclose(f);
+}
+#endif
+
+void vp9_tokenize_initialize() {
+ fill_value_tokens();
+}
diff --git a/libvpx/vp9/encoder/vp9_tokenize.h b/libvpx/vp9/encoder/vp9_tokenize.h
new file mode 100644
index 0000000..b78e100
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_tokenize.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_TOKENIZE_H_
+#define VP9_ENCODER_VP9_TOKENIZE_H_
+
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/encoder/vp9_block.h"
+
+void vp9_tokenize_initialize();
+
+typedef struct {
+ int16_t token;
+ int16_t extra;
+} TOKENVALUE;
+
+typedef struct {
+ const vp9_prob *context_tree;
+ int16_t extra;
+ uint8_t token;
+ uint8_t skip_eob_node;
+} TOKENEXTRA;
+
+typedef int64_t vp9_coeff_accum[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [MAX_ENTROPY_TOKENS + 1];
+
+int vp9_sb_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE bsize);
+int vp9_is_skippable_in_plane(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+ int plane);
+struct VP9_COMP;
+
+void vp9_tokenize_sb(struct VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
+ BLOCK_SIZE bsize);
+
+#ifdef ENTROPY_STATS
+void init_context_counters();
+void print_context_counters();
+
+extern vp9_coeff_accum context_counters[TX_SIZES][BLOCK_TYPES];
+#endif
+
+extern const int *vp9_dct_value_cost_ptr;
+/* TODO: The Token field should be broken out into a separate char array to
+ * improve cache locality, since it's needed for costing when the rest of the
+ * fields are not.
+ */
+extern const TOKENVALUE *vp9_dct_value_tokens_ptr;
+
+#endif // VP9_ENCODER_VP9_TOKENIZE_H_
diff --git a/libvpx/vp9/encoder/vp9_treewriter.c b/libvpx/vp9/encoder/vp9_treewriter.c
new file mode 100644
index 0000000..e4aed53
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_treewriter.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/encoder/vp9_treewriter.h"
+
+static void cost(int *costs, vp9_tree tree, const vp9_prob *probs,
+ int i, int c) {
+ const vp9_prob prob = probs[i / 2];
+ int b;
+
+ for (b = 0; b <= 1; ++b) {
+ const int cc = c + vp9_cost_bit(prob, b);
+ const vp9_tree_index ii = tree[i + b];
+
+ if (ii <= 0)
+ costs[-ii] = cc;
+ else
+ cost(costs, tree, probs, ii, cc);
+ }
+}
+
+void vp9_cost_tokens(int *costs, const vp9_prob *probs, vp9_tree tree) {
+ cost(costs, tree, probs, 0, 0);
+}
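+
+/* Minimal sketch (hypothetical two-symbol alphabet): with
+     static const vp9_tree_index binary_tree[2] = { -0, -1 };
+   vp9_cost_tokens(costs, probs, binary_tree) fills
+     costs[0] = vp9_cost_bit(probs[0], 0);
+     costs[1] = vp9_cost_bit(probs[0], 1);
+   since nonpositive tree entries are negated leaf (token) indices and
+   positive entries index further into the tree. */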
+
+void vp9_cost_tokens_skip(int *costs, const vp9_prob *probs, vp9_tree tree) {
+ assert(tree[0] <= 0 && tree[1] > 0);
+
+ costs[-tree[0]] = vp9_cost_bit(probs[0], 0);
+ cost(costs, tree, probs, 2, 0);
+}
diff --git a/libvpx/vp9/encoder/vp9_treewriter.h b/libvpx/vp9/encoder/vp9_treewriter.h
new file mode 100644
index 0000000..eeda5cd
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_treewriter.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_TREEWRITER_H_
+#define VP9_ENCODER_VP9_TREEWRITER_H_
+
+/* Trees map alphabets into Huffman-like codes suitable for an arithmetic
+ bit coder. Timothy S Murphy 11 October 2004 */
+
+#include "vp9/common/vp9_treecoder.h"
+
+#include "vp9/encoder/vp9_boolhuff.h" /* for now */
+
+
+#define vp9_write_prob(w, v) vp9_write_literal((w), (v), 8)
+
+/* Approximate length of an encoded bool in 256ths of a bit at given prob */
+
+#define vp9_cost_zero(x) (vp9_prob_cost[x])
+#define vp9_cost_one(x) vp9_cost_zero(vp9_complement(x))
+
+#define vp9_cost_bit(x, b) vp9_cost_zero((b) ? vp9_complement(x) : (x))
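+
+/* Illustrative numbers, assuming the usual vp9_prob_cost[] table (which
+   approximates -256 * log2(p / 256)): a fair bit (p == 128) costs
+   vp9_cost_zero(128) == 256, i.e. exactly one bit, while for a strongly
+   biased probability the likely branch costs far less than 256 and the
+   unlikely branch, costed via vp9_complement(), far more. */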
+
+/* VP8BC version is scaled by 2^20 rather than 2^8; see bool_coder.h */
+
+
+/* cost_branch256() returns the total cost in 256ths of a bit for the
+ observed counts; cost_branch() scales that down to whole bits. */
+static INLINE unsigned int cost_branch256(const unsigned int ct[2],
+ vp9_prob p) {
+ return ct[0] * vp9_cost_zero(p) + ct[1] * vp9_cost_one(p);
+}
+
+static INLINE unsigned int cost_branch(const unsigned int ct[2],
+ vp9_prob p) {
+ return cost_branch256(ct, p) >> 8;
+}
+
+
+static INLINE void treed_write(vp9_writer *w,
+ vp9_tree tree, const vp9_prob *probs,
+ int bits, int len) {
+ vp9_tree_index i = 0;
+
+ do {
+ const int bit = (bits >> --len) & 1;
+ vp9_write(w, bit, probs[i >> 1]);
+ i = tree[i + bit];
+ } while (len);
+}
+
+static INLINE void write_token(vp9_writer *w, vp9_tree tree,
+ const vp9_prob *probs,
+ const struct vp9_token *token) {
+ treed_write(w, tree, probs, token->value, token->len);
+}
+
+static INLINE int treed_cost(vp9_tree tree, const vp9_prob *probs,
+ int bits, int len) {
+ int cost = 0;
+ vp9_tree_index i = 0;
+
+ do {
+ const int bit = (bits >> --len) & 1;
+ cost += vp9_cost_bit(probs[i >> 1], bit);
+ i = tree[i + bit];
+ } while (len);
+
+ return cost;
+}
+
+static INLINE int cost_token(vp9_tree tree, const vp9_prob *probs,
+ const struct vp9_token *token) {
+ return treed_cost(tree, probs, token->value, token->len);
+}
+
+void vp9_cost_tokens(int *costs, const vp9_prob *probs, vp9_tree tree);
+void vp9_cost_tokens_skip(int *costs, const vp9_prob *probs, vp9_tree tree);
+
+#endif // VP9_ENCODER_VP9_TREEWRITER_H_
diff --git a/libvpx/vp9/encoder/vp9_variance.h b/libvpx/vp9/encoder/vp9_variance.h
new file mode 100644
index 0000000..6e686d6
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_variance.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_VARIANCE_H_
+#define VP9_ENCODER_VP9_VARIANCE_H_
+
+#include "vpx/vpx_integer.h"
+// #include "./vpx_config.h"
+
+typedef unsigned int (*vp9_sad_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad);
+
+typedef unsigned int (*vp9_sad_avg_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ const uint8_t *second_pred,
+ unsigned int max_sad);
+
+typedef void (*vp9_sad_multi_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array);
+
+typedef void (*vp9_sad_multi1_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array);
+
+typedef void (*vp9_sad_multi_d_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride, unsigned int *sad_array);
+
+typedef unsigned int (*vp9_variance_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sse);
+
+typedef unsigned int (*vp9_subpixvariance_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ int xoffset,
+ int yoffset,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sse);
+
+typedef unsigned int (*vp9_subp_avg_variance_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ int xoffset,
+ int yoffset,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sse,
+ const uint8_t *second_pred);
+
+typedef void (*vp9_ssimpf_fn_t)(uint8_t *s, int sp, uint8_t *r,
+ int rp, unsigned long *sum_s,
+ unsigned long *sum_r, unsigned long *sum_sq_s,
+ unsigned long *sum_sq_r,
+ unsigned long *sum_sxr);
+
+typedef unsigned int (*vp9_getmbss_fn_t)(const int16_t *);
+
+typedef unsigned int (*vp9_get16x16prederror_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride);
+
+typedef struct vp9_variance_vtable {
+ vp9_sad_fn_t sdf;
+ vp9_sad_avg_fn_t sdaf;
+ vp9_variance_fn_t vf;
+ vp9_subpixvariance_fn_t svf;
+ vp9_subp_avg_variance_fn_t svaf;
+ vp9_variance_fn_t svf_halfpix_h;
+ vp9_variance_fn_t svf_halfpix_v;
+ vp9_variance_fn_t svf_halfpix_hv;
+ vp9_sad_multi_fn_t sdx3f;
+ vp9_sad_multi1_fn_t sdx8f;
+ vp9_sad_multi_d_fn_t sdx4df;
+} vp9_variance_fn_ptr_t;
+
+static void comp_avg_pred(uint8_t *comp_pred, const uint8_t *pred, int width,
+ int height, const uint8_t *ref, int ref_stride) {
+ int i, j;
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
+ int tmp;
+ tmp = pred[j] + ref[j];
+ comp_pred[j] = (tmp + 1) >> 1;
+ }
+ comp_pred += width;
+ pred += width;
+ ref += ref_stride;
+ }
+}
+#endif // VP9_ENCODER_VP9_VARIANCE_H_
diff --git a/libvpx/vp9/encoder/vp9_variance_c.c b/libvpx/vp9/encoder/vp9_variance_c.c
new file mode 100644
index 0000000..155ba8a
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_variance_c.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/common/vp9_filter.h"
+#include "vp9/common/vp9_subpelvar.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+#include "./vp9_rtcd.h"
+
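+// Sum of squares over the 256 coefficients of a 16x16 block.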
+unsigned int vp9_get_mb_ss_c(const int16_t *src_ptr) {
+ unsigned int i, sum = 0;
+
+ for (i = 0; i < 256; i++) {
+ sum += (src_ptr[i] * src_ptr[i]);
+ }
+
+ return sum;
+}
+
+unsigned int vp9_variance64x32_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 64, 32, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 11));
+}
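+
+/* All the vp9_varianceWxH_c() functions here follow the same identity:
+   with sse = sum((s - r)^2) and avg = sum(s - r) over the W*H block,
+     variance = sse - avg^2 / (W * H),
+   so the shift is log2(W * H): 11 for 64x32 above, 10 for 32x32, 8 for
+   16x16, and so on. */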
+
+unsigned int vp9_sub_pixel_variance64x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[65 * 64]; // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 64, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 32, 64, vfilter);
+
+ return vp9_variance64x32(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance64x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[65 * 64]; // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 64 * 64); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 64, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 32, 64, vfilter);
+ comp_avg_pred(temp3, second_pred, 64, 32, temp2, 64);
+ return vp9_variance64x32(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_variance32x64_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 32, 64, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 11));
+}
+
+unsigned int vp9_sub_pixel_variance32x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[65 * 64]; // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 65, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 64, 32, vfilter);
+
+ return vp9_variance32x64(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance32x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[65 * 64]; // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 32 * 64); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 65, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 64, 32, vfilter);
+ comp_avg_pred(temp3, second_pred, 32, 64, temp2, 32);
+ return vp9_variance32x64(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_variance32x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 32, 16, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_sub_pixel_variance32x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[33 * 32]; // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 16, 32, vfilter);
+
+ return vp9_variance32x16(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance32x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[33 * 32]; // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 32 * 16); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 16, 32, vfilter);
+ comp_avg_pred(temp3, second_pred, 32, 16, temp2, 32);
+ return vp9_variance32x16(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_variance16x32_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 32, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_sub_pixel_variance16x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[33 * 32]; // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 32, 16, vfilter);
+
+ return vp9_variance16x32(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance16x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[33 * 32]; // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 16 * 32); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 32, 16, vfilter);
+ comp_avg_pred(temp3, second_pred, 16, 32, temp2, 16);
+ return vp9_variance16x32(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_variance64x64_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 64, 64, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 12));
+}
+
+unsigned int vp9_variance32x32_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 32, 32, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 10));
+}
+
+unsigned int vp9_variance16x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 8));
+}
+
+unsigned int vp9_variance8x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+}
+
+unsigned int vp9_variance16x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+}
+
+void vp9_get_sse_sum_8x8_c(const uint8_t *src_ptr, int source_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
+ variance(src_ptr, source_stride, ref_ptr, ref_stride, 8, 8, sse, sum);
+}
+
+unsigned int vp9_variance8x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 6));
+}
+
+unsigned int vp9_variance8x4_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 4, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 5));
+}
+
+unsigned int vp9_variance4x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 8, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 5));
+}
+
+unsigned int vp9_variance4x4_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 4));
+}
+
+
+unsigned int vp9_mse16x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
+ *sse = var;
+ return var;
+}
+
+unsigned int vp9_mse16x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
+ *sse = var;
+ return var;
+}
+
+unsigned int vp9_mse8x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
+ *sse = var;
+ return var;
+}
+
+unsigned int vp9_mse8x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
+ *sse = var;
+ return var;
+}
+
+
+unsigned int vp9_sub_pixel_variance4x4_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+ uint16_t fdata3[5 * 4]; // Temp data buffer used in filtering
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ // First filter 1d horizontally
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 5, 4, hfilter);
+
+ // Now filter vertically
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4, vfilter);
+
+ return vp9_variance4x4(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance4x4_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 4 * 4); // compound pred buffer
+ uint16_t fdata3[5 * 4]; // Temp data buffer used in filtering
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ // First filter 1d horizontally
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 5, 4, hfilter);
+
+ // Now filter vertically
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4, vfilter);
+ comp_avg_pred(temp3, second_pred, 4, 4, temp2, 4);
+ return vp9_variance4x4(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance8x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[9 * 8]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 8, 8, vfilter);
+
+ return vp9_variance8x8(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance8x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[9 * 8]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 8 * 8); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 8, 8, vfilter);
+ comp_avg_pred(temp3, second_pred, 8, 8, temp2, 8);
+ return vp9_variance8x8(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance16x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[17 * 16]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 16, 16, vfilter);
+
+ return vp9_variance16x16(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance16x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[17 * 16];
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 16 * 16); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 16, 16, vfilter);
+
+ comp_avg_pred(temp3, second_pred, 16, 16, temp2, 16);
+ return vp9_variance16x16(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance64x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[65 * 64]; // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 65, 64, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 64, 64, vfilter);
+
+ return vp9_variance64x64(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance64x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[65 * 64]; // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 64 * 64); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 65, 64, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 64, 64, vfilter);
+ comp_avg_pred(temp3, second_pred, 64, 64, temp2, 64);
+ return vp9_variance64x64(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[33 * 32]; // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 32, 32, vfilter);
+
+ return vp9_variance32x32(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance32x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[33 * 32]; // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 32 * 32); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 32, 32, vfilter);
+ comp_avg_pred(temp3, second_pred, 32, 32, temp2, 32);
+ return vp9_variance32x32(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_variance_halfpixvar16x16_h_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 0,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar32x32_h_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 0,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar64x64_h_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 0,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar16x16_v_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar32x32_v_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 0, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar64x64_v_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 0, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar16x16_hv_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar32x32_hv_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar64x64_hv_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_sub_pixel_mse16x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ vp9_sub_pixel_variance16x16_c(src_ptr, src_pixels_per_line,
+ xoffset, yoffset, dst_ptr,
+ dst_pixels_per_line, sse);
+ return *sse;
+}
+
+unsigned int vp9_sub_pixel_mse32x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ vp9_sub_pixel_variance32x32_c(src_ptr, src_pixels_per_line,
+ xoffset, yoffset, dst_ptr,
+ dst_pixels_per_line, sse);
+ return *sse;
+}
+
+unsigned int vp9_sub_pixel_mse64x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ vp9_sub_pixel_variance64x64_c(src_ptr, src_pixels_per_line,
+ xoffset, yoffset, dst_ptr,
+ dst_pixels_per_line, sse);
+ return *sse;
+}
+
+unsigned int vp9_sub_pixel_variance16x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[16 * 9]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 8, 16, vfilter);
+
+ return vp9_variance16x8(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance16x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[16 * 9]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 16 * 8); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 8, 16, vfilter);
+ comp_avg_pred(temp3, second_pred, 16, 8, temp2, 16);
+ return vp9_variance16x8(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance8x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[9 * 16]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 16, 8, vfilter);
+
+ return vp9_variance8x16(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance8x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[9 * 16]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 8 * 16); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 16, 8, vfilter);
+ comp_avg_pred(temp3, second_pred, 8, 16, temp2, 8);
+ return vp9_variance8x16(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance8x4_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[8 * 5]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 5, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 4, 8, vfilter);
+
+ return vp9_variance8x4(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance8x4_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[8 * 5]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 8 * 4); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 5, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 4, 8, vfilter);
+ comp_avg_pred(temp3, second_pred, 8, 4, temp2, 8);
+ return vp9_variance8x4(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance4x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint16_t fdata3[5 * 8]; // Temp data buffer used in filtering
+ // FIXME(jingning,rbultje): this temp2 buffer probably doesn't need to be
+ // this big; the same issue appears in all the other block-size settings.
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 4, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 8, 4, vfilter);
+
+ return vp9_variance4x8(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance4x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[5 * 8]; // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 4 * 8); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 4, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 8, 4, vfilter);
+ comp_avg_pred(temp3, second_pred, 4, 8, temp2, 4);
+ return vp9_variance4x8(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
+}
diff --git a/libvpx/vp9/encoder/vp9_write_bit_buffer.h b/libvpx/vp9/encoder/vp9_write_bit_buffer.h
new file mode 100644
index 0000000..6f91cfc
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_write_bit_buffer.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_WRITE_BIT_BUFFER_H_
+#define VP9_ENCODER_VP9_WRITE_BIT_BUFFER_H_
+
+#include <limits.h>
+
+#include "vpx/vpx_integer.h"
+
+struct vp9_write_bit_buffer {
+ uint8_t *bit_buffer;
+ size_t bit_offset;
+};
+
+static size_t vp9_rb_bytes_written(struct vp9_write_bit_buffer *wb) {
+ return wb->bit_offset / CHAR_BIT + (wb->bit_offset % CHAR_BIT > 0);
+}
+
+static void vp9_wb_write_bit(struct vp9_write_bit_buffer *wb, int bit) {
+ const int off = wb->bit_offset;
+ const int p = off / CHAR_BIT;
+ const int q = CHAR_BIT - 1 - off % CHAR_BIT;
+ if (q == CHAR_BIT - 1) {
+ wb->bit_buffer[p] = bit << q;
+ } else {
+ wb->bit_buffer[p] &= ~(1 << q);
+ wb->bit_buffer[p] |= bit << q;
+ }
+ wb->bit_offset = off + 1;
+}
+
+static void vp9_wb_write_literal(struct vp9_write_bit_buffer *wb,
+ int data, int bits) {
+ int bit;
+ for (bit = bits - 1; bit >= 0; bit--)
+ vp9_wb_write_bit(wb, (data >> bit) & 1);
+}
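+
+/* Example: starting from bit_offset 0, vp9_wb_write_literal(wb, 5, 3)
+   writes the bits 1, 0, 1 (MSB first), leaving bit_buffer[0] == 0xA0 and
+   bit_offset == 3; vp9_rb_bytes_written() then reports 1. */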
+
+
+#endif // VP9_ENCODER_VP9_WRITE_BIT_BUFFER_H_
diff --git a/libvpx/vp9/encoder/x86/vp9_dct32x32_sse2.c b/libvpx/vp9/encoder/x86/vp9_dct32x32_sse2.c
new file mode 100644
index 0000000..95ae266
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_dct32x32_sse2.c
@@ -0,0 +1,2650 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h> // SSE2
+#include "vp9/common/vp9_idct.h" // for cospi constants
+#include "vpx_ports/mem.h"
+
+#if FDCT32x32_HIGH_PRECISION
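+// k_madd_epi32() acts as a 32-bit analogue of _mm_madd_epi16():
+// _mm_mul_epu32 multiplies lanes 0 and 2, the 64-bit shifts move lanes 1
+// and 3 into those positions for a second multiply, and the add yields
+// a[0]*b[0] + a[1]*b[1] in the low half and a[2]*b[2] + a[3]*b[3] in the
+// high half (lanes treated as unsigned).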
+static INLINE __m128i k_madd_epi32(__m128i a, __m128i b) {
+ __m128i buf0, buf1;
+ buf0 = _mm_mul_epu32(a, b);
+ a = _mm_srli_epi64(a, 32);
+ b = _mm_srli_epi64(b, 32);
+ buf1 = _mm_mul_epu32(a, b);
+ return _mm_add_epi64(buf0, buf1);
+}
+
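+// k_packs_epi64() narrows two vectors of 64-bit values back to a single
+// vector of four 32-bit values by keeping the low dword of each lane.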
+static INLINE __m128i k_packs_epi64(__m128i a, __m128i b) {
+ __m128i buf0 = _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 2, 0));
+ __m128i buf1 = _mm_shuffle_epi32(b, _MM_SHUFFLE(0, 0, 2, 0));
+ return _mm_unpacklo_epi64(buf0, buf1);
+}
+
+static INLINE __m128i k_cvtlo_epi16(__m128i a, __m128i mask16, __m128i kZero) {
+ // convert the lower 4 signed 16-bit integers into 4 signed 32-bit integers
+ __m128i sign_bit = _mm_and_si128(a, mask16);
+ __m128i b = _mm_unpacklo_epi16(a, kZero);
+ sign_bit = _mm_cmplt_epi16(sign_bit, kZero);
+ sign_bit = _mm_unpacklo_epi16(kZero, sign_bit);
+ return _mm_or_si128(sign_bit, b);
+}
+
+static INLINE __m128i k_cvthi_epi16(__m128i a, __m128i mask16, __m128i kZero) {
+ // convert the upper 4 signed 16-bit integers into 4 signed 32-bit integers
+ __m128i sign_bit = _mm_and_si128(a, mask16);
+ __m128i b = _mm_unpackhi_epi16(a, kZero);
+ sign_bit = _mm_cmplt_epi16(sign_bit, kZero);
+ sign_bit = _mm_unpackhi_epi16(kZero, sign_bit);
+ return _mm_or_si128(sign_bit, b);
+}
+#endif
+
+void FDCT32x32_2D(int16_t *input,
+ int16_t *output_org, int pitch) {
+ // Calculate pre-multiplied strides
+ const int str1 = pitch >> 1;
+ const int str2 = pitch;
+ const int str3 = pitch + str1;
+ // We need an intermediate buffer between passes.
+ DECLARE_ALIGNED(16, int16_t, intermediate[32 * 32]);
+ // Constants
+ // When used, in one case all eight 16-bit lanes hold the same value; in
+ // all the others it is a pair of values repeated four times, built by
+ // constructing the 32-bit constant corresponding to that pair.
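+ // (pair_set_epi16(a, b) is assumed here to replicate the 16-bit pair
+ // {a, b} across all four 32-bit lanes, so that _mm_madd_epi16 computes
+ // x * a + y * b in each 32-bit lane for interleaved inputs x, y.)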
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(+cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64);
+ const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64);
+ const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
+ const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64, cospi_2_64);
+ const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64, cospi_18_64);
+ const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64, cospi_10_64);
+ const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64, cospi_26_64);
+ const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
+ const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
+ const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
+ const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
+ const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64, cospi_1_64);
+ const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64, cospi_17_64);
+ const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64, cospi_9_64);
+ const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64, cospi_25_64);
+ const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64, cospi_7_64);
+ const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64, cospi_23_64);
+ const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64, cospi_15_64);
+ const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64, cospi_31_64);
+ const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64, cospi_5_64);
+ const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64, cospi_21_64);
+ const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64, cospi_13_64);
+ const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64, cospi_29_64);
+ const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64, cospi_3_64);
+ const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64, cospi_19_64);
+ const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64, cospi_11_64);
+ const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64, cospi_27_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i kZero = _mm_set1_epi16(0);
+ const __m128i kOne = _mm_set1_epi16(1);
+ // Do the two transform/transpose passes
+ int pass;
+ for (pass = 0; pass < 2; ++pass) {
+ // We process eight columns (transposed rows in second pass) at a time.
+ int column_start;
+ for (column_start = 0; column_start < 32; column_start += 8) {
+ __m128i step1[32];
+ __m128i step2[32];
+ __m128i step3[32];
+ __m128i out[32];
+ // Stage 1
+ // Note: even though all the loads below are aligned, using the aligned
+ // intrinsic makes the code slightly slower.
+ if (0 == pass) {
+ int16_t *in = &input[column_start];
+ // step1[i] = (in[ 0 * stride] + in[(32 - 1) * stride]) << 2;
+ // Note: the next four blocks could be in a loop. That would help the
+ // instruction cache but is actually slower.
+ {
+ int16_t *ina = in + 0 * str1;
+ int16_t *inb = in + 31 * str1;
+ __m128i *step1a = &step1[ 0];
+ __m128i *step1b = &step1[31];
+ const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+ const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+ const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+ const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+ const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+ const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+ const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+ const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+ step1a[ 0] = _mm_add_epi16(ina0, inb0);
+ step1a[ 1] = _mm_add_epi16(ina1, inb1);
+ step1a[ 2] = _mm_add_epi16(ina2, inb2);
+ step1a[ 3] = _mm_add_epi16(ina3, inb3);
+ step1b[-3] = _mm_sub_epi16(ina3, inb3);
+ step1b[-2] = _mm_sub_epi16(ina2, inb2);
+ step1b[-1] = _mm_sub_epi16(ina1, inb1);
+ step1b[-0] = _mm_sub_epi16(ina0, inb0);
+ step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
+ step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
+ step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
+ step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+ step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
+ step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
+ step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
+ step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
+ }
+ {
+ int16_t *ina = in + 4 * str1;
+ int16_t *inb = in + 27 * str1;
+ __m128i *step1a = &step1[ 4];
+ __m128i *step1b = &step1[27];
+ const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+ const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+ const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+ const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+ const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+ const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+ const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+ const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+ step1a[ 0] = _mm_add_epi16(ina0, inb0);
+ step1a[ 1] = _mm_add_epi16(ina1, inb1);
+ step1a[ 2] = _mm_add_epi16(ina2, inb2);
+ step1a[ 3] = _mm_add_epi16(ina3, inb3);
+ step1b[-3] = _mm_sub_epi16(ina3, inb3);
+ step1b[-2] = _mm_sub_epi16(ina2, inb2);
+ step1b[-1] = _mm_sub_epi16(ina1, inb1);
+ step1b[-0] = _mm_sub_epi16(ina0, inb0);
+ step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
+ step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
+ step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
+ step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+ step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
+ step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
+ step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
+ step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
+ }
+ {
+ int16_t *ina = in + 8 * str1;
+ int16_t *inb = in + 23 * str1;
+ __m128i *step1a = &step1[ 8];
+ __m128i *step1b = &step1[23];
+ const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+ const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+ const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+ const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+ const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+ const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+ const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+ const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+ step1a[ 0] = _mm_add_epi16(ina0, inb0);
+ step1a[ 1] = _mm_add_epi16(ina1, inb1);
+ step1a[ 2] = _mm_add_epi16(ina2, inb2);
+ step1a[ 3] = _mm_add_epi16(ina3, inb3);
+ step1b[-3] = _mm_sub_epi16(ina3, inb3);
+ step1b[-2] = _mm_sub_epi16(ina2, inb2);
+ step1b[-1] = _mm_sub_epi16(ina1, inb1);
+ step1b[-0] = _mm_sub_epi16(ina0, inb0);
+ step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
+ step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
+ step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
+ step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+ step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
+ step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
+ step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
+ step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
+ }
+ {
+ int16_t *ina = in + 12 * str1;
+ int16_t *inb = in + 19 * str1;
+ __m128i *step1a = &step1[12];
+ __m128i *step1b = &step1[19];
+ const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+ const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
+ const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
+ const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
+ const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
+ const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
+ const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
+ const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+ step1a[ 0] = _mm_add_epi16(ina0, inb0);
+ step1a[ 1] = _mm_add_epi16(ina1, inb1);
+ step1a[ 2] = _mm_add_epi16(ina2, inb2);
+ step1a[ 3] = _mm_add_epi16(ina3, inb3);
+ step1b[-3] = _mm_sub_epi16(ina3, inb3);
+ step1b[-2] = _mm_sub_epi16(ina2, inb2);
+ step1b[-1] = _mm_sub_epi16(ina1, inb1);
+ step1b[-0] = _mm_sub_epi16(ina0, inb0);
+ step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
+ step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
+ step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
+ step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+ step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
+ step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
+ step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
+ step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
+ }
+ } else {
+ int16_t *in = &intermediate[column_start];
+ // step1[i] = in[ 0 * 32] + in[(32 - 1) * 32];
+ // Note: using the same approach as above to share a common offset is
+ // counter-productive, as all the offsets can be calculated at compile
+ // time.
+ // Note: the next four blocks could be in a loop. That would help the
+ // instruction cache but is actually slower.
+ {
+ __m128i in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 32));
+ __m128i in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 32));
+ __m128i in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 32));
+ __m128i in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 32));
+ __m128i in28 = _mm_loadu_si128((const __m128i *)(in + 28 * 32));
+ __m128i in29 = _mm_loadu_si128((const __m128i *)(in + 29 * 32));
+ __m128i in30 = _mm_loadu_si128((const __m128i *)(in + 30 * 32));
+ __m128i in31 = _mm_loadu_si128((const __m128i *)(in + 31 * 32));
+ step1[ 0] = _mm_add_epi16(in00, in31);
+ step1[ 1] = _mm_add_epi16(in01, in30);
+ step1[ 2] = _mm_add_epi16(in02, in29);
+ step1[ 3] = _mm_add_epi16(in03, in28);
+ step1[28] = _mm_sub_epi16(in03, in28);
+ step1[29] = _mm_sub_epi16(in02, in29);
+ step1[30] = _mm_sub_epi16(in01, in30);
+ step1[31] = _mm_sub_epi16(in00, in31);
+ }
+ {
+ __m128i in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 32));
+ __m128i in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 32));
+ __m128i in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 32));
+ __m128i in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 32));
+ __m128i in24 = _mm_loadu_si128((const __m128i *)(in + 24 * 32));
+ __m128i in25 = _mm_loadu_si128((const __m128i *)(in + 25 * 32));
+ __m128i in26 = _mm_loadu_si128((const __m128i *)(in + 26 * 32));
+ __m128i in27 = _mm_loadu_si128((const __m128i *)(in + 27 * 32));
+ step1[ 4] = _mm_add_epi16(in04, in27);
+ step1[ 5] = _mm_add_epi16(in05, in26);
+ step1[ 6] = _mm_add_epi16(in06, in25);
+ step1[ 7] = _mm_add_epi16(in07, in24);
+ step1[24] = _mm_sub_epi16(in07, in24);
+ step1[25] = _mm_sub_epi16(in06, in25);
+ step1[26] = _mm_sub_epi16(in05, in26);
+ step1[27] = _mm_sub_epi16(in04, in27);
+ }
+ {
+ __m128i in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 32));
+ __m128i in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 32));
+ __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 32));
+ __m128i in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 32));
+ __m128i in20 = _mm_loadu_si128((const __m128i *)(in + 20 * 32));
+ __m128i in21 = _mm_loadu_si128((const __m128i *)(in + 21 * 32));
+ __m128i in22 = _mm_loadu_si128((const __m128i *)(in + 22 * 32));
+ __m128i in23 = _mm_loadu_si128((const __m128i *)(in + 23 * 32));
+ step1[ 8] = _mm_add_epi16(in08, in23);
+ step1[ 9] = _mm_add_epi16(in09, in22);
+ step1[10] = _mm_add_epi16(in10, in21);
+ step1[11] = _mm_add_epi16(in11, in20);
+ step1[20] = _mm_sub_epi16(in11, in20);
+ step1[21] = _mm_sub_epi16(in10, in21);
+ step1[22] = _mm_sub_epi16(in09, in22);
+ step1[23] = _mm_sub_epi16(in08, in23);
+ }
+ {
+ __m128i in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 32));
+ __m128i in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 32));
+ __m128i in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 32));
+ __m128i in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 32));
+ __m128i in16 = _mm_loadu_si128((const __m128i *)(in + 16 * 32));
+ __m128i in17 = _mm_loadu_si128((const __m128i *)(in + 17 * 32));
+ __m128i in18 = _mm_loadu_si128((const __m128i *)(in + 18 * 32));
+ __m128i in19 = _mm_loadu_si128((const __m128i *)(in + 19 * 32));
+ step1[12] = _mm_add_epi16(in12, in19);
+ step1[13] = _mm_add_epi16(in13, in18);
+ step1[14] = _mm_add_epi16(in14, in17);
+ step1[15] = _mm_add_epi16(in15, in16);
+ step1[16] = _mm_sub_epi16(in15, in16);
+ step1[17] = _mm_sub_epi16(in14, in17);
+ step1[18] = _mm_sub_epi16(in13, in18);
+ step1[19] = _mm_sub_epi16(in12, in19);
+ }
+ }
+ // Stage 2
+ {
+ step2[ 0] = _mm_add_epi16(step1[0], step1[15]);
+ step2[ 1] = _mm_add_epi16(step1[1], step1[14]);
+ step2[ 2] = _mm_add_epi16(step1[2], step1[13]);
+ step2[ 3] = _mm_add_epi16(step1[3], step1[12]);
+ step2[ 4] = _mm_add_epi16(step1[4], step1[11]);
+ step2[ 5] = _mm_add_epi16(step1[5], step1[10]);
+ step2[ 6] = _mm_add_epi16(step1[6], step1[ 9]);
+ step2[ 7] = _mm_add_epi16(step1[7], step1[ 8]);
+ step2[ 8] = _mm_sub_epi16(step1[7], step1[ 8]);
+ step2[ 9] = _mm_sub_epi16(step1[6], step1[ 9]);
+ step2[10] = _mm_sub_epi16(step1[5], step1[10]);
+ step2[11] = _mm_sub_epi16(step1[4], step1[11]);
+ step2[12] = _mm_sub_epi16(step1[3], step1[12]);
+ step2[13] = _mm_sub_epi16(step1[2], step1[13]);
+ step2[14] = _mm_sub_epi16(step1[1], step1[14]);
+ step2[15] = _mm_sub_epi16(step1[0], step1[15]);
+ }
+ {
+ const __m128i s2_20_0 = _mm_unpacklo_epi16(step1[27], step1[20]);
+ const __m128i s2_20_1 = _mm_unpackhi_epi16(step1[27], step1[20]);
+ const __m128i s2_21_0 = _mm_unpacklo_epi16(step1[26], step1[21]);
+ const __m128i s2_21_1 = _mm_unpackhi_epi16(step1[26], step1[21]);
+ const __m128i s2_22_0 = _mm_unpacklo_epi16(step1[25], step1[22]);
+ const __m128i s2_22_1 = _mm_unpackhi_epi16(step1[25], step1[22]);
+ const __m128i s2_23_0 = _mm_unpacklo_epi16(step1[24], step1[23]);
+ const __m128i s2_23_1 = _mm_unpackhi_epi16(step1[24], step1[23]);
+ const __m128i s2_20_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_m16);
+ const __m128i s2_20_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_m16);
+ const __m128i s2_21_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_m16);
+ const __m128i s2_21_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_m16);
+ const __m128i s2_22_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_m16);
+ const __m128i s2_22_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_m16);
+ const __m128i s2_23_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_m16);
+ const __m128i s2_23_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_m16);
+ const __m128i s2_24_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_p16);
+ const __m128i s2_24_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_p16);
+ const __m128i s2_25_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_p16);
+ const __m128i s2_25_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_p16);
+ const __m128i s2_26_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_p16);
+ const __m128i s2_26_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_p16);
+ const __m128i s2_27_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_p16);
+ const __m128i s2_27_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i s2_20_4 = _mm_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_20_5 = _mm_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_21_4 = _mm_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_21_5 = _mm_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_22_4 = _mm_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_22_5 = _mm_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_23_4 = _mm_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_23_5 = _mm_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_24_4 = _mm_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_24_5 = _mm_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_25_4 = _mm_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_25_5 = _mm_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_26_4 = _mm_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_26_5 = _mm_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_27_4 = _mm_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_27_5 = _mm_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_20_6 = _mm_srai_epi32(s2_20_4, DCT_CONST_BITS);
+ const __m128i s2_20_7 = _mm_srai_epi32(s2_20_5, DCT_CONST_BITS);
+ const __m128i s2_21_6 = _mm_srai_epi32(s2_21_4, DCT_CONST_BITS);
+ const __m128i s2_21_7 = _mm_srai_epi32(s2_21_5, DCT_CONST_BITS);
+ const __m128i s2_22_6 = _mm_srai_epi32(s2_22_4, DCT_CONST_BITS);
+ const __m128i s2_22_7 = _mm_srai_epi32(s2_22_5, DCT_CONST_BITS);
+ const __m128i s2_23_6 = _mm_srai_epi32(s2_23_4, DCT_CONST_BITS);
+ const __m128i s2_23_7 = _mm_srai_epi32(s2_23_5, DCT_CONST_BITS);
+ const __m128i s2_24_6 = _mm_srai_epi32(s2_24_4, DCT_CONST_BITS);
+ const __m128i s2_24_7 = _mm_srai_epi32(s2_24_5, DCT_CONST_BITS);
+ const __m128i s2_25_6 = _mm_srai_epi32(s2_25_4, DCT_CONST_BITS);
+ const __m128i s2_25_7 = _mm_srai_epi32(s2_25_5, DCT_CONST_BITS);
+ const __m128i s2_26_6 = _mm_srai_epi32(s2_26_4, DCT_CONST_BITS);
+ const __m128i s2_26_7 = _mm_srai_epi32(s2_26_5, DCT_CONST_BITS);
+ const __m128i s2_27_6 = _mm_srai_epi32(s2_27_4, DCT_CONST_BITS);
+ const __m128i s2_27_7 = _mm_srai_epi32(s2_27_5, DCT_CONST_BITS);
+ // Combine
+ step2[20] = _mm_packs_epi32(s2_20_6, s2_20_7);
+ step2[21] = _mm_packs_epi32(s2_21_6, s2_21_7);
+ step2[22] = _mm_packs_epi32(s2_22_6, s2_22_7);
+ step2[23] = _mm_packs_epi32(s2_23_6, s2_23_7);
+ step2[24] = _mm_packs_epi32(s2_24_6, s2_24_7);
+ step2[25] = _mm_packs_epi32(s2_25_6, s2_25_7);
+ step2[26] = _mm_packs_epi32(s2_26_6, s2_26_7);
+ step2[27] = _mm_packs_epi32(s2_27_6, s2_27_7);
+ }
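+ // For reference, dct_const_round_shift(x) in the scalar code is
+ //   (x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS,
+ // so each unpack/madd/add/srai/packs chain above is the vector form
+ // of, e.g. (sketch only):
+ //   step2[20] = dct_const_round_shift((step1[27] - step1[20]) * cospi_16_64);
+ //   step2[27] = dct_const_round_shift((step1[20] + step1[27]) * cospi_16_64);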
+
+#if !FDCT32x32_HIGH_PRECISION
+ // Scale the magnitude down by half so the intermediate values stay
+ // within the range of 16 bits.
+ if (1 == pass) {
+ __m128i s3_00_0 = _mm_cmplt_epi16(step2[ 0], kZero);
+ __m128i s3_01_0 = _mm_cmplt_epi16(step2[ 1], kZero);
+ __m128i s3_02_0 = _mm_cmplt_epi16(step2[ 2], kZero);
+ __m128i s3_03_0 = _mm_cmplt_epi16(step2[ 3], kZero);
+ __m128i s3_04_0 = _mm_cmplt_epi16(step2[ 4], kZero);
+ __m128i s3_05_0 = _mm_cmplt_epi16(step2[ 5], kZero);
+ __m128i s3_06_0 = _mm_cmplt_epi16(step2[ 6], kZero);
+ __m128i s3_07_0 = _mm_cmplt_epi16(step2[ 7], kZero);
+ __m128i s3_08_0 = _mm_cmplt_epi16(step2[ 8], kZero);
+ __m128i s3_09_0 = _mm_cmplt_epi16(step2[ 9], kZero);
+ __m128i s3_10_0 = _mm_cmplt_epi16(step2[10], kZero);
+ __m128i s3_11_0 = _mm_cmplt_epi16(step2[11], kZero);
+ __m128i s3_12_0 = _mm_cmplt_epi16(step2[12], kZero);
+ __m128i s3_13_0 = _mm_cmplt_epi16(step2[13], kZero);
+ __m128i s3_14_0 = _mm_cmplt_epi16(step2[14], kZero);
+ __m128i s3_15_0 = _mm_cmplt_epi16(step2[15], kZero);
+ __m128i s3_16_0 = _mm_cmplt_epi16(step1[16], kZero);
+ __m128i s3_17_0 = _mm_cmplt_epi16(step1[17], kZero);
+ __m128i s3_18_0 = _mm_cmplt_epi16(step1[18], kZero);
+ __m128i s3_19_0 = _mm_cmplt_epi16(step1[19], kZero);
+ __m128i s3_20_0 = _mm_cmplt_epi16(step2[20], kZero);
+ __m128i s3_21_0 = _mm_cmplt_epi16(step2[21], kZero);
+ __m128i s3_22_0 = _mm_cmplt_epi16(step2[22], kZero);
+ __m128i s3_23_0 = _mm_cmplt_epi16(step2[23], kZero);
+ __m128i s3_24_0 = _mm_cmplt_epi16(step2[24], kZero);
+ __m128i s3_25_0 = _mm_cmplt_epi16(step2[25], kZero);
+ __m128i s3_26_0 = _mm_cmplt_epi16(step2[26], kZero);
+ __m128i s3_27_0 = _mm_cmplt_epi16(step2[27], kZero);
+ __m128i s3_28_0 = _mm_cmplt_epi16(step1[28], kZero);
+ __m128i s3_29_0 = _mm_cmplt_epi16(step1[29], kZero);
+ __m128i s3_30_0 = _mm_cmplt_epi16(step1[30], kZero);
+ __m128i s3_31_0 = _mm_cmplt_epi16(step1[31], kZero);
+
+ step2[ 0] = _mm_sub_epi16(step2[ 0], s3_00_0);
+ step2[ 1] = _mm_sub_epi16(step2[ 1], s3_01_0);
+ step2[ 2] = _mm_sub_epi16(step2[ 2], s3_02_0);
+ step2[ 3] = _mm_sub_epi16(step2[ 3], s3_03_0);
+ step2[ 4] = _mm_sub_epi16(step2[ 4], s3_04_0);
+ step2[ 5] = _mm_sub_epi16(step2[ 5], s3_05_0);
+ step2[ 6] = _mm_sub_epi16(step2[ 6], s3_06_0);
+ step2[ 7] = _mm_sub_epi16(step2[ 7], s3_07_0);
+ step2[ 8] = _mm_sub_epi16(step2[ 8], s3_08_0);
+ step2[ 9] = _mm_sub_epi16(step2[ 9], s3_09_0);
+ step2[10] = _mm_sub_epi16(step2[10], s3_10_0);
+ step2[11] = _mm_sub_epi16(step2[11], s3_11_0);
+ step2[12] = _mm_sub_epi16(step2[12], s3_12_0);
+ step2[13] = _mm_sub_epi16(step2[13], s3_13_0);
+ step2[14] = _mm_sub_epi16(step2[14], s3_14_0);
+ step2[15] = _mm_sub_epi16(step2[15], s3_15_0);
+ step1[16] = _mm_sub_epi16(step1[16], s3_16_0);
+ step1[17] = _mm_sub_epi16(step1[17], s3_17_0);
+ step1[18] = _mm_sub_epi16(step1[18], s3_18_0);
+ step1[19] = _mm_sub_epi16(step1[19], s3_19_0);
+ step2[20] = _mm_sub_epi16(step2[20], s3_20_0);
+ step2[21] = _mm_sub_epi16(step2[21], s3_21_0);
+ step2[22] = _mm_sub_epi16(step2[22], s3_22_0);
+ step2[23] = _mm_sub_epi16(step2[23], s3_23_0);
+ step2[24] = _mm_sub_epi16(step2[24], s3_24_0);
+ step2[25] = _mm_sub_epi16(step2[25], s3_25_0);
+ step2[26] = _mm_sub_epi16(step2[26], s3_26_0);
+ step2[27] = _mm_sub_epi16(step2[27], s3_27_0);
+ step1[28] = _mm_sub_epi16(step1[28], s3_28_0);
+ step1[29] = _mm_sub_epi16(step1[29], s3_29_0);
+ step1[30] = _mm_sub_epi16(step1[30], s3_30_0);
+ step1[31] = _mm_sub_epi16(step1[31], s3_31_0);
+
+ step2[ 0] = _mm_add_epi16(step2[ 0], kOne);
+ step2[ 1] = _mm_add_epi16(step2[ 1], kOne);
+ step2[ 2] = _mm_add_epi16(step2[ 2], kOne);
+ step2[ 3] = _mm_add_epi16(step2[ 3], kOne);
+ step2[ 4] = _mm_add_epi16(step2[ 4], kOne);
+ step2[ 5] = _mm_add_epi16(step2[ 5], kOne);
+ step2[ 6] = _mm_add_epi16(step2[ 6], kOne);
+ step2[ 7] = _mm_add_epi16(step2[ 7], kOne);
+ step2[ 8] = _mm_add_epi16(step2[ 8], kOne);
+ step2[ 9] = _mm_add_epi16(step2[ 9], kOne);
+ step2[10] = _mm_add_epi16(step2[10], kOne);
+ step2[11] = _mm_add_epi16(step2[11], kOne);
+ step2[12] = _mm_add_epi16(step2[12], kOne);
+ step2[13] = _mm_add_epi16(step2[13], kOne);
+ step2[14] = _mm_add_epi16(step2[14], kOne);
+ step2[15] = _mm_add_epi16(step2[15], kOne);
+ step1[16] = _mm_add_epi16(step1[16], kOne);
+ step1[17] = _mm_add_epi16(step1[17], kOne);
+ step1[18] = _mm_add_epi16(step1[18], kOne);
+ step1[19] = _mm_add_epi16(step1[19], kOne);
+ step2[20] = _mm_add_epi16(step2[20], kOne);
+ step2[21] = _mm_add_epi16(step2[21], kOne);
+ step2[22] = _mm_add_epi16(step2[22], kOne);
+ step2[23] = _mm_add_epi16(step2[23], kOne);
+ step2[24] = _mm_add_epi16(step2[24], kOne);
+ step2[25] = _mm_add_epi16(step2[25], kOne);
+ step2[26] = _mm_add_epi16(step2[26], kOne);
+ step2[27] = _mm_add_epi16(step2[27], kOne);
+ step1[28] = _mm_add_epi16(step1[28], kOne);
+ step1[29] = _mm_add_epi16(step1[29], kOne);
+ step1[30] = _mm_add_epi16(step1[30], kOne);
+ step1[31] = _mm_add_epi16(step1[31], kOne);
+
+ step2[ 0] = _mm_srai_epi16(step2[ 0], 2);
+ step2[ 1] = _mm_srai_epi16(step2[ 1], 2);
+ step2[ 2] = _mm_srai_epi16(step2[ 2], 2);
+ step2[ 3] = _mm_srai_epi16(step2[ 3], 2);
+ step2[ 4] = _mm_srai_epi16(step2[ 4], 2);
+ step2[ 5] = _mm_srai_epi16(step2[ 5], 2);
+ step2[ 6] = _mm_srai_epi16(step2[ 6], 2);
+ step2[ 7] = _mm_srai_epi16(step2[ 7], 2);
+ step2[ 8] = _mm_srai_epi16(step2[ 8], 2);
+ step2[ 9] = _mm_srai_epi16(step2[ 9], 2);
+ step2[10] = _mm_srai_epi16(step2[10], 2);
+ step2[11] = _mm_srai_epi16(step2[11], 2);
+ step2[12] = _mm_srai_epi16(step2[12], 2);
+ step2[13] = _mm_srai_epi16(step2[13], 2);
+ step2[14] = _mm_srai_epi16(step2[14], 2);
+ step2[15] = _mm_srai_epi16(step2[15], 2);
+ step1[16] = _mm_srai_epi16(step1[16], 2);
+ step1[17] = _mm_srai_epi16(step1[17], 2);
+ step1[18] = _mm_srai_epi16(step1[18], 2);
+ step1[19] = _mm_srai_epi16(step1[19], 2);
+ step2[20] = _mm_srai_epi16(step2[20], 2);
+ step2[21] = _mm_srai_epi16(step2[21], 2);
+ step2[22] = _mm_srai_epi16(step2[22], 2);
+ step2[23] = _mm_srai_epi16(step2[23], 2);
+ step2[24] = _mm_srai_epi16(step2[24], 2);
+ step2[25] = _mm_srai_epi16(step2[25], 2);
+ step2[26] = _mm_srai_epi16(step2[26], 2);
+ step2[27] = _mm_srai_epi16(step2[27], 2);
+ step1[28] = _mm_srai_epi16(step1[28], 2);
+ step1[29] = _mm_srai_epi16(step1[29], 2);
+ step1[30] = _mm_srai_epi16(step1[30], 2);
+ step1[31] = _mm_srai_epi16(step1[31], 2);
+ }
+#endif
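+ // The cmplt/sub/add/srai sequence guarded above is the vector form of
+ // the scalar inter-pass rounding (sketch only):
+ //   x = (x + 1 + (x < 0)) >> 2;
+ // _mm_cmplt_epi16() returns -1 in negative lanes, so subtracting the
+ // mask adds the extra 1 only where the value is negative.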
+
+#if FDCT32x32_HIGH_PRECISION
+ if (pass == 0) {
+#endif
+ // Stage 3
+ {
+ step3[0] = _mm_add_epi16(step2[7], step2[0]);
+ step3[1] = _mm_add_epi16(step2[6], step2[1]);
+ step3[2] = _mm_add_epi16(step2[5], step2[2]);
+ step3[3] = _mm_add_epi16(step2[4], step2[3]);
+ step3[4] = _mm_sub_epi16(step2[3], step2[4]);
+ step3[5] = _mm_sub_epi16(step2[2], step2[5]);
+ step3[6] = _mm_sub_epi16(step2[1], step2[6]);
+ step3[7] = _mm_sub_epi16(step2[0], step2[7]);
+ }
+ {
+ const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
+ const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
+ const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
+ const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
+ const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
+ const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
+ const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
+ const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
+ const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
+ const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
+ const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
+ const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
+ const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
+ const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
+ const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
+ const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
+ const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
+ const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
+ const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
+ // Combine
+ step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7);
+ step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7);
+ step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7);
+ step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7);
+ }
+ {
+ step3[16] = _mm_add_epi16(step2[23], step1[16]);
+ step3[17] = _mm_add_epi16(step2[22], step1[17]);
+ step3[18] = _mm_add_epi16(step2[21], step1[18]);
+ step3[19] = _mm_add_epi16(step2[20], step1[19]);
+ step3[20] = _mm_sub_epi16(step1[19], step2[20]);
+ step3[21] = _mm_sub_epi16(step1[18], step2[21]);
+ step3[22] = _mm_sub_epi16(step1[17], step2[22]);
+ step3[23] = _mm_sub_epi16(step1[16], step2[23]);
+ step3[24] = _mm_sub_epi16(step1[31], step2[24]);
+ step3[25] = _mm_sub_epi16(step1[30], step2[25]);
+ step3[26] = _mm_sub_epi16(step1[29], step2[26]);
+ step3[27] = _mm_sub_epi16(step1[28], step2[27]);
+ step3[28] = _mm_add_epi16(step2[27], step1[28]);
+ step3[29] = _mm_add_epi16(step2[26], step1[29]);
+ step3[30] = _mm_add_epi16(step2[25], step1[30]);
+ step3[31] = _mm_add_epi16(step2[24], step1[31]);
+ }
+
+ // Stage 4
+ {
+ step1[ 0] = _mm_add_epi16(step3[ 3], step3[ 0]);
+ step1[ 1] = _mm_add_epi16(step3[ 2], step3[ 1]);
+ step1[ 2] = _mm_sub_epi16(step3[ 1], step3[ 2]);
+ step1[ 3] = _mm_sub_epi16(step3[ 0], step3[ 3]);
+ step1[ 8] = _mm_add_epi16(step3[11], step2[ 8]);
+ step1[ 9] = _mm_add_epi16(step3[10], step2[ 9]);
+ step1[10] = _mm_sub_epi16(step2[ 9], step3[10]);
+ step1[11] = _mm_sub_epi16(step2[ 8], step3[11]);
+ step1[12] = _mm_sub_epi16(step2[15], step3[12]);
+ step1[13] = _mm_sub_epi16(step2[14], step3[13]);
+ step1[14] = _mm_add_epi16(step3[13], step2[14]);
+ step1[15] = _mm_add_epi16(step3[12], step2[15]);
+ }
+ {
+ const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]);
+ const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]);
+ const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16);
+ const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16);
+ const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16);
+ const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS);
+ const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS);
+ const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS);
+ const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS);
+ // Combine
+ step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7);
+ step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7);
+ }
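+ // Sketch of the scalar equivalent of this rotation (illustrative):
+ //   step1[5] = dct_const_round_shift((step3[6] - step3[5]) * cospi_16_64);
+ //   step1[6] = dct_const_round_shift((step3[6] + step3[5]) * cospi_16_64);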
+ {
+ const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]);
+ const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]);
+ const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]);
+ const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]);
+ const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]);
+ const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]);
+ const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]);
+ const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]);
+ const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24);
+ const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24);
+ const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24);
+ const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24);
+ const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08);
+ const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08);
+ const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08);
+ const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08);
+ const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24);
+ const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24);
+ const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24);
+ const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24);
+ const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08);
+ const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08);
+ const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08);
+ const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08);
+ // dct_const_round_shift
+ const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
+ const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
+ const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS);
+ const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS);
+ const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS);
+ const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS);
+ const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS);
+ const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS);
+ const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS);
+ const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS);
+ const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS);
+ const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS);
+ const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS);
+ const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS);
+ const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS);
+ const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS);
+ const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS);
+ const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS);
+ // Combine
+ step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7);
+ step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7);
+ step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7);
+ step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7);
+ step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7);
+ step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7);
+ step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7);
+ step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7);
+ }
+ // Stage 5
+ {
+ step2[4] = _mm_add_epi16(step1[5], step3[4]);
+ step2[5] = _mm_sub_epi16(step3[4], step1[5]);
+ step2[6] = _mm_sub_epi16(step3[7], step1[6]);
+ step2[7] = _mm_add_epi16(step1[6], step3[7]);
+ }
+ {
+ const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]);
+ const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]);
+ const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]);
+ const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]);
+ const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16);
+ const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16);
+ const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16);
+ const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16);
+ const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08);
+ const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08);
+ const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24);
+ const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24);
+ // dct_const_round_shift
+ const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS);
+ const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS);
+ const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS);
+ const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS);
+ const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS);
+ const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS);
+ const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS);
+ const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS);
+ // Combine
+ out[ 0] = _mm_packs_epi32(out_00_6, out_00_7);
+ out[16] = _mm_packs_epi32(out_16_6, out_16_7);
+ out[ 8] = _mm_packs_epi32(out_08_6, out_08_7);
+ out[24] = _mm_packs_epi32(out_24_6, out_24_7);
+ }
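+ // These are the first four transform outputs; their scalar forms are
+ // (sketch only):
+ //   out[ 0] = dct_const_round_shift((step1[0] + step1[1]) * cospi_16_64);
+ //   out[16] = dct_const_round_shift((step1[0] - step1[1]) * cospi_16_64);
+ //   out[ 8] = dct_const_round_shift(step1[2] * cospi_24_64 +
+ //                                   step1[3] * cospi_8_64);
+ //   out[24] = dct_const_round_shift(step1[3] * cospi_24_64 -
+ //                                   step1[2] * cospi_8_64);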
+ {
+ const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[ 9], step1[14]);
+ const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[ 9], step1[14]);
+ const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]);
+ const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]);
+ const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24);
+ const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24);
+ const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08);
+ const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08);
+ const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24);
+ const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24);
+ const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08);
+ const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08);
+ // dct_const_round_shift
+ const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
+ const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
+ const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS);
+ const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS);
+ const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS);
+ const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS);
+ const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS);
+ const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS);
+ const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS);
+ const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS);
+ // Combine
+ step2[ 9] = _mm_packs_epi32(s2_09_6, s2_09_7);
+ step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7);
+ step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7);
+ step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7);
+ }
+ {
+ step2[16] = _mm_add_epi16(step1[19], step3[16]);
+ step2[17] = _mm_add_epi16(step1[18], step3[17]);
+ step2[18] = _mm_sub_epi16(step3[17], step1[18]);
+ step2[19] = _mm_sub_epi16(step3[16], step1[19]);
+ step2[20] = _mm_sub_epi16(step3[23], step1[20]);
+ step2[21] = _mm_sub_epi16(step3[22], step1[21]);
+ step2[22] = _mm_add_epi16(step1[21], step3[22]);
+ step2[23] = _mm_add_epi16(step1[20], step3[23]);
+ step2[24] = _mm_add_epi16(step1[27], step3[24]);
+ step2[25] = _mm_add_epi16(step1[26], step3[25]);
+ step2[26] = _mm_sub_epi16(step3[25], step1[26]);
+ step2[27] = _mm_sub_epi16(step3[24], step1[27]);
+ step2[28] = _mm_sub_epi16(step3[31], step1[28]);
+ step2[29] = _mm_sub_epi16(step3[30], step1[29]);
+ step2[30] = _mm_add_epi16(step1[29], step3[30]);
+ step2[31] = _mm_add_epi16(step1[28], step3[31]);
+ }
+ // Stage 6
+ {
+ const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+ const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+ const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+ const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+ const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+ const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+ const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+ const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+ const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04);
+ const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04);
+ const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20);
+ const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20);
+ const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12);
+ const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12);
+ const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28);
+ const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28);
+ // dct_const_round_shift
+ const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS);
+ const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS);
+ const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS);
+ const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS);
+ const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS);
+ const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS);
+ const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS);
+ const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS);
+ // Combine
+ out[ 4] = _mm_packs_epi32(out_04_6, out_04_7);
+ out[20] = _mm_packs_epi32(out_20_6, out_20_7);
+ out[12] = _mm_packs_epi32(out_12_6, out_12_7);
+ out[28] = _mm_packs_epi32(out_28_6, out_28_7);
+ }
+ {
+ step3[ 8] = _mm_add_epi16(step2[ 9], step1[ 8]);
+ step3[ 9] = _mm_sub_epi16(step1[ 8], step2[ 9]);
+ step3[10] = _mm_sub_epi16(step1[11], step2[10]);
+ step3[11] = _mm_add_epi16(step2[10], step1[11]);
+ step3[12] = _mm_add_epi16(step2[13], step1[12]);
+ step3[13] = _mm_sub_epi16(step1[12], step2[13]);
+ step3[14] = _mm_sub_epi16(step1[15], step2[14]);
+ step3[15] = _mm_add_epi16(step2[14], step1[15]);
+ }
+ {
+ const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]);
+ const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]);
+ const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]);
+ const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]);
+ const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]);
+ const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]);
+ const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]);
+ const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]);
+ const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28);
+ const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28);
+ const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04);
+ const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04);
+ const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12);
+ const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12);
+ const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20);
+ const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20);
+ const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12);
+ const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12);
+ const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20);
+ const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20);
+ const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28);
+ const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28);
+ const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04);
+ const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04);
+ // dct_const_round_shift
+ const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS);
+ const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS);
+ const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS);
+ const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS);
+ const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS);
+ const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS);
+ const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS);
+ const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS);
+ const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS);
+ const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS);
+ const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS);
+ const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS);
+ const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS);
+ const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS);
+ const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS);
+ const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS);
+ // Combine
+ step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7);
+ step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7);
+ step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7);
+ step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7);
+ step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7);
+ step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7);
+ step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
+ step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
+ }
+ // Stage 7
+ {
+ const __m128i out_02_0 = _mm_unpacklo_epi16(step3[ 8], step3[15]);
+ const __m128i out_02_1 = _mm_unpackhi_epi16(step3[ 8], step3[15]);
+ const __m128i out_18_0 = _mm_unpacklo_epi16(step3[ 9], step3[14]);
+ const __m128i out_18_1 = _mm_unpackhi_epi16(step3[ 9], step3[14]);
+ const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]);
+ const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]);
+ const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]);
+ const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]);
+ const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02);
+ const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02);
+ const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18);
+ const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18);
+ const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10);
+ const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10);
+ const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26);
+ const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26);
+ const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06);
+ const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06);
+ const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22);
+ const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22);
+ const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14);
+ const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14);
+ const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30);
+ const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30);
+ // dct_const_round_shift
+ const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS);
+ const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS);
+ const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS);
+ const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS);
+ const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS);
+ const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS);
+ const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS);
+ const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS);
+ const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS);
+ const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS);
+ const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS);
+ const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS);
+ const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS);
+ const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS);
+ const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS);
+ const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS);
+ // Combine
+ out[ 2] = _mm_packs_epi32(out_02_6, out_02_7);
+ out[18] = _mm_packs_epi32(out_18_6, out_18_7);
+ out[10] = _mm_packs_epi32(out_10_6, out_10_7);
+ out[26] = _mm_packs_epi32(out_26_6, out_26_7);
+ out[ 6] = _mm_packs_epi32(out_06_6, out_06_7);
+ out[22] = _mm_packs_epi32(out_22_6, out_22_7);
+ out[14] = _mm_packs_epi32(out_14_6, out_14_7);
+ out[30] = _mm_packs_epi32(out_30_6, out_30_7);
+ }
+ {
+ step1[16] = _mm_add_epi16(step3[17], step2[16]);
+ step1[17] = _mm_sub_epi16(step2[16], step3[17]);
+ step1[18] = _mm_sub_epi16(step2[19], step3[18]);
+ step1[19] = _mm_add_epi16(step3[18], step2[19]);
+ step1[20] = _mm_add_epi16(step3[21], step2[20]);
+ step1[21] = _mm_sub_epi16(step2[20], step3[21]);
+ step1[22] = _mm_sub_epi16(step2[23], step3[22]);
+ step1[23] = _mm_add_epi16(step3[22], step2[23]);
+ step1[24] = _mm_add_epi16(step3[25], step2[24]);
+ step1[25] = _mm_sub_epi16(step2[24], step3[25]);
+ step1[26] = _mm_sub_epi16(step2[27], step3[26]);
+ step1[27] = _mm_add_epi16(step3[26], step2[27]);
+ step1[28] = _mm_add_epi16(step3[29], step2[28]);
+ step1[29] = _mm_sub_epi16(step2[28], step3[29]);
+ step1[30] = _mm_sub_epi16(step2[31], step3[30]);
+ step1[31] = _mm_add_epi16(step3[30], step2[31]);
+ }
+ // Final stage: output indices are bit-reversed.
+ {
+ const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]);
+ const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]);
+ const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]);
+ const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]);
+ const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]);
+ const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]);
+ const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]);
+ const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]);
+ const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01);
+ const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01);
+ const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17);
+ const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17);
+ const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09);
+ const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09);
+ const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25);
+ const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25);
+ const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07);
+ const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07);
+ const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23);
+ const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23);
+ const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15);
+ const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15);
+ const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31);
+ const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31);
+ // dct_const_round_shift
+ const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_17_5 = _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS);
+ const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS);
+ const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS);
+ const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS);
+ const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS);
+ const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS);
+ const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS);
+ const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS);
+ const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS);
+ const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS);
+ const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS);
+ const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS);
+ const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS);
+ const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS);
+ const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS);
+ const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS);
+ // Combine
+ out[ 1] = _mm_packs_epi32(out_01_6, out_01_7);
+ out[17] = _mm_packs_epi32(out_17_6, out_17_7);
+ out[ 9] = _mm_packs_epi32(out_09_6, out_09_7);
+ out[25] = _mm_packs_epi32(out_25_6, out_25_7);
+ out[ 7] = _mm_packs_epi32(out_07_6, out_07_7);
+ out[23] = _mm_packs_epi32(out_23_6, out_23_7);
+ out[15] = _mm_packs_epi32(out_15_6, out_15_7);
+ out[31] = _mm_packs_epi32(out_31_6, out_31_7);
+ }
+ {
+ const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]);
+ const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]);
+ const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]);
+ const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]);
+ const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]);
+ const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]);
+ const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]);
+ const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]);
+ const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05);
+ const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05);
+ const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21);
+ const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21);
+ const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13);
+ const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13);
+ const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29);
+ const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29);
+ const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03);
+ const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03);
+ const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19);
+ const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19);
+ const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11);
+ const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11);
+ const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27);
+ const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27);
+ // dct_const_round_shift
+ const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
+ const __m128i out_27_5 = _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
+ const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS);
+ const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS);
+ const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS);
+ const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS);
+ const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS);
+ const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS);
+ const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS);
+ const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS);
+ const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS);
+ const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS);
+ const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS);
+ const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS);
+ const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS);
+ const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS);
+ const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS);
+ const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS);
+ // Combine
+ out[ 5] = _mm_packs_epi32(out_05_6, out_05_7);
+ out[21] = _mm_packs_epi32(out_21_6, out_21_7);
+ out[13] = _mm_packs_epi32(out_13_6, out_13_7);
+ out[29] = _mm_packs_epi32(out_29_6, out_29_7);
+ out[ 3] = _mm_packs_epi32(out_03_6, out_03_7);
+ out[19] = _mm_packs_epi32(out_19_6, out_19_7);
+ out[11] = _mm_packs_epi32(out_11_6, out_11_7);
+ out[27] = _mm_packs_epi32(out_27_6, out_27_7);
+ }
+#if FDCT32x32_HIGH_PRECISION
+ } else {
+ __m128i lstep1[64], lstep2[64], lstep3[64];
+ __m128i u[32], v[32], sign[16];
+ const __m128i mask16 = _mm_set1_epi32(0x80008000);
+ const __m128i K32One = _mm_set1_epi32(1);
+ // start using 32-bit operations
+ // stage 3
+ {
+ // Expand to 32 bits prior to the addition operations.
+ lstep2[ 0] = k_cvtlo_epi16(step2[ 0], mask16, kZero);
+ lstep2[ 1] = k_cvthi_epi16(step2[ 0], mask16, kZero);
+ lstep2[ 2] = k_cvtlo_epi16(step2[ 1], mask16, kZero);
+ lstep2[ 3] = k_cvthi_epi16(step2[ 1], mask16, kZero);
+ lstep2[ 4] = k_cvtlo_epi16(step2[ 2], mask16, kZero);
+ lstep2[ 5] = k_cvthi_epi16(step2[ 2], mask16, kZero);
+ lstep2[ 6] = k_cvtlo_epi16(step2[ 3], mask16, kZero);
+ lstep2[ 7] = k_cvthi_epi16(step2[ 3], mask16, kZero);
+ lstep2[ 8] = k_cvtlo_epi16(step2[ 4], mask16, kZero);
+ lstep2[ 9] = k_cvthi_epi16(step2[ 4], mask16, kZero);
+ lstep2[10] = k_cvtlo_epi16(step2[ 5], mask16, kZero);
+ lstep2[11] = k_cvthi_epi16(step2[ 5], mask16, kZero);
+ lstep2[12] = k_cvtlo_epi16(step2[ 6], mask16, kZero);
+ lstep2[13] = k_cvthi_epi16(step2[ 6], mask16, kZero);
+ lstep2[14] = k_cvtlo_epi16(step2[ 7], mask16, kZero);
+ lstep2[15] = k_cvthi_epi16(step2[ 7], mask16, kZero);
+
+ lstep3[ 0] = _mm_add_epi32(lstep2[14], lstep2[ 0]);
+ lstep3[ 1] = _mm_add_epi32(lstep2[15], lstep2[ 1]);
+ lstep3[ 2] = _mm_add_epi32(lstep2[12], lstep2[ 2]);
+ lstep3[ 3] = _mm_add_epi32(lstep2[13], lstep2[ 3]);
+ lstep3[ 4] = _mm_add_epi32(lstep2[10], lstep2[ 4]);
+ lstep3[ 5] = _mm_add_epi32(lstep2[11], lstep2[ 5]);
+ lstep3[ 6] = _mm_add_epi32(lstep2[ 8], lstep2[ 6]);
+ lstep3[ 7] = _mm_add_epi32(lstep2[ 9], lstep2[ 7]);
+ lstep3[ 8] = _mm_sub_epi32(lstep2[ 6], lstep2[ 8]);
+ lstep3[ 9] = _mm_sub_epi32(lstep2[ 7], lstep2[ 9]);
+ lstep3[10] = _mm_sub_epi32(lstep2[ 4], lstep2[10]);
+ lstep3[11] = _mm_sub_epi32(lstep2[ 5], lstep2[11]);
+ lstep3[12] = _mm_sub_epi32(lstep2[ 2], lstep2[12]);
+ lstep3[13] = _mm_sub_epi32(lstep2[ 3], lstep2[13]);
+ lstep3[14] = _mm_sub_epi32(lstep2[ 0], lstep2[14]);
+ lstep3[15] = _mm_sub_epi32(lstep2[ 1], lstep2[15]);
+ }
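+ // Note: lstep uses two 32-bit vectors per 16-bit vector, so index 2i
+ // holds the widened low eight lanes of step[i] and index 2i + 1 the
+ // high eight; k_cvtlo_epi16()/k_cvthi_epi16() perform that widening
+ // with sign preserved (the mask16/kZero arguments drive the sign
+ // handling).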
+ {
+ const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
+ const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
+ const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
+ const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
+ const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
+ const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
+ const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
+ const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
+ const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
+ const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
+ const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
+ const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+ const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+ const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+ lstep3[20] = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
+ lstep3[21] = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
+ lstep3[22] = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
+ lstep3[23] = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
+ lstep3[24] = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
+ lstep3[25] = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
+ lstep3[26] = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
+ lstep3[27] = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
+ }
+ {
+ lstep2[40] = k_cvtlo_epi16(step2[20], mask16, kZero);
+ lstep2[41] = k_cvthi_epi16(step2[20], mask16, kZero);
+ lstep2[42] = k_cvtlo_epi16(step2[21], mask16, kZero);
+ lstep2[43] = k_cvthi_epi16(step2[21], mask16, kZero);
+ lstep2[44] = k_cvtlo_epi16(step2[22], mask16, kZero);
+ lstep2[45] = k_cvthi_epi16(step2[22], mask16, kZero);
+ lstep2[46] = k_cvtlo_epi16(step2[23], mask16, kZero);
+ lstep2[47] = k_cvthi_epi16(step2[23], mask16, kZero);
+ lstep2[48] = k_cvtlo_epi16(step2[24], mask16, kZero);
+ lstep2[49] = k_cvthi_epi16(step2[24], mask16, kZero);
+ lstep2[50] = k_cvtlo_epi16(step2[25], mask16, kZero);
+ lstep2[51] = k_cvthi_epi16(step2[25], mask16, kZero);
+ lstep2[52] = k_cvtlo_epi16(step2[26], mask16, kZero);
+ lstep2[53] = k_cvthi_epi16(step2[26], mask16, kZero);
+ lstep2[54] = k_cvtlo_epi16(step2[27], mask16, kZero);
+ lstep2[55] = k_cvthi_epi16(step2[27], mask16, kZero);
+
+ lstep1[32] = k_cvtlo_epi16(step1[16], mask16, kZero);
+ lstep1[33] = k_cvthi_epi16(step1[16], mask16, kZero);
+ lstep1[34] = k_cvtlo_epi16(step1[17], mask16, kZero);
+ lstep1[35] = k_cvthi_epi16(step1[17], mask16, kZero);
+ lstep1[36] = k_cvtlo_epi16(step1[18], mask16, kZero);
+ lstep1[37] = k_cvthi_epi16(step1[18], mask16, kZero);
+ lstep1[38] = k_cvtlo_epi16(step1[19], mask16, kZero);
+ lstep1[39] = k_cvthi_epi16(step1[19], mask16, kZero);
+ lstep1[56] = k_cvtlo_epi16(step1[28], mask16, kZero);
+ lstep1[57] = k_cvthi_epi16(step1[28], mask16, kZero);
+ lstep1[58] = k_cvtlo_epi16(step1[29], mask16, kZero);
+ lstep1[59] = k_cvthi_epi16(step1[29], mask16, kZero);
+ lstep1[60] = k_cvtlo_epi16(step1[30], mask16, kZero);
+ lstep1[61] = k_cvthi_epi16(step1[30], mask16, kZero);
+ lstep1[62] = k_cvtlo_epi16(step1[31], mask16, kZero);
+ lstep1[63] = k_cvthi_epi16(step1[31], mask16, kZero);
+
+ lstep3[32] = _mm_add_epi32(lstep2[46], lstep1[32]);
+ lstep3[33] = _mm_add_epi32(lstep2[47], lstep1[33]);
+ lstep3[34] = _mm_add_epi32(lstep2[44], lstep1[34]);
+ lstep3[35] = _mm_add_epi32(lstep2[45], lstep1[35]);
+ lstep3[36] = _mm_add_epi32(lstep2[42], lstep1[36]);
+ lstep3[37] = _mm_add_epi32(lstep2[43], lstep1[37]);
+ lstep3[38] = _mm_add_epi32(lstep2[40], lstep1[38]);
+ lstep3[39] = _mm_add_epi32(lstep2[41], lstep1[39]);
+ lstep3[40] = _mm_sub_epi32(lstep1[38], lstep2[40]);
+ lstep3[41] = _mm_sub_epi32(lstep1[39], lstep2[41]);
+ lstep3[42] = _mm_sub_epi32(lstep1[36], lstep2[42]);
+ lstep3[43] = _mm_sub_epi32(lstep1[37], lstep2[43]);
+ lstep3[44] = _mm_sub_epi32(lstep1[34], lstep2[44]);
+ lstep3[45] = _mm_sub_epi32(lstep1[35], lstep2[45]);
+ lstep3[46] = _mm_sub_epi32(lstep1[32], lstep2[46]);
+ lstep3[47] = _mm_sub_epi32(lstep1[33], lstep2[47]);
+ lstep3[48] = _mm_sub_epi32(lstep1[62], lstep2[48]);
+ lstep3[49] = _mm_sub_epi32(lstep1[63], lstep2[49]);
+ lstep3[50] = _mm_sub_epi32(lstep1[60], lstep2[50]);
+ lstep3[51] = _mm_sub_epi32(lstep1[61], lstep2[51]);
+ lstep3[52] = _mm_sub_epi32(lstep1[58], lstep2[52]);
+ lstep3[53] = _mm_sub_epi32(lstep1[59], lstep2[53]);
+ lstep3[54] = _mm_sub_epi32(lstep1[56], lstep2[54]);
+ lstep3[55] = _mm_sub_epi32(lstep1[57], lstep2[55]);
+ lstep3[56] = _mm_add_epi32(lstep2[54], lstep1[56]);
+ lstep3[57] = _mm_add_epi32(lstep2[55], lstep1[57]);
+ lstep3[58] = _mm_add_epi32(lstep2[52], lstep1[58]);
+ lstep3[59] = _mm_add_epi32(lstep2[53], lstep1[59]);
+ lstep3[60] = _mm_add_epi32(lstep2[50], lstep1[60]);
+ lstep3[61] = _mm_add_epi32(lstep2[51], lstep1[61]);
+ lstep3[62] = _mm_add_epi32(lstep2[48], lstep1[62]);
+ lstep3[63] = _mm_add_epi32(lstep2[49], lstep1[63]);
+ }
+
+ // stage 4
+ {
+      // Expand to 32-bit precision prior to the addition operations.
+ lstep2[16] = k_cvtlo_epi16(step2[ 8], mask16, kZero);
+ lstep2[17] = k_cvthi_epi16(step2[ 8], mask16, kZero);
+ lstep2[18] = k_cvtlo_epi16(step2[ 9], mask16, kZero);
+ lstep2[19] = k_cvthi_epi16(step2[ 9], mask16, kZero);
+ lstep2[28] = k_cvtlo_epi16(step2[14], mask16, kZero);
+ lstep2[29] = k_cvthi_epi16(step2[14], mask16, kZero);
+ lstep2[30] = k_cvtlo_epi16(step2[15], mask16, kZero);
+ lstep2[31] = k_cvthi_epi16(step2[15], mask16, kZero);
+
+ lstep1[ 0] = _mm_add_epi32(lstep3[ 6], lstep3[ 0]);
+ lstep1[ 1] = _mm_add_epi32(lstep3[ 7], lstep3[ 1]);
+ lstep1[ 2] = _mm_add_epi32(lstep3[ 4], lstep3[ 2]);
+ lstep1[ 3] = _mm_add_epi32(lstep3[ 5], lstep3[ 3]);
+ lstep1[ 4] = _mm_sub_epi32(lstep3[ 2], lstep3[ 4]);
+ lstep1[ 5] = _mm_sub_epi32(lstep3[ 3], lstep3[ 5]);
+ lstep1[ 6] = _mm_sub_epi32(lstep3[ 0], lstep3[ 6]);
+ lstep1[ 7] = _mm_sub_epi32(lstep3[ 1], lstep3[ 7]);
+ lstep1[16] = _mm_add_epi32(lstep3[22], lstep2[16]);
+ lstep1[17] = _mm_add_epi32(lstep3[23], lstep2[17]);
+ lstep1[18] = _mm_add_epi32(lstep3[20], lstep2[18]);
+ lstep1[19] = _mm_add_epi32(lstep3[21], lstep2[19]);
+ lstep1[20] = _mm_sub_epi32(lstep2[18], lstep3[20]);
+ lstep1[21] = _mm_sub_epi32(lstep2[19], lstep3[21]);
+ lstep1[22] = _mm_sub_epi32(lstep2[16], lstep3[22]);
+ lstep1[23] = _mm_sub_epi32(lstep2[17], lstep3[23]);
+ lstep1[24] = _mm_sub_epi32(lstep2[30], lstep3[24]);
+ lstep1[25] = _mm_sub_epi32(lstep2[31], lstep3[25]);
+ lstep1[26] = _mm_sub_epi32(lstep2[28], lstep3[26]);
+ lstep1[27] = _mm_sub_epi32(lstep2[29], lstep3[27]);
+ lstep1[28] = _mm_add_epi32(lstep3[26], lstep2[28]);
+ lstep1[29] = _mm_add_epi32(lstep3[27], lstep2[29]);
+ lstep1[30] = _mm_add_epi32(lstep3[24], lstep2[30]);
+ lstep1[31] = _mm_add_epi32(lstep3[25], lstep2[31]);
+ }
+ {
+      // Stage 4 butterflies that need the cospi_16_64 rotation, carried
+      // out in 32-bit precision.
+ const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
+ const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
+
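+      // The pattern below is one fixed-point butterfly rotation per output:
+      // _mm_unpack{lo,hi}_epi32 interleaves the two inputs into (x, y) pairs,
+      // k_madd_epi32 (defined earlier in this file) in effect computes
+      // x * c0 + y * c1 in 64-bit lanes against a pair_set_epi32(c0, c1)
+      // constant, k_packs_epi64 narrows the two 64-bit sums back to 32-bit
+      // lanes, and the add/srai pair rounds by DCT_CONST_ROUNDING and shifts
+      // by DCT_CONST_BITS -- i.e. dct_const_round_shift() on 32-bit data.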
+ u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]);
+ u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]);
+ u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]);
+ u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]);
+
+      // TODO(jingning): manually inline k_madd_epi32 to further hide
+      // instruction latency.
+ v[ 0] = k_madd_epi32(u[0], k32_p16_m16);
+ v[ 1] = k_madd_epi32(u[1], k32_p16_m16);
+ v[ 2] = k_madd_epi32(u[2], k32_p16_m16);
+ v[ 3] = k_madd_epi32(u[3], k32_p16_m16);
+ v[ 4] = k_madd_epi32(u[0], k32_p16_p16);
+ v[ 5] = k_madd_epi32(u[1], k32_p16_p16);
+ v[ 6] = k_madd_epi32(u[2], k32_p16_p16);
+ v[ 7] = k_madd_epi32(u[3], k32_p16_p16);
+
+ u[0] = k_packs_epi64(v[0], v[1]);
+ u[1] = k_packs_epi64(v[2], v[3]);
+ u[2] = k_packs_epi64(v[4], v[5]);
+ u[3] = k_packs_epi64(v[6], v[7]);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+
+ lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+ }
+ {
+ const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
+ const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
+ const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
+
+ u[ 0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]);
+ u[ 1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]);
+ u[ 2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]);
+ u[ 3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]);
+ u[ 4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]);
+ u[ 5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]);
+ u[ 6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]);
+ u[ 7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]);
+ u[ 8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]);
+ u[ 9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]);
+ u[10] = _mm_unpacklo_epi32(lstep3[41], lstep3[55]);
+ u[11] = _mm_unpackhi_epi32(lstep3[41], lstep3[55]);
+ u[12] = _mm_unpacklo_epi32(lstep3[42], lstep3[52]);
+ u[13] = _mm_unpackhi_epi32(lstep3[42], lstep3[52]);
+ u[14] = _mm_unpacklo_epi32(lstep3[43], lstep3[53]);
+ u[15] = _mm_unpackhi_epi32(lstep3[43], lstep3[53]);
+
+ v[ 0] = k_madd_epi32(u[ 0], k32_m08_p24);
+ v[ 1] = k_madd_epi32(u[ 1], k32_m08_p24);
+ v[ 2] = k_madd_epi32(u[ 2], k32_m08_p24);
+ v[ 3] = k_madd_epi32(u[ 3], k32_m08_p24);
+ v[ 4] = k_madd_epi32(u[ 4], k32_m08_p24);
+ v[ 5] = k_madd_epi32(u[ 5], k32_m08_p24);
+ v[ 6] = k_madd_epi32(u[ 6], k32_m08_p24);
+ v[ 7] = k_madd_epi32(u[ 7], k32_m08_p24);
+ v[ 8] = k_madd_epi32(u[ 8], k32_m24_m08);
+ v[ 9] = k_madd_epi32(u[ 9], k32_m24_m08);
+ v[10] = k_madd_epi32(u[10], k32_m24_m08);
+ v[11] = k_madd_epi32(u[11], k32_m24_m08);
+ v[12] = k_madd_epi32(u[12], k32_m24_m08);
+ v[13] = k_madd_epi32(u[13], k32_m24_m08);
+ v[14] = k_madd_epi32(u[14], k32_m24_m08);
+ v[15] = k_madd_epi32(u[15], k32_m24_m08);
+ v[16] = k_madd_epi32(u[12], k32_m08_p24);
+ v[17] = k_madd_epi32(u[13], k32_m08_p24);
+ v[18] = k_madd_epi32(u[14], k32_m08_p24);
+ v[19] = k_madd_epi32(u[15], k32_m08_p24);
+ v[20] = k_madd_epi32(u[ 8], k32_m08_p24);
+ v[21] = k_madd_epi32(u[ 9], k32_m08_p24);
+ v[22] = k_madd_epi32(u[10], k32_m08_p24);
+ v[23] = k_madd_epi32(u[11], k32_m08_p24);
+ v[24] = k_madd_epi32(u[ 4], k32_p24_p08);
+ v[25] = k_madd_epi32(u[ 5], k32_p24_p08);
+ v[26] = k_madd_epi32(u[ 6], k32_p24_p08);
+ v[27] = k_madd_epi32(u[ 7], k32_p24_p08);
+ v[28] = k_madd_epi32(u[ 0], k32_p24_p08);
+ v[29] = k_madd_epi32(u[ 1], k32_p24_p08);
+ v[30] = k_madd_epi32(u[ 2], k32_p24_p08);
+ v[31] = k_madd_epi32(u[ 3], k32_p24_p08);
+
+ u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+ u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+ u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+ u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+ u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+ u[ 5] = k_packs_epi64(v[10], v[11]);
+ u[ 6] = k_packs_epi64(v[12], v[13]);
+ u[ 7] = k_packs_epi64(v[14], v[15]);
+ u[ 8] = k_packs_epi64(v[16], v[17]);
+ u[ 9] = k_packs_epi64(v[18], v[19]);
+ u[10] = k_packs_epi64(v[20], v[21]);
+ u[11] = k_packs_epi64(v[22], v[23]);
+ u[12] = k_packs_epi64(v[24], v[25]);
+ u[13] = k_packs_epi64(v[26], v[27]);
+ u[14] = k_packs_epi64(v[28], v[29]);
+ u[15] = k_packs_epi64(v[30], v[31]);
+
+ v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+ v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+ v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+ v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+ v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+ v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+ v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+ v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+ v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+ v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+ v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+ lstep1[36] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+ lstep1[37] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+ lstep1[38] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+ lstep1[39] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+ lstep1[40] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+ lstep1[41] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+ lstep1[42] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+ lstep1[43] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+ lstep1[52] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+ lstep1[53] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+ lstep1[54] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+ lstep1[55] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+ lstep1[56] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+ lstep1[57] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+ lstep1[58] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+ lstep1[59] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+ }
+ // stage 5
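+    // Stage 5 finalizes output rows 0, 8, 16 and 24 (indices that are
+    // multiples of 8).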
+ {
+ lstep2[ 8] = _mm_add_epi32(lstep1[10], lstep3[ 8]);
+ lstep2[ 9] = _mm_add_epi32(lstep1[11], lstep3[ 9]);
+ lstep2[10] = _mm_sub_epi32(lstep3[ 8], lstep1[10]);
+ lstep2[11] = _mm_sub_epi32(lstep3[ 9], lstep1[11]);
+ lstep2[12] = _mm_sub_epi32(lstep3[14], lstep1[12]);
+ lstep2[13] = _mm_sub_epi32(lstep3[15], lstep1[13]);
+ lstep2[14] = _mm_add_epi32(lstep1[12], lstep3[14]);
+ lstep2[15] = _mm_add_epi32(lstep1[13], lstep3[15]);
+ }
+ {
+ const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
+ const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
+ const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
+ const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
+
+ u[0] = _mm_unpacklo_epi32(lstep1[0], lstep1[2]);
+ u[1] = _mm_unpackhi_epi32(lstep1[0], lstep1[2]);
+ u[2] = _mm_unpacklo_epi32(lstep1[1], lstep1[3]);
+ u[3] = _mm_unpackhi_epi32(lstep1[1], lstep1[3]);
+ u[4] = _mm_unpacklo_epi32(lstep1[4], lstep1[6]);
+ u[5] = _mm_unpackhi_epi32(lstep1[4], lstep1[6]);
+ u[6] = _mm_unpacklo_epi32(lstep1[5], lstep1[7]);
+ u[7] = _mm_unpackhi_epi32(lstep1[5], lstep1[7]);
+
+      // TODO(jingning): manually inline k_madd_epi32 to further hide
+      // instruction latency.
+ v[ 0] = k_madd_epi32(u[0], k32_p16_p16);
+ v[ 1] = k_madd_epi32(u[1], k32_p16_p16);
+ v[ 2] = k_madd_epi32(u[2], k32_p16_p16);
+ v[ 3] = k_madd_epi32(u[3], k32_p16_p16);
+ v[ 4] = k_madd_epi32(u[0], k32_p16_m16);
+ v[ 5] = k_madd_epi32(u[1], k32_p16_m16);
+ v[ 6] = k_madd_epi32(u[2], k32_p16_m16);
+ v[ 7] = k_madd_epi32(u[3], k32_p16_m16);
+ v[ 8] = k_madd_epi32(u[4], k32_p24_p08);
+ v[ 9] = k_madd_epi32(u[5], k32_p24_p08);
+ v[10] = k_madd_epi32(u[6], k32_p24_p08);
+ v[11] = k_madd_epi32(u[7], k32_p24_p08);
+ v[12] = k_madd_epi32(u[4], k32_m08_p24);
+ v[13] = k_madd_epi32(u[5], k32_m08_p24);
+ v[14] = k_madd_epi32(u[6], k32_m08_p24);
+ v[15] = k_madd_epi32(u[7], k32_m08_p24);
+
+ u[0] = k_packs_epi64(v[0], v[1]);
+ u[1] = k_packs_epi64(v[2], v[3]);
+ u[2] = k_packs_epi64(v[4], v[5]);
+ u[3] = k_packs_epi64(v[6], v[7]);
+ u[4] = k_packs_epi64(v[8], v[9]);
+ u[5] = k_packs_epi64(v[10], v[11]);
+ u[6] = k_packs_epi64(v[12], v[13]);
+ u[7] = k_packs_epi64(v[14], v[15]);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+
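+      // Branchless final rounding: the cmplt mask is -1 for negative lanes,
+      // so subtracting it adds 1 exactly when u < 0; combined with the +1
+      // and the arithmetic shift this computes (x + 1 + (x < 0)) >> 2.
+      // The same pattern recurs for the remaining output stages below.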
+ sign[0] = _mm_cmplt_epi32(u[0], kZero);
+ sign[1] = _mm_cmplt_epi32(u[1], kZero);
+ sign[2] = _mm_cmplt_epi32(u[2], kZero);
+ sign[3] = _mm_cmplt_epi32(u[3], kZero);
+ sign[4] = _mm_cmplt_epi32(u[4], kZero);
+ sign[5] = _mm_cmplt_epi32(u[5], kZero);
+ sign[6] = _mm_cmplt_epi32(u[6], kZero);
+ sign[7] = _mm_cmplt_epi32(u[7], kZero);
+
+ u[0] = _mm_sub_epi32(u[0], sign[0]);
+ u[1] = _mm_sub_epi32(u[1], sign[1]);
+ u[2] = _mm_sub_epi32(u[2], sign[2]);
+ u[3] = _mm_sub_epi32(u[3], sign[3]);
+ u[4] = _mm_sub_epi32(u[4], sign[4]);
+ u[5] = _mm_sub_epi32(u[5], sign[5]);
+ u[6] = _mm_sub_epi32(u[6], sign[6]);
+ u[7] = _mm_sub_epi32(u[7], sign[7]);
+
+ u[0] = _mm_add_epi32(u[0], K32One);
+ u[1] = _mm_add_epi32(u[1], K32One);
+ u[2] = _mm_add_epi32(u[2], K32One);
+ u[3] = _mm_add_epi32(u[3], K32One);
+ u[4] = _mm_add_epi32(u[4], K32One);
+ u[5] = _mm_add_epi32(u[5], K32One);
+ u[6] = _mm_add_epi32(u[6], K32One);
+ u[7] = _mm_add_epi32(u[7], K32One);
+
+ u[0] = _mm_srai_epi32(u[0], 2);
+ u[1] = _mm_srai_epi32(u[1], 2);
+ u[2] = _mm_srai_epi32(u[2], 2);
+ u[3] = _mm_srai_epi32(u[3], 2);
+ u[4] = _mm_srai_epi32(u[4], 2);
+ u[5] = _mm_srai_epi32(u[5], 2);
+ u[6] = _mm_srai_epi32(u[6], 2);
+ u[7] = _mm_srai_epi32(u[7], 2);
+
+ // Combine
+ out[ 0] = _mm_packs_epi32(u[0], u[1]);
+ out[16] = _mm_packs_epi32(u[2], u[3]);
+ out[ 8] = _mm_packs_epi32(u[4], u[5]);
+ out[24] = _mm_packs_epi32(u[6], u[7]);
+ }
+ {
+ const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
+ const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
+ const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
+
+ u[0] = _mm_unpacklo_epi32(lstep1[18], lstep1[28]);
+ u[1] = _mm_unpackhi_epi32(lstep1[18], lstep1[28]);
+ u[2] = _mm_unpacklo_epi32(lstep1[19], lstep1[29]);
+ u[3] = _mm_unpackhi_epi32(lstep1[19], lstep1[29]);
+ u[4] = _mm_unpacklo_epi32(lstep1[20], lstep1[26]);
+ u[5] = _mm_unpackhi_epi32(lstep1[20], lstep1[26]);
+ u[6] = _mm_unpacklo_epi32(lstep1[21], lstep1[27]);
+ u[7] = _mm_unpackhi_epi32(lstep1[21], lstep1[27]);
+
+ v[0] = k_madd_epi32(u[0], k32_m08_p24);
+ v[1] = k_madd_epi32(u[1], k32_m08_p24);
+ v[2] = k_madd_epi32(u[2], k32_m08_p24);
+ v[3] = k_madd_epi32(u[3], k32_m08_p24);
+ v[4] = k_madd_epi32(u[4], k32_m24_m08);
+ v[5] = k_madd_epi32(u[5], k32_m24_m08);
+ v[6] = k_madd_epi32(u[6], k32_m24_m08);
+ v[7] = k_madd_epi32(u[7], k32_m24_m08);
+ v[ 8] = k_madd_epi32(u[4], k32_m08_p24);
+ v[ 9] = k_madd_epi32(u[5], k32_m08_p24);
+ v[10] = k_madd_epi32(u[6], k32_m08_p24);
+ v[11] = k_madd_epi32(u[7], k32_m08_p24);
+ v[12] = k_madd_epi32(u[0], k32_p24_p08);
+ v[13] = k_madd_epi32(u[1], k32_p24_p08);
+ v[14] = k_madd_epi32(u[2], k32_p24_p08);
+ v[15] = k_madd_epi32(u[3], k32_p24_p08);
+
+ u[0] = k_packs_epi64(v[0], v[1]);
+ u[1] = k_packs_epi64(v[2], v[3]);
+ u[2] = k_packs_epi64(v[4], v[5]);
+ u[3] = k_packs_epi64(v[6], v[7]);
+ u[4] = k_packs_epi64(v[8], v[9]);
+ u[5] = k_packs_epi64(v[10], v[11]);
+ u[6] = k_packs_epi64(v[12], v[13]);
+ u[7] = k_packs_epi64(v[14], v[15]);
+
+ u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+ lstep2[18] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ lstep2[19] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ lstep2[20] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ lstep2[21] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ lstep2[26] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ lstep2[27] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ lstep2[28] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ lstep2[29] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+ }
+ {
+ lstep2[32] = _mm_add_epi32(lstep1[38], lstep3[32]);
+ lstep2[33] = _mm_add_epi32(lstep1[39], lstep3[33]);
+ lstep2[34] = _mm_add_epi32(lstep1[36], lstep3[34]);
+ lstep2[35] = _mm_add_epi32(lstep1[37], lstep3[35]);
+ lstep2[36] = _mm_sub_epi32(lstep3[34], lstep1[36]);
+ lstep2[37] = _mm_sub_epi32(lstep3[35], lstep1[37]);
+ lstep2[38] = _mm_sub_epi32(lstep3[32], lstep1[38]);
+ lstep2[39] = _mm_sub_epi32(lstep3[33], lstep1[39]);
+ lstep2[40] = _mm_sub_epi32(lstep3[46], lstep1[40]);
+ lstep2[41] = _mm_sub_epi32(lstep3[47], lstep1[41]);
+ lstep2[42] = _mm_sub_epi32(lstep3[44], lstep1[42]);
+ lstep2[43] = _mm_sub_epi32(lstep3[45], lstep1[43]);
+ lstep2[44] = _mm_add_epi32(lstep1[42], lstep3[44]);
+ lstep2[45] = _mm_add_epi32(lstep1[43], lstep3[45]);
+ lstep2[46] = _mm_add_epi32(lstep1[40], lstep3[46]);
+ lstep2[47] = _mm_add_epi32(lstep1[41], lstep3[47]);
+ lstep2[48] = _mm_add_epi32(lstep1[54], lstep3[48]);
+ lstep2[49] = _mm_add_epi32(lstep1[55], lstep3[49]);
+ lstep2[50] = _mm_add_epi32(lstep1[52], lstep3[50]);
+ lstep2[51] = _mm_add_epi32(lstep1[53], lstep3[51]);
+ lstep2[52] = _mm_sub_epi32(lstep3[50], lstep1[52]);
+ lstep2[53] = _mm_sub_epi32(lstep3[51], lstep1[53]);
+ lstep2[54] = _mm_sub_epi32(lstep3[48], lstep1[54]);
+ lstep2[55] = _mm_sub_epi32(lstep3[49], lstep1[55]);
+ lstep2[56] = _mm_sub_epi32(lstep3[62], lstep1[56]);
+ lstep2[57] = _mm_sub_epi32(lstep3[63], lstep1[57]);
+ lstep2[58] = _mm_sub_epi32(lstep3[60], lstep1[58]);
+ lstep2[59] = _mm_sub_epi32(lstep3[61], lstep1[59]);
+ lstep2[60] = _mm_add_epi32(lstep1[58], lstep3[60]);
+ lstep2[61] = _mm_add_epi32(lstep1[59], lstep3[61]);
+ lstep2[62] = _mm_add_epi32(lstep1[56], lstep3[62]);
+ lstep2[63] = _mm_add_epi32(lstep1[57], lstep3[63]);
+ }
+ // stage 6
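+    // Stage 6 finalizes output rows 4, 12, 20 and 28.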
+ {
+ const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);
+ const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
+ const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
+ const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
+
+ u[0] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
+ u[1] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
+ u[2] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
+ u[3] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+ u[4] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
+ u[5] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
+ u[6] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
+ u[7] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
+ u[8] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
+ u[9] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
+ u[10] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
+ u[11] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
+ u[12] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
+ u[13] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
+ u[14] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
+ u[15] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+
+ v[0] = k_madd_epi32(u[0], k32_p28_p04);
+ v[1] = k_madd_epi32(u[1], k32_p28_p04);
+ v[2] = k_madd_epi32(u[2], k32_p28_p04);
+ v[3] = k_madd_epi32(u[3], k32_p28_p04);
+ v[4] = k_madd_epi32(u[4], k32_p12_p20);
+ v[5] = k_madd_epi32(u[5], k32_p12_p20);
+ v[6] = k_madd_epi32(u[6], k32_p12_p20);
+ v[7] = k_madd_epi32(u[7], k32_p12_p20);
+ v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
+ v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
+ v[10] = k_madd_epi32(u[10], k32_m20_p12);
+ v[11] = k_madd_epi32(u[11], k32_m20_p12);
+ v[12] = k_madd_epi32(u[12], k32_m04_p28);
+ v[13] = k_madd_epi32(u[13], k32_m04_p28);
+ v[14] = k_madd_epi32(u[14], k32_m04_p28);
+ v[15] = k_madd_epi32(u[15], k32_m04_p28);
+
+ u[0] = k_packs_epi64(v[0], v[1]);
+ u[1] = k_packs_epi64(v[2], v[3]);
+ u[2] = k_packs_epi64(v[4], v[5]);
+ u[3] = k_packs_epi64(v[6], v[7]);
+ u[4] = k_packs_epi64(v[8], v[9]);
+ u[5] = k_packs_epi64(v[10], v[11]);
+ u[6] = k_packs_epi64(v[12], v[13]);
+ u[7] = k_packs_epi64(v[14], v[15]);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+
+ sign[0] = _mm_cmplt_epi32(u[0], kZero);
+ sign[1] = _mm_cmplt_epi32(u[1], kZero);
+ sign[2] = _mm_cmplt_epi32(u[2], kZero);
+ sign[3] = _mm_cmplt_epi32(u[3], kZero);
+ sign[4] = _mm_cmplt_epi32(u[4], kZero);
+ sign[5] = _mm_cmplt_epi32(u[5], kZero);
+ sign[6] = _mm_cmplt_epi32(u[6], kZero);
+ sign[7] = _mm_cmplt_epi32(u[7], kZero);
+
+ u[0] = _mm_sub_epi32(u[0], sign[0]);
+ u[1] = _mm_sub_epi32(u[1], sign[1]);
+ u[2] = _mm_sub_epi32(u[2], sign[2]);
+ u[3] = _mm_sub_epi32(u[3], sign[3]);
+ u[4] = _mm_sub_epi32(u[4], sign[4]);
+ u[5] = _mm_sub_epi32(u[5], sign[5]);
+ u[6] = _mm_sub_epi32(u[6], sign[6]);
+ u[7] = _mm_sub_epi32(u[7], sign[7]);
+
+ u[0] = _mm_add_epi32(u[0], K32One);
+ u[1] = _mm_add_epi32(u[1], K32One);
+ u[2] = _mm_add_epi32(u[2], K32One);
+ u[3] = _mm_add_epi32(u[3], K32One);
+ u[4] = _mm_add_epi32(u[4], K32One);
+ u[5] = _mm_add_epi32(u[5], K32One);
+ u[6] = _mm_add_epi32(u[6], K32One);
+ u[7] = _mm_add_epi32(u[7], K32One);
+
+ u[0] = _mm_srai_epi32(u[0], 2);
+ u[1] = _mm_srai_epi32(u[1], 2);
+ u[2] = _mm_srai_epi32(u[2], 2);
+ u[3] = _mm_srai_epi32(u[3], 2);
+ u[4] = _mm_srai_epi32(u[4], 2);
+ u[5] = _mm_srai_epi32(u[5], 2);
+ u[6] = _mm_srai_epi32(u[6], 2);
+ u[7] = _mm_srai_epi32(u[7], 2);
+
+ out[ 4] = _mm_packs_epi32(u[0], u[1]);
+ out[20] = _mm_packs_epi32(u[2], u[3]);
+ out[12] = _mm_packs_epi32(u[4], u[5]);
+ out[28] = _mm_packs_epi32(u[6], u[7]);
+ }
+ {
+ lstep3[16] = _mm_add_epi32(lstep2[18], lstep1[16]);
+ lstep3[17] = _mm_add_epi32(lstep2[19], lstep1[17]);
+ lstep3[18] = _mm_sub_epi32(lstep1[16], lstep2[18]);
+ lstep3[19] = _mm_sub_epi32(lstep1[17], lstep2[19]);
+ lstep3[20] = _mm_sub_epi32(lstep1[22], lstep2[20]);
+ lstep3[21] = _mm_sub_epi32(lstep1[23], lstep2[21]);
+ lstep3[22] = _mm_add_epi32(lstep2[20], lstep1[22]);
+ lstep3[23] = _mm_add_epi32(lstep2[21], lstep1[23]);
+ lstep3[24] = _mm_add_epi32(lstep2[26], lstep1[24]);
+ lstep3[25] = _mm_add_epi32(lstep2[27], lstep1[25]);
+ lstep3[26] = _mm_sub_epi32(lstep1[24], lstep2[26]);
+ lstep3[27] = _mm_sub_epi32(lstep1[25], lstep2[27]);
+ lstep3[28] = _mm_sub_epi32(lstep1[30], lstep2[28]);
+ lstep3[29] = _mm_sub_epi32(lstep1[31], lstep2[29]);
+ lstep3[30] = _mm_add_epi32(lstep2[28], lstep1[30]);
+ lstep3[31] = _mm_add_epi32(lstep2[29], lstep1[31]);
+ }
+ {
+ const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
+ const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64);
+ const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
+      const __m128i k32_m12_m20 = pair_set_epi32(-cospi_12_64, -cospi_20_64);
+ const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
+ const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);
+
+ u[ 0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]);
+ u[ 1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]);
+ u[ 2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]);
+ u[ 3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]);
+ u[ 4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]);
+ u[ 5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]);
+ u[ 6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]);
+ u[ 7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]);
+ u[ 8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]);
+ u[ 9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]);
+ u[10] = _mm_unpacklo_epi32(lstep2[43], lstep2[53]);
+ u[11] = _mm_unpackhi_epi32(lstep2[43], lstep2[53]);
+ u[12] = _mm_unpacklo_epi32(lstep2[44], lstep2[50]);
+ u[13] = _mm_unpackhi_epi32(lstep2[44], lstep2[50]);
+ u[14] = _mm_unpacklo_epi32(lstep2[45], lstep2[51]);
+ u[15] = _mm_unpackhi_epi32(lstep2[45], lstep2[51]);
+
+ v[ 0] = k_madd_epi32(u[ 0], k32_m04_p28);
+ v[ 1] = k_madd_epi32(u[ 1], k32_m04_p28);
+ v[ 2] = k_madd_epi32(u[ 2], k32_m04_p28);
+ v[ 3] = k_madd_epi32(u[ 3], k32_m04_p28);
+ v[ 4] = k_madd_epi32(u[ 4], k32_m28_m04);
+ v[ 5] = k_madd_epi32(u[ 5], k32_m28_m04);
+ v[ 6] = k_madd_epi32(u[ 6], k32_m28_m04);
+ v[ 7] = k_madd_epi32(u[ 7], k32_m28_m04);
+ v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
+ v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
+ v[10] = k_madd_epi32(u[10], k32_m20_p12);
+ v[11] = k_madd_epi32(u[11], k32_m20_p12);
+ v[12] = k_madd_epi32(u[12], k32_m12_m20);
+ v[13] = k_madd_epi32(u[13], k32_m12_m20);
+ v[14] = k_madd_epi32(u[14], k32_m12_m20);
+ v[15] = k_madd_epi32(u[15], k32_m12_m20);
+ v[16] = k_madd_epi32(u[12], k32_m20_p12);
+ v[17] = k_madd_epi32(u[13], k32_m20_p12);
+ v[18] = k_madd_epi32(u[14], k32_m20_p12);
+ v[19] = k_madd_epi32(u[15], k32_m20_p12);
+ v[20] = k_madd_epi32(u[ 8], k32_p12_p20);
+ v[21] = k_madd_epi32(u[ 9], k32_p12_p20);
+ v[22] = k_madd_epi32(u[10], k32_p12_p20);
+ v[23] = k_madd_epi32(u[11], k32_p12_p20);
+ v[24] = k_madd_epi32(u[ 4], k32_m04_p28);
+ v[25] = k_madd_epi32(u[ 5], k32_m04_p28);
+ v[26] = k_madd_epi32(u[ 6], k32_m04_p28);
+ v[27] = k_madd_epi32(u[ 7], k32_m04_p28);
+ v[28] = k_madd_epi32(u[ 0], k32_p28_p04);
+ v[29] = k_madd_epi32(u[ 1], k32_p28_p04);
+ v[30] = k_madd_epi32(u[ 2], k32_p28_p04);
+ v[31] = k_madd_epi32(u[ 3], k32_p28_p04);
+
+ u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+ u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+ u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+ u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+ u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+ u[ 5] = k_packs_epi64(v[10], v[11]);
+ u[ 6] = k_packs_epi64(v[12], v[13]);
+ u[ 7] = k_packs_epi64(v[14], v[15]);
+ u[ 8] = k_packs_epi64(v[16], v[17]);
+ u[ 9] = k_packs_epi64(v[18], v[19]);
+ u[10] = k_packs_epi64(v[20], v[21]);
+ u[11] = k_packs_epi64(v[22], v[23]);
+ u[12] = k_packs_epi64(v[24], v[25]);
+ u[13] = k_packs_epi64(v[26], v[27]);
+ u[14] = k_packs_epi64(v[28], v[29]);
+ u[15] = k_packs_epi64(v[30], v[31]);
+
+ v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+ v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+ v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+ v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+ v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+ v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+ v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+ v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+ v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+ v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+ v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+ lstep3[34] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+ lstep3[35] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+ lstep3[36] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+ lstep3[37] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+ lstep3[42] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+ lstep3[43] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+ lstep3[44] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+ lstep3[45] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+ lstep3[50] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+ lstep3[51] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+ lstep3[52] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+ lstep3[53] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+ lstep3[58] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+ lstep3[59] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+ lstep3[60] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+ lstep3[61] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+ }
+ // stage 7
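+    // Stage 7 finalizes output rows 2, 6, 10, 14, 18, 22, 26 and 30.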
+ {
+ const __m128i k32_p30_p02 = pair_set_epi32(cospi_30_64, cospi_2_64);
+ const __m128i k32_p14_p18 = pair_set_epi32(cospi_14_64, cospi_18_64);
+ const __m128i k32_p22_p10 = pair_set_epi32(cospi_22_64, cospi_10_64);
+ const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64, cospi_26_64);
+ const __m128i k32_m26_p06 = pair_set_epi32(-cospi_26_64, cospi_6_64);
+ const __m128i k32_m10_p22 = pair_set_epi32(-cospi_10_64, cospi_22_64);
+ const __m128i k32_m18_p14 = pair_set_epi32(-cospi_18_64, cospi_14_64);
+ const __m128i k32_m02_p30 = pair_set_epi32(-cospi_2_64, cospi_30_64);
+
+ u[ 0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]);
+ u[ 1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]);
+ u[ 2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]);
+ u[ 3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]);
+ u[ 4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]);
+ u[ 5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]);
+ u[ 6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]);
+ u[ 7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]);
+ u[ 8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]);
+ u[ 9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]);
+ u[10] = _mm_unpacklo_epi32(lstep3[21], lstep3[27]);
+ u[11] = _mm_unpackhi_epi32(lstep3[21], lstep3[27]);
+ u[12] = _mm_unpacklo_epi32(lstep3[22], lstep3[24]);
+ u[13] = _mm_unpackhi_epi32(lstep3[22], lstep3[24]);
+ u[14] = _mm_unpacklo_epi32(lstep3[23], lstep3[25]);
+ u[15] = _mm_unpackhi_epi32(lstep3[23], lstep3[25]);
+
+ v[ 0] = k_madd_epi32(u[ 0], k32_p30_p02);
+ v[ 1] = k_madd_epi32(u[ 1], k32_p30_p02);
+ v[ 2] = k_madd_epi32(u[ 2], k32_p30_p02);
+ v[ 3] = k_madd_epi32(u[ 3], k32_p30_p02);
+ v[ 4] = k_madd_epi32(u[ 4], k32_p14_p18);
+ v[ 5] = k_madd_epi32(u[ 5], k32_p14_p18);
+ v[ 6] = k_madd_epi32(u[ 6], k32_p14_p18);
+ v[ 7] = k_madd_epi32(u[ 7], k32_p14_p18);
+ v[ 8] = k_madd_epi32(u[ 8], k32_p22_p10);
+ v[ 9] = k_madd_epi32(u[ 9], k32_p22_p10);
+ v[10] = k_madd_epi32(u[10], k32_p22_p10);
+ v[11] = k_madd_epi32(u[11], k32_p22_p10);
+ v[12] = k_madd_epi32(u[12], k32_p06_p26);
+ v[13] = k_madd_epi32(u[13], k32_p06_p26);
+ v[14] = k_madd_epi32(u[14], k32_p06_p26);
+ v[15] = k_madd_epi32(u[15], k32_p06_p26);
+ v[16] = k_madd_epi32(u[12], k32_m26_p06);
+ v[17] = k_madd_epi32(u[13], k32_m26_p06);
+ v[18] = k_madd_epi32(u[14], k32_m26_p06);
+ v[19] = k_madd_epi32(u[15], k32_m26_p06);
+ v[20] = k_madd_epi32(u[ 8], k32_m10_p22);
+ v[21] = k_madd_epi32(u[ 9], k32_m10_p22);
+ v[22] = k_madd_epi32(u[10], k32_m10_p22);
+ v[23] = k_madd_epi32(u[11], k32_m10_p22);
+ v[24] = k_madd_epi32(u[ 4], k32_m18_p14);
+ v[25] = k_madd_epi32(u[ 5], k32_m18_p14);
+ v[26] = k_madd_epi32(u[ 6], k32_m18_p14);
+ v[27] = k_madd_epi32(u[ 7], k32_m18_p14);
+ v[28] = k_madd_epi32(u[ 0], k32_m02_p30);
+ v[29] = k_madd_epi32(u[ 1], k32_m02_p30);
+ v[30] = k_madd_epi32(u[ 2], k32_m02_p30);
+ v[31] = k_madd_epi32(u[ 3], k32_m02_p30);
+
+ u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+ u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+ u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+ u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+ u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+ u[ 5] = k_packs_epi64(v[10], v[11]);
+ u[ 6] = k_packs_epi64(v[12], v[13]);
+ u[ 7] = k_packs_epi64(v[14], v[15]);
+ u[ 8] = k_packs_epi64(v[16], v[17]);
+ u[ 9] = k_packs_epi64(v[18], v[19]);
+ u[10] = k_packs_epi64(v[20], v[21]);
+ u[11] = k_packs_epi64(v[22], v[23]);
+ u[12] = k_packs_epi64(v[24], v[25]);
+ u[13] = k_packs_epi64(v[26], v[27]);
+ u[14] = k_packs_epi64(v[28], v[29]);
+ u[15] = k_packs_epi64(v[30], v[31]);
+
+ v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+ v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+ v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+ v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+ v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+ v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+ v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+ v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+ v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+ v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+ v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+ u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+ u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+ u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+ u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+ u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+ u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+ u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+ u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+ u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+ u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+ u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+ u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+ u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+ u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+ u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+ u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+
+ v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
+ v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
+ v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
+ v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
+ v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
+ v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
+ v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
+ v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
+ v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
+ v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+ v[10] = _mm_cmplt_epi32(u[10], kZero);
+ v[11] = _mm_cmplt_epi32(u[11], kZero);
+ v[12] = _mm_cmplt_epi32(u[12], kZero);
+ v[13] = _mm_cmplt_epi32(u[13], kZero);
+ v[14] = _mm_cmplt_epi32(u[14], kZero);
+ v[15] = _mm_cmplt_epi32(u[15], kZero);
+
+ u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
+ u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
+ u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
+ u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
+ u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
+ u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
+ u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
+ u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
+ u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
+ u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+ u[10] = _mm_sub_epi32(u[10], v[10]);
+ u[11] = _mm_sub_epi32(u[11], v[11]);
+ u[12] = _mm_sub_epi32(u[12], v[12]);
+ u[13] = _mm_sub_epi32(u[13], v[13]);
+ u[14] = _mm_sub_epi32(u[14], v[14]);
+ u[15] = _mm_sub_epi32(u[15], v[15]);
+
+ v[ 0] = _mm_add_epi32(u[ 0], K32One);
+ v[ 1] = _mm_add_epi32(u[ 1], K32One);
+ v[ 2] = _mm_add_epi32(u[ 2], K32One);
+ v[ 3] = _mm_add_epi32(u[ 3], K32One);
+ v[ 4] = _mm_add_epi32(u[ 4], K32One);
+ v[ 5] = _mm_add_epi32(u[ 5], K32One);
+ v[ 6] = _mm_add_epi32(u[ 6], K32One);
+ v[ 7] = _mm_add_epi32(u[ 7], K32One);
+ v[ 8] = _mm_add_epi32(u[ 8], K32One);
+ v[ 9] = _mm_add_epi32(u[ 9], K32One);
+ v[10] = _mm_add_epi32(u[10], K32One);
+ v[11] = _mm_add_epi32(u[11], K32One);
+ v[12] = _mm_add_epi32(u[12], K32One);
+ v[13] = _mm_add_epi32(u[13], K32One);
+ v[14] = _mm_add_epi32(u[14], K32One);
+ v[15] = _mm_add_epi32(u[15], K32One);
+
+ u[ 0] = _mm_srai_epi32(v[ 0], 2);
+ u[ 1] = _mm_srai_epi32(v[ 1], 2);
+ u[ 2] = _mm_srai_epi32(v[ 2], 2);
+ u[ 3] = _mm_srai_epi32(v[ 3], 2);
+ u[ 4] = _mm_srai_epi32(v[ 4], 2);
+ u[ 5] = _mm_srai_epi32(v[ 5], 2);
+ u[ 6] = _mm_srai_epi32(v[ 6], 2);
+ u[ 7] = _mm_srai_epi32(v[ 7], 2);
+ u[ 8] = _mm_srai_epi32(v[ 8], 2);
+ u[ 9] = _mm_srai_epi32(v[ 9], 2);
+ u[10] = _mm_srai_epi32(v[10], 2);
+ u[11] = _mm_srai_epi32(v[11], 2);
+ u[12] = _mm_srai_epi32(v[12], 2);
+ u[13] = _mm_srai_epi32(v[13], 2);
+ u[14] = _mm_srai_epi32(v[14], 2);
+ u[15] = _mm_srai_epi32(v[15], 2);
+
+ out[ 2] = _mm_packs_epi32(u[0], u[1]);
+ out[18] = _mm_packs_epi32(u[2], u[3]);
+ out[10] = _mm_packs_epi32(u[4], u[5]);
+ out[26] = _mm_packs_epi32(u[6], u[7]);
+ out[ 6] = _mm_packs_epi32(u[8], u[9]);
+ out[22] = _mm_packs_epi32(u[10], u[11]);
+ out[14] = _mm_packs_epi32(u[12], u[13]);
+ out[30] = _mm_packs_epi32(u[14], u[15]);
+ }
+ {
+ lstep1[32] = _mm_add_epi32(lstep3[34], lstep2[32]);
+ lstep1[33] = _mm_add_epi32(lstep3[35], lstep2[33]);
+ lstep1[34] = _mm_sub_epi32(lstep2[32], lstep3[34]);
+ lstep1[35] = _mm_sub_epi32(lstep2[33], lstep3[35]);
+ lstep1[36] = _mm_sub_epi32(lstep2[38], lstep3[36]);
+ lstep1[37] = _mm_sub_epi32(lstep2[39], lstep3[37]);
+ lstep1[38] = _mm_add_epi32(lstep3[36], lstep2[38]);
+ lstep1[39] = _mm_add_epi32(lstep3[37], lstep2[39]);
+ lstep1[40] = _mm_add_epi32(lstep3[42], lstep2[40]);
+ lstep1[41] = _mm_add_epi32(lstep3[43], lstep2[41]);
+ lstep1[42] = _mm_sub_epi32(lstep2[40], lstep3[42]);
+ lstep1[43] = _mm_sub_epi32(lstep2[41], lstep3[43]);
+ lstep1[44] = _mm_sub_epi32(lstep2[46], lstep3[44]);
+ lstep1[45] = _mm_sub_epi32(lstep2[47], lstep3[45]);
+ lstep1[46] = _mm_add_epi32(lstep3[44], lstep2[46]);
+ lstep1[47] = _mm_add_epi32(lstep3[45], lstep2[47]);
+ lstep1[48] = _mm_add_epi32(lstep3[50], lstep2[48]);
+ lstep1[49] = _mm_add_epi32(lstep3[51], lstep2[49]);
+ lstep1[50] = _mm_sub_epi32(lstep2[48], lstep3[50]);
+ lstep1[51] = _mm_sub_epi32(lstep2[49], lstep3[51]);
+ lstep1[52] = _mm_sub_epi32(lstep2[54], lstep3[52]);
+ lstep1[53] = _mm_sub_epi32(lstep2[55], lstep3[53]);
+ lstep1[54] = _mm_add_epi32(lstep3[52], lstep2[54]);
+ lstep1[55] = _mm_add_epi32(lstep3[53], lstep2[55]);
+ lstep1[56] = _mm_add_epi32(lstep3[58], lstep2[56]);
+ lstep1[57] = _mm_add_epi32(lstep3[59], lstep2[57]);
+ lstep1[58] = _mm_sub_epi32(lstep2[56], lstep3[58]);
+ lstep1[59] = _mm_sub_epi32(lstep2[57], lstep3[59]);
+ lstep1[60] = _mm_sub_epi32(lstep2[62], lstep3[60]);
+ lstep1[61] = _mm_sub_epi32(lstep2[63], lstep3[61]);
+ lstep1[62] = _mm_add_epi32(lstep3[60], lstep2[62]);
+ lstep1[63] = _mm_add_epi32(lstep3[61], lstep2[63]);
+ }
+ // stage 8
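+    // Stage 8 finalizes the odd output rows 1, 3, 5, ..., 31.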
+ {
+ const __m128i k32_p31_p01 = pair_set_epi32(cospi_31_64, cospi_1_64);
+ const __m128i k32_p15_p17 = pair_set_epi32(cospi_15_64, cospi_17_64);
+ const __m128i k32_p23_p09 = pair_set_epi32(cospi_23_64, cospi_9_64);
+ const __m128i k32_p07_p25 = pair_set_epi32(cospi_7_64, cospi_25_64);
+ const __m128i k32_m25_p07 = pair_set_epi32(-cospi_25_64, cospi_7_64);
+ const __m128i k32_m09_p23 = pair_set_epi32(-cospi_9_64, cospi_23_64);
+ const __m128i k32_m17_p15 = pair_set_epi32(-cospi_17_64, cospi_15_64);
+ const __m128i k32_m01_p31 = pair_set_epi32(-cospi_1_64, cospi_31_64);
+
+ u[ 0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]);
+ u[ 1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]);
+ u[ 2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]);
+ u[ 3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]);
+ u[ 4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]);
+ u[ 5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]);
+ u[ 6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]);
+ u[ 7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]);
+ u[ 8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]);
+ u[ 9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]);
+ u[10] = _mm_unpacklo_epi32(lstep1[37], lstep1[59]);
+ u[11] = _mm_unpackhi_epi32(lstep1[37], lstep1[59]);
+ u[12] = _mm_unpacklo_epi32(lstep1[38], lstep1[56]);
+ u[13] = _mm_unpackhi_epi32(lstep1[38], lstep1[56]);
+ u[14] = _mm_unpacklo_epi32(lstep1[39], lstep1[57]);
+ u[15] = _mm_unpackhi_epi32(lstep1[39], lstep1[57]);
+
+ v[ 0] = k_madd_epi32(u[ 0], k32_p31_p01);
+ v[ 1] = k_madd_epi32(u[ 1], k32_p31_p01);
+ v[ 2] = k_madd_epi32(u[ 2], k32_p31_p01);
+ v[ 3] = k_madd_epi32(u[ 3], k32_p31_p01);
+ v[ 4] = k_madd_epi32(u[ 4], k32_p15_p17);
+ v[ 5] = k_madd_epi32(u[ 5], k32_p15_p17);
+ v[ 6] = k_madd_epi32(u[ 6], k32_p15_p17);
+ v[ 7] = k_madd_epi32(u[ 7], k32_p15_p17);
+ v[ 8] = k_madd_epi32(u[ 8], k32_p23_p09);
+ v[ 9] = k_madd_epi32(u[ 9], k32_p23_p09);
+ v[10] = k_madd_epi32(u[10], k32_p23_p09);
+ v[11] = k_madd_epi32(u[11], k32_p23_p09);
+ v[12] = k_madd_epi32(u[12], k32_p07_p25);
+ v[13] = k_madd_epi32(u[13], k32_p07_p25);
+ v[14] = k_madd_epi32(u[14], k32_p07_p25);
+ v[15] = k_madd_epi32(u[15], k32_p07_p25);
+ v[16] = k_madd_epi32(u[12], k32_m25_p07);
+ v[17] = k_madd_epi32(u[13], k32_m25_p07);
+ v[18] = k_madd_epi32(u[14], k32_m25_p07);
+ v[19] = k_madd_epi32(u[15], k32_m25_p07);
+ v[20] = k_madd_epi32(u[ 8], k32_m09_p23);
+ v[21] = k_madd_epi32(u[ 9], k32_m09_p23);
+ v[22] = k_madd_epi32(u[10], k32_m09_p23);
+ v[23] = k_madd_epi32(u[11], k32_m09_p23);
+ v[24] = k_madd_epi32(u[ 4], k32_m17_p15);
+ v[25] = k_madd_epi32(u[ 5], k32_m17_p15);
+ v[26] = k_madd_epi32(u[ 6], k32_m17_p15);
+ v[27] = k_madd_epi32(u[ 7], k32_m17_p15);
+ v[28] = k_madd_epi32(u[ 0], k32_m01_p31);
+ v[29] = k_madd_epi32(u[ 1], k32_m01_p31);
+ v[30] = k_madd_epi32(u[ 2], k32_m01_p31);
+ v[31] = k_madd_epi32(u[ 3], k32_m01_p31);
+
+ u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+ u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+ u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+ u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+ u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+ u[ 5] = k_packs_epi64(v[10], v[11]);
+ u[ 6] = k_packs_epi64(v[12], v[13]);
+ u[ 7] = k_packs_epi64(v[14], v[15]);
+ u[ 8] = k_packs_epi64(v[16], v[17]);
+ u[ 9] = k_packs_epi64(v[18], v[19]);
+ u[10] = k_packs_epi64(v[20], v[21]);
+ u[11] = k_packs_epi64(v[22], v[23]);
+ u[12] = k_packs_epi64(v[24], v[25]);
+ u[13] = k_packs_epi64(v[26], v[27]);
+ u[14] = k_packs_epi64(v[28], v[29]);
+ u[15] = k_packs_epi64(v[30], v[31]);
+
+ v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+ v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+ v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+ v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+ v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+ v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+ v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+ v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+ v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+ v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+ v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+ u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+ u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+ u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+ u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+ u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+ u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+ u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+ u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+ u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+ u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+ u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+ u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+ u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+ u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+ u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+ u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+
+ v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
+ v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
+ v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
+ v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
+ v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
+ v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
+ v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
+ v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
+ v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
+ v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+ v[10] = _mm_cmplt_epi32(u[10], kZero);
+ v[11] = _mm_cmplt_epi32(u[11], kZero);
+ v[12] = _mm_cmplt_epi32(u[12], kZero);
+ v[13] = _mm_cmplt_epi32(u[13], kZero);
+ v[14] = _mm_cmplt_epi32(u[14], kZero);
+ v[15] = _mm_cmplt_epi32(u[15], kZero);
+
+ u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
+ u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
+ u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
+ u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
+ u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
+ u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
+ u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
+ u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
+ u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
+ u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+ u[10] = _mm_sub_epi32(u[10], v[10]);
+ u[11] = _mm_sub_epi32(u[11], v[11]);
+ u[12] = _mm_sub_epi32(u[12], v[12]);
+ u[13] = _mm_sub_epi32(u[13], v[13]);
+ u[14] = _mm_sub_epi32(u[14], v[14]);
+ u[15] = _mm_sub_epi32(u[15], v[15]);
+
+ v[0] = _mm_add_epi32(u[0], K32One);
+ v[1] = _mm_add_epi32(u[1], K32One);
+ v[2] = _mm_add_epi32(u[2], K32One);
+ v[3] = _mm_add_epi32(u[3], K32One);
+ v[4] = _mm_add_epi32(u[4], K32One);
+ v[5] = _mm_add_epi32(u[5], K32One);
+ v[6] = _mm_add_epi32(u[6], K32One);
+ v[7] = _mm_add_epi32(u[7], K32One);
+ v[8] = _mm_add_epi32(u[8], K32One);
+ v[9] = _mm_add_epi32(u[9], K32One);
+ v[10] = _mm_add_epi32(u[10], K32One);
+ v[11] = _mm_add_epi32(u[11], K32One);
+ v[12] = _mm_add_epi32(u[12], K32One);
+ v[13] = _mm_add_epi32(u[13], K32One);
+ v[14] = _mm_add_epi32(u[14], K32One);
+ v[15] = _mm_add_epi32(u[15], K32One);
+
+ u[0] = _mm_srai_epi32(v[0], 2);
+ u[1] = _mm_srai_epi32(v[1], 2);
+ u[2] = _mm_srai_epi32(v[2], 2);
+ u[3] = _mm_srai_epi32(v[3], 2);
+ u[4] = _mm_srai_epi32(v[4], 2);
+ u[5] = _mm_srai_epi32(v[5], 2);
+ u[6] = _mm_srai_epi32(v[6], 2);
+ u[7] = _mm_srai_epi32(v[7], 2);
+ u[8] = _mm_srai_epi32(v[8], 2);
+ u[9] = _mm_srai_epi32(v[9], 2);
+ u[10] = _mm_srai_epi32(v[10], 2);
+ u[11] = _mm_srai_epi32(v[11], 2);
+ u[12] = _mm_srai_epi32(v[12], 2);
+ u[13] = _mm_srai_epi32(v[13], 2);
+ u[14] = _mm_srai_epi32(v[14], 2);
+ u[15] = _mm_srai_epi32(v[15], 2);
+
+ out[ 1] = _mm_packs_epi32(u[0], u[1]);
+ out[17] = _mm_packs_epi32(u[2], u[3]);
+ out[ 9] = _mm_packs_epi32(u[4], u[5]);
+ out[25] = _mm_packs_epi32(u[6], u[7]);
+ out[ 7] = _mm_packs_epi32(u[8], u[9]);
+ out[23] = _mm_packs_epi32(u[10], u[11]);
+ out[15] = _mm_packs_epi32(u[12], u[13]);
+ out[31] = _mm_packs_epi32(u[14], u[15]);
+ }
+ {
+ const __m128i k32_p27_p05 = pair_set_epi32(cospi_27_64, cospi_5_64);
+ const __m128i k32_p11_p21 = pair_set_epi32(cospi_11_64, cospi_21_64);
+ const __m128i k32_p19_p13 = pair_set_epi32(cospi_19_64, cospi_13_64);
+ const __m128i k32_p03_p29 = pair_set_epi32(cospi_3_64, cospi_29_64);
+ const __m128i k32_m29_p03 = pair_set_epi32(-cospi_29_64, cospi_3_64);
+ const __m128i k32_m13_p19 = pair_set_epi32(-cospi_13_64, cospi_19_64);
+ const __m128i k32_m21_p11 = pair_set_epi32(-cospi_21_64, cospi_11_64);
+ const __m128i k32_m05_p27 = pair_set_epi32(-cospi_5_64, cospi_27_64);
+
+ u[ 0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]);
+ u[ 1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]);
+ u[ 2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]);
+ u[ 3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]);
+ u[ 4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]);
+ u[ 5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]);
+ u[ 6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]);
+ u[ 7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]);
+ u[ 8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]);
+ u[ 9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]);
+ u[10] = _mm_unpacklo_epi32(lstep1[45], lstep1[51]);
+ u[11] = _mm_unpackhi_epi32(lstep1[45], lstep1[51]);
+ u[12] = _mm_unpacklo_epi32(lstep1[46], lstep1[48]);
+ u[13] = _mm_unpackhi_epi32(lstep1[46], lstep1[48]);
+ u[14] = _mm_unpacklo_epi32(lstep1[47], lstep1[49]);
+ u[15] = _mm_unpackhi_epi32(lstep1[47], lstep1[49]);
+
+ v[ 0] = k_madd_epi32(u[ 0], k32_p27_p05);
+ v[ 1] = k_madd_epi32(u[ 1], k32_p27_p05);
+ v[ 2] = k_madd_epi32(u[ 2], k32_p27_p05);
+ v[ 3] = k_madd_epi32(u[ 3], k32_p27_p05);
+ v[ 4] = k_madd_epi32(u[ 4], k32_p11_p21);
+ v[ 5] = k_madd_epi32(u[ 5], k32_p11_p21);
+ v[ 6] = k_madd_epi32(u[ 6], k32_p11_p21);
+ v[ 7] = k_madd_epi32(u[ 7], k32_p11_p21);
+ v[ 8] = k_madd_epi32(u[ 8], k32_p19_p13);
+ v[ 9] = k_madd_epi32(u[ 9], k32_p19_p13);
+ v[10] = k_madd_epi32(u[10], k32_p19_p13);
+ v[11] = k_madd_epi32(u[11], k32_p19_p13);
+ v[12] = k_madd_epi32(u[12], k32_p03_p29);
+ v[13] = k_madd_epi32(u[13], k32_p03_p29);
+ v[14] = k_madd_epi32(u[14], k32_p03_p29);
+ v[15] = k_madd_epi32(u[15], k32_p03_p29);
+ v[16] = k_madd_epi32(u[12], k32_m29_p03);
+ v[17] = k_madd_epi32(u[13], k32_m29_p03);
+ v[18] = k_madd_epi32(u[14], k32_m29_p03);
+ v[19] = k_madd_epi32(u[15], k32_m29_p03);
+ v[20] = k_madd_epi32(u[ 8], k32_m13_p19);
+ v[21] = k_madd_epi32(u[ 9], k32_m13_p19);
+ v[22] = k_madd_epi32(u[10], k32_m13_p19);
+ v[23] = k_madd_epi32(u[11], k32_m13_p19);
+ v[24] = k_madd_epi32(u[ 4], k32_m21_p11);
+ v[25] = k_madd_epi32(u[ 5], k32_m21_p11);
+ v[26] = k_madd_epi32(u[ 6], k32_m21_p11);
+ v[27] = k_madd_epi32(u[ 7], k32_m21_p11);
+ v[28] = k_madd_epi32(u[ 0], k32_m05_p27);
+ v[29] = k_madd_epi32(u[ 1], k32_m05_p27);
+ v[30] = k_madd_epi32(u[ 2], k32_m05_p27);
+ v[31] = k_madd_epi32(u[ 3], k32_m05_p27);
+
+ u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+ u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+ u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+ u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+ u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+ u[ 5] = k_packs_epi64(v[10], v[11]);
+ u[ 6] = k_packs_epi64(v[12], v[13]);
+ u[ 7] = k_packs_epi64(v[14], v[15]);
+ u[ 8] = k_packs_epi64(v[16], v[17]);
+ u[ 9] = k_packs_epi64(v[18], v[19]);
+ u[10] = k_packs_epi64(v[20], v[21]);
+ u[11] = k_packs_epi64(v[22], v[23]);
+ u[12] = k_packs_epi64(v[24], v[25]);
+ u[13] = k_packs_epi64(v[26], v[27]);
+ u[14] = k_packs_epi64(v[28], v[29]);
+ u[15] = k_packs_epi64(v[30], v[31]);
+
+ v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+ v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+ v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+ v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+ v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+ v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+ v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+ v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+ v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+ v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+ v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+ u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+ u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+ u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+ u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+ u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+ u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+ u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+ u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+ u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+ u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+ u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+ u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+ u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+ u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+ u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+ u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+
+ v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
+ v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
+ v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
+ v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
+ v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
+ v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
+ v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
+ v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
+ v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
+ v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+ v[10] = _mm_cmplt_epi32(u[10], kZero);
+ v[11] = _mm_cmplt_epi32(u[11], kZero);
+ v[12] = _mm_cmplt_epi32(u[12], kZero);
+ v[13] = _mm_cmplt_epi32(u[13], kZero);
+ v[14] = _mm_cmplt_epi32(u[14], kZero);
+ v[15] = _mm_cmplt_epi32(u[15], kZero);
+
+ u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
+ u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
+ u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
+ u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
+ u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
+ u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
+ u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
+ u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
+ u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
+ u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+ u[10] = _mm_sub_epi32(u[10], v[10]);
+ u[11] = _mm_sub_epi32(u[11], v[11]);
+ u[12] = _mm_sub_epi32(u[12], v[12]);
+ u[13] = _mm_sub_epi32(u[13], v[13]);
+ u[14] = _mm_sub_epi32(u[14], v[14]);
+ u[15] = _mm_sub_epi32(u[15], v[15]);
+
+ v[0] = _mm_add_epi32(u[0], K32One);
+ v[1] = _mm_add_epi32(u[1], K32One);
+ v[2] = _mm_add_epi32(u[2], K32One);
+ v[3] = _mm_add_epi32(u[3], K32One);
+ v[4] = _mm_add_epi32(u[4], K32One);
+ v[5] = _mm_add_epi32(u[5], K32One);
+ v[6] = _mm_add_epi32(u[6], K32One);
+ v[7] = _mm_add_epi32(u[7], K32One);
+ v[8] = _mm_add_epi32(u[8], K32One);
+ v[9] = _mm_add_epi32(u[9], K32One);
+ v[10] = _mm_add_epi32(u[10], K32One);
+ v[11] = _mm_add_epi32(u[11], K32One);
+ v[12] = _mm_add_epi32(u[12], K32One);
+ v[13] = _mm_add_epi32(u[13], K32One);
+ v[14] = _mm_add_epi32(u[14], K32One);
+ v[15] = _mm_add_epi32(u[15], K32One);
+
+ u[0] = _mm_srai_epi32(v[0], 2);
+ u[1] = _mm_srai_epi32(v[1], 2);
+ u[2] = _mm_srai_epi32(v[2], 2);
+ u[3] = _mm_srai_epi32(v[3], 2);
+ u[4] = _mm_srai_epi32(v[4], 2);
+ u[5] = _mm_srai_epi32(v[5], 2);
+ u[6] = _mm_srai_epi32(v[6], 2);
+ u[7] = _mm_srai_epi32(v[7], 2);
+ u[8] = _mm_srai_epi32(v[8], 2);
+ u[9] = _mm_srai_epi32(v[9], 2);
+ u[10] = _mm_srai_epi32(v[10], 2);
+ u[11] = _mm_srai_epi32(v[11], 2);
+ u[12] = _mm_srai_epi32(v[12], 2);
+ u[13] = _mm_srai_epi32(v[13], 2);
+ u[14] = _mm_srai_epi32(v[14], 2);
+ u[15] = _mm_srai_epi32(v[15], 2);
+
+ out[ 5] = _mm_packs_epi32(u[0], u[1]);
+ out[21] = _mm_packs_epi32(u[2], u[3]);
+ out[13] = _mm_packs_epi32(u[4], u[5]);
+ out[29] = _mm_packs_epi32(u[6], u[7]);
+ out[ 3] = _mm_packs_epi32(u[8], u[9]);
+ out[19] = _mm_packs_epi32(u[10], u[11]);
+ out[11] = _mm_packs_epi32(u[12], u[13]);
+ out[27] = _mm_packs_epi32(u[14], u[15]);
+ }
+ }
+#endif
+ // Transpose the results, do it as four 8x8 transposes.
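+  // Each __m128i in out[] holds eight int16 values, so the 32x8 block of
+  // results is transposed as four independent 8x8 tiles; each tile is
+  // written to the output with a 32-element row stride.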
+ {
+ int transpose_block;
+ int16_t *output;
+ if (0 == pass) {
+ output = &intermediate[column_start * 32];
+ } else {
+ output = &output_org[column_start * 32];
+ }
+ for (transpose_block = 0; transpose_block < 4; ++transpose_block) {
+ __m128i *this_out = &out[8 * transpose_block];
+ // 00 01 02 03 04 05 06 07
+ // 10 11 12 13 14 15 16 17
+ // 20 21 22 23 24 25 26 27
+ // 30 31 32 33 34 35 36 37
+ // 40 41 42 43 44 45 46 47
+ // 50 51 52 53 54 55 56 57
+ // 60 61 62 63 64 65 66 67
+ // 70 71 72 73 74 75 76 77
+ const __m128i tr0_0 = _mm_unpacklo_epi16(this_out[0], this_out[1]);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(this_out[2], this_out[3]);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(this_out[0], this_out[1]);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(this_out[2], this_out[3]);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(this_out[4], this_out[5]);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(this_out[6], this_out[7]);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(this_out[4], this_out[5]);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(this_out[6], this_out[7]);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ // 04 14 05 15 06 16 07 17
+ // 24 34 25 35 26 36 27 37
+ // 40 50 41 51 42 52 43 53
+ // 60 70 61 71 62 72 63 73
+      // 44 54 45 55 46 56 47 57
+ // 64 74 65 75 66 76 67 77
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+ // 00 10 20 30 01 11 21 31
+ // 40 50 60 70 41 51 61 71
+ // 02 12 22 32 03 13 23 33
+ // 42 52 62 72 43 53 63 73
+      // 04 14 24 34 05 15 25 35
+      // 44 54 64 74 45 55 65 75
+ // 06 16 26 36 07 17 27 37
+ // 46 56 66 76 47 57 67 77
+ __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+ __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+ __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+ __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+ __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+ __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+ __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+ __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ // 06 16 26 36 46 56 66 76
+ // 07 17 27 37 47 57 67 77
+ if (0 == pass) {
+ // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2;
+ // TODO(cd): see quality impact of only doing
+ // output[j] = (output[j] + 1) >> 2;
+ // which would remove the code between here ...
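+        // cmpgt yields -1 for strictly positive lanes, so the subtraction
+        // below adds 1 exactly when x > 0; with the +1 and the arithmetic
+        // shift this is the branchless form of the rounding quoted above.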
+ __m128i tr2_0_0 = _mm_cmpgt_epi16(tr2_0, kZero);
+ __m128i tr2_1_0 = _mm_cmpgt_epi16(tr2_1, kZero);
+ __m128i tr2_2_0 = _mm_cmpgt_epi16(tr2_2, kZero);
+ __m128i tr2_3_0 = _mm_cmpgt_epi16(tr2_3, kZero);
+ __m128i tr2_4_0 = _mm_cmpgt_epi16(tr2_4, kZero);
+ __m128i tr2_5_0 = _mm_cmpgt_epi16(tr2_5, kZero);
+ __m128i tr2_6_0 = _mm_cmpgt_epi16(tr2_6, kZero);
+ __m128i tr2_7_0 = _mm_cmpgt_epi16(tr2_7, kZero);
+ tr2_0 = _mm_sub_epi16(tr2_0, tr2_0_0);
+ tr2_1 = _mm_sub_epi16(tr2_1, tr2_1_0);
+ tr2_2 = _mm_sub_epi16(tr2_2, tr2_2_0);
+ tr2_3 = _mm_sub_epi16(tr2_3, tr2_3_0);
+ tr2_4 = _mm_sub_epi16(tr2_4, tr2_4_0);
+ tr2_5 = _mm_sub_epi16(tr2_5, tr2_5_0);
+ tr2_6 = _mm_sub_epi16(tr2_6, tr2_6_0);
+ tr2_7 = _mm_sub_epi16(tr2_7, tr2_7_0);
+ // ... and here.
+ // PS: also change code in vp9/encoder/vp9_dct.c
+ tr2_0 = _mm_add_epi16(tr2_0, kOne);
+ tr2_1 = _mm_add_epi16(tr2_1, kOne);
+ tr2_2 = _mm_add_epi16(tr2_2, kOne);
+ tr2_3 = _mm_add_epi16(tr2_3, kOne);
+ tr2_4 = _mm_add_epi16(tr2_4, kOne);
+ tr2_5 = _mm_add_epi16(tr2_5, kOne);
+ tr2_6 = _mm_add_epi16(tr2_6, kOne);
+ tr2_7 = _mm_add_epi16(tr2_7, kOne);
+ tr2_0 = _mm_srai_epi16(tr2_0, 2);
+ tr2_1 = _mm_srai_epi16(tr2_1, 2);
+ tr2_2 = _mm_srai_epi16(tr2_2, 2);
+ tr2_3 = _mm_srai_epi16(tr2_3, 2);
+ tr2_4 = _mm_srai_epi16(tr2_4, 2);
+ tr2_5 = _mm_srai_epi16(tr2_5, 2);
+ tr2_6 = _mm_srai_epi16(tr2_6, 2);
+ tr2_7 = _mm_srai_epi16(tr2_7, 2);
+ }
+      // Note: even though all these stores are aligned, using the aligned
+      // intrinsic makes the code slightly slower.
+ _mm_storeu_si128((__m128i *)(output + 0 * 32), tr2_0);
+ _mm_storeu_si128((__m128i *)(output + 1 * 32), tr2_1);
+ _mm_storeu_si128((__m128i *)(output + 2 * 32), tr2_2);
+ _mm_storeu_si128((__m128i *)(output + 3 * 32), tr2_3);
+ _mm_storeu_si128((__m128i *)(output + 4 * 32), tr2_4);
+ _mm_storeu_si128((__m128i *)(output + 5 * 32), tr2_5);
+ _mm_storeu_si128((__m128i *)(output + 6 * 32), tr2_6);
+ _mm_storeu_si128((__m128i *)(output + 7 * 32), tr2_7);
+ // Process next 8x8
+ output += 8;
+ }
+ }
+ }
+ }
+}
diff --git a/libvpx/vp9/encoder/x86/vp9_dct_sse2.c b/libvpx/vp9/encoder/x86/vp9_dct_sse2.c
new file mode 100644
index 0000000..eb271fe
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_dct_sse2.c
@@ -0,0 +1,2585 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h> // SSE2
+#include "vp9/common/vp9_idct.h" // for cospi constants
+#include "vpx_ports/mem.h"
+
+void vp9_short_fdct4x4_sse2(int16_t *input, int16_t *output, int pitch) {
+  // The 2D transform is done with two passes which are actually pretty
+  // similar. In the first one, we transform the columns and transpose
+  // the results. In the second one, we transform the rows. To achieve that,
+  // as the first pass results are transposed, we transform the columns (that
+  // is, the transposed rows) and transpose the results (so that they go back
+  // to normal/row positions).
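+  // A scalar sketch of this structure (fdct4_cols and transpose4x4 are
+  // hypothetical helper names, used here only for illustration):
+  //   for (pass = 0; pass < 2; ++pass) {
+  //     fdct4_cols(buf);    // 1-D transform down each column
+  //     transpose4x4(buf);  // rows become columns for the next pass
+  //   }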
+ const int stride = pitch >> 1;
+ int pass;
+ // Constants
+  // When we use them, in one case they are all the same. In all others,
+  // it's a pair of them that we need to repeat four times. This is done
+  // by constructing the 32-bit constant corresponding to that pair.
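+  // Note: pair_set_epi16(a, b) conceptually replicates the pair across the
+  // register as a,b,a,b,a,b,a,b, which is the layout _mm_madd_epi16 needs
+  // to compute a*x + b*y on interleaved inputs.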
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
+ const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
+ const __m128i kOne = _mm_set1_epi16(1);
+ __m128i in0, in1, in2, in3;
+ // Load inputs.
+ {
+ in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+ in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+ in2 = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+ in3 = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+ // x = x << 4
+ in0 = _mm_slli_epi16(in0, 4);
+ in1 = _mm_slli_epi16(in1, 4);
+ in2 = _mm_slli_epi16(in2, 4);
+ in3 = _mm_slli_epi16(in3, 4);
+ // if (i == 0 && input[0]) input[0] += 1;
+ {
+      // The mask will only contain whether the first value is zero; all
+      // other comparisons will fail, as something shifted by 4 (above << 4)
+      // can never be equal to one. To increment in the non-zero case, we
+      // add the mask and one for the first element:
+      //   - if zero, mask = -1, v = v - 1 + 1 = v
+      //   - if non-zero, mask = 0, v = v + 0 + 1 = v + 1
+ __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a);
+ in0 = _mm_add_epi16(in0, mask);
+ in0 = _mm_add_epi16(in0, k__nonzero_bias_b);
+ }
+ }
+ // Do the two transform/transpose passes
+ for (pass = 0; pass < 2; ++pass) {
+    // Transform 1/2: Add/subtract
+ const __m128i r0 = _mm_add_epi16(in0, in3);
+ const __m128i r1 = _mm_add_epi16(in1, in2);
+ const __m128i r2 = _mm_sub_epi16(in1, in2);
+ const __m128i r3 = _mm_sub_epi16(in0, in3);
+ // Transform 1/2: Interleave to do the multiply by constants which gets us
+ // into 32 bits.
+ const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
+ const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ // Combine and transpose
+ const __m128i res0 = _mm_packs_epi32(w0, w2);
+ const __m128i res1 = _mm_packs_epi32(w4, w6);
+ // 00 01 02 03 20 21 22 23
+ // 10 11 12 13 30 31 32 33
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
+ const __m128i tr0_1 = _mm_unpackhi_epi16(res0, res1);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ in0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ in2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ // 00 10 20 30 01 11 21 31 in0 contains 0 followed by 1
+ // 02 12 22 32 03 13 23 33 in2 contains 2 followed by 3
+ if (0 == pass) {
+      // Extract values in the high part for the second pass, as the
+      // transform code only uses the first four values.
+ in1 = _mm_unpackhi_epi64(in0, in0);
+ in3 = _mm_unpackhi_epi64(in2, in2);
+ } else {
+      // Post-condition the output as (v + 1) >> 2 and store it, taking
+      // advantage of the fact that rows 1/3 are stored just after rows 0/2.
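+      // (Sketch) per coefficient this is simply out[j] = (v + 1) >> 2; each
+      // 128-bit store below writes eight int16 values, i.e. two rows of the
+      // 4-wide output block.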
+ __m128i out01 = _mm_add_epi16(in0, kOne);
+ __m128i out23 = _mm_add_epi16(in2, kOne);
+ out01 = _mm_srai_epi16(out01, 2);
+ out23 = _mm_srai_epi16(out23, 2);
+ _mm_storeu_si128((__m128i *)(output + 0 * 4), out01);
+ _mm_storeu_si128((__m128i *)(output + 2 * 4), out23);
+ }
+ }
+}
+
+void vp9_short_fdct8x4_sse2(int16_t *input, int16_t *output, int pitch) {
+ vp9_short_fdct4x4_sse2(input, output, pitch);
+ vp9_short_fdct4x4_sse2(input + 4, output + 16, pitch);
+}
+
+static INLINE void load_buffer_4x4(int16_t *input, __m128i *in, int stride) {
+ const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
+ const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
+ __m128i mask;
+
+ in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+ in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+ in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+ in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+
+ in[0] = _mm_slli_epi16(in[0], 4);
+ in[1] = _mm_slli_epi16(in[1], 4);
+ in[2] = _mm_slli_epi16(in[2], 4);
+ in[3] = _mm_slli_epi16(in[3], 4);
+
+ mask = _mm_cmpeq_epi16(in[0], k__nonzero_bias_a);
+ in[0] = _mm_add_epi16(in[0], mask);
+ in[0] = _mm_add_epi16(in[0], k__nonzero_bias_b);
+}
+
+static INLINE void write_buffer_4x4(int16_t *output, __m128i *res) {
+ const __m128i kOne = _mm_set1_epi16(1);
+ __m128i in01 = _mm_unpacklo_epi64(res[0], res[1]);
+ __m128i in23 = _mm_unpacklo_epi64(res[2], res[3]);
+ __m128i out01 = _mm_add_epi16(in01, kOne);
+ __m128i out23 = _mm_add_epi16(in23, kOne);
+ out01 = _mm_srai_epi16(out01, 2);
+ out23 = _mm_srai_epi16(out23, 2);
+ _mm_store_si128((__m128i *)(output + 0 * 8), out01);
+ _mm_store_si128((__m128i *)(output + 1 * 8), out23);
+}
+
+static INLINE void transpose_4x4(__m128i *res) {
+ // Combine and transpose
+ // 00 01 02 03 20 21 22 23
+ // 10 11 12 13 30 31 32 33
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
+ const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
+
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ res[0] = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ res[2] = _mm_unpackhi_epi32(tr0_0, tr0_1);
+
+ // 00 10 20 30 01 11 21 31
+ // 02 12 22 32 03 13 23 33
+ // only use the first 4 16-bit integers
+ res[1] = _mm_unpackhi_epi64(res[0], res[0]);
+ res[3] = _mm_unpackhi_epi64(res[2], res[2]);
+}
+
+void fdct4_1d_sse2(__m128i *in) {
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
+ __m128i u[4], v[4];
+ u[0] = _mm_add_epi16(in[0], in[3]);
+ u[1] = _mm_add_epi16(in[1], in[2]);
+ u[2] = _mm_sub_epi16(in[1], in[2]);
+ u[3] = _mm_sub_epi16(in[0], in[3]);
+
+ v[0] = _mm_unpacklo_epi16(u[0], u[1]);
+ v[1] = _mm_unpacklo_epi16(u[2], u[3]);
+ u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16); // 0
+ u[1] = _mm_madd_epi16(v[0], k__cospi_p16_m16); // 2
+ u[2] = _mm_madd_epi16(v[1], k__cospi_p24_p08); // 1
+ u[3] = _mm_madd_epi16(v[1], k__cospi_m08_p24); // 3
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+
+ in[0] = _mm_packs_epi32(u[0], u[1]);
+ in[1] = _mm_packs_epi32(u[2], u[3]);
+ transpose_4x4(in);
+}
+
+void fadst4_1d_sse2(__m128i *in) {
+ const __m128i k__sinpi_p01_p02 = pair_set_epi16(sinpi_1_9, sinpi_2_9);
+ const __m128i k__sinpi_p04_m01 = pair_set_epi16(sinpi_4_9, -sinpi_1_9);
+ const __m128i k__sinpi_p03_p04 = pair_set_epi16(sinpi_3_9, sinpi_4_9);
+ const __m128i k__sinpi_m03_p02 = pair_set_epi16(-sinpi_3_9, sinpi_2_9);
+ const __m128i k__sinpi_p03_p03 = _mm_set1_epi16(sinpi_3_9);
+ const __m128i kZero = _mm_set1_epi16(0);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ __m128i u[8], v[8];
+ __m128i in7 = _mm_add_epi16(in[0], in[1]);
+ in7 = _mm_sub_epi16(in7, in[3]);
+
+ u[0] = _mm_unpacklo_epi16(in[0], in[1]);
+ u[1] = _mm_unpacklo_epi16(in[2], in[3]);
+ u[2] = _mm_unpacklo_epi16(in7, kZero);
+ u[3] = _mm_unpacklo_epi16(in[2], kZero);
+
+ v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p02); // s0 + s2
+ v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p04); // s4 + s5
+ v[2] = _mm_madd_epi16(u[2], k__sinpi_p03_p03); // x1
+ v[3] = _mm_madd_epi16(u[0], k__sinpi_p04_m01); // s1 - s3
+ v[4] = _mm_madd_epi16(u[1], k__sinpi_m03_p02); // -s4 + s6
+ v[5] = _mm_madd_epi16(u[3], k__sinpi_p03_p03); // s4
+
+ u[0] = _mm_add_epi32(v[0], v[1]);
+ u[1] = v[2];
+ u[2] = _mm_add_epi32(v[3], v[4]);
+ u[3] = _mm_sub_epi32(u[2], u[0]);
+ u[4] = _mm_slli_epi32(v[5], 2);
+ u[5] = _mm_sub_epi32(u[4], v[5]);
+ u[6] = _mm_add_epi32(u[3], u[5]);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+
+ in[0] = _mm_packs_epi32(u[0], u[2]);
+ in[1] = _mm_packs_epi32(u[1], u[3]);
+ transpose_4x4(in);
+}
+
+void vp9_short_fht4x4_sse2(int16_t *input, int16_t *output,
+ int stride, int tx_type) {
+ __m128i in[4];
+ load_buffer_4x4(input, in, stride);
+ switch (tx_type) {
+ case 0: // DCT_DCT
+ fdct4_1d_sse2(in);
+ fdct4_1d_sse2(in);
+ break;
+ case 1: // ADST_DCT
+ fadst4_1d_sse2(in);
+ fdct4_1d_sse2(in);
+ break;
+ case 2: // DCT_ADST
+ fdct4_1d_sse2(in);
+ fadst4_1d_sse2(in);
+ break;
+ case 3: // ADST_ADST
+ fadst4_1d_sse2(in);
+ fadst4_1d_sse2(in);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ write_buffer_4x4(output, in);
+}
+
+void vp9_short_fdct8x8_sse2(int16_t *input, int16_t *output, int pitch) {
+ const int stride = pitch >> 1;
+ int pass;
+ // Constants
+  // When we use them, in one case they are all the same. In all others,
+  // it's a pair of them that we need to repeat four times. This is done
+  // by constructing the 32-bit constant corresponding to that pair.
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+ const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ // Load input
+ __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+ __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+ __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+ __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
+ __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+ __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+ __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+ __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
+ // Pre-condition input (shift by two)
+ in0 = _mm_slli_epi16(in0, 2);
+ in1 = _mm_slli_epi16(in1, 2);
+ in2 = _mm_slli_epi16(in2, 2);
+ in3 = _mm_slli_epi16(in3, 2);
+ in4 = _mm_slli_epi16(in4, 2);
+ in5 = _mm_slli_epi16(in5, 2);
+ in6 = _mm_slli_epi16(in6, 2);
+ in7 = _mm_slli_epi16(in7, 2);
+
+ // We do two passes, first the columns, then the rows. The results of the
+ // first pass are transposed so that the same column code can be reused. The
+ // results of the second pass are also transposed so that the rows (processed
+ // as columns) are put back in row positions.
+ for (pass = 0; pass < 2; pass++) {
+ // To store results of each pass before the transpose.
+ __m128i res0, res1, res2, res3, res4, res5, res6, res7;
+    // Add/subtract
+ const __m128i q0 = _mm_add_epi16(in0, in7);
+ const __m128i q1 = _mm_add_epi16(in1, in6);
+ const __m128i q2 = _mm_add_epi16(in2, in5);
+ const __m128i q3 = _mm_add_epi16(in3, in4);
+ const __m128i q4 = _mm_sub_epi16(in3, in4);
+ const __m128i q5 = _mm_sub_epi16(in2, in5);
+ const __m128i q6 = _mm_sub_epi16(in1, in6);
+ const __m128i q7 = _mm_sub_epi16(in0, in7);
+ // Work on first four results
+ {
+      // Add/subtract
+ const __m128i r0 = _mm_add_epi16(q0, q3);
+ const __m128i r1 = _mm_add_epi16(q1, q2);
+ const __m128i r2 = _mm_sub_epi16(q1, q2);
+ const __m128i r3 = _mm_sub_epi16(q0, q3);
+      // Interleave to do the multiply by constants which gets us into 32 bits
+ const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
+ const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
+ const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
+ const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+ const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
+ const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+ const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ // Combine
+ res0 = _mm_packs_epi32(w0, w1);
+ res4 = _mm_packs_epi32(w2, w3);
+ res2 = _mm_packs_epi32(w4, w5);
+ res6 = _mm_packs_epi32(w6, w7);
+ }
+ // Work on next four results
+ {
+      // Interleave to do the multiply by constants which gets us into 32 bits
+ const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
+ const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
+ const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
+ const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
+ const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
+ const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
+ const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
+ const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
+ const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
+ const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
+ const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
+ const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
+ const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
+ // Combine
+ const __m128i r0 = _mm_packs_epi32(s0, s1);
+ const __m128i r1 = _mm_packs_epi32(s2, s3);
+      // Add/subtract
+ const __m128i x0 = _mm_add_epi16(q4, r0);
+ const __m128i x1 = _mm_sub_epi16(q4, r0);
+ const __m128i x2 = _mm_sub_epi16(q7, r1);
+ const __m128i x3 = _mm_add_epi16(q7, r1);
+      // Interleave to do the multiply by constants which gets us into 32 bits
+ const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
+ const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
+ const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
+ const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
+ const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
+ const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
+ const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ // Combine
+ res1 = _mm_packs_epi32(w0, w1);
+ res7 = _mm_packs_epi32(w2, w3);
+ res5 = _mm_packs_epi32(w4, w5);
+ res3 = _mm_packs_epi32(w6, w7);
+ }
+ // Transpose the 8x8.
+ {
+ // 00 01 02 03 04 05 06 07
+ // 10 11 12 13 14 15 16 17
+ // 20 21 22 23 24 25 26 27
+ // 30 31 32 33 34 35 36 37
+ // 40 41 42 43 44 45 46 47
+ // 50 51 52 53 54 55 56 57
+ // 60 61 62 63 64 65 66 67
+ // 70 71 72 73 74 75 76 77
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ // 04 14 05 15 06 16 07 17
+ // 24 34 25 35 26 36 27 37
+ // 40 50 41 51 42 52 43 53
+ // 60 70 61 71 62 72 63 73
+      // 44 54 45 55 46 56 47 57
+ // 64 74 65 75 66 76 67 77
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+ // 00 10 20 30 01 11 21 31
+ // 40 50 60 70 41 51 61 71
+ // 02 12 22 32 03 13 23 33
+ // 42 52 62 72 43 53 63 73
+      // 04 14 24 34 05 15 25 35
+      // 44 54 64 74 45 55 65 75
+ // 06 16 26 36 07 17 27 37
+ // 46 56 66 76 47 57 67 77
+ in0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+ in1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+ in2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+ in3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+ in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+ in5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+ in6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+ in7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ // 06 16 26 36 46 56 66 76
+ // 07 17 27 37 47 57 67 77
+ }
+ }
+ // Post-condition output and store it
+ {
+ // Post-condition (division by two)
+ // division of two 16 bits signed numbers using shifts
+ // n / 2 = (n - (n >> 15)) >> 1
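+    // Worked example (illustration): for n = -3, n >> 15 is -1, so
+    // (-3 - (-1)) >> 1 = -2 >> 1 = -1, matching C's truncating -3 / 2;
+    // a plain arithmetic shift alone would give -3 >> 1 = -2. For n >= 0
+    // the sign term is 0 and n >> 1 already truncates toward zero.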
+ const __m128i sign_in0 = _mm_srai_epi16(in0, 15);
+ const __m128i sign_in1 = _mm_srai_epi16(in1, 15);
+ const __m128i sign_in2 = _mm_srai_epi16(in2, 15);
+ const __m128i sign_in3 = _mm_srai_epi16(in3, 15);
+ const __m128i sign_in4 = _mm_srai_epi16(in4, 15);
+ const __m128i sign_in5 = _mm_srai_epi16(in5, 15);
+ const __m128i sign_in6 = _mm_srai_epi16(in6, 15);
+ const __m128i sign_in7 = _mm_srai_epi16(in7, 15);
+ in0 = _mm_sub_epi16(in0, sign_in0);
+ in1 = _mm_sub_epi16(in1, sign_in1);
+ in2 = _mm_sub_epi16(in2, sign_in2);
+ in3 = _mm_sub_epi16(in3, sign_in3);
+ in4 = _mm_sub_epi16(in4, sign_in4);
+ in5 = _mm_sub_epi16(in5, sign_in5);
+ in6 = _mm_sub_epi16(in6, sign_in6);
+ in7 = _mm_sub_epi16(in7, sign_in7);
+ in0 = _mm_srai_epi16(in0, 1);
+ in1 = _mm_srai_epi16(in1, 1);
+ in2 = _mm_srai_epi16(in2, 1);
+ in3 = _mm_srai_epi16(in3, 1);
+ in4 = _mm_srai_epi16(in4, 1);
+ in5 = _mm_srai_epi16(in5, 1);
+ in6 = _mm_srai_epi16(in6, 1);
+ in7 = _mm_srai_epi16(in7, 1);
+ // store results
+ _mm_store_si128((__m128i *)(output + 0 * 8), in0);
+ _mm_store_si128((__m128i *)(output + 1 * 8), in1);
+ _mm_store_si128((__m128i *)(output + 2 * 8), in2);
+ _mm_store_si128((__m128i *)(output + 3 * 8), in3);
+ _mm_store_si128((__m128i *)(output + 4 * 8), in4);
+ _mm_store_si128((__m128i *)(output + 5 * 8), in5);
+ _mm_store_si128((__m128i *)(output + 6 * 8), in6);
+ _mm_store_si128((__m128i *)(output + 7 * 8), in7);
+ }
+}
+
+// load 8x8 array
+static INLINE void load_buffer_8x8(int16_t *input, __m128i *in, int stride) {
+ in[0] = _mm_load_si128((__m128i *)(input + 0 * stride));
+ in[1] = _mm_load_si128((__m128i *)(input + 1 * stride));
+ in[2] = _mm_load_si128((__m128i *)(input + 2 * stride));
+ in[3] = _mm_load_si128((__m128i *)(input + 3 * stride));
+ in[4] = _mm_load_si128((__m128i *)(input + 4 * stride));
+ in[5] = _mm_load_si128((__m128i *)(input + 5 * stride));
+ in[6] = _mm_load_si128((__m128i *)(input + 6 * stride));
+ in[7] = _mm_load_si128((__m128i *)(input + 7 * stride));
+
+ in[0] = _mm_slli_epi16(in[0], 2);
+ in[1] = _mm_slli_epi16(in[1], 2);
+ in[2] = _mm_slli_epi16(in[2], 2);
+ in[3] = _mm_slli_epi16(in[3], 2);
+ in[4] = _mm_slli_epi16(in[4], 2);
+ in[5] = _mm_slli_epi16(in[5], 2);
+ in[6] = _mm_slli_epi16(in[6], 2);
+ in[7] = _mm_slli_epi16(in[7], 2);
+}
+
+// right shift and rounding
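+// Per lane this is equivalent to (sketch):
+//   s = res >> 15;                        // -1 for negative lanes, else 0
+//   r = (bit >= 2) ? 1 << (bit - 2) : 0;  // rounding constant
+//   res = (res + r - s) >> bit;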
+static INLINE void right_shift_8x8(__m128i *res, int const bit) {
+ const __m128i kOne = _mm_set1_epi16(1);
+ const int bit_m02 = bit - 2;
+ __m128i sign0 = _mm_srai_epi16(res[0], 15);
+ __m128i sign1 = _mm_srai_epi16(res[1], 15);
+ __m128i sign2 = _mm_srai_epi16(res[2], 15);
+ __m128i sign3 = _mm_srai_epi16(res[3], 15);
+ __m128i sign4 = _mm_srai_epi16(res[4], 15);
+ __m128i sign5 = _mm_srai_epi16(res[5], 15);
+ __m128i sign6 = _mm_srai_epi16(res[6], 15);
+ __m128i sign7 = _mm_srai_epi16(res[7], 15);
+
+ if (bit_m02 >= 0) {
+ __m128i k_const_rounding = _mm_slli_epi16(kOne, bit_m02);
+ res[0] = _mm_add_epi16(res[0], k_const_rounding);
+ res[1] = _mm_add_epi16(res[1], k_const_rounding);
+ res[2] = _mm_add_epi16(res[2], k_const_rounding);
+ res[3] = _mm_add_epi16(res[3], k_const_rounding);
+ res[4] = _mm_add_epi16(res[4], k_const_rounding);
+ res[5] = _mm_add_epi16(res[5], k_const_rounding);
+ res[6] = _mm_add_epi16(res[6], k_const_rounding);
+ res[7] = _mm_add_epi16(res[7], k_const_rounding);
+ }
+
+ res[0] = _mm_sub_epi16(res[0], sign0);
+ res[1] = _mm_sub_epi16(res[1], sign1);
+ res[2] = _mm_sub_epi16(res[2], sign2);
+ res[3] = _mm_sub_epi16(res[3], sign3);
+ res[4] = _mm_sub_epi16(res[4], sign4);
+ res[5] = _mm_sub_epi16(res[5], sign5);
+ res[6] = _mm_sub_epi16(res[6], sign6);
+ res[7] = _mm_sub_epi16(res[7], sign7);
+
+ res[0] = _mm_srai_epi16(res[0], bit);
+ res[1] = _mm_srai_epi16(res[1], bit);
+ res[2] = _mm_srai_epi16(res[2], bit);
+ res[3] = _mm_srai_epi16(res[3], bit);
+ res[4] = _mm_srai_epi16(res[4], bit);
+ res[5] = _mm_srai_epi16(res[5], bit);
+ res[6] = _mm_srai_epi16(res[6], bit);
+ res[7] = _mm_srai_epi16(res[7], bit);
+}
+
+// write 8x8 array
+static INLINE void write_buffer_8x8(int16_t *output, __m128i *res, int stride) {
+ _mm_store_si128((__m128i *)(output + 0 * stride), res[0]);
+ _mm_store_si128((__m128i *)(output + 1 * stride), res[1]);
+ _mm_store_si128((__m128i *)(output + 2 * stride), res[2]);
+ _mm_store_si128((__m128i *)(output + 3 * stride), res[3]);
+ _mm_store_si128((__m128i *)(output + 4 * stride), res[4]);
+ _mm_store_si128((__m128i *)(output + 5 * stride), res[5]);
+ _mm_store_si128((__m128i *)(output + 6 * stride), res[6]);
+ _mm_store_si128((__m128i *)(output + 7 * stride), res[7]);
+}
+
+// transpose an 8x8 array; in and res may point to the same buffer, which
+// makes the transpose in-place
+static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
+ const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ // 04 14 05 15 06 16 07 17
+ // 24 34 25 35 26 36 27 37
+ // 40 50 41 51 42 52 43 53
+ // 60 70 61 71 62 72 63 73
+ // 44 54 45 55 46 56 47 57
+ // 64 74 65 75 66 76 67 77
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+ // 00 10 20 30 01 11 21 31
+ // 40 50 60 70 41 51 61 71
+ // 02 12 22 32 03 13 23 33
+ // 42 52 62 72 43 53 63 73
+ // 04 14 24 34 05 15 25 35
+ // 44 54 64 74 45 55 65 75
+ // 06 16 26 36 07 17 27 37
+ // 46 56 66 76 47 57 67 77
+ res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
+ res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
+ res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
+ res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
+ res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
+ res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
+ res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
+ res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ // 06 16 26 36 46 56 66 76
+ // 07 17 27 37 47 57 67 77
+}
+
+void fdct8_1d_sse2(__m128i *in) {
+ // constants
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+ const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ __m128i u0, u1, u2, u3, u4, u5, u6, u7;
+ __m128i v0, v1, v2, v3, v4, v5, v6, v7;
+ __m128i s0, s1, s2, s3, s4, s5, s6, s7;
+
+ // stage 1
+ s0 = _mm_add_epi16(in[0], in[7]);
+ s1 = _mm_add_epi16(in[1], in[6]);
+ s2 = _mm_add_epi16(in[2], in[5]);
+ s3 = _mm_add_epi16(in[3], in[4]);
+ s4 = _mm_sub_epi16(in[3], in[4]);
+ s5 = _mm_sub_epi16(in[2], in[5]);
+ s6 = _mm_sub_epi16(in[1], in[6]);
+ s7 = _mm_sub_epi16(in[0], in[7]);
+
+ u0 = _mm_add_epi16(s0, s3);
+ u1 = _mm_add_epi16(s1, s2);
+ u2 = _mm_sub_epi16(s1, s2);
+ u3 = _mm_sub_epi16(s0, s3);
+ // interleave and perform butterfly multiplication/addition
+ v0 = _mm_unpacklo_epi16(u0, u1);
+ v1 = _mm_unpackhi_epi16(u0, u1);
+ v2 = _mm_unpacklo_epi16(u2, u3);
+ v3 = _mm_unpackhi_epi16(u2, u3);
+
+ u0 = _mm_madd_epi16(v0, k__cospi_p16_p16);
+ u1 = _mm_madd_epi16(v1, k__cospi_p16_p16);
+ u2 = _mm_madd_epi16(v0, k__cospi_p16_m16);
+ u3 = _mm_madd_epi16(v1, k__cospi_p16_m16);
+ u4 = _mm_madd_epi16(v2, k__cospi_p24_p08);
+ u5 = _mm_madd_epi16(v3, k__cospi_p24_p08);
+ u6 = _mm_madd_epi16(v2, k__cospi_m08_p24);
+ u7 = _mm_madd_epi16(v3, k__cospi_m08_p24);
+
+ // shift and rounding
+ v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+ v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+
+ u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+
+ in[0] = _mm_packs_epi32(u0, u1);
+ in[2] = _mm_packs_epi32(u4, u5);
+ in[4] = _mm_packs_epi32(u2, u3);
+ in[6] = _mm_packs_epi32(u6, u7);
+
+ // stage 2
+ // interleave and perform butterfly multiplication/addition
+ u0 = _mm_unpacklo_epi16(s6, s5);
+ u1 = _mm_unpackhi_epi16(s6, s5);
+ v0 = _mm_madd_epi16(u0, k__cospi_p16_m16);
+ v1 = _mm_madd_epi16(u1, k__cospi_p16_m16);
+ v2 = _mm_madd_epi16(u0, k__cospi_p16_p16);
+ v3 = _mm_madd_epi16(u1, k__cospi_p16_p16);
+
+ // shift and rounding
+ u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
+ u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
+ u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
+ u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
+
+ v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
+ v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
+ v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
+ v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
+
+ u0 = _mm_packs_epi32(v0, v1);
+ u1 = _mm_packs_epi32(v2, v3);
+
+ // stage 3
+ s0 = _mm_add_epi16(s4, u0);
+ s1 = _mm_sub_epi16(s4, u0);
+ s2 = _mm_sub_epi16(s7, u1);
+ s3 = _mm_add_epi16(s7, u1);
+
+ // stage 4
+ u0 = _mm_unpacklo_epi16(s0, s3);
+ u1 = _mm_unpackhi_epi16(s0, s3);
+ u2 = _mm_unpacklo_epi16(s1, s2);
+ u3 = _mm_unpackhi_epi16(s1, s2);
+
+ v0 = _mm_madd_epi16(u0, k__cospi_p28_p04);
+ v1 = _mm_madd_epi16(u1, k__cospi_p28_p04);
+ v2 = _mm_madd_epi16(u2, k__cospi_p12_p20);
+ v3 = _mm_madd_epi16(u3, k__cospi_p12_p20);
+ v4 = _mm_madd_epi16(u2, k__cospi_m20_p12);
+ v5 = _mm_madd_epi16(u3, k__cospi_m20_p12);
+ v6 = _mm_madd_epi16(u0, k__cospi_m04_p28);
+ v7 = _mm_madd_epi16(u1, k__cospi_m04_p28);
+
+ // shift and rounding
+ u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
+ u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
+ u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
+ u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
+ u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
+ u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
+ u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
+ u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
+
+ v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
+ v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
+ v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
+ v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
+ v4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
+ v5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
+ v6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
+ v7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
+
+ in[1] = _mm_packs_epi32(v0, v1);
+ in[3] = _mm_packs_epi32(v4, v5);
+ in[5] = _mm_packs_epi32(v2, v3);
+ in[7] = _mm_packs_epi32(v6, v7);
+
+ // transpose
+ array_transpose_8x8(in, in);
+}
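+
+// Note: each 1-D helper ends with a transpose, so applying two of them back
+// to back (as vp9_short_fht8x8_sse2 below does) yields the full 2-D
+// transform with the result returned to row order.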
+
+void fadst8_1d_sse2(__m128i *in) {
+ // Constants
+ const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
+ const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
+ const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64);
+ const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64);
+ const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__const_0 = _mm_set1_epi16(0);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
+ __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15;
+ __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15;
+ __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
+ __m128i s0, s1, s2, s3, s4, s5, s6, s7;
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+
+  // reorder the input rows into butterfly order
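+  // The pairs fed to stage 1 below are (in7,in0), (in5,in2), (in3,in4),
+  // (in1,in6); this presumably mirrors the x0..x7 input ordering of the
+  // scalar fadst8 in vp9/encoder/vp9_dct.c.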
+ in0 = in[7];
+ in1 = in[0];
+ in2 = in[5];
+ in3 = in[2];
+ in4 = in[3];
+ in5 = in[4];
+ in6 = in[1];
+ in7 = in[6];
+
+ // column transformation
+ // stage 1
+ // interleave and multiply/add into 32-bit integer
+ s0 = _mm_unpacklo_epi16(in0, in1);
+ s1 = _mm_unpackhi_epi16(in0, in1);
+ s2 = _mm_unpacklo_epi16(in2, in3);
+ s3 = _mm_unpackhi_epi16(in2, in3);
+ s4 = _mm_unpacklo_epi16(in4, in5);
+ s5 = _mm_unpackhi_epi16(in4, in5);
+ s6 = _mm_unpacklo_epi16(in6, in7);
+ s7 = _mm_unpackhi_epi16(in6, in7);
+
+ u0 = _mm_madd_epi16(s0, k__cospi_p02_p30);
+ u1 = _mm_madd_epi16(s1, k__cospi_p02_p30);
+ u2 = _mm_madd_epi16(s0, k__cospi_p30_m02);
+ u3 = _mm_madd_epi16(s1, k__cospi_p30_m02);
+ u4 = _mm_madd_epi16(s2, k__cospi_p10_p22);
+ u5 = _mm_madd_epi16(s3, k__cospi_p10_p22);
+ u6 = _mm_madd_epi16(s2, k__cospi_p22_m10);
+ u7 = _mm_madd_epi16(s3, k__cospi_p22_m10);
+ u8 = _mm_madd_epi16(s4, k__cospi_p18_p14);
+ u9 = _mm_madd_epi16(s5, k__cospi_p18_p14);
+ u10 = _mm_madd_epi16(s4, k__cospi_p14_m18);
+ u11 = _mm_madd_epi16(s5, k__cospi_p14_m18);
+ u12 = _mm_madd_epi16(s6, k__cospi_p26_p06);
+ u13 = _mm_madd_epi16(s7, k__cospi_p26_p06);
+ u14 = _mm_madd_epi16(s6, k__cospi_p06_m26);
+ u15 = _mm_madd_epi16(s7, k__cospi_p06_m26);
+
+ // addition
+ w0 = _mm_add_epi32(u0, u8);
+ w1 = _mm_add_epi32(u1, u9);
+ w2 = _mm_add_epi32(u2, u10);
+ w3 = _mm_add_epi32(u3, u11);
+ w4 = _mm_add_epi32(u4, u12);
+ w5 = _mm_add_epi32(u5, u13);
+ w6 = _mm_add_epi32(u6, u14);
+ w7 = _mm_add_epi32(u7, u15);
+ w8 = _mm_sub_epi32(u0, u8);
+ w9 = _mm_sub_epi32(u1, u9);
+ w10 = _mm_sub_epi32(u2, u10);
+ w11 = _mm_sub_epi32(u3, u11);
+ w12 = _mm_sub_epi32(u4, u12);
+ w13 = _mm_sub_epi32(u5, u13);
+ w14 = _mm_sub_epi32(u6, u14);
+ w15 = _mm_sub_epi32(u7, u15);
+
+ // shift and rounding
+ v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
+ v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
+ v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
+ v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
+ v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
+ v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
+ v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
+ v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
+ v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING);
+ v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING);
+ v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING);
+ v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING);
+ v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING);
+ v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING);
+ v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING);
+ v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING);
+
+ u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ u8 = _mm_srai_epi32(v8, DCT_CONST_BITS);
+ u9 = _mm_srai_epi32(v9, DCT_CONST_BITS);
+ u10 = _mm_srai_epi32(v10, DCT_CONST_BITS);
+ u11 = _mm_srai_epi32(v11, DCT_CONST_BITS);
+ u12 = _mm_srai_epi32(v12, DCT_CONST_BITS);
+ u13 = _mm_srai_epi32(v13, DCT_CONST_BITS);
+ u14 = _mm_srai_epi32(v14, DCT_CONST_BITS);
+ u15 = _mm_srai_epi32(v15, DCT_CONST_BITS);
+
+ // back to 16-bit and pack 8 integers into __m128i
+ in[0] = _mm_packs_epi32(u0, u1);
+ in[1] = _mm_packs_epi32(u2, u3);
+ in[2] = _mm_packs_epi32(u4, u5);
+ in[3] = _mm_packs_epi32(u6, u7);
+ in[4] = _mm_packs_epi32(u8, u9);
+ in[5] = _mm_packs_epi32(u10, u11);
+ in[6] = _mm_packs_epi32(u12, u13);
+ in[7] = _mm_packs_epi32(u14, u15);
+
+ // stage 2
+ s0 = _mm_add_epi16(in[0], in[2]);
+ s1 = _mm_add_epi16(in[1], in[3]);
+ s2 = _mm_sub_epi16(in[0], in[2]);
+ s3 = _mm_sub_epi16(in[1], in[3]);
+ u0 = _mm_unpacklo_epi16(in[4], in[5]);
+ u1 = _mm_unpackhi_epi16(in[4], in[5]);
+ u2 = _mm_unpacklo_epi16(in[6], in[7]);
+ u3 = _mm_unpackhi_epi16(in[6], in[7]);
+
+ v0 = _mm_madd_epi16(u0, k__cospi_p08_p24);
+ v1 = _mm_madd_epi16(u1, k__cospi_p08_p24);
+ v2 = _mm_madd_epi16(u0, k__cospi_p24_m08);
+ v3 = _mm_madd_epi16(u1, k__cospi_p24_m08);
+ v4 = _mm_madd_epi16(u2, k__cospi_m24_p08);
+ v5 = _mm_madd_epi16(u3, k__cospi_m24_p08);
+ v6 = _mm_madd_epi16(u2, k__cospi_p08_p24);
+ v7 = _mm_madd_epi16(u3, k__cospi_p08_p24);
+
+ w0 = _mm_add_epi32(v0, v4);
+ w1 = _mm_add_epi32(v1, v5);
+ w2 = _mm_add_epi32(v2, v6);
+ w3 = _mm_add_epi32(v3, v7);
+ w4 = _mm_sub_epi32(v0, v4);
+ w5 = _mm_sub_epi32(v1, v5);
+ w6 = _mm_sub_epi32(v2, v6);
+ w7 = _mm_sub_epi32(v3, v7);
+
+ v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
+ v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
+ v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
+ v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
+ v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
+ v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
+ v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
+ v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
+
+ u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+
+  // back to 16-bit integers
+ s4 = _mm_packs_epi32(u0, u1);
+ s5 = _mm_packs_epi32(u2, u3);
+ s6 = _mm_packs_epi32(u4, u5);
+ s7 = _mm_packs_epi32(u6, u7);
+
+ // stage 3
+ u0 = _mm_unpacklo_epi16(s2, s3);
+ u1 = _mm_unpackhi_epi16(s2, s3);
+ u2 = _mm_unpacklo_epi16(s6, s7);
+ u3 = _mm_unpackhi_epi16(s6, s7);
+
+ v0 = _mm_madd_epi16(u0, k__cospi_p16_p16);
+ v1 = _mm_madd_epi16(u1, k__cospi_p16_p16);
+ v2 = _mm_madd_epi16(u0, k__cospi_p16_m16);
+ v3 = _mm_madd_epi16(u1, k__cospi_p16_m16);
+ v4 = _mm_madd_epi16(u2, k__cospi_p16_p16);
+ v5 = _mm_madd_epi16(u3, k__cospi_p16_p16);
+ v6 = _mm_madd_epi16(u2, k__cospi_p16_m16);
+ v7 = _mm_madd_epi16(u3, k__cospi_p16_m16);
+
+ u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
+ u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
+ u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
+ u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
+ u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
+ u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
+ u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
+ u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
+
+ v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
+ v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
+ v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
+ v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
+ v4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
+ v5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
+ v6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
+ v7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
+
+ s2 = _mm_packs_epi32(v0, v1);
+ s3 = _mm_packs_epi32(v2, v3);
+ s6 = _mm_packs_epi32(v4, v5);
+ s7 = _mm_packs_epi32(v6, v7);
+
+ // FIXME(jingning): do subtract using bit inversion?
+ in[0] = s0;
+ in[1] = _mm_sub_epi16(k__const_0, s4);
+ in[2] = s6;
+ in[3] = _mm_sub_epi16(k__const_0, s2);
+ in[4] = s3;
+ in[5] = _mm_sub_epi16(k__const_0, s7);
+ in[6] = s5;
+ in[7] = _mm_sub_epi16(k__const_0, s1);
+
+ // transpose
+ array_transpose_8x8(in, in);
+}
+
+void vp9_short_fht8x8_sse2(int16_t *input, int16_t *output,
+ int stride, int tx_type) {
+ __m128i in[8];
+ load_buffer_8x8(input, in, stride);
+ switch (tx_type) {
+ case 0: // DCT_DCT
+ fdct8_1d_sse2(in);
+ fdct8_1d_sse2(in);
+ break;
+ case 1: // ADST_DCT
+ fadst8_1d_sse2(in);
+ fdct8_1d_sse2(in);
+ break;
+ case 2: // DCT_ADST
+ fdct8_1d_sse2(in);
+ fadst8_1d_sse2(in);
+ break;
+ case 3: // ADST_ADST
+ fadst8_1d_sse2(in);
+ fadst8_1d_sse2(in);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ right_shift_8x8(in, 1);
+ write_buffer_8x8(output, in, 8);
+}
+
+void vp9_short_fdct16x16_sse2(int16_t *input, int16_t *output, int pitch) {
+  // The 2D transform is done with two passes which are actually pretty
+  // similar. In the first one, we transform the columns and transpose
+  // the results. In the second one, we transform the rows. To achieve that,
+  // as the first pass results are transposed, we transform the columns (that
+  // is, the transposed rows) and transpose the results (so that they go back
+  // to normal/row positions).
+ const int stride = pitch >> 1;
+ int pass;
+ // We need an intermediate buffer between passes.
+ DECLARE_ALIGNED_ARRAY(16, int16_t, intermediate, 256);
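+  // 256 = 16 * 16 int16 coefficients; the 16-byte alignment allows the
+  // aligned _mm_load_si128 used on this buffer during the second pass.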
+ int16_t *in = input;
+ int16_t *out = intermediate;
+ // Constants
+  // When we use them, in one case they are all the same. In all others,
+  // it's a pair of them that we need to repeat four times. This is done
+  // by constructing the 32-bit constant corresponding to that pair.
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+ const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
+ const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
+ const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
+ const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
+ const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
+ const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
+ const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
+ const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i kOne = _mm_set1_epi16(1);
+ // Do the two transform/transpose passes
+ for (pass = 0; pass < 2; ++pass) {
+ // We process eight columns (transposed rows in second pass) at a time.
+ int column_start;
+ for (column_start = 0; column_start < 16; column_start += 8) {
+ __m128i in00, in01, in02, in03, in04, in05, in06, in07;
+ __m128i in08, in09, in10, in11, in12, in13, in14, in15;
+ __m128i input0, input1, input2, input3, input4, input5, input6, input7;
+ __m128i step1_0, step1_1, step1_2, step1_3;
+ __m128i step1_4, step1_5, step1_6, step1_7;
+ __m128i step2_1, step2_2, step2_3, step2_4, step2_5, step2_6;
+ __m128i step3_0, step3_1, step3_2, step3_3;
+ __m128i step3_4, step3_5, step3_6, step3_7;
+ __m128i res00, res01, res02, res03, res04, res05, res06, res07;
+ __m128i res08, res09, res10, res11, res12, res13, res14, res15;
+ // Load and pre-condition input.
+ if (0 == pass) {
+ in00 = _mm_load_si128((const __m128i *)(in + 0 * stride));
+ in01 = _mm_load_si128((const __m128i *)(in + 1 * stride));
+ in02 = _mm_load_si128((const __m128i *)(in + 2 * stride));
+ in03 = _mm_load_si128((const __m128i *)(in + 3 * stride));
+ in04 = _mm_load_si128((const __m128i *)(in + 4 * stride));
+ in05 = _mm_load_si128((const __m128i *)(in + 5 * stride));
+ in06 = _mm_load_si128((const __m128i *)(in + 6 * stride));
+ in07 = _mm_load_si128((const __m128i *)(in + 7 * stride));
+ in08 = _mm_load_si128((const __m128i *)(in + 8 * stride));
+ in09 = _mm_load_si128((const __m128i *)(in + 9 * stride));
+ in10 = _mm_load_si128((const __m128i *)(in + 10 * stride));
+ in11 = _mm_load_si128((const __m128i *)(in + 11 * stride));
+ in12 = _mm_load_si128((const __m128i *)(in + 12 * stride));
+ in13 = _mm_load_si128((const __m128i *)(in + 13 * stride));
+ in14 = _mm_load_si128((const __m128i *)(in + 14 * stride));
+ in15 = _mm_load_si128((const __m128i *)(in + 15 * stride));
+ // x = x << 2
+ in00 = _mm_slli_epi16(in00, 2);
+ in01 = _mm_slli_epi16(in01, 2);
+ in02 = _mm_slli_epi16(in02, 2);
+ in03 = _mm_slli_epi16(in03, 2);
+ in04 = _mm_slli_epi16(in04, 2);
+ in05 = _mm_slli_epi16(in05, 2);
+ in06 = _mm_slli_epi16(in06, 2);
+ in07 = _mm_slli_epi16(in07, 2);
+ in08 = _mm_slli_epi16(in08, 2);
+ in09 = _mm_slli_epi16(in09, 2);
+ in10 = _mm_slli_epi16(in10, 2);
+ in11 = _mm_slli_epi16(in11, 2);
+ in12 = _mm_slli_epi16(in12, 2);
+ in13 = _mm_slli_epi16(in13, 2);
+ in14 = _mm_slli_epi16(in14, 2);
+ in15 = _mm_slli_epi16(in15, 2);
+ } else {
+ in00 = _mm_load_si128((const __m128i *)(in + 0 * 16));
+ in01 = _mm_load_si128((const __m128i *)(in + 1 * 16));
+ in02 = _mm_load_si128((const __m128i *)(in + 2 * 16));
+ in03 = _mm_load_si128((const __m128i *)(in + 3 * 16));
+ in04 = _mm_load_si128((const __m128i *)(in + 4 * 16));
+ in05 = _mm_load_si128((const __m128i *)(in + 5 * 16));
+ in06 = _mm_load_si128((const __m128i *)(in + 6 * 16));
+ in07 = _mm_load_si128((const __m128i *)(in + 7 * 16));
+ in08 = _mm_load_si128((const __m128i *)(in + 8 * 16));
+ in09 = _mm_load_si128((const __m128i *)(in + 9 * 16));
+ in10 = _mm_load_si128((const __m128i *)(in + 10 * 16));
+ in11 = _mm_load_si128((const __m128i *)(in + 11 * 16));
+ in12 = _mm_load_si128((const __m128i *)(in + 12 * 16));
+ in13 = _mm_load_si128((const __m128i *)(in + 13 * 16));
+ in14 = _mm_load_si128((const __m128i *)(in + 14 * 16));
+ in15 = _mm_load_si128((const __m128i *)(in + 15 * 16));
+ // x = (x + 1) >> 2
+ in00 = _mm_add_epi16(in00, kOne);
+ in01 = _mm_add_epi16(in01, kOne);
+ in02 = _mm_add_epi16(in02, kOne);
+ in03 = _mm_add_epi16(in03, kOne);
+ in04 = _mm_add_epi16(in04, kOne);
+ in05 = _mm_add_epi16(in05, kOne);
+ in06 = _mm_add_epi16(in06, kOne);
+ in07 = _mm_add_epi16(in07, kOne);
+ in08 = _mm_add_epi16(in08, kOne);
+ in09 = _mm_add_epi16(in09, kOne);
+ in10 = _mm_add_epi16(in10, kOne);
+ in11 = _mm_add_epi16(in11, kOne);
+ in12 = _mm_add_epi16(in12, kOne);
+ in13 = _mm_add_epi16(in13, kOne);
+ in14 = _mm_add_epi16(in14, kOne);
+ in15 = _mm_add_epi16(in15, kOne);
+ in00 = _mm_srai_epi16(in00, 2);
+ in01 = _mm_srai_epi16(in01, 2);
+ in02 = _mm_srai_epi16(in02, 2);
+ in03 = _mm_srai_epi16(in03, 2);
+ in04 = _mm_srai_epi16(in04, 2);
+ in05 = _mm_srai_epi16(in05, 2);
+ in06 = _mm_srai_epi16(in06, 2);
+ in07 = _mm_srai_epi16(in07, 2);
+ in08 = _mm_srai_epi16(in08, 2);
+ in09 = _mm_srai_epi16(in09, 2);
+ in10 = _mm_srai_epi16(in10, 2);
+ in11 = _mm_srai_epi16(in11, 2);
+ in12 = _mm_srai_epi16(in12, 2);
+ in13 = _mm_srai_epi16(in13, 2);
+ in14 = _mm_srai_epi16(in14, 2);
+ in15 = _mm_srai_epi16(in15, 2);
+ }
+ in += 8;
+ // Calculate input for the first 8 results.
+ {
+ input0 = _mm_add_epi16(in00, in15);
+ input1 = _mm_add_epi16(in01, in14);
+ input2 = _mm_add_epi16(in02, in13);
+ input3 = _mm_add_epi16(in03, in12);
+ input4 = _mm_add_epi16(in04, in11);
+ input5 = _mm_add_epi16(in05, in10);
+ input6 = _mm_add_epi16(in06, in09);
+ input7 = _mm_add_epi16(in07, in08);
+ }
+ // Calculate input for the next 8 results.
+ {
+ step1_0 = _mm_sub_epi16(in07, in08);
+ step1_1 = _mm_sub_epi16(in06, in09);
+ step1_2 = _mm_sub_epi16(in05, in10);
+ step1_3 = _mm_sub_epi16(in04, in11);
+ step1_4 = _mm_sub_epi16(in03, in12);
+ step1_5 = _mm_sub_epi16(in02, in13);
+ step1_6 = _mm_sub_epi16(in01, in14);
+ step1_7 = _mm_sub_epi16(in00, in15);
+ }
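+      // (Sketch) this is the classic even/odd decomposition of a 16-point
+      // DCT: the sums in[i] + in[15-i] go through an 8-point DCT to produce
+      // the even-indexed outputs, while the differences produce the odd ones.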
+ // Work on the first eight values; fdct8_1d(input, even_results);
+ {
+        // Add/subtract
+ const __m128i q0 = _mm_add_epi16(input0, input7);
+ const __m128i q1 = _mm_add_epi16(input1, input6);
+ const __m128i q2 = _mm_add_epi16(input2, input5);
+ const __m128i q3 = _mm_add_epi16(input3, input4);
+ const __m128i q4 = _mm_sub_epi16(input3, input4);
+ const __m128i q5 = _mm_sub_epi16(input2, input5);
+ const __m128i q6 = _mm_sub_epi16(input1, input6);
+ const __m128i q7 = _mm_sub_epi16(input0, input7);
+ // Work on first four results
+ {
+          // Add/subtract
+ const __m128i r0 = _mm_add_epi16(q0, q3);
+ const __m128i r1 = _mm_add_epi16(q1, q2);
+ const __m128i r2 = _mm_sub_epi16(q1, q2);
+ const __m128i r3 = _mm_sub_epi16(q0, q3);
+ // Interleave to do the multiply by constants which gets us
+ // into 32 bits.
+ const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
+ const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
+ const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
+ const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+ const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
+ const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+ const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ // Combine
+ res00 = _mm_packs_epi32(w0, w1);
+ res08 = _mm_packs_epi32(w2, w3);
+ res04 = _mm_packs_epi32(w4, w5);
+ res12 = _mm_packs_epi32(w6, w7);
+ }
+ // Work on next four results
+ {
+ // Interleave to do the multiply by constants which gets us
+ // into 32 bits.
+ const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
+ const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
+ const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
+ const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
+ const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
+ const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
+ const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
+ const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
+ const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
+ const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
+ const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
+ const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
+ const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
+ // Combine
+ const __m128i r0 = _mm_packs_epi32(s0, s1);
+ const __m128i r1 = _mm_packs_epi32(s2, s3);
+          // Add/subtract
+ const __m128i x0 = _mm_add_epi16(q4, r0);
+ const __m128i x1 = _mm_sub_epi16(q4, r0);
+ const __m128i x2 = _mm_sub_epi16(q7, r1);
+ const __m128i x3 = _mm_add_epi16(q7, r1);
+ // Interleave to do the multiply by constants which gets us
+ // into 32 bits.
+ const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
+ const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
+ const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
+ const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
+ const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
+ const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
+ const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ // Combine
+ res02 = _mm_packs_epi32(w0, w1);
+ res14 = _mm_packs_epi32(w2, w3);
+ res10 = _mm_packs_epi32(w4, w5);
+ res06 = _mm_packs_epi32(w6, w7);
+ }
+ }
+ // Work on the next eight values; step1 -> odd_results
+ {
+ // step 2
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_m16);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_m16);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_m16);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ step2_2 = _mm_packs_epi32(w0, w1);
+ step2_3 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_p16);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ step2_5 = _mm_packs_epi32(w0, w1);
+ step2_4 = _mm_packs_epi32(w2, w3);
+ }
+ // step 3
+ {
+ step3_0 = _mm_add_epi16(step1_0, step2_3);
+ step3_1 = _mm_add_epi16(step1_1, step2_2);
+ step3_2 = _mm_sub_epi16(step1_1, step2_2);
+ step3_3 = _mm_sub_epi16(step1_0, step2_3);
+ step3_4 = _mm_sub_epi16(step1_7, step2_4);
+ step3_5 = _mm_sub_epi16(step1_6, step2_5);
+ step3_6 = _mm_add_epi16(step1_6, step2_5);
+ step3_7 = _mm_add_epi16(step1_7, step2_4);
+ }
+ // step 4
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
+ const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
+ const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
+ const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m08_p24);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m08_p24);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m24_m08);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m24_m08);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ step2_1 = _mm_packs_epi32(w0, w1);
+ step2_2 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
+ const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
+ const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
+ const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p24_p08);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p24_p08);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m08_p24);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ step2_6 = _mm_packs_epi32(w0, w1);
+ step2_5 = _mm_packs_epi32(w2, w3);
+ }
+ // step 5
+ {
+ step1_0 = _mm_add_epi16(step3_0, step2_1);
+ step1_1 = _mm_sub_epi16(step3_0, step2_1);
+ step1_2 = _mm_sub_epi16(step3_3, step2_2);
+ step1_3 = _mm_add_epi16(step3_3, step2_2);
+ step1_4 = _mm_add_epi16(step3_4, step2_5);
+ step1_5 = _mm_sub_epi16(step3_4, step2_5);
+ step1_6 = _mm_sub_epi16(step3_7, step2_6);
+ step1_7 = _mm_add_epi16(step3_7, step2_6);
+ }
+ // step 6
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p30_p02);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p30_p02);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p14_p18);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p14_p18);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ res01 = _mm_packs_epi32(w0, w1);
+ res09 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p22_p10);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p22_p10);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p06_p26);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p06_p26);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ res05 = _mm_packs_epi32(w0, w1);
+ res13 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m10_p22);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m10_p22);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m26_p06);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m26_p06);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ res11 = _mm_packs_epi32(w0, w1);
+ res03 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m02_p30);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m02_p30);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m18_p14);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m18_p14);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ res15 = _mm_packs_epi32(w0, w1);
+ res07 = _mm_packs_epi32(w2, w3);
+ }
+ }
+  // Transpose the results; do this as two 8x8 transposes.
+ {
+ // 00 01 02 03 04 05 06 07
+ // 10 11 12 13 14 15 16 17
+ // 20 21 22 23 24 25 26 27
+ // 30 31 32 33 34 35 36 37
+ // 40 41 42 43 44 45 46 47
+ // 50 51 52 53 54 55 56 57
+ // 60 61 62 63 64 65 66 67
+ // 70 71 72 73 74 75 76 77
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res00, res01);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(res02, res03);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(res00, res01);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(res02, res03);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(res04, res05);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(res06, res07);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(res04, res05);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(res06, res07);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ // 04 14 05 15 06 16 07 17
+ // 24 34 25 35 26 36 27 37
+ // 40 50 41 51 42 52 43 53
+ // 60 70 61 71 62 72 63 73
+    // 44 54 45 55 46 56 47 57
+ // 64 74 65 75 66 76 67 77
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+ // 00 10 20 30 01 11 21 31
+ // 40 50 60 70 41 51 61 71
+ // 02 12 22 32 03 13 23 33
+ // 42 52 62 72 43 53 63 73
+    // 04 14 24 34 05 15 25 35
+    // 44 54 64 74 45 55 65 75
+ // 06 16 26 36 07 17 27 37
+ // 46 56 66 76 47 57 67 77
+ const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+ const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+ const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+ const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+ const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+ const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+ const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+ const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ // 06 16 26 36 46 56 66 76
+ // 07 17 27 37 47 57 67 77
+ _mm_storeu_si128((__m128i *)(out + 0 * 16), tr2_0);
+ _mm_storeu_si128((__m128i *)(out + 1 * 16), tr2_1);
+ _mm_storeu_si128((__m128i *)(out + 2 * 16), tr2_2);
+ _mm_storeu_si128((__m128i *)(out + 3 * 16), tr2_3);
+ _mm_storeu_si128((__m128i *)(out + 4 * 16), tr2_4);
+ _mm_storeu_si128((__m128i *)(out + 5 * 16), tr2_5);
+ _mm_storeu_si128((__m128i *)(out + 6 * 16), tr2_6);
+ _mm_storeu_si128((__m128i *)(out + 7 * 16), tr2_7);
+ }
+ {
+ // 00 01 02 03 04 05 06 07
+ // 10 11 12 13 14 15 16 17
+ // 20 21 22 23 24 25 26 27
+ // 30 31 32 33 34 35 36 37
+ // 40 41 42 43 44 45 46 47
+ // 50 51 52 53 54 55 56 57
+ // 60 61 62 63 64 65 66 67
+ // 70 71 72 73 74 75 76 77
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res08, res09);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(res10, res11);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(res08, res09);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(res10, res11);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(res12, res13);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(res14, res15);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(res12, res13);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(res14, res15);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ // 04 14 05 15 06 16 07 17
+ // 24 34 25 35 26 36 27 37
+ // 40 50 41 51 42 52 43 53
+ // 60 70 61 71 62 72 63 73
+      // 44 54 45 55 46 56 47 57
+ // 64 74 65 75 66 76 67 77
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+ // 00 10 20 30 01 11 21 31
+ // 40 50 60 70 41 51 61 71
+ // 02 12 22 32 03 13 23 33
+ // 42 52 62 72 43 53 63 73
+      // 04 14 24 34 05 15 25 35
+      // 44 54 64 74 45 55 65 75
+ // 06 16 26 36 07 17 27 37
+ // 46 56 66 76 47 57 67 77
+ const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+ const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+ const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+ const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+ const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+ const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+ const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+ const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ // 06 16 26 36 46 56 66 76
+ // 07 17 27 37 47 57 67 77
+ // Store results
+ _mm_store_si128((__m128i *)(out + 8 + 0 * 16), tr2_0);
+ _mm_store_si128((__m128i *)(out + 8 + 1 * 16), tr2_1);
+ _mm_store_si128((__m128i *)(out + 8 + 2 * 16), tr2_2);
+ _mm_store_si128((__m128i *)(out + 8 + 3 * 16), tr2_3);
+ _mm_store_si128((__m128i *)(out + 8 + 4 * 16), tr2_4);
+ _mm_store_si128((__m128i *)(out + 8 + 5 * 16), tr2_5);
+ _mm_store_si128((__m128i *)(out + 8 + 6 * 16), tr2_6);
+ _mm_store_si128((__m128i *)(out + 8 + 7 * 16), tr2_7);
+ }
+ out += 8*16;
+ }
+  // Set up in/out for the next pass.
+ in = intermediate;
+ out = output;
+ }
+}
+
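+// Throughout this file, the _mm_madd_epi16 -> add k__DCT_CONST_ROUNDING ->
+// _mm_srai_epi32 sequence is the vector form of the scalar
+// dct_const_round_shift() helper. A minimal scalar sketch of that rounding
+// (in vp9, DCT_CONST_BITS is 14 and DCT_CONST_ROUNDING is
+// 1 << (DCT_CONST_BITS - 1)):
+//
+//   static INLINE int16_t dct_round_shift(int32_t input) {
+//     return (int16_t)((input + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
+//   }
+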
+static INLINE void load_buffer_16x16(int16_t *input, __m128i *in0,
+ __m128i *in1, int stride) {
+ // load first 8 columns
+ load_buffer_8x8(input, in0, stride);
+ load_buffer_8x8(input + 8 * stride, in0 + 8, stride);
+
+ input += 8;
+ // load second 8 columns
+ load_buffer_8x8(input, in1, stride);
+ load_buffer_8x8(input + 8 * stride, in1 + 8, stride);
+}
+
+static INLINE void write_buffer_16x16(int16_t *output, __m128i *in0,
+ __m128i *in1, int stride) {
+ // write first 8 columns
+ write_buffer_8x8(output, in0, stride);
+ write_buffer_8x8(output + 8 * stride, in0 + 8, stride);
+ // write second 8 columns
+ output += 8;
+ write_buffer_8x8(output, in1, stride);
+ write_buffer_8x8(output + 8 * stride, in1 + 8, stride);
+}
+
+static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
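+  // Treat the 16x16 block as four 8x8 quadrants: transpose each quadrant
+  // in place and swap the two off-diagonal quadrants. tbuf holds the
+  // transposed top-right quadrant until the bottom-left has been read.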
+ __m128i tbuf[8];
+ array_transpose_8x8(res0, res0);
+ array_transpose_8x8(res1, tbuf);
+ array_transpose_8x8(res0 + 8, res1);
+ array_transpose_8x8(res1 + 8, res1 + 8);
+
+ res0[8] = tbuf[0];
+ res0[9] = tbuf[1];
+ res0[10] = tbuf[2];
+ res0[11] = tbuf[3];
+ res0[12] = tbuf[4];
+ res0[13] = tbuf[5];
+ res0[14] = tbuf[6];
+ res0[15] = tbuf[7];
+}
+
+static INLINE void right_shift_16x16(__m128i *res0, __m128i *res1) {
+  // round and right-shift all 16x16 intermediate results by 2 bits
+ right_shift_8x8(res0, 2);
+ right_shift_8x8(res0 + 8, 2);
+ right_shift_8x8(res1, 2);
+ right_shift_8x8(res1 + 8, 2);
+}
+
+void fdct16_1d_8col(__m128i *in) {
+  // perform a 16-point 1-D DCT on 8 columns of the 16x16 block
+ __m128i i[8], s[8], p[8], t[8], u[16], v[16];
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+ const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
+ const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
+ const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
+ const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
+ const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
+ const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
+ const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
+ const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
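+  // Each butterfly stage below uses the same idiom: unpack two rows into
+  // interleaved 16-bit pairs, multiply-accumulate with _mm_madd_epi16
+  // against a pair_set_epi16(c0, c1) constant (giving a*c0 + b*c1 in 32
+  // bits), then round, shift and pack back down to 16 bits.
+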
+ // stage 1
+ i[0] = _mm_add_epi16(in[0], in[15]);
+ i[1] = _mm_add_epi16(in[1], in[14]);
+ i[2] = _mm_add_epi16(in[2], in[13]);
+ i[3] = _mm_add_epi16(in[3], in[12]);
+ i[4] = _mm_add_epi16(in[4], in[11]);
+ i[5] = _mm_add_epi16(in[5], in[10]);
+ i[6] = _mm_add_epi16(in[6], in[9]);
+ i[7] = _mm_add_epi16(in[7], in[8]);
+
+ s[0] = _mm_sub_epi16(in[7], in[8]);
+ s[1] = _mm_sub_epi16(in[6], in[9]);
+ s[2] = _mm_sub_epi16(in[5], in[10]);
+ s[3] = _mm_sub_epi16(in[4], in[11]);
+ s[4] = _mm_sub_epi16(in[3], in[12]);
+ s[5] = _mm_sub_epi16(in[2], in[13]);
+ s[6] = _mm_sub_epi16(in[1], in[14]);
+ s[7] = _mm_sub_epi16(in[0], in[15]);
+
+ p[0] = _mm_add_epi16(i[0], i[7]);
+ p[1] = _mm_add_epi16(i[1], i[6]);
+ p[2] = _mm_add_epi16(i[2], i[5]);
+ p[3] = _mm_add_epi16(i[3], i[4]);
+ p[4] = _mm_sub_epi16(i[3], i[4]);
+ p[5] = _mm_sub_epi16(i[2], i[5]);
+ p[6] = _mm_sub_epi16(i[1], i[6]);
+ p[7] = _mm_sub_epi16(i[0], i[7]);
+
+ u[0] = _mm_add_epi16(p[0], p[3]);
+ u[1] = _mm_add_epi16(p[1], p[2]);
+ u[2] = _mm_sub_epi16(p[1], p[2]);
+ u[3] = _mm_sub_epi16(p[0], p[3]);
+
+ v[0] = _mm_unpacklo_epi16(u[0], u[1]);
+ v[1] = _mm_unpackhi_epi16(u[0], u[1]);
+ v[2] = _mm_unpacklo_epi16(u[2], u[3]);
+ v[3] = _mm_unpackhi_epi16(u[2], u[3]);
+
+ u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16);
+ u[1] = _mm_madd_epi16(v[1], k__cospi_p16_p16);
+ u[2] = _mm_madd_epi16(v[0], k__cospi_p16_m16);
+ u[3] = _mm_madd_epi16(v[1], k__cospi_p16_m16);
+ u[4] = _mm_madd_epi16(v[2], k__cospi_p24_p08);
+ u[5] = _mm_madd_epi16(v[3], k__cospi_p24_p08);
+ u[6] = _mm_madd_epi16(v[2], k__cospi_m08_p24);
+ u[7] = _mm_madd_epi16(v[3], k__cospi_m08_p24);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+
+ in[0] = _mm_packs_epi32(u[0], u[1]);
+ in[4] = _mm_packs_epi32(u[4], u[5]);
+ in[8] = _mm_packs_epi32(u[2], u[3]);
+ in[12] = _mm_packs_epi32(u[6], u[7]);
+
+ u[0] = _mm_unpacklo_epi16(p[5], p[6]);
+ u[1] = _mm_unpackhi_epi16(p[5], p[6]);
+ v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+
+ u[0] = _mm_packs_epi32(v[0], v[1]);
+ u[1] = _mm_packs_epi32(v[2], v[3]);
+
+ t[0] = _mm_add_epi16(p[4], u[0]);
+ t[1] = _mm_sub_epi16(p[4], u[0]);
+ t[2] = _mm_sub_epi16(p[7], u[1]);
+ t[3] = _mm_add_epi16(p[7], u[1]);
+
+ u[0] = _mm_unpacklo_epi16(t[0], t[3]);
+ u[1] = _mm_unpackhi_epi16(t[0], t[3]);
+ u[2] = _mm_unpacklo_epi16(t[1], t[2]);
+ u[3] = _mm_unpackhi_epi16(t[1], t[2]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p28_p04);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p28_p04);
+ v[2] = _mm_madd_epi16(u[2], k__cospi_p12_p20);
+ v[3] = _mm_madd_epi16(u[3], k__cospi_p12_p20);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_m20_p12);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_m20_p12);
+ v[6] = _mm_madd_epi16(u[0], k__cospi_m04_p28);
+ v[7] = _mm_madd_epi16(u[1], k__cospi_m04_p28);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+
+ in[2] = _mm_packs_epi32(v[0], v[1]);
+ in[6] = _mm_packs_epi32(v[4], v[5]);
+ in[10] = _mm_packs_epi32(v[2], v[3]);
+ in[14] = _mm_packs_epi32(v[6], v[7]);
+
+ // stage 2
+ u[0] = _mm_unpacklo_epi16(s[2], s[5]);
+ u[1] = _mm_unpackhi_epi16(s[2], s[5]);
+ u[2] = _mm_unpacklo_epi16(s[3], s[4]);
+ u[3] = _mm_unpackhi_epi16(s[3], s[4]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
+ v[2] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
+ v[3] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
+ v[6] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
+ v[7] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+
+ t[2] = _mm_packs_epi32(v[0], v[1]);
+ t[3] = _mm_packs_epi32(v[2], v[3]);
+ t[4] = _mm_packs_epi32(v[4], v[5]);
+ t[5] = _mm_packs_epi32(v[6], v[7]);
+
+ // stage 3
+ p[0] = _mm_add_epi16(s[0], t[3]);
+ p[1] = _mm_add_epi16(s[1], t[2]);
+ p[2] = _mm_sub_epi16(s[1], t[2]);
+ p[3] = _mm_sub_epi16(s[0], t[3]);
+ p[4] = _mm_sub_epi16(s[7], t[4]);
+ p[5] = _mm_sub_epi16(s[6], t[5]);
+ p[6] = _mm_add_epi16(s[6], t[5]);
+ p[7] = _mm_add_epi16(s[7], t[4]);
+
+ // stage 4
+ u[0] = _mm_unpacklo_epi16(p[1], p[6]);
+ u[1] = _mm_unpackhi_epi16(p[1], p[6]);
+ u[2] = _mm_unpacklo_epi16(p[2], p[5]);
+ u[3] = _mm_unpackhi_epi16(p[2], p[5]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_m08_p24);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_m08_p24);
+ v[2] = _mm_madd_epi16(u[2], k__cospi_m24_m08);
+ v[3] = _mm_madd_epi16(u[3], k__cospi_m24_m08);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_m08_p24);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_m08_p24);
+ v[6] = _mm_madd_epi16(u[0], k__cospi_p24_p08);
+ v[7] = _mm_madd_epi16(u[1], k__cospi_p24_p08);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+
+ t[1] = _mm_packs_epi32(v[0], v[1]);
+ t[2] = _mm_packs_epi32(v[2], v[3]);
+ t[5] = _mm_packs_epi32(v[4], v[5]);
+ t[6] = _mm_packs_epi32(v[6], v[7]);
+
+ // stage 5
+ s[0] = _mm_add_epi16(p[0], t[1]);
+ s[1] = _mm_sub_epi16(p[0], t[1]);
+ s[2] = _mm_sub_epi16(p[3], t[2]);
+ s[3] = _mm_add_epi16(p[3], t[2]);
+ s[4] = _mm_add_epi16(p[4], t[5]);
+ s[5] = _mm_sub_epi16(p[4], t[5]);
+ s[6] = _mm_sub_epi16(p[7], t[6]);
+ s[7] = _mm_add_epi16(p[7], t[6]);
+
+ // stage 6
+ u[0] = _mm_unpacklo_epi16(s[0], s[7]);
+ u[1] = _mm_unpackhi_epi16(s[0], s[7]);
+ u[2] = _mm_unpacklo_epi16(s[1], s[6]);
+ u[3] = _mm_unpackhi_epi16(s[1], s[6]);
+ u[4] = _mm_unpacklo_epi16(s[2], s[5]);
+ u[5] = _mm_unpackhi_epi16(s[2], s[5]);
+ u[6] = _mm_unpacklo_epi16(s[3], s[4]);
+ u[7] = _mm_unpackhi_epi16(s[3], s[4]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p30_p02);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p30_p02);
+ v[2] = _mm_madd_epi16(u[2], k__cospi_p14_p18);
+ v[3] = _mm_madd_epi16(u[3], k__cospi_p14_p18);
+ v[4] = _mm_madd_epi16(u[4], k__cospi_p22_p10);
+ v[5] = _mm_madd_epi16(u[5], k__cospi_p22_p10);
+ v[6] = _mm_madd_epi16(u[6], k__cospi_p06_p26);
+ v[7] = _mm_madd_epi16(u[7], k__cospi_p06_p26);
+ v[8] = _mm_madd_epi16(u[6], k__cospi_m26_p06);
+ v[9] = _mm_madd_epi16(u[7], k__cospi_m26_p06);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_m10_p22);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_m10_p22);
+ v[12] = _mm_madd_epi16(u[2], k__cospi_m18_p14);
+ v[13] = _mm_madd_epi16(u[3], k__cospi_m18_p14);
+ v[14] = _mm_madd_epi16(u[0], k__cospi_m02_p30);
+ v[15] = _mm_madd_epi16(u[1], k__cospi_m02_p30);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+ u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
+ u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
+ u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
+ u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
+ u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
+ u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
+ u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
+ u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+ v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+ v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+ v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+ v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+ v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+ v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+ v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+ v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
+ in[1] = _mm_packs_epi32(v[0], v[1]);
+ in[9] = _mm_packs_epi32(v[2], v[3]);
+ in[5] = _mm_packs_epi32(v[4], v[5]);
+ in[13] = _mm_packs_epi32(v[6], v[7]);
+ in[3] = _mm_packs_epi32(v[8], v[9]);
+ in[11] = _mm_packs_epi32(v[10], v[11]);
+ in[7] = _mm_packs_epi32(v[12], v[13]);
+ in[15] = _mm_packs_epi32(v[14], v[15]);
+}
+
+void fadst16_1d_8col(__m128i *in) {
+  // perform a 16-point 1-D ADST on 8 columns of the 16x16 block
+ __m128i s[16], x[16], u[32], v[32];
+ const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
+ const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64);
+ const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64);
+ const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64);
+ const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64);
+ const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64);
+ const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64);
+ const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64);
+ const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64);
+ const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64);
+ const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64);
+ const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64);
+ const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64);
+ const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64);
+ const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64);
+ const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64);
+ const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);
+ const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64);
+ const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64);
+ const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64);
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i kZero = _mm_set1_epi16(0);
+
+ u[0] = _mm_unpacklo_epi16(in[15], in[0]);
+ u[1] = _mm_unpackhi_epi16(in[15], in[0]);
+ u[2] = _mm_unpacklo_epi16(in[13], in[2]);
+ u[3] = _mm_unpackhi_epi16(in[13], in[2]);
+ u[4] = _mm_unpacklo_epi16(in[11], in[4]);
+ u[5] = _mm_unpackhi_epi16(in[11], in[4]);
+ u[6] = _mm_unpacklo_epi16(in[9], in[6]);
+ u[7] = _mm_unpackhi_epi16(in[9], in[6]);
+ u[8] = _mm_unpacklo_epi16(in[7], in[8]);
+ u[9] = _mm_unpackhi_epi16(in[7], in[8]);
+ u[10] = _mm_unpacklo_epi16(in[5], in[10]);
+ u[11] = _mm_unpackhi_epi16(in[5], in[10]);
+ u[12] = _mm_unpacklo_epi16(in[3], in[12]);
+ u[13] = _mm_unpackhi_epi16(in[3], in[12]);
+ u[14] = _mm_unpacklo_epi16(in[1], in[14]);
+ u[15] = _mm_unpackhi_epi16(in[1], in[14]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13);
+ v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15);
+ v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15);
+ v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17);
+ v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17);
+ v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11);
+ v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11);
+ v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21);
+ v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21);
+ v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07);
+ v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07);
+ v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25);
+ v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25);
+ v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03);
+ v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03);
+ v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29);
+ v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29);
+
+ u[0] = _mm_add_epi32(v[0], v[16]);
+ u[1] = _mm_add_epi32(v[1], v[17]);
+ u[2] = _mm_add_epi32(v[2], v[18]);
+ u[3] = _mm_add_epi32(v[3], v[19]);
+ u[4] = _mm_add_epi32(v[4], v[20]);
+ u[5] = _mm_add_epi32(v[5], v[21]);
+ u[6] = _mm_add_epi32(v[6], v[22]);
+ u[7] = _mm_add_epi32(v[7], v[23]);
+ u[8] = _mm_add_epi32(v[8], v[24]);
+ u[9] = _mm_add_epi32(v[9], v[25]);
+ u[10] = _mm_add_epi32(v[10], v[26]);
+ u[11] = _mm_add_epi32(v[11], v[27]);
+ u[12] = _mm_add_epi32(v[12], v[28]);
+ u[13] = _mm_add_epi32(v[13], v[29]);
+ u[14] = _mm_add_epi32(v[14], v[30]);
+ u[15] = _mm_add_epi32(v[15], v[31]);
+ u[16] = _mm_sub_epi32(v[0], v[16]);
+ u[17] = _mm_sub_epi32(v[1], v[17]);
+ u[18] = _mm_sub_epi32(v[2], v[18]);
+ u[19] = _mm_sub_epi32(v[3], v[19]);
+ u[20] = _mm_sub_epi32(v[4], v[20]);
+ u[21] = _mm_sub_epi32(v[5], v[21]);
+ u[22] = _mm_sub_epi32(v[6], v[22]);
+ u[23] = _mm_sub_epi32(v[7], v[23]);
+ u[24] = _mm_sub_epi32(v[8], v[24]);
+ u[25] = _mm_sub_epi32(v[9], v[25]);
+ u[26] = _mm_sub_epi32(v[10], v[26]);
+ u[27] = _mm_sub_epi32(v[11], v[27]);
+ u[28] = _mm_sub_epi32(v[12], v[28]);
+ u[29] = _mm_sub_epi32(v[13], v[29]);
+ u[30] = _mm_sub_epi32(v[14], v[30]);
+ u[31] = _mm_sub_epi32(v[15], v[31]);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+ v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+ v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
+ v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+ v[16] = _mm_add_epi32(u[16], k__DCT_CONST_ROUNDING);
+ v[17] = _mm_add_epi32(u[17], k__DCT_CONST_ROUNDING);
+ v[18] = _mm_add_epi32(u[18], k__DCT_CONST_ROUNDING);
+ v[19] = _mm_add_epi32(u[19], k__DCT_CONST_ROUNDING);
+ v[20] = _mm_add_epi32(u[20], k__DCT_CONST_ROUNDING);
+ v[21] = _mm_add_epi32(u[21], k__DCT_CONST_ROUNDING);
+ v[22] = _mm_add_epi32(u[22], k__DCT_CONST_ROUNDING);
+ v[23] = _mm_add_epi32(u[23], k__DCT_CONST_ROUNDING);
+ v[24] = _mm_add_epi32(u[24], k__DCT_CONST_ROUNDING);
+ v[25] = _mm_add_epi32(u[25], k__DCT_CONST_ROUNDING);
+ v[26] = _mm_add_epi32(u[26], k__DCT_CONST_ROUNDING);
+ v[27] = _mm_add_epi32(u[27], k__DCT_CONST_ROUNDING);
+ v[28] = _mm_add_epi32(u[28], k__DCT_CONST_ROUNDING);
+ v[29] = _mm_add_epi32(u[29], k__DCT_CONST_ROUNDING);
+ v[30] = _mm_add_epi32(u[30], k__DCT_CONST_ROUNDING);
+ v[31] = _mm_add_epi32(u[31], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+ u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+ u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
+ u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+ u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+ u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+ u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+ u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+ u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+ u[16] = _mm_srai_epi32(v[16], DCT_CONST_BITS);
+ u[17] = _mm_srai_epi32(v[17], DCT_CONST_BITS);
+ u[18] = _mm_srai_epi32(v[18], DCT_CONST_BITS);
+ u[19] = _mm_srai_epi32(v[19], DCT_CONST_BITS);
+ u[20] = _mm_srai_epi32(v[20], DCT_CONST_BITS);
+ u[21] = _mm_srai_epi32(v[21], DCT_CONST_BITS);
+ u[22] = _mm_srai_epi32(v[22], DCT_CONST_BITS);
+ u[23] = _mm_srai_epi32(v[23], DCT_CONST_BITS);
+ u[24] = _mm_srai_epi32(v[24], DCT_CONST_BITS);
+ u[25] = _mm_srai_epi32(v[25], DCT_CONST_BITS);
+ u[26] = _mm_srai_epi32(v[26], DCT_CONST_BITS);
+ u[27] = _mm_srai_epi32(v[27], DCT_CONST_BITS);
+ u[28] = _mm_srai_epi32(v[28], DCT_CONST_BITS);
+ u[29] = _mm_srai_epi32(v[29], DCT_CONST_BITS);
+ u[30] = _mm_srai_epi32(v[30], DCT_CONST_BITS);
+ u[31] = _mm_srai_epi32(v[31], DCT_CONST_BITS);
+
+ s[0] = _mm_packs_epi32(u[0], u[1]);
+ s[1] = _mm_packs_epi32(u[2], u[3]);
+ s[2] = _mm_packs_epi32(u[4], u[5]);
+ s[3] = _mm_packs_epi32(u[6], u[7]);
+ s[4] = _mm_packs_epi32(u[8], u[9]);
+ s[5] = _mm_packs_epi32(u[10], u[11]);
+ s[6] = _mm_packs_epi32(u[12], u[13]);
+ s[7] = _mm_packs_epi32(u[14], u[15]);
+ s[8] = _mm_packs_epi32(u[16], u[17]);
+ s[9] = _mm_packs_epi32(u[18], u[19]);
+ s[10] = _mm_packs_epi32(u[20], u[21]);
+ s[11] = _mm_packs_epi32(u[22], u[23]);
+ s[12] = _mm_packs_epi32(u[24], u[25]);
+ s[13] = _mm_packs_epi32(u[26], u[27]);
+ s[14] = _mm_packs_epi32(u[28], u[29]);
+ s[15] = _mm_packs_epi32(u[30], u[31]);
+
+ // stage 2
+ u[0] = _mm_unpacklo_epi16(s[8], s[9]);
+ u[1] = _mm_unpackhi_epi16(s[8], s[9]);
+ u[2] = _mm_unpacklo_epi16(s[10], s[11]);
+ u[3] = _mm_unpackhi_epi16(s[10], s[11]);
+ u[4] = _mm_unpacklo_epi16(s[12], s[13]);
+ u[5] = _mm_unpackhi_epi16(s[12], s[13]);
+ u[6] = _mm_unpacklo_epi16(s[14], s[15]);
+ u[7] = _mm_unpackhi_epi16(s[14], s[15]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12);
+
+ u[0] = _mm_add_epi32(v[0], v[8]);
+ u[1] = _mm_add_epi32(v[1], v[9]);
+ u[2] = _mm_add_epi32(v[2], v[10]);
+ u[3] = _mm_add_epi32(v[3], v[11]);
+ u[4] = _mm_add_epi32(v[4], v[12]);
+ u[5] = _mm_add_epi32(v[5], v[13]);
+ u[6] = _mm_add_epi32(v[6], v[14]);
+ u[7] = _mm_add_epi32(v[7], v[15]);
+ u[8] = _mm_sub_epi32(v[0], v[8]);
+ u[9] = _mm_sub_epi32(v[1], v[9]);
+ u[10] = _mm_sub_epi32(v[2], v[10]);
+ u[11] = _mm_sub_epi32(v[3], v[11]);
+ u[12] = _mm_sub_epi32(v[4], v[12]);
+ u[13] = _mm_sub_epi32(v[5], v[13]);
+ u[14] = _mm_sub_epi32(v[6], v[14]);
+ u[15] = _mm_sub_epi32(v[7], v[15]);
+
+ v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+ v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+ v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
+ v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+ u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+ u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
+ u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+ u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+ u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+ u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+ u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+ u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+
+ x[0] = _mm_add_epi16(s[0], s[4]);
+ x[1] = _mm_add_epi16(s[1], s[5]);
+ x[2] = _mm_add_epi16(s[2], s[6]);
+ x[3] = _mm_add_epi16(s[3], s[7]);
+ x[4] = _mm_sub_epi16(s[0], s[4]);
+ x[5] = _mm_sub_epi16(s[1], s[5]);
+ x[6] = _mm_sub_epi16(s[2], s[6]);
+ x[7] = _mm_sub_epi16(s[3], s[7]);
+ x[8] = _mm_packs_epi32(u[0], u[1]);
+ x[9] = _mm_packs_epi32(u[2], u[3]);
+ x[10] = _mm_packs_epi32(u[4], u[5]);
+ x[11] = _mm_packs_epi32(u[6], u[7]);
+ x[12] = _mm_packs_epi32(u[8], u[9]);
+ x[13] = _mm_packs_epi32(u[10], u[11]);
+ x[14] = _mm_packs_epi32(u[12], u[13]);
+ x[15] = _mm_packs_epi32(u[14], u[15]);
+
+ // stage 3
+ u[0] = _mm_unpacklo_epi16(x[4], x[5]);
+ u[1] = _mm_unpackhi_epi16(x[4], x[5]);
+ u[2] = _mm_unpacklo_epi16(x[6], x[7]);
+ u[3] = _mm_unpackhi_epi16(x[6], x[7]);
+ u[4] = _mm_unpacklo_epi16(x[12], x[13]);
+ u[5] = _mm_unpackhi_epi16(x[12], x[13]);
+ u[6] = _mm_unpacklo_epi16(x[14], x[15]);
+ u[7] = _mm_unpackhi_epi16(x[14], x[15]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24);
+
+ u[0] = _mm_add_epi32(v[0], v[4]);
+ u[1] = _mm_add_epi32(v[1], v[5]);
+ u[2] = _mm_add_epi32(v[2], v[6]);
+ u[3] = _mm_add_epi32(v[3], v[7]);
+ u[4] = _mm_sub_epi32(v[0], v[4]);
+ u[5] = _mm_sub_epi32(v[1], v[5]);
+ u[6] = _mm_sub_epi32(v[2], v[6]);
+ u[7] = _mm_sub_epi32(v[3], v[7]);
+ u[8] = _mm_add_epi32(v[8], v[12]);
+ u[9] = _mm_add_epi32(v[9], v[13]);
+ u[10] = _mm_add_epi32(v[10], v[14]);
+ u[11] = _mm_add_epi32(v[11], v[15]);
+ u[12] = _mm_sub_epi32(v[8], v[12]);
+ u[13] = _mm_sub_epi32(v[9], v[13]);
+ u[14] = _mm_sub_epi32(v[10], v[14]);
+ u[15] = _mm_sub_epi32(v[11], v[15]);
+
+ u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+ u[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+ u[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
+ u[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+ u[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+ u[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+ u[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+ u[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+ u[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+ v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+ v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+ v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+ v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+ v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+ v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+ v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+ v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
+ s[0] = _mm_add_epi16(x[0], x[2]);
+ s[1] = _mm_add_epi16(x[1], x[3]);
+ s[2] = _mm_sub_epi16(x[0], x[2]);
+ s[3] = _mm_sub_epi16(x[1], x[3]);
+ s[4] = _mm_packs_epi32(v[0], v[1]);
+ s[5] = _mm_packs_epi32(v[2], v[3]);
+ s[6] = _mm_packs_epi32(v[4], v[5]);
+ s[7] = _mm_packs_epi32(v[6], v[7]);
+ s[8] = _mm_add_epi16(x[8], x[10]);
+ s[9] = _mm_add_epi16(x[9], x[11]);
+ s[10] = _mm_sub_epi16(x[8], x[10]);
+ s[11] = _mm_sub_epi16(x[9], x[11]);
+ s[12] = _mm_packs_epi32(v[8], v[9]);
+ s[13] = _mm_packs_epi32(v[10], v[11]);
+ s[14] = _mm_packs_epi32(v[12], v[13]);
+ s[15] = _mm_packs_epi32(v[14], v[15]);
+
+ // stage 4
+ u[0] = _mm_unpacklo_epi16(s[2], s[3]);
+ u[1] = _mm_unpackhi_epi16(s[2], s[3]);
+ u[2] = _mm_unpacklo_epi16(s[6], s[7]);
+ u[3] = _mm_unpackhi_epi16(s[6], s[7]);
+ u[4] = _mm_unpacklo_epi16(s[10], s[11]);
+ u[5] = _mm_unpackhi_epi16(s[10], s[11]);
+ u[6] = _mm_unpacklo_epi16(s[14], s[15]);
+ u[7] = _mm_unpackhi_epi16(s[14], s[15]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_m16_m16);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_m16_m16);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
+ v[8] = _mm_madd_epi16(u[4], k__cospi_p16_p16);
+ v[9] = _mm_madd_epi16(u[5], k__cospi_p16_p16);
+ v[10] = _mm_madd_epi16(u[4], k__cospi_m16_p16);
+ v[11] = _mm_madd_epi16(u[5], k__cospi_m16_p16);
+ v[12] = _mm_madd_epi16(u[6], k__cospi_m16_m16);
+ v[13] = _mm_madd_epi16(u[7], k__cospi_m16_m16);
+ v[14] = _mm_madd_epi16(u[6], k__cospi_p16_m16);
+ v[15] = _mm_madd_epi16(u[7], k__cospi_p16_m16);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+ u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
+ u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
+ u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
+ u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
+ u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
+ u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
+ u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
+ u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);
+
+ v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+ v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+ v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+ v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+ v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+ v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+ v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+ v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+ v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
+ in[0] = s[0];
+ in[1] = _mm_sub_epi16(kZero, s[8]);
+ in[2] = s[12];
+ in[3] = _mm_sub_epi16(kZero, s[4]);
+ in[4] = _mm_packs_epi32(v[4], v[5]);
+ in[5] = _mm_packs_epi32(v[12], v[13]);
+ in[6] = _mm_packs_epi32(v[8], v[9]);
+ in[7] = _mm_packs_epi32(v[0], v[1]);
+ in[8] = _mm_packs_epi32(v[2], v[3]);
+ in[9] = _mm_packs_epi32(v[10], v[11]);
+ in[10] = _mm_packs_epi32(v[14], v[15]);
+ in[11] = _mm_packs_epi32(v[6], v[7]);
+ in[12] = s[5];
+ in[13] = _mm_sub_epi16(kZero, s[13]);
+ in[14] = s[9];
+ in[15] = _mm_sub_epi16(kZero, s[1]);
+}
+
+void fdct16_1d_sse2(__m128i *in0, __m128i *in1) {
+ fdct16_1d_8col(in0);
+ fdct16_1d_8col(in1);
+ array_transpose_16x16(in0, in1);
+}
+
+void fadst16_1d_sse2(__m128i *in0, __m128i *in1) {
+ fadst16_1d_8col(in0);
+ fadst16_1d_8col(in1);
+ array_transpose_16x16(in0, in1);
+}
+
+void vp9_short_fht16x16_sse2(int16_t *input, int16_t *output,
+ int stride, int tx_type) {
+ __m128i in0[16], in1[16];
+ load_buffer_16x16(input, in0, in1, stride);
+ switch (tx_type) {
+ case 0: // DCT_DCT
+ fdct16_1d_sse2(in0, in1);
+ right_shift_16x16(in0, in1);
+ fdct16_1d_sse2(in0, in1);
+ break;
+ case 1: // ADST_DCT
+ fadst16_1d_sse2(in0, in1);
+ right_shift_16x16(in0, in1);
+ fdct16_1d_sse2(in0, in1);
+ break;
+ case 2: // DCT_ADST
+ fdct16_1d_sse2(in0, in1);
+ right_shift_16x16(in0, in1);
+ fadst16_1d_sse2(in0, in1);
+ break;
+ case 3: // ADST_ADST
+ fadst16_1d_sse2(in0, in1);
+ right_shift_16x16(in0, in1);
+ fadst16_1d_sse2(in0, in1);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ write_buffer_16x16(output, in0, in1, 16);
+}
+
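+// A minimal usage sketch (hypothetical buffers; tx_type values follow the
+// DCT_DCT/ADST_DCT/DCT_ADST/ADST_ADST order handled above, and the output
+// is always written with a stride of 16):
+//
+//   DECLARE_ALIGNED(16, int16_t, residual[16 * 16]);
+//   DECLARE_ALIGNED(16, int16_t, coeffs[16 * 16]);
+//   vp9_short_fht16x16_sse2(residual, coeffs, 16, 3);  // ADST_ADST
+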
+#define FDCT32x32_2D vp9_short_fdct32x32_rd_sse2
+#define FDCT32x32_HIGH_PRECISION 0
+#include "vp9/encoder/x86/vp9_dct32x32_sse2.c"
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
+
+#define FDCT32x32_2D vp9_short_fdct32x32_sse2
+#define FDCT32x32_HIGH_PRECISION 1
+#include "vp9/encoder/x86/vp9_dct32x32_sse2.c" // NOLINT
+#undef FDCT32x32_2D
+#undef FDCT32x32_HIGH_PRECISION
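+
+// The two blocks above compile the shared 32x32 transform source twice, as
+// a C-style template: FDCT32x32_2D names the function each pass emits, and
+// FDCT32x32_HIGH_PRECISION selects the precision of the intermediate
+// rounding (the _rd variant trades precision for speed).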
diff --git a/libvpx/vp9/encoder/x86/vp9_error_sse2.asm b/libvpx/vp9/encoder/x86/vp9_error_sse2.asm
new file mode 100644
index 0000000..1126fdb
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_error_sse2.asm
@@ -0,0 +1,74 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
+; int64_t vp9_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
+; int64_t *ssz)
+
+INIT_XMM sse2
+cglobal block_error, 3, 3, 8, uqc, dqc, size, ssz
+ pxor m4, m4 ; sse accumulator
+ pxor m6, m6 ; ssz accumulator
+ pxor m5, m5 ; dedicated zero register
+ lea uqcq, [uqcq+sizeq*2]
+ lea dqcq, [dqcq+sizeq*2]
+ neg sizeq
+.loop:
+ mova m2, [uqcq+sizeq*2]
+ mova m0, [dqcq+sizeq*2]
+ mova m3, [uqcq+sizeq*2+mmsize]
+ mova m1, [dqcq+sizeq*2+mmsize]
+ psubw m0, m2
+ psubw m1, m3
+ ; individual errors are max. 15bit+sign, so squares are 30bit, and
+ ; thus the sum of 2 should fit in a 31bit integer (+ unused sign bit)
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ pmaddwd m2, m2
+ pmaddwd m3, m3
+ ; accumulate in 64bit
+ punpckldq m7, m0, m5
+ punpckhdq m0, m5
+ paddq m4, m7
+ punpckldq m7, m1, m5
+ paddq m4, m0
+ punpckhdq m1, m5
+ paddq m4, m7
+ punpckldq m7, m2, m5
+ paddq m4, m1
+ punpckhdq m2, m5
+ paddq m6, m7
+ punpckldq m7, m3, m5
+ paddq m6, m2
+ punpckhdq m3, m5
+ paddq m6, m7
+ paddq m6, m3
+ add sizeq, mmsize
+ jl .loop
+
+ ; accumulate horizontally and store in return value
+ movhlps m5, m4
+ movhlps m7, m6
+ paddq m4, m5
+ paddq m6, m7
+%if ARCH_X86_64
+ movq rax, m4
+ movq [sszq], m6
+%else
+ mov eax, sszm
+ pshufd m5, m4, 0x1
+ movq [eax], m6
+ movd eax, m4
+ movd edx, m5
+%endif
+ RET
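+
+; For reference, a scalar sketch of the computation above (not part of the
+; build): the loop accumulates (coeff - dqcoeff)^2 into the return value and
+; coeff^2 into *ssz, widening to 64 bits just as the paddq chains do.
+;
+;   int64_t block_error(const int16_t *coeff, const int16_t *dqcoeff,
+;                       intptr_t block_size, int64_t *ssz) {
+;     int64_t error = 0, sqcoeff = 0;
+;     intptr_t i;
+;     for (i = 0; i < block_size; i++) {
+;       const int diff = coeff[i] - dqcoeff[i];
+;       error += diff * diff;
+;       sqcoeff += coeff[i] * coeff[i];
+;     }
+;     *ssz = sqcoeff;
+;     return error;
+;   }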
diff --git a/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h b/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h
new file mode 100644
index 0000000..ca80b8b
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_X86_VP9_MCOMP_X86_H_
+#define VP9_ENCODER_X86_VP9_MCOMP_X86_H_
+
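+/* When runtime CPU detection is compiled out, the #undef/#define pairs
+ * below statically rebind the generic motion-search entry points to their
+ * SIMD variants, so the calls resolve at compile time rather than through
+ * the RTCD function-pointer table. */
+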
+#if HAVE_SSE3
+#if !CONFIG_RUNTIME_CPU_DETECT
+
+#undef vp9_search_full_search
+#define vp9_search_full_search vp9_full_search_sadx3
+
+#undef vp9_search_refining_search
+#define vp9_search_refining_search vp9_refining_search_sadx4
+
+#undef vp9_search_diamond_search
+#define vp9_search_diamond_search vp9_diamond_search_sadx4
+
+#endif
+#endif
+
+#if HAVE_SSE4_1
+#if !CONFIG_RUNTIME_CPU_DETECT
+
+#undef vp9_search_full_search
+#define vp9_search_full_search vp9_full_search_sadx8
+
+#endif
+#endif
+
+#endif  // VP9_ENCODER_X86_VP9_MCOMP_X86_H_
+
diff --git a/libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm b/libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm
new file mode 100644
index 0000000..db30660
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm
@@ -0,0 +1,218 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION_RODATA
+pw_1: times 8 dw 1
+
+SECTION .text
+
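+; A scalar sketch of the per-coefficient quantization implemented below
+; (the b_32x32 variant also halves zbin/round and doubles shift, then
+; halves the dequantized result):
+;
+;   if (abs(coeff[i]) >= zbin + zbin_oq) {
+;     tmp = saturate16(abs(coeff[i]) + round);
+;     abs_q = (((tmp * quant) >> 16) + tmp) * shift >> 16;
+;     qcoeff[i]  = sign(coeff[i]) * abs_q;
+;     dqcoeff[i] = qcoeff[i] * dequant;
+;   } else {
+;     qcoeff[i] = dqcoeff[i] = 0;
+;   }
+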
+%macro QUANTIZE_FN 2
+cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
+ shift, qcoeff, dqcoeff, dequant, zbin_oq, \
+ eob, scan, iscan
+ cmp dword skipm, 0
+ jne .blank
+
+  ; actual quantize loop - set up pointers, rounders, etc.
+ movifnidn coeffq, coeffmp
+ movifnidn ncoeffq, ncoeffmp
+ mov r2, dequantmp
+ movifnidn zbinq, zbinmp
+ movifnidn roundq, roundmp
+ movifnidn quantq, quantmp
+ movd m4, dword zbin_oqm ; m4 = zbin_oq
+ mova m0, [zbinq] ; m0 = zbin
+ punpcklwd m4, m4
+ mova m1, [roundq] ; m1 = round
+ pshufd m4, m4, 0
+ mova m2, [quantq] ; m2 = quant
+ paddw m0, m4 ; m0 = zbin + zbin_oq
+%ifidn %1, b_32x32
+ pcmpeqw m5, m5
+ psrlw m5, 15
+ paddw m0, m5
+ paddw m1, m5
+ psrlw m0, 1 ; m0 = (m0 + 1) / 2
+ psrlw m1, 1 ; m1 = (m1 + 1) / 2
+%endif
+ mova m3, [r2q] ; m3 = dequant
+ psubw m0, [pw_1]
+ mov r2, shiftmp
+ mov r3, qcoeffmp
+ mova m4, [r2] ; m4 = shift
+ mov r4, dqcoeffmp
+ mov r5, iscanmp
+%ifidn %1, b_32x32
+ psllw m4, 1
+%endif
+ pxor m5, m5 ; m5 = dedicated zero
+ DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, d6, eob
+ lea coeffq, [ coeffq+ncoeffq*2]
+ lea iscanq, [ iscanq+ncoeffq*2]
+ lea qcoeffq, [ qcoeffq+ncoeffq*2]
+ lea dqcoeffq, [dqcoeffq+ncoeffq*2]
+ neg ncoeffq
+
+ ; get DC and first 15 AC coeffs
+ mova m9, [ coeffq+ncoeffq*2+ 0] ; m9 = c[i]
+ mova m10, [ coeffq+ncoeffq*2+16] ; m10 = c[i]
+ pabsw m6, m9 ; m6 = abs(m9)
+ pabsw m11, m10 ; m11 = abs(m10)
+ pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin
+ punpckhqdq m0, m0
+ pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin
+ paddsw m6, m1 ; m6 += round
+ punpckhqdq m1, m1
+ paddsw m11, m1 ; m11 += round
+ pmulhw m8, m6, m2 ; m8 = m6*q>>16
+ punpckhqdq m2, m2
+ pmulhw m13, m11, m2 ; m13 = m11*q>>16
+ paddw m8, m6 ; m8 += m6
+ paddw m13, m11 ; m13 += m11
+ pmulhw m8, m4 ; m8 = m8*qsh>>16
+ punpckhqdq m4, m4
+ pmulhw m13, m4 ; m13 = m13*qsh>>16
+ psignw m8, m9 ; m8 = reinsert sign
+ psignw m13, m10 ; m13 = reinsert sign
+ pand m8, m7
+ pand m13, m12
+ mova [qcoeffq+ncoeffq*2+ 0], m8
+ mova [qcoeffq+ncoeffq*2+16], m13
+%ifidn %1, b_32x32
+ pabsw m8, m8
+ pabsw m13, m13
+%endif
+ pmullw m8, m3 ; dqc[i] = qc[i] * q
+ punpckhqdq m3, m3
+ pmullw m13, m3 ; dqc[i] = qc[i] * q
+%ifidn %1, b_32x32
+ psrlw m8, 1
+ psrlw m13, 1
+ psignw m8, m9
+ psignw m13, m10
+%endif
+ mova [dqcoeffq+ncoeffq*2+ 0], m8
+ mova [dqcoeffq+ncoeffq*2+16], m13
+ pcmpeqw m8, m5 ; m8 = c[i] == 0
+ pcmpeqw m13, m5 ; m13 = c[i] == 0
+ mova m6, [ iscanq+ncoeffq*2+ 0] ; m6 = scan[i]
+ mova m11, [ iscanq+ncoeffq*2+16] ; m11 = scan[i]
+ psubw m6, m7 ; m6 = scan[i] + 1
+ psubw m11, m12 ; m11 = scan[i] + 1
+ pandn m8, m6 ; m8 = max(eob)
+ pandn m13, m11 ; m13 = max(eob)
+ pmaxsw m8, m13
+ add ncoeffq, mmsize
+ jz .accumulate_eob
+
+.ac_only_loop:
+ mova m9, [ coeffq+ncoeffq*2+ 0] ; m9 = c[i]
+ mova m10, [ coeffq+ncoeffq*2+16] ; m10 = c[i]
+ pabsw m6, m9 ; m6 = abs(m9)
+ pabsw m11, m10 ; m11 = abs(m10)
+ pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin
+ pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin
+%ifidn %1, b_32x32
+ pmovmskb r6, m7
+ pmovmskb r2, m12
+ or r6, r2
+ jz .skip_iter
+%endif
+ paddsw m6, m1 ; m6 += round
+ paddsw m11, m1 ; m11 += round
+ pmulhw m14, m6, m2 ; m14 = m6*q>>16
+ pmulhw m13, m11, m2 ; m13 = m11*q>>16
+ paddw m14, m6 ; m14 += m6
+ paddw m13, m11 ; m13 += m11
+ pmulhw m14, m4 ; m14 = m14*qsh>>16
+ pmulhw m13, m4 ; m13 = m13*qsh>>16
+ psignw m14, m9 ; m14 = reinsert sign
+ psignw m13, m10 ; m13 = reinsert sign
+ pand m14, m7
+ pand m13, m12
+ mova [qcoeffq+ncoeffq*2+ 0], m14
+ mova [qcoeffq+ncoeffq*2+16], m13
+%ifidn %1, b_32x32
+ pabsw m14, m14
+ pabsw m13, m13
+%endif
+ pmullw m14, m3 ; dqc[i] = qc[i] * q
+ pmullw m13, m3 ; dqc[i] = qc[i] * q
+%ifidn %1, b_32x32
+ psrlw m14, 1
+ psrlw m13, 1
+ psignw m14, m9
+ psignw m13, m10
+%endif
+ mova [dqcoeffq+ncoeffq*2+ 0], m14
+ mova [dqcoeffq+ncoeffq*2+16], m13
+ pcmpeqw m14, m5 ; m14 = c[i] == 0
+ pcmpeqw m13, m5 ; m13 = c[i] == 0
+ mova m6, [ iscanq+ncoeffq*2+ 0] ; m6 = scan[i]
+ mova m11, [ iscanq+ncoeffq*2+16] ; m11 = scan[i]
+ psubw m6, m7 ; m6 = scan[i] + 1
+ psubw m11, m12 ; m11 = scan[i] + 1
+ pandn m14, m6 ; m14 = max(eob)
+ pandn m13, m11 ; m13 = max(eob)
+ pmaxsw m8, m14
+ pmaxsw m8, m13
+ add ncoeffq, mmsize
+ jl .ac_only_loop
+
+%ifidn %1, b_32x32
+ jmp .accumulate_eob
+.skip_iter:
+ mova [qcoeffq+ncoeffq*2+ 0], m5
+ mova [qcoeffq+ncoeffq*2+16], m5
+ mova [dqcoeffq+ncoeffq*2+ 0], m5
+ mova [dqcoeffq+ncoeffq*2+16], m5
+ add ncoeffq, mmsize
+ jl .ac_only_loop
+%endif
+
+.accumulate_eob:
+ ; horizontally accumulate/max eobs and write into [eob] memory pointer
+ mov r2, eobmp
+ pshufd m7, m8, 0xe
+ pmaxsw m8, m7
+ pshuflw m7, m8, 0xe
+ pmaxsw m8, m7
+ pshuflw m7, m8, 0x1
+ pmaxsw m8, m7
+ pextrw [r2], m8, 0
+ RET
+
+ ; skip-block, i.e. just write all zeroes
+.blank:
+ mov r0, dqcoeffmp
+ movifnidn ncoeffq, ncoeffmp
+ mov r2, qcoeffmp
+ mov r3, eobmp
+ DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
+ lea dqcoeffq, [dqcoeffq+ncoeffq*2]
+ lea qcoeffq, [ qcoeffq+ncoeffq*2]
+ neg ncoeffq
+ pxor m7, m7
+.blank_loop:
+ mova [dqcoeffq+ncoeffq*2+ 0], m7
+ mova [dqcoeffq+ncoeffq*2+16], m7
+ mova [qcoeffq+ncoeffq*2+ 0], m7
+ mova [qcoeffq+ncoeffq*2+16], m7
+ add ncoeffq, mmsize
+ jl .blank_loop
+ mov word [eobq], 0
+ RET
+%endmacro
+
+INIT_XMM ssse3
+QUANTIZE_FN b, 6
+QUANTIZE_FN b_32x32, 7
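
Taken together with the inline comments, the quantize loop above reduces per
coefficient to the C sketch below (illustrative names; idx selects the DC
constant for coefficient 0 and the AC constant otherwise). The b_32x32 variant
additionally halves zbin and round with rounding, doubles the shift value, and
halves the dequantized result.

    #include <stdint.h>

    static void quantize_coeff_sketch(int i, const int16_t *coeff,
                                      const int16_t *zbin, const int16_t *round,
                                      const int16_t *quant, const int16_t *shift,
                                      const int16_t *dequant, int zbin_oq,
                                      int16_t *qcoeff, int16_t *dqcoeff,
                                      const int16_t *iscan, int *eob) {
      const int idx = (i != 0);
      const int c = coeff[i];
      const int v = c < 0 ? -c : c;                /* pabsw */
      if (v >= zbin[idx] + zbin_oq) {              /* zbin check (m7/m12) */
        int tmp = v + round[idx];
        tmp = ((((tmp * quant[idx]) >> 16) + tmp) * shift[idx]) >> 16;
        qcoeff[i] = (int16_t)(c < 0 ? -tmp : tmp); /* psignw reinserts sign */
        dqcoeff[i] = (int16_t)(qcoeff[i] * dequant[idx]);
        if (tmp && iscan[i] + 1 > *eob)
          *eob = iscan[i] + 1;                     /* running max (m8) */
      } else {
        qcoeff[i] = dqcoeff[i] = 0;
      }
    }
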
diff --git a/libvpx/vp9/encoder/x86/vp9_sad4d_sse2.asm b/libvpx/vp9/encoder/x86/vp9_sad4d_sse2.asm
new file mode 100644
index 0000000..b493628
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad4d_sse2.asm
@@ -0,0 +1,231 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
+; PROCESS_4x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_4x2x4 5-6 0
+ movd m0, [srcq +%2]
+%if %1 == 1
+ movd m6, [ref1q+%3]
+ movd m4, [ref2q+%3]
+ movd m7, [ref3q+%3]
+ movd m5, [ref4q+%3]
+ punpckldq m0, [srcq +%4]
+ punpckldq m6, [ref1q+%5]
+ punpckldq m4, [ref2q+%5]
+ punpckldq m7, [ref3q+%5]
+ punpckldq m5, [ref4q+%5]
+ psadbw m6, m0
+ psadbw m4, m0
+ psadbw m7, m0
+ psadbw m5, m0
+ punpckldq m6, m4
+ punpckldq m7, m5
+%else
+ movd m1, [ref1q+%3]
+ movd m2, [ref2q+%3]
+ movd m3, [ref3q+%3]
+ movd m4, [ref4q+%3]
+ punpckldq m0, [srcq +%4]
+ punpckldq m1, [ref1q+%5]
+ punpckldq m2, [ref2q+%5]
+ punpckldq m3, [ref3q+%5]
+ punpckldq m4, [ref4q+%5]
+ psadbw m1, m0
+ psadbw m2, m0
+ psadbw m3, m0
+ psadbw m4, m0
+ punpckldq m1, m2
+ punpckldq m3, m4
+ paddd m6, m1
+ paddd m7, m3
+%endif
+%if %6 == 1
+ lea srcq, [srcq +src_strideq*2]
+ lea ref1q, [ref1q+ref_strideq*2]
+ lea ref2q, [ref2q+ref_strideq*2]
+ lea ref3q, [ref3q+ref_strideq*2]
+ lea ref4q, [ref4q+ref_strideq*2]
+%endif
+%endmacro
+
+; PROCESS_8x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_8x2x4 5-6 0
+ movh m0, [srcq +%2]
+%if %1 == 1
+ movh m4, [ref1q+%3]
+ movh m5, [ref2q+%3]
+ movh m6, [ref3q+%3]
+ movh m7, [ref4q+%3]
+ movhps m0, [srcq +%4]
+ movhps m4, [ref1q+%5]
+ movhps m5, [ref2q+%5]
+ movhps m6, [ref3q+%5]
+ movhps m7, [ref4q+%5]
+ psadbw m4, m0
+ psadbw m5, m0
+ psadbw m6, m0
+ psadbw m7, m0
+%else
+ movh m1, [ref1q+%3]
+ movh m2, [ref2q+%3]
+ movh m3, [ref3q+%3]
+ movhps m0, [srcq +%4]
+ movhps m1, [ref1q+%5]
+ movhps m2, [ref2q+%5]
+ movhps m3, [ref3q+%5]
+ psadbw m1, m0
+ psadbw m2, m0
+ psadbw m3, m0
+ paddd m4, m1
+ movh m1, [ref4q+%3]
+ movhps m1, [ref4q+%5]
+ paddd m5, m2
+ paddd m6, m3
+ psadbw m1, m0
+ paddd m7, m1
+%endif
+%if %6 == 1
+ lea srcq, [srcq +src_strideq*2]
+ lea ref1q, [ref1q+ref_strideq*2]
+ lea ref2q, [ref2q+ref_strideq*2]
+ lea ref3q, [ref3q+ref_strideq*2]
+ lea ref4q, [ref4q+ref_strideq*2]
+%endif
+%endmacro
+
+; PROCESS_16x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_16x2x4 5-6 0
+ ; 1st 16 px
+ mova m0, [srcq +%2]
+%if %1 == 1
+ movu m4, [ref1q+%3]
+ movu m5, [ref2q+%3]
+ movu m6, [ref3q+%3]
+ movu m7, [ref4q+%3]
+ psadbw m4, m0
+ psadbw m5, m0
+ psadbw m6, m0
+ psadbw m7, m0
+%else
+ movu m1, [ref1q+%3]
+ movu m2, [ref2q+%3]
+ movu m3, [ref3q+%3]
+ psadbw m1, m0
+ psadbw m2, m0
+ psadbw m3, m0
+ paddd m4, m1
+ movu m1, [ref4q+%3]
+ paddd m5, m2
+ paddd m6, m3
+ psadbw m1, m0
+ paddd m7, m1
+%endif
+
+ ; 2nd 16 px
+ mova m0, [srcq +%4]
+ movu m1, [ref1q+%5]
+ movu m2, [ref2q+%5]
+ movu m3, [ref3q+%5]
+ psadbw m1, m0
+ psadbw m2, m0
+ psadbw m3, m0
+ paddd m4, m1
+ movu m1, [ref4q+%5]
+ paddd m5, m2
+ paddd m6, m3
+%if %6 == 1
+ lea srcq, [srcq +src_strideq*2]
+ lea ref1q, [ref1q+ref_strideq*2]
+ lea ref2q, [ref2q+ref_strideq*2]
+ lea ref3q, [ref3q+ref_strideq*2]
+ lea ref4q, [ref4q+ref_strideq*2]
+%endif
+ psadbw m1, m0
+ paddd m7, m1
+%endmacro
+
+; PROCESS_32x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_32x2x4 5-6 0
+ PROCESS_16x2x4 %1, %2, %3, %2 + 16, %3 + 16
+ PROCESS_16x2x4 0, %4, %5, %4 + 16, %5 + 16, %6
+%endmacro
+
+; PROCESS_64x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_64x2x4 5-6 0
+ PROCESS_32x2x4 %1, %2, %3, %2 + 32, %3 + 32
+ PROCESS_32x2x4 0, %4, %5, %4 + 32, %5 + 32, %6
+%endmacro
+
+; void vp9_sadNxNx4d_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref[4], int ref_stride,
+; unsigned int res[4]);
+; where NxN = 64x64, 64x32, 32x64, 32x32, 32x16, 16x32, 16x16, 16x8, 8x16, 8x8, 8x4, 4x8 or 4x4
+%macro SADNXN4D 2
+%if UNIX64
+cglobal sad%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, \
+ res, ref2, ref3, ref4
+%else
+cglobal sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
+ ref2, ref3, ref4
+%endif
+ movsxdifnidn src_strideq, src_strided
+ movsxdifnidn ref_strideq, ref_strided
+ mov ref2q, [ref1q+gprsize*1]
+ mov ref3q, [ref1q+gprsize*2]
+ mov ref4q, [ref1q+gprsize*3]
+ mov ref1q, [ref1q+gprsize*0]
+
+ PROCESS_%1x2x4 1, 0, 0, src_strideq, ref_strideq, 1
+%rep (%2-4)/2
+ PROCESS_%1x2x4 0, 0, 0, src_strideq, ref_strideq, 1
+%endrep
+ PROCESS_%1x2x4 0, 0, 0, src_strideq, ref_strideq, 0
+
+%if mmsize == 16
+ pslldq m5, 4
+ pslldq m7, 4
+ por m4, m5
+ por m6, m7
+ mova m5, m4
+ mova m7, m6
+ punpcklqdq m4, m6
+ punpckhqdq m5, m7
+ movifnidn r4, r4mp
+ paddd m4, m5
+ movu [r4], m4
+ RET
+%else
+ movifnidn r4, r4mp
+ movq [r4+0], m6
+ movq [r4+8], m7
+ RET
+%endif
+%endmacro
+
+INIT_XMM sse2
+SADNXN4D 64, 64
+SADNXN4D 64, 32
+SADNXN4D 32, 64
+SADNXN4D 32, 32
+SADNXN4D 32, 16
+SADNXN4D 16, 32
+SADNXN4D 16, 16
+SADNXN4D 16, 8
+SADNXN4D 8, 16
+SADNXN4D 8, 8
+SADNXN4D 8, 4
+
+INIT_MMX sse
+SADNXN4D 4, 8
+SADNXN4D 4, 4
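
In scalar terms each instantiation above scores one WxH source block against
four reference candidates in a single pass; a C sketch of the contract (the
helper name is illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    static void sad_nxnx4d_sketch(const uint8_t *src, int src_stride,
                                  const uint8_t *const ref[4], int ref_stride,
                                  unsigned int res[4], int w, int h) {
      int r, x, y;
      for (r = 0; r < 4; r++) {
        unsigned int sad = 0;
        for (y = 0; y < h; y++)
          for (x = 0; x < w; x++)
            sad += abs(src[y * src_stride + x] - ref[r][y * ref_stride + x]);
        res[r] = sad;
      }
    }

Processing all four candidates per row keeps the source pixels in a register
and shares the loads across the four psadbw chains.
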
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_mmx.asm b/libvpx/vp9/encoder/x86/vp9_sad_mmx.asm
new file mode 100644
index 0000000..32fdd23
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_mmx.asm
@@ -0,0 +1,427 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+global sym(vp9_sad16x16_mmx) PRIVATE
+global sym(vp9_sad8x16_mmx) PRIVATE
+global sym(vp9_sad8x8_mmx) PRIVATE
+global sym(vp9_sad4x4_mmx) PRIVATE
+global sym(vp9_sad16x8_mmx) PRIVATE
+
+;unsigned int vp9_sad16x16_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad16x16_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ lea rcx, [rsi+rax*8]
+
+ lea rcx, [rcx+rax*8]
+ pxor mm7, mm7
+
+ pxor mm6, mm6
+
+.x16x16sad_mmx_loop:
+
+ movq mm0, QWORD PTR [rsi]
+ movq mm2, QWORD PTR [rsi+8]
+
+ movq mm1, QWORD PTR [rdi]
+ movq mm3, QWORD PTR [rdi+8]
+
+ movq mm4, mm0
+ movq mm5, mm2
+
+ psubusb mm0, mm1
+ psubusb mm1, mm4
+
+ psubusb mm2, mm3
+ psubusb mm3, mm5
+
+ por mm0, mm1
+ por mm2, mm3
+
+ movq mm1, mm0
+ movq mm3, mm2
+
+ punpcklbw mm0, mm6
+ punpcklbw mm2, mm6
+
+ punpckhbw mm1, mm6
+ punpckhbw mm3, mm6
+
+ paddw mm0, mm2
+ paddw mm1, mm3
+
+
+ lea rsi, [rsi+rax]
+ add rdi, rdx
+
+ paddw mm7, mm0
+ paddw mm7, mm1
+
+ cmp rsi, rcx
+ jne .x16x16sad_mmx_loop
+
+
+ movq mm0, mm7
+
+ punpcklwd mm0, mm6
+ punpckhwd mm7, mm6
+
+ paddw mm0, mm7
+ movq mm7, mm0
+
+
+ psrlq mm0, 32
+ paddw mm7, mm0
+
+ movq rax, mm7
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_sad8x16_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad8x16_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ lea rcx, [rsi+rax*8]
+
+ lea rcx, [rcx+rax*8]
+ pxor mm7, mm7
+
+ pxor mm6, mm6
+
+.x8x16sad_mmx_loop:
+
+ movq mm0, QWORD PTR [rsi]
+ movq mm1, QWORD PTR [rdi]
+
+ movq mm2, mm0
+ psubusb mm0, mm1
+
+ psubusb mm1, mm2
+ por mm0, mm1
+
+ movq mm2, mm0
+ punpcklbw mm0, mm6
+
+ punpckhbw mm2, mm6
+ lea rsi, [rsi+rax]
+
+ add rdi, rdx
+ paddw mm7, mm0
+
+ paddw mm7, mm2
+ cmp rsi, rcx
+
+ jne .x8x16sad_mmx_loop
+
+ movq mm0, mm7
+ punpcklwd mm0, mm6
+
+ punpckhwd mm7, mm6
+ paddw mm0, mm7
+
+ movq mm7, mm0
+ psrlq mm0, 32
+
+ paddw mm7, mm0
+ movq rax, mm7
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_sad8x8_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad8x8_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ lea rcx, [rsi+rax*8]
+ pxor mm7, mm7
+
+ pxor mm6, mm6
+
+.x8x8sad_mmx_loop:
+
+ movq mm0, QWORD PTR [rsi]
+ movq mm1, QWORD PTR [rdi]
+
+ movq mm2, mm0
+ psubusb mm0, mm1
+
+ psubusb mm1, mm2
+ por mm0, mm1
+
+ movq mm2, mm0
+ punpcklbw mm0, mm6
+
+ punpckhbw mm2, mm6
+ paddw mm0, mm2
+
+ lea rsi, [rsi+rax]
+ add rdi, rdx
+
+ paddw mm7, mm0
+ cmp rsi, rcx
+
+ jne .x8x8sad_mmx_loop
+
+ movq mm0, mm7
+ punpcklwd mm0, mm6
+
+ punpckhwd mm7, mm6
+ paddw mm0, mm7
+
+ movq mm7, mm0
+ psrlq mm0, 32
+
+ paddw mm7, mm0
+ movq rax, mm7
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_sad4x4_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad4x4_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ movd mm0, DWORD PTR [rsi]
+ movd mm1, DWORD PTR [rdi]
+
+ movd mm2, DWORD PTR [rsi+rax]
+ movd mm3, DWORD PTR [rdi+rdx]
+
+ punpcklbw mm0, mm2
+ punpcklbw mm1, mm3
+
+ movq mm2, mm0
+ psubusb mm0, mm1
+
+ psubusb mm1, mm2
+ por mm0, mm1
+
+ movq mm2, mm0
+ pxor mm3, mm3
+
+ punpcklbw mm0, mm3
+ punpckhbw mm2, mm3
+
+ paddw mm0, mm2
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ movd mm4, DWORD PTR [rsi]
+ movd mm5, DWORD PTR [rdi]
+
+ movd mm6, DWORD PTR [rsi+rax]
+ movd mm7, DWORD PTR [rdi+rdx]
+
+ punpcklbw mm4, mm6
+ punpcklbw mm5, mm7
+
+ movq mm6, mm4
+ psubusb mm4, mm5
+
+ psubusb mm5, mm6
+ por mm4, mm5
+
+ movq mm5, mm4
+ punpcklbw mm4, mm3
+
+ punpckhbw mm5, mm3
+ paddw mm4, mm5
+
+ paddw mm0, mm4
+ movq mm1, mm0
+
+ punpcklwd mm0, mm3
+ punpckhwd mm1, mm3
+
+ paddw mm0, mm1
+ movq mm1, mm0
+
+ psrlq mm0, 32
+ paddw mm0, mm1
+
+ movq rax, mm0
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_sad16x8_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad16x8_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ lea rcx, [rsi+rax*8]
+ pxor mm7, mm7
+
+ pxor mm6, mm6
+
+.x16x8sad_mmx_loop:
+
+ movq mm0, [rsi]
+ movq mm1, [rdi]
+
+ movq mm2, [rsi+8]
+ movq mm3, [rdi+8]
+
+ movq mm4, mm0
+ movq mm5, mm2
+
+ psubusb mm0, mm1
+ psubusb mm1, mm4
+
+ psubusb mm2, mm3
+ psubusb mm3, mm5
+
+ por mm0, mm1
+ por mm2, mm3
+
+ movq mm1, mm0
+ movq mm3, mm2
+
+ punpcklbw mm0, mm6
+ punpckhbw mm1, mm6
+
+ punpcklbw mm2, mm6
+ punpckhbw mm3, mm6
+
+
+ paddw mm0, mm2
+ paddw mm1, mm3
+
+ paddw mm0, mm1
+ lea rsi, [rsi+rax]
+
+ add rdi, rdx
+ paddw mm7, mm0
+
+ cmp rsi, rcx
+ jne .x16x8sad_mmx_loop
+
+ movq mm0, mm7
+ punpcklwd mm0, mm6
+
+ punpckhwd mm7, mm6
+ paddw mm0, mm7
+
+ movq mm7, mm0
+ psrlq mm0, 32
+
+ paddw mm7, mm0
+ movq rax, mm7
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_sse2.asm b/libvpx/vp9/encoder/x86/vp9_sad_sse2.asm
new file mode 100644
index 0000000..c4c5c54
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_sse2.asm
@@ -0,0 +1,267 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
+%macro SAD_FN 4
+%if %4 == 0
+%if %3 == 5
+cglobal sad%1x%2, 4, %3, 5, src, src_stride, ref, ref_stride, n_rows
+%else ; %3 == 7
+cglobal sad%1x%2, 4, %3, 5, src, src_stride, ref, ref_stride, \
+ src_stride3, ref_stride3, n_rows
+%endif ; %3 == 5/7
+%else ; avg
+%if %3 == 5
+cglobal sad%1x%2_avg, 5, 1 + %3, 5, src, src_stride, ref, ref_stride, \
+ second_pred, n_rows
+%else ; %3 == 7
+cglobal sad%1x%2_avg, 5, ARCH_X86_64 + %3, 5, src, src_stride, \
+ ref, ref_stride, \
+ second_pred, \
+ src_stride3, ref_stride3
+%if ARCH_X86_64
+%define n_rowsd r7d
+%else ; x86-32
+%define n_rowsd dword r0m
+%endif ; x86-32/64
+%endif ; %3 == 5/7
+%endif ; avg/sad
+ movsxdifnidn src_strideq, src_strided
+ movsxdifnidn ref_strideq, ref_strided
+%if %3 == 7
+ lea src_stride3q, [src_strideq*3]
+ lea ref_stride3q, [ref_strideq*3]
+%endif ; %3 == 7
+%endmacro
+
+; unsigned int vp9_sad64x{32,64}_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD64XN 1-2 0
+ SAD_FN 64, %1, 5, %2
+ mov n_rowsd, %1
+ pxor m0, m0
+.loop:
+ movu m1, [refq]
+ movu m2, [refq+16]
+ movu m3, [refq+32]
+ movu m4, [refq+48]
+%if %2 == 1
+ pavgb m1, [second_predq+mmsize*0]
+ pavgb m2, [second_predq+mmsize*1]
+ pavgb m3, [second_predq+mmsize*2]
+ pavgb m4, [second_predq+mmsize*3]
+ lea second_predq, [second_predq+mmsize*4]
+%endif
+ psadbw m1, [srcq]
+ psadbw m2, [srcq+16]
+ psadbw m3, [srcq+32]
+ psadbw m4, [srcq+48]
+ paddd m1, m2
+ paddd m3, m4
+ add refq, ref_strideq
+ paddd m0, m1
+ add srcq, src_strideq
+ paddd m0, m3
+ dec n_rowsd
+ jg .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+SAD64XN 64 ; sad64x64_sse2
+SAD64XN 32 ; sad64x32_sse2
+SAD64XN 64, 1 ; sad64x64_avg_sse2
+SAD64XN 32, 1 ; sad64x32_avg_sse2
+
+; unsigned int vp9_sad32x{16,32,64}_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD32XN 1-2 0
+ SAD_FN 32, %1, 5, %2
+ mov n_rowsd, %1/2
+ pxor m0, m0
+.loop:
+ movu m1, [refq]
+ movu m2, [refq+16]
+ movu m3, [refq+ref_strideq]
+ movu m4, [refq+ref_strideq+16]
+%if %2 == 1
+ pavgb m1, [second_predq+mmsize*0]
+ pavgb m2, [second_predq+mmsize*1]
+ pavgb m3, [second_predq+mmsize*2]
+ pavgb m4, [second_predq+mmsize*3]
+ lea second_predq, [second_predq+mmsize*4]
+%endif
+ psadbw m1, [srcq]
+ psadbw m2, [srcq+16]
+ psadbw m3, [srcq+src_strideq]
+ psadbw m4, [srcq+src_strideq+16]
+ paddd m1, m2
+ paddd m3, m4
+ lea refq, [refq+ref_strideq*2]
+ paddd m0, m1
+ lea srcq, [srcq+src_strideq*2]
+ paddd m0, m3
+ dec n_rowsd
+ jg .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+SAD32XN 64 ; sad32x64_sse2
+SAD32XN 32 ; sad32x32_sse2
+SAD32XN 16 ; sad32x16_sse2
+SAD32XN 64, 1 ; sad32x64_avg_sse2
+SAD32XN 32, 1 ; sad32x32_avg_sse2
+SAD32XN 16, 1 ; sad32x16_avg_sse2
+
+; unsigned int vp9_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD16XN 1-2 0
+ SAD_FN 16, %1, 7, %2
+ mov n_rowsd, %1/4
+ pxor m0, m0
+
+.loop:
+ movu m1, [refq]
+ movu m2, [refq+ref_strideq]
+ movu m3, [refq+ref_strideq*2]
+ movu m4, [refq+ref_stride3q]
+%if %2 == 1
+ pavgb m1, [second_predq+mmsize*0]
+ pavgb m2, [second_predq+mmsize*1]
+ pavgb m3, [second_predq+mmsize*2]
+ pavgb m4, [second_predq+mmsize*3]
+ lea second_predq, [second_predq+mmsize*4]
+%endif
+ psadbw m1, [srcq]
+ psadbw m2, [srcq+src_strideq]
+ psadbw m3, [srcq+src_strideq*2]
+ psadbw m4, [srcq+src_stride3q]
+ paddd m1, m2
+ paddd m3, m4
+ lea refq, [refq+ref_strideq*4]
+ paddd m0, m1
+ lea srcq, [srcq+src_strideq*4]
+ paddd m0, m3
+ dec n_rowsd
+ jg .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+SAD16XN 32 ; sad16x32_sse2
+SAD16XN 16 ; sad16x16_sse2
+SAD16XN 8 ; sad16x8_sse2
+SAD16XN 32, 1 ; sad16x32_avg_sse2
+SAD16XN 16, 1 ; sad16x16_avg_sse2
+SAD16XN 8, 1 ; sad16x8_avg_sse2
+
+; unsigned int vp9_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD8XN 1-2 0
+ SAD_FN 8, %1, 7, %2
+ mov n_rowsd, %1/4
+ pxor m0, m0
+
+.loop:
+ movh m1, [refq]
+ movhps m1, [refq+ref_strideq]
+ movh m2, [refq+ref_strideq*2]
+ movhps m2, [refq+ref_stride3q]
+%if %2 == 1
+ pavgb m1, [second_predq+mmsize*0]
+ pavgb m2, [second_predq+mmsize*1]
+ lea second_predq, [second_predq+mmsize*2]
+%endif
+ movh m3, [srcq]
+ movhps m3, [srcq+src_strideq]
+ movh m4, [srcq+src_strideq*2]
+ movhps m4, [srcq+src_stride3q]
+ psadbw m1, m3
+ psadbw m2, m4
+ lea refq, [refq+ref_strideq*4]
+ paddd m0, m1
+ lea srcq, [srcq+src_strideq*4]
+ paddd m0, m2
+ dec n_rowsd
+ jg .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+SAD8XN 16 ; sad8x16_sse2
+SAD8XN 8 ; sad8x8_sse2
+SAD8XN 4 ; sad8x4_sse2
+SAD8XN 16, 1 ; sad8x16_avg_sse2
+SAD8XN 8, 1 ; sad8x8_avg_sse2
+SAD8XN 4, 1 ; sad8x4_avg_sse2
+
+; unsigned int vp9_sad4x{4,8}_sse(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD4XN 1-2 0
+ SAD_FN 4, %1, 7, %2
+ mov n_rowsd, %1/4
+ pxor m0, m0
+
+.loop:
+ movd m1, [refq]
+ movd m2, [refq+ref_strideq]
+ movd m3, [refq+ref_strideq*2]
+ movd m4, [refq+ref_stride3q]
+ punpckldq m1, m2
+ punpckldq m3, m4
+%if %2 == 1
+ pavgb m1, [second_predq+mmsize*0]
+ pavgb m3, [second_predq+mmsize*1]
+ lea second_predq, [second_predq+mmsize*2]
+%endif
+ movd m2, [srcq]
+ movd m5, [srcq+src_strideq]
+ movd m4, [srcq+src_strideq*2]
+ movd m6, [srcq+src_stride3q]
+ punpckldq m2, m5
+ punpckldq m4, m6
+ psadbw m1, m2
+ psadbw m3, m4
+ lea refq, [refq+ref_strideq*4]
+ paddd m0, m1
+ lea srcq, [srcq+src_strideq*4]
+ paddd m0, m3
+ dec n_rowsd
+ jg .loop
+
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_MMX sse
+SAD4XN 8 ; sad4x8_sse
+SAD4XN 4 ; sad4x4_sse
+SAD4XN 8, 1 ; sad4x8_avg_sse
+SAD4XN 4, 1 ; sad4x4_avg_sse
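
The _avg variants above differ from the plain SADs only in blending the
reference with a second predictor before the comparison; pavgb is a rounding
average, so in scalar terms (illustrative helper, flattened to one dimension):

    #include <stdint.h>

    static unsigned int sad_avg_sketch(const uint8_t *src, const uint8_t *ref,
                                       const uint8_t *second_pred, int n) {
      unsigned int sad = 0;
      int i;
      for (i = 0; i < n; i++) {
        const int pred = (ref[i] + second_pred[i] + 1) >> 1; /* pavgb rounds up */
        const int diff = src[i] - pred;
        sad += diff < 0 ? -diff : diff;
      }
      return sad;
    }
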
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_sse3.asm b/libvpx/vp9/encoder/x86/vp9_sad_sse3.asm
new file mode 100644
index 0000000..2b90a5d
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_sse3.asm
@@ -0,0 +1,378 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro STACK_FRAME_CREATE_X3 0
+%if ABI_IS_32BIT
+ %define src_ptr rsi
+ %define src_stride rax
+ %define ref_ptr rdi
+ %define ref_stride rdx
+ %define end_ptr rcx
+ %define ret_var rbx
+ %define result_ptr arg(4)
+ %define max_err arg(4)
+ %define height dword ptr arg(4)
+ push rbp
+ mov rbp, rsp
+ push rsi
+ push rdi
+ push rbx
+
+ mov rsi, arg(0) ; src_ptr
+ mov rdi, arg(2) ; ref_ptr
+
+ movsxd rax, dword ptr arg(1) ; src_stride
+ movsxd rdx, dword ptr arg(3) ; ref_stride
+%else
+ %if LIBVPX_YASM_WIN64
+ SAVE_XMM 7, u
+ %define src_ptr rcx
+ %define src_stride rdx
+ %define ref_ptr r8
+ %define ref_stride r9
+ %define end_ptr r10
+ %define ret_var r11
+ %define result_ptr [rsp+xmm_stack_space+8+4*8]
+ %define max_err [rsp+xmm_stack_space+8+4*8]
+ %define height dword ptr [rsp+xmm_stack_space+8+4*8]
+ %else
+ %define src_ptr rdi
+ %define src_stride rsi
+ %define ref_ptr rdx
+ %define ref_stride rcx
+ %define end_ptr r9
+ %define ret_var r10
+ %define result_ptr r8
+ %define max_err r8
+ %define height r8
+ %endif
+%endif
+
+%endmacro
+
+%macro STACK_FRAME_DESTROY_X3 0
+ %define src_ptr
+ %define src_stride
+ %define ref_ptr
+ %define ref_stride
+ %define end_ptr
+ %define ret_var
+ %define result_ptr
+ %define max_err
+ %define height
+
+%if ABI_IS_32BIT
+ pop rbx
+ pop rdi
+ pop rsi
+ pop rbp
+%else
+ %if LIBVPX_YASM_WIN64
+ RESTORE_XMM
+ %endif
+%endif
+ ret
+%endmacro
+
+%macro PROCESS_16X2X3 5
+%if %1==0
+ movdqa xmm0, XMMWORD PTR [%2]
+ lddqu xmm5, XMMWORD PTR [%3]
+ lddqu xmm6, XMMWORD PTR [%3+1]
+ lddqu xmm7, XMMWORD PTR [%3+2]
+
+ psadbw xmm5, xmm0
+ psadbw xmm6, xmm0
+ psadbw xmm7, xmm0
+%else
+ movdqa xmm0, XMMWORD PTR [%2]
+ lddqu xmm1, XMMWORD PTR [%3]
+ lddqu xmm2, XMMWORD PTR [%3+1]
+ lddqu xmm3, XMMWORD PTR [%3+2]
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endif
+ movdqa xmm0, XMMWORD PTR [%2+%4]
+ lddqu xmm1, XMMWORD PTR [%3+%5]
+ lddqu xmm2, XMMWORD PTR [%3+%5+1]
+ lddqu xmm3, XMMWORD PTR [%3+%5+2]
+
+%if %1==0 || %1==1
+ lea %2, [%2+%4*2]
+ lea %3, [%3+%5*2]
+%endif
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endmacro
+
+%macro PROCESS_8X2X3 5
+%if %1==0
+ movq mm0, QWORD PTR [%2]
+ movq mm5, QWORD PTR [%3]
+ movq mm6, QWORD PTR [%3+1]
+ movq mm7, QWORD PTR [%3+2]
+
+ psadbw mm5, mm0
+ psadbw mm6, mm0
+ psadbw mm7, mm0
+%else
+ movq mm0, QWORD PTR [%2]
+ movq mm1, QWORD PTR [%3]
+ movq mm2, QWORD PTR [%3+1]
+ movq mm3, QWORD PTR [%3+2]
+
+ psadbw mm1, mm0
+ psadbw mm2, mm0
+ psadbw mm3, mm0
+
+ paddw mm5, mm1
+ paddw mm6, mm2
+ paddw mm7, mm3
+%endif
+ movq mm0, QWORD PTR [%2+%4]
+ movq mm1, QWORD PTR [%3+%5]
+ movq mm2, QWORD PTR [%3+%5+1]
+ movq mm3, QWORD PTR [%3+%5+2]
+
+%if %1==0 || %1==1
+ lea %2, [%2+%4*2]
+ lea %3, [%3+%5*2]
+%endif
+
+ psadbw mm1, mm0
+ psadbw mm2, mm0
+ psadbw mm3, mm0
+
+ paddw mm5, mm1
+ paddw mm6, mm2
+ paddw mm7, mm3
+%endmacro
+
+;void vp9_sad16x16x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad16x16x3_sse3) PRIVATE
+sym(vp9_sad16x16x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ PROCESS_16X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
+
+ mov rcx, result_ptr
+
+ movq xmm0, xmm5
+ psrldq xmm5, 8
+
+ paddw xmm0, xmm5
+ movd [rcx], xmm0
+;-
+ movq xmm0, xmm6
+ psrldq xmm6, 8
+
+ paddw xmm0, xmm6
+ movd [rcx+4], xmm0
+;-
+ movq xmm0, xmm7
+ psrldq xmm7, 8
+
+ paddw xmm0, xmm7
+ movd [rcx+8], xmm0
+
+ STACK_FRAME_DESTROY_X3
+
+;void vp9_sad16x8x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad16x8x3_sse3) PRIVATE
+sym(vp9_sad16x8x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ PROCESS_16X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
+
+ mov rcx, result_ptr
+
+ movq xmm0, xmm5
+ psrldq xmm5, 8
+
+ paddw xmm0, xmm5
+ movd [rcx], xmm0
+;-
+ movq xmm0, xmm6
+ psrldq xmm6, 8
+
+ paddw xmm0, xmm6
+ movd [rcx+4], xmm0
+;-
+ movq xmm0, xmm7
+ psrldq xmm7, 8
+
+ paddw xmm0, xmm7
+ movd [rcx+8], xmm0
+
+ STACK_FRAME_DESTROY_X3
+
+;void vp9_sad8x16x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad8x16x3_sse3) PRIVATE
+sym(vp9_sad8x16x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ PROCESS_8X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
+
+ mov rcx, result_ptr
+
+ punpckldq mm5, mm6
+
+ movq [rcx], mm5
+ movd [rcx+8], mm7
+
+ STACK_FRAME_DESTROY_X3
+
+;void vp9_sad8x8x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad8x8x3_sse3) PRIVATE
+sym(vp9_sad8x8x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ PROCESS_8X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
+
+ mov rcx, result_ptr
+
+ punpckldq mm5, mm6
+
+ movq [rcx], mm5
+ movd [rcx+8], mm7
+
+ STACK_FRAME_DESTROY_X3
+
+;void vp9_sad4x4x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad4x4x3_sse3) PRIVATE
+sym(vp9_sad4x4x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ movd mm0, DWORD PTR [src_ptr]
+ movd mm1, DWORD PTR [ref_ptr]
+
+ movd mm2, DWORD PTR [src_ptr+src_stride]
+ movd mm3, DWORD PTR [ref_ptr+ref_stride]
+
+ punpcklbw mm0, mm2
+ punpcklbw mm1, mm3
+
+ movd mm4, DWORD PTR [ref_ptr+1]
+ movd mm5, DWORD PTR [ref_ptr+2]
+
+ movd mm2, DWORD PTR [ref_ptr+ref_stride+1]
+ movd mm3, DWORD PTR [ref_ptr+ref_stride+2]
+
+ psadbw mm1, mm0
+
+ punpcklbw mm4, mm2
+ punpcklbw mm5, mm3
+
+ psadbw mm4, mm0
+ psadbw mm5, mm0
+
+ lea src_ptr, [src_ptr+src_stride*2]
+ lea ref_ptr, [ref_ptr+ref_stride*2]
+
+ movd mm0, DWORD PTR [src_ptr]
+ movd mm2, DWORD PTR [ref_ptr]
+
+ movd mm3, DWORD PTR [src_ptr+src_stride]
+ movd mm6, DWORD PTR [ref_ptr+ref_stride]
+
+ punpcklbw mm0, mm3
+ punpcklbw mm2, mm6
+
+ movd mm3, DWORD PTR [ref_ptr+1]
+ movd mm7, DWORD PTR [ref_ptr+2]
+
+ psadbw mm2, mm0
+
+ paddw mm1, mm2
+
+ movd mm2, DWORD PTR [ref_ptr+ref_stride+1]
+ movd mm6, DWORD PTR [ref_ptr+ref_stride+2]
+
+ punpcklbw mm3, mm2
+ punpcklbw mm7, mm6
+
+ psadbw mm3, mm0
+ psadbw mm7, mm0
+
+ paddw mm3, mm4
+ paddw mm7, mm5
+
+ mov rcx, result_ptr
+
+ punpckldq mm1, mm3
+
+ movq [rcx], mm1
+ movd [rcx+8], mm7
+
+ STACK_FRAME_DESTROY_X3
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_sse4.asm b/libvpx/vp9/encoder/x86/vp9_sad_sse4.asm
new file mode 100644
index 0000000..faf1768
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_sse4.asm
@@ -0,0 +1,359 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro PROCESS_16X2X8 1
+%if %1
+ movdqa xmm0, XMMWORD PTR [rsi]
+ movq xmm1, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ movq xmm2, MMWORD PTR [rdi+16]
+ punpcklqdq xmm1, xmm3
+ punpcklqdq xmm3, xmm2
+
+ movdqa xmm2, xmm1
+ mpsadbw xmm1, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+
+ psrldq xmm0, 8
+
+ movdqa xmm4, xmm3
+ mpsadbw xmm3, xmm0, 0x0
+ mpsadbw xmm4, xmm0, 0x5
+
+ paddw xmm1, xmm2
+ paddw xmm1, xmm3
+ paddw xmm1, xmm4
+%else
+ movdqa xmm0, XMMWORD PTR [rsi]
+ movq xmm5, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ movq xmm2, MMWORD PTR [rdi+16]
+ punpcklqdq xmm5, xmm3
+ punpcklqdq xmm3, xmm2
+
+ movdqa xmm2, xmm5
+ mpsadbw xmm5, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+
+ psrldq xmm0, 8
+
+ movdqa xmm4, xmm3
+ mpsadbw xmm3, xmm0, 0x0
+ mpsadbw xmm4, xmm0, 0x5
+
+ paddw xmm5, xmm2
+ paddw xmm5, xmm3
+ paddw xmm5, xmm4
+
+ paddw xmm1, xmm5
+%endif
+ movdqa xmm0, XMMWORD PTR [rsi + rax]
+ movq xmm5, MMWORD PTR [rdi+ rdx]
+ movq xmm3, MMWORD PTR [rdi+ rdx+8]
+ movq xmm2, MMWORD PTR [rdi+ rdx+16]
+ punpcklqdq xmm5, xmm3
+ punpcklqdq xmm3, xmm2
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ movdqa xmm2, xmm5
+ mpsadbw xmm5, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+
+ psrldq xmm0, 8
+ movdqa xmm4, xmm3
+ mpsadbw xmm3, xmm0, 0x0
+ mpsadbw xmm4, xmm0, 0x5
+
+ paddw xmm5, xmm2
+ paddw xmm5, xmm3
+ paddw xmm5, xmm4
+
+ paddw xmm1, xmm5
+%endmacro
+
+%macro PROCESS_8X2X8 1
+%if %1
+ movq xmm0, MMWORD PTR [rsi]
+ movq xmm1, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ punpcklqdq xmm1, xmm3
+
+ movdqa xmm2, xmm1
+ mpsadbw xmm1, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+ paddw xmm1, xmm2
+%else
+ movq xmm0, MMWORD PTR [rsi]
+ movq xmm5, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ punpcklqdq xmm5, xmm3
+
+ movdqa xmm2, xmm5
+ mpsadbw xmm5, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+ paddw xmm5, xmm2
+
+ paddw xmm1, xmm5
+%endif
+ movq xmm0, MMWORD PTR [rsi + rax]
+ movq xmm5, MMWORD PTR [rdi+ rdx]
+ movq xmm3, MMWORD PTR [rdi+ rdx+8]
+ punpcklqdq xmm5, xmm3
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ movdqa xmm2, xmm5
+ mpsadbw xmm5, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+ paddw xmm5, xmm2
+
+ paddw xmm1, xmm5
+%endmacro
+
+%macro PROCESS_4X2X8 1
+%if %1
+ movd xmm0, [rsi]
+ movq xmm1, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ punpcklqdq xmm1, xmm3
+
+ mpsadbw xmm1, xmm0, 0x0
+%else
+ movd xmm0, [rsi]
+ movq xmm5, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ punpcklqdq xmm5, xmm3
+
+ mpsadbw xmm5, xmm0, 0x0
+
+ paddw xmm1, xmm5
+%endif
+ movd xmm0, [rsi + rax]
+ movq xmm5, MMWORD PTR [rdi+ rdx]
+ movq xmm3, MMWORD PTR [rdi+ rdx+8]
+ punpcklqdq xmm5, xmm3
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ mpsadbw xmm5, xmm0, 0x0
+
+ paddw xmm1, xmm5
+%endmacro
+
+%macro WRITE_AS_INTS 0
+ mov rdi, arg(4) ;Results
+ pxor xmm0, xmm0
+ movdqa xmm2, xmm1
+ punpcklwd xmm1, xmm0
+ punpckhwd xmm2, xmm0
+
+ movdqa [rdi], xmm1
+ movdqa [rdi + 16], xmm2
+%endmacro
+
+;void vp9_sad16x16x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array);
+global sym(vp9_sad16x16x8_sse4) PRIVATE
+sym(vp9_sad16x16x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_16X2X8 1
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_sad16x8x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array
+;);
+global sym(vp9_sad16x8x8_sse4) PRIVATE
+sym(vp9_sad16x8x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_16X2X8 1
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_sad8x8x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array
+;);
+global sym(vp9_sad8x8x8_sse4) PRIVATE
+sym(vp9_sad8x8x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_8X2X8 1
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_sad8x16x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array
+;);
+global sym(vp9_sad8x16x8_sse4) PRIVATE
+sym(vp9_sad8x16x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_8X2X8 1
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_sad4x4x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array
+;);
+global sym(vp9_sad4x4x8_sse4) PRIVATE
+sym(vp9_sad4x4x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_4X2X8 1
+ PROCESS_4X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+
+
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_ssse3.asm b/libvpx/vp9/encoder/x86/vp9_sad_ssse3.asm
new file mode 100644
index 0000000..0cb3542
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_ssse3.asm
@@ -0,0 +1,370 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro PROCESS_16X2X3 1
+%if %1
+ movdqa xmm0, XMMWORD PTR [rsi]
+ lddqu xmm5, XMMWORD PTR [rdi]
+ lddqu xmm6, XMMWORD PTR [rdi+1]
+ lddqu xmm7, XMMWORD PTR [rdi+2]
+
+ psadbw xmm5, xmm0
+ psadbw xmm6, xmm0
+ psadbw xmm7, xmm0
+%else
+ movdqa xmm0, XMMWORD PTR [rsi]
+ lddqu xmm1, XMMWORD PTR [rdi]
+ lddqu xmm2, XMMWORD PTR [rdi+1]
+ lddqu xmm3, XMMWORD PTR [rdi+2]
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endif
+ movdqa xmm0, XMMWORD PTR [rsi+rax]
+ lddqu xmm1, XMMWORD PTR [rdi+rdx]
+ lddqu xmm2, XMMWORD PTR [rdi+rdx+1]
+ lddqu xmm3, XMMWORD PTR [rdi+rdx+2]
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endmacro
+
+%macro PROCESS_16X2X3_OFFSET 2
+%if %1
+ movdqa xmm0, XMMWORD PTR [rsi]
+ movdqa xmm4, XMMWORD PTR [rdi]
+ movdqa xmm7, XMMWORD PTR [rdi+16]
+
+ movdqa xmm5, xmm7
+ palignr xmm5, xmm4, %2
+
+ movdqa xmm6, xmm7
+ palignr xmm6, xmm4, (%2+1)
+
+ palignr xmm7, xmm4, (%2+2)
+
+ psadbw xmm5, xmm0
+ psadbw xmm6, xmm0
+ psadbw xmm7, xmm0
+%else
+ movdqa xmm0, XMMWORD PTR [rsi]
+ movdqa xmm4, XMMWORD PTR [rdi]
+ movdqa xmm3, XMMWORD PTR [rdi+16]
+
+ movdqa xmm1, xmm3
+ palignr xmm1, xmm4, %2
+
+ movdqa xmm2, xmm3
+ palignr xmm2, xmm4, (%2+1)
+
+ palignr xmm3, xmm4, (%2+2)
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endif
+ movdqa xmm0, XMMWORD PTR [rsi+rax]
+ movdqa xmm4, XMMWORD PTR [rdi+rdx]
+ movdqa xmm3, XMMWORD PTR [rdi+rdx+16]
+
+ movdqa xmm1, xmm3
+ palignr xmm1, xmm4, %2
+
+ movdqa xmm2, xmm3
+ palignr xmm2, xmm4, (%2+1)
+
+ palignr xmm3, xmm4, (%2+2)
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endmacro
+
+%macro PROCESS_16X16X3_OFFSET 2
+%2_aligned_by_%1:
+
+ sub rdi, %1
+
+ PROCESS_16X2X3_OFFSET 1, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+
+ jmp %2_store_off
+
+%endmacro
+
+%macro PROCESS_16X8X3_OFFSET 2
+%2_aligned_by_%1:
+
+ sub rdi, %1
+
+ PROCESS_16X2X3_OFFSET 1, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+
+ jmp %2_store_off
+
+%endmacro
+
+;void vp9_sad16x16x3_ssse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad16x16x3_ssse3) PRIVATE
+sym(vp9_sad16x16x3_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rcx
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ mov rdx, 0xf
+ and rdx, rdi
+
+ jmp .vp9_sad16x16x3_ssse3_skiptable
+.vp9_sad16x16x3_ssse3_jumptable:
+ dd .vp9_sad16x16x3_ssse3_aligned_by_0 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_1 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_2 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_3 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_4 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_5 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_6 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_7 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_8 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_9 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_10 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_11 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_12 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_13 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_14 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_15 - .vp9_sad16x16x3_ssse3_do_jump
+.vp9_sad16x16x3_ssse3_skiptable:
+
+ call .vp9_sad16x16x3_ssse3_do_jump
+.vp9_sad16x16x3_ssse3_do_jump:
+ pop rcx ; get the address of do_jump
+ mov rax, .vp9_sad16x16x3_ssse3_jumptable - .vp9_sad16x16x3_ssse3_do_jump
+ add rax, rcx ; get the absolute address of vp9_sad16x16x3_ssse3_jumptable
+
+ movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
+ add rcx, rax
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ jmp rcx
+
+ PROCESS_16X16X3_OFFSET 0, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 1, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 2, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 3, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 4, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 5, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 6, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 7, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 8, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 9, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 10, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 11, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 12, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 13, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 14, .vp9_sad16x16x3_ssse3
+
+.vp9_sad16x16x3_ssse3_aligned_by_15:
+ PROCESS_16X2X3 1
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+
+.vp9_sad16x16x3_ssse3_store_off:
+ mov rdi, arg(4) ;Results
+
+ movq xmm0, xmm5
+ psrldq xmm5, 8
+
+ paddw xmm0, xmm5
+ movd [rdi], xmm0
+;-
+ movq xmm0, xmm6
+ psrldq xmm6, 8
+
+ paddw xmm0, xmm6
+ movd [rdi+4], xmm0
+;-
+ movq xmm0, xmm7
+ psrldq xmm7, 8
+
+ paddw xmm0, xmm7
+ movd [rdi+8], xmm0
+
+ ; begin epilog
+ pop rcx
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_sad16x8x3_ssse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad16x8x3_ssse3) PRIVATE
+sym(vp9_sad16x8x3_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rcx
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ mov rdx, 0xf
+ and rdx, rdi
+
+ jmp .vp9_sad16x8x3_ssse3_skiptable
+.vp9_sad16x8x3_ssse3_jumptable:
+ dd .vp9_sad16x8x3_ssse3_aligned_by_0 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_1 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_2 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_3 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_4 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_5 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_6 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_7 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_8 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_9 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_10 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_11 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_12 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_13 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_14 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_15 - .vp9_sad16x8x3_ssse3_do_jump
+.vp9_sad16x8x3_ssse3_skiptable:
+
+ call .vp9_sad16x8x3_ssse3_do_jump
+.vp9_sad16x8x3_ssse3_do_jump:
+ pop rcx ; get the address of do_jump
+ mov rax, .vp9_sad16x8x3_ssse3_jumptable - .vp9_sad16x8x3_ssse3_do_jump
+ add rax, rcx ; get the absolute address of vp9_sad16x8x3_ssse3_jumptable
+
+ movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
+ add rcx, rax
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ jmp rcx
+
+ PROCESS_16X8X3_OFFSET 0, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 1, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 2, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 3, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 4, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 5, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 6, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 7, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 8, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 9, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 10, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 11, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 12, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 13, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 14, .vp9_sad16x8x3_ssse3
+
+.vp9_sad16x8x3_ssse3_aligned_by_15:
+
+ PROCESS_16X2X3 1
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+
+.vp9_sad16x8x3_ssse3_store_off:
+ mov rdi, arg(4) ;Results
+
+ movq xmm0, xmm5
+ psrldq xmm5, 8
+
+ paddw xmm0, xmm5
+ movd [rdi], xmm0
+;-
+ movq xmm0, xmm6
+ psrldq xmm6, 8
+
+ paddw xmm0, xmm6
+ movd [rdi+4], xmm0
+;-
+ movq xmm0, xmm7
+ psrldq xmm7, 8
+
+ paddw xmm0, xmm7
+ movd [rdi+8], xmm0
+
+ ; begin epilog
+ pop rcx
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/libvpx/vp9/encoder/x86/vp9_ssim_opt.asm b/libvpx/vp9/encoder/x86/vp9_ssim_opt.asm
new file mode 100644
index 0000000..455d10d
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_ssim_opt.asm
@@ -0,0 +1,216 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "vpx_ports/x86_abi_support.asm"
+
+; TABULATE_SSIM - sums sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr
+%macro TABULATE_SSIM 0
+ paddusw xmm15, xmm3 ; sum_s
+ paddusw xmm14, xmm4 ; sum_r
+ movdqa xmm1, xmm3
+ pmaddwd xmm1, xmm1
+ paddd xmm13, xmm1 ; sum_sq_s
+ movdqa xmm2, xmm4
+ pmaddwd xmm2, xmm2
+ paddd xmm12, xmm2 ; sum_sq_r
+ pmaddwd xmm3, xmm4
+ paddd xmm11, xmm3 ; sum_sxr
+%endmacro
+
+; Sum across the register %1 starting with q words
+%macro SUM_ACROSS_Q 1
+ movdqa xmm2,%1
+ punpckldq %1,xmm0
+ punpckhdq xmm2,xmm0
+ paddq %1,xmm2
+ movdqa xmm2,%1
+ punpcklqdq %1,xmm0
+ punpckhqdq xmm2,xmm0
+ paddq %1,xmm2
+%endmacro
+
+; Sum across the register %1 starting with words
+%macro SUM_ACROSS_W 1
+ movdqa xmm1, %1
+ punpcklwd %1,xmm0
+ punpckhwd xmm1,xmm0
+ paddd %1, xmm1
+ SUM_ACROSS_Q %1
+%endmacro
+;void vp9_ssim_parms_16x16_sse2(
+; unsigned char *s,
+; int sp,
+; unsigned char *r,
+; int rp,
+; unsigned long *sum_s,
+; unsigned long *sum_r,
+; unsigned long *sum_sq_s,
+; unsigned long *sum_sq_r,
+; unsigned long *sum_sxr);
+;
+; TODO: Use parm passing through structure; we probably don't need the pxors
+; (the calling app will initialize to 0), everything could easily fit in sse2
+; without too much hassle, and we can probably do better estimates with psadw
+; or pavgb. At this point this is just meant to be a first pass for calculating
+; all the parms needed for 16x16 ssim so we can play with dssim as distortion
+; in the mode selection code.
+global sym(vp9_ssim_parms_16x16_sse2) PRIVATE
+sym(vp9_ssim_parms_16x16_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 9
+ SAVE_XMM 15
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;s
+ mov rcx, arg(1) ;sp
+ mov rdi, arg(2) ;r
+ mov rax, arg(3) ;rp
+
+ pxor xmm0, xmm0
+ pxor xmm15,xmm15 ;sum_s
+ pxor xmm14,xmm14 ;sum_r
+ pxor xmm13,xmm13 ;sum_sq_s
+ pxor xmm12,xmm12 ;sum_sq_r
+ pxor xmm11,xmm11 ;sum_sxr
+
+ mov rdx, 16 ;row counter
+.NextRow:
+
+ ;grab source and reference pixels
+ movdqu xmm5, [rsi]
+ movdqu xmm6, [rdi]
+ movdqa xmm3, xmm5
+ movdqa xmm4, xmm6
+ punpckhbw xmm3, xmm0 ; high_s
+ punpckhbw xmm4, xmm0 ; high_r
+
+ TABULATE_SSIM
+
+ movdqa xmm3, xmm5
+ movdqa xmm4, xmm6
+ punpcklbw xmm3, xmm0 ; low_s
+ punpcklbw xmm4, xmm0 ; low_r
+
+ TABULATE_SSIM
+
+ add rsi, rcx ; next s row
+ add rdi, rax ; next r row
+
+ dec rdx ; counter
+ jnz .NextRow
+
+ SUM_ACROSS_W xmm15
+ SUM_ACROSS_W xmm14
+ SUM_ACROSS_Q xmm13
+ SUM_ACROSS_Q xmm12
+ SUM_ACROSS_Q xmm11
+
+ mov rdi,arg(4)
+ movd [rdi], xmm15;
+ mov rdi,arg(5)
+ movd [rdi], xmm14;
+ mov rdi,arg(6)
+ movd [rdi], xmm13;
+ mov rdi,arg(7)
+ movd [rdi], xmm12;
+ mov rdi,arg(8)
+ movd [rdi], xmm11;
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_ssim_parms_8x8_sse2(
+; unsigned char *s,
+; int sp,
+; unsigned char *r,
+; int rp,
+; unsigned long *sum_s,
+; unsigned long *sum_r,
+; unsigned long *sum_sq_s,
+; unsigned long *sum_sq_r,
+; unsigned long *sum_sxr);
+;
+; TODO: Use parm passing through structure; we probably don't need the pxors
+; (the calling app will initialize to 0), everything could easily fit in sse2
+; without too much hassle, and we can probably do better estimates with psadw
+; or pavgb. At this point this is just meant to be a first pass for calculating
+; all the parms needed for 8x8 ssim so we can play with dssim as distortion
+; in the mode selection code.
+global sym(vp9_ssim_parms_8x8_sse2) PRIVATE
+sym(vp9_ssim_parms_8x8_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 9
+ SAVE_XMM 15
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;s
+ mov rcx, arg(1) ;sp
+ mov rdi, arg(2) ;r
+ mov rax, arg(3) ;rp
+
+ pxor xmm0, xmm0
+ pxor xmm15,xmm15 ;sum_s
+ pxor xmm14,xmm14 ;sum_r
+ pxor xmm13,xmm13 ;sum_sq_s
+ pxor xmm12,xmm12 ;sum_sq_r
+ pxor xmm11,xmm11 ;sum_sxr
+
+ mov rdx, 8 ;row counter
+.NextRow:
+
+ ;grab source and reference pixels
+ movq xmm3, [rsi]
+ movq xmm4, [rdi]
+ punpcklbw xmm3, xmm0 ; low_s
+ punpcklbw xmm4, xmm0 ; low_r
+
+ TABULATE_SSIM
+
+ add rsi, rcx ; next s row
+ add rdi, rax ; next r row
+
+ dec rdx ; counter
+ jnz .NextRow
+
+ SUM_ACROSS_W xmm15
+ SUM_ACROSS_W xmm14
+ SUM_ACROSS_Q xmm13
+ SUM_ACROSS_Q xmm12
+ SUM_ACROSS_Q xmm11
+
+ mov rdi,arg(4)
+ movd [rdi], xmm15;
+ mov rdi,arg(5)
+ movd [rdi], xmm14;
+ mov rdi,arg(6)
+ movd [rdi], xmm13;
+ mov rdi,arg(7)
+ movd [rdi], xmm12;
+ mov rdi,arg(8)
+ movd [rdi], xmm11;
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
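
Both helpers above accumulate the same five block statistics named in the
prototype comments; in scalar terms (illustrative helper, size is 16 or 8):

    #include <stdint.h>

    static void ssim_parms_sketch(const uint8_t *s, int sp,
                                  const uint8_t *r, int rp, int size,
                                  unsigned long *sum_s, unsigned long *sum_r,
                                  unsigned long *sum_sq_s,
                                  unsigned long *sum_sq_r,
                                  unsigned long *sum_sxr) {
      int x, y;
      for (y = 0; y < size; y++) {
        for (x = 0; x < size; x++) {
          const unsigned a = s[y * sp + x];
          const unsigned b = r[y * rp + x];
          *sum_s    += a;      /* xmm15 */
          *sum_r    += b;      /* xmm14 */
          *sum_sq_s += a * a;  /* xmm13 */
          *sum_sq_r += b * b;  /* xmm12 */
          *sum_sxr  += a * b;  /* xmm11 */
        }
      }
    }
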
diff --git a/libvpx/vp9/encoder/x86/vp9_subpel_variance.asm b/libvpx/vp9/encoder/x86/vp9_subpel_variance.asm
new file mode 100644
index 0000000..533456b
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_subpel_variance.asm
@@ -0,0 +1,1300 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION_RODATA
+pw_8: times 8 dw 8
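+; 16 bilinear phases; each weight pair sums to 16, so a filtered sample is
+; (a*(16-w) + b*w + 8) >> 4, with pw_8 above supplying the rounding term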
+bilin_filter_m_sse2: times 8 dw 16
+ times 8 dw 0
+ times 8 dw 15
+ times 8 dw 1
+ times 8 dw 14
+ times 8 dw 2
+ times 8 dw 13
+ times 8 dw 3
+ times 8 dw 12
+ times 8 dw 4
+ times 8 dw 11
+ times 8 dw 5
+ times 8 dw 10
+ times 8 dw 6
+ times 8 dw 9
+ times 8 dw 7
+ times 16 dw 8
+ times 8 dw 7
+ times 8 dw 9
+ times 8 dw 6
+ times 8 dw 10
+ times 8 dw 5
+ times 8 dw 11
+ times 8 dw 4
+ times 8 dw 12
+ times 8 dw 3
+ times 8 dw 13
+ times 8 dw 2
+ times 8 dw 14
+ times 8 dw 1
+ times 8 dw 15
+
+bilin_filter_m_ssse3: times 8 db 16, 0
+ times 8 db 15, 1
+ times 8 db 14, 2
+ times 8 db 13, 3
+ times 8 db 12, 4
+ times 8 db 11, 5
+ times 8 db 10, 6
+ times 8 db 9, 7
+ times 16 db 8
+ times 8 db 7, 9
+ times 8 db 6, 10
+ times 8 db 5, 11
+ times 8 db 4, 12
+ times 8 db 3, 13
+ times 8 db 2, 14
+ times 8 db 1, 15
+
+SECTION .text
+
+; int vp9_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
+; int x_offset, int y_offset,
+; const uint8_t *dst, ptrdiff_t dst_stride,
+; int height, unsigned int *sse);
+;
+; This function returns the SE and stores SSE in the given pointer.
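+;
+; In scalar terms each pair of pixels below contributes (sketch only; in the
+; nonzero-offset branches src is bilinearly filtered first):
+;   diff = filtered_src[i] - dst[i];
+;   sum += diff;         (accumulated in m6, returned as the SE)
+;   sse += diff * diff;  (accumulated in m7, stored through *sse)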
+
+%macro SUM_SSE 6 ; src1, dst1, src2, dst2, sum, sse
+ psubw %3, %4
+ psubw %1, %2
+ paddw %5, %3
+ pmaddwd %3, %3
+ paddw %5, %1
+ pmaddwd %1, %1
+ paddd %6, %3
+ paddd %6, %1
+%endmacro
+
+%macro STORE_AND_RET 0
+%if mmsize == 16
+ ; if H=64 and W=16, we have 8 words of each 2(1bit)x64(6bit)x9bit=16bit
+ ; in m6, i.e. it _exactly_ fits in a signed word per word in the xmm reg.
+ ; We have to sign-extend it before adding the words within the register
+ ; and outputting to a dword.
+ pcmpgtw m5, m6 ; mask for 0 > x
+ movhlps m3, m7
+ punpcklwd m4, m6, m5
+ punpckhwd m6, m5 ; sign-extend m6 word->dword
+ paddd m7, m3
+ paddd m6, m4
+ pshufd m3, m7, 0x1
+ movhlps m4, m6
+ paddd m7, m3
+ paddd m6, m4
+ mov r1, ssem ; r1 = unsigned int *sse
+ pshufd m4, m6, 0x1
+ movd [r1], m7 ; store sse
+ paddd m6, m4
+ movd rax, m6 ; store sum as return value
+%else ; mmsize == 8
+ pshufw m4, m6, 0xe
+ pshufw m3, m7, 0xe
+ paddw m6, m4
+ paddd m7, m3
+ pcmpgtw m5, m6 ; mask for 0 > x
+ mov r1, ssem ; r1 = unsigned int *sse
+ punpcklwd m6, m5 ; sign-extend m6 word->dword
+ movd [r1], m7 ; store sse
+ pshufw m4, m6, 0xe
+ paddd m6, m4
+ movd rax, m6 ; store sum as return value
+%endif
+ RET
+%endmacro
+
+%macro SUBPEL_VARIANCE 1-2 0 ; W
+%if cpuflag(ssse3)
+%define bilin_filter_m bilin_filter_m_ssse3
+%define filter_idx_shift 4
+%else
+%define bilin_filter_m bilin_filter_m_sse2
+%define filter_idx_shift 5
+%endif
+; FIXME(rbultje) only bilinear filters use >8 registers, and ssse3 only uses
+; 11, not 13, if the registers are ordered correctly. May make a minor speed
+; difference on Win64
+%ifdef PIC
+%if %2 == 1 ; avg
+cglobal sub_pixel_avg_variance%1xh, 9, 10, 13, src, src_stride, \
+ x_offset, y_offset, \
+ dst, dst_stride, \
+ sec, sec_stride, height, sse
+%define sec_str sec_strideq
+%else
+cglobal sub_pixel_variance%1xh, 7, 8, 13, src, src_stride, x_offset, y_offset, \
+ dst, dst_stride, height, sse
+%endif
+%define h heightd
+%define bilin_filter sseq
+%else
+%if %2 == 1 ; avg
+cglobal sub_pixel_avg_variance%1xh, 7 + 2 * ARCH_X86_64, \
+ 7 + 2 * ARCH_X86_64, 13, src, src_stride, \
+ x_offset, y_offset, \
+ dst, dst_stride, \
+ sec, sec_stride, \
+ height, sse
+%if ARCH_X86_64
+%define h heightd
+%define sec_str sec_strideq
+%else
+%define h dword heightm
+%define sec_str sec_stridemp
+%endif
+%else
+cglobal sub_pixel_variance%1xh, 7, 7, 13, src, src_stride, x_offset, y_offset, \
+ dst, dst_stride, height, sse
+%define h heightd
+%endif
+%define bilin_filter bilin_filter_m
+%endif
+ ASSERT %1 <= 16 ; m6 overflows if w > 16
+ pxor m6, m6 ; sum
+ pxor m7, m7 ; sse
+ ; FIXME(rbultje) if both filters are bilinear, we don't actually use m5; we
+ ; could perhaps use it for something more productive then
+ pxor m5, m5 ; dedicated zero register
+%if %1 < 16
+ sar h, 1
+%if %2 == 1 ; avg
+ shl sec_str, 1
+%endif
+%endif
+
+ ; FIXME(rbultje) replace by jumptable?
+ test x_offsetd, x_offsetd
+ jnz .x_nonzero
+ ; x_offset == 0
+ test y_offsetd, y_offsetd
+ jnz .x_zero_y_nonzero
+
+ ; x_offset == 0 && y_offset == 0
+.x_zero_y_zero_loop:
+%if %1 == 16
+ movu m0, [srcq]
+ mova m1, [dstq]
+%if %2 == 1 ; avg
+ pavgb m0, [secq]
+ punpckhbw m3, m1, m5
+ punpcklbw m1, m5
+%endif
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%if %2 == 0 ; !avg
+ punpckhbw m3, m1, m5
+ punpcklbw m1, m5
+%endif
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ add srcq, src_strideq
+ add dstq, dst_strideq
+%else ; %1 < 16
+ movh m0, [srcq]
+%if %2 == 1 ; avg
+%if mmsize == 16
+ movhps m0, [srcq+src_strideq]
+%else ; mmsize == 8
+ punpckldq m0, [srcq+src_strideq]
+%endif
+%else ; !avg
+ movh m2, [srcq+src_strideq]
+%endif
+ movh m1, [dstq]
+ movh m3, [dstq+dst_strideq]
+%if %2 == 1 ; avg
+ pavgb m0, [secq]
+ punpcklbw m3, m5
+ punpcklbw m1, m5
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%else ; !avg
+ punpcklbw m0, m5
+ punpcklbw m2, m5
+ punpcklbw m3, m5
+ punpcklbw m1, m5
+%endif
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ lea srcq, [srcq+src_strideq*2]
+ lea dstq, [dstq+dst_strideq*2]
+%endif
+%if %2 == 1 ; avg
+ add secq, sec_str
+%endif
+ dec h
+ jg .x_zero_y_zero_loop
+ STORE_AND_RET
+
+.x_zero_y_nonzero:
+ cmp y_offsetd, 8
+ jne .x_zero_y_nonhalf
+
+ ; x_offset == 0 && y_offset == 0.5
+.x_zero_y_half_loop:
+%if %1 == 16
+ movu m0, [srcq]
+ movu m4, [srcq+src_strideq]
+ mova m1, [dstq]
+ pavgb m0, m4
+ punpckhbw m3, m1, m5
+%if %2 == 1 ; avg
+ pavgb m0, [secq]
+%endif
+ punpcklbw m1, m5
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ add srcq, src_strideq
+ add dstq, dst_strideq
+%else ; %1 < 16
+ movh m0, [srcq]
+ movh m2, [srcq+src_strideq]
+%if %2 == 1 ; avg
+%if mmsize == 16
+ movhps m2, [srcq+src_strideq*2]
+%else ; mmsize == 8
+%if %1 == 4
+ movh m1, [srcq+src_strideq*2]
+ punpckldq m2, m1
+%else
+ punpckldq m2, [srcq+src_strideq*2]
+%endif
+%endif
+ movh m1, [dstq]
+%if mmsize == 16
+ movlhps m0, m2
+%else ; mmsize == 8
+ punpckldq m0, m2
+%endif
+ movh m3, [dstq+dst_strideq]
+ pavgb m0, m2
+ punpcklbw m1, m5
+ pavgb m0, [secq]
+ punpcklbw m3, m5
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%else ; !avg
+ movh m4, [srcq+src_strideq*2]
+ movh m1, [dstq]
+ pavgb m0, m2
+ movh m3, [dstq+dst_strideq]
+ pavgb m2, m4
+ punpcklbw m0, m5
+ punpcklbw m2, m5
+ punpcklbw m3, m5
+ punpcklbw m1, m5
+%endif
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ lea srcq, [srcq+src_strideq*2]
+ lea dstq, [dstq+dst_strideq*2]
+%endif
+%if %2 == 1 ; avg
+ add secq, sec_str
+%endif
+ dec h
+ jg .x_zero_y_half_loop
+ STORE_AND_RET
+
+.x_zero_y_nonhalf:
+ ; x_offset == 0 && y_offset == bilin interpolation
+%ifdef PIC
+ lea bilin_filter, [bilin_filter_m]
+%endif
+ shl y_offsetd, filter_idx_shift
+%if ARCH_X86_64 && mmsize == 16
+ mova m8, [bilin_filter+y_offsetq]
+%if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64
+ mova m9, [bilin_filter+y_offsetq+16]
+%endif
+ mova m10, [pw_8]
+%define filter_y_a m8
+%define filter_y_b m9
+%define filter_rnd m10
+%else ; x86-32 or mmx
+ add y_offsetq, bilin_filter
+%define filter_y_a [y_offsetq]
+%define filter_y_b [y_offsetq+16]
+%define filter_rnd [pw_8]
+%endif
+.x_zero_y_other_loop:
+%if %1 == 16
+ movu m0, [srcq]
+ movu m4, [srcq+src_strideq]
+ mova m1, [dstq]
+%if cpuflag(ssse3)
+ punpckhbw m2, m0, m4
+ punpcklbw m0, m4
+ pmaddubsw m2, filter_y_a
+ pmaddubsw m0, filter_y_a
+ paddw m2, filter_rnd
+ paddw m0, filter_rnd
+%else
+ punpckhbw m2, m0, m5
+ punpckhbw m3, m4, m5
+ punpcklbw m0, m5
+ punpcklbw m4, m5
+  ; FIXME(rbultje) instead of out=((num-x)*in1+x*in2+rnd)>>log2(num), we can
+  ; also do out=in1+((x*(in2-in1)+rnd)>>log2(num)); the C check after this
+  ; macro verifies the two forms agree. Total number of instructions is the
+  ; same (5), but it is 1 mul instead of 2, so might be slightly faster
+  ; because of pmullw latency. It would also cut our rodata tables in half
+  ; for this function, and save 1-2 registers on x86-64.
+ pmullw m2, filter_y_a
+ pmullw m3, filter_y_b
+ paddw m2, filter_rnd
+ pmullw m0, filter_y_a
+ pmullw m4, filter_y_b
+ paddw m0, filter_rnd
+ paddw m2, m3
+ paddw m0, m4
+%endif
+ psraw m2, 4
+ psraw m0, 4
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline
+ packuswb m0, m2
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%endif
+ punpckhbw m3, m1, m5
+ punpcklbw m1, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ add srcq, src_strideq
+ add dstq, dst_strideq
+%else ; %1 < 16
+ movh m0, [srcq]
+ movh m2, [srcq+src_strideq]
+ movh m4, [srcq+src_strideq*2]
+ movh m3, [dstq+dst_strideq]
+%if cpuflag(ssse3)
+ movh m1, [dstq]
+ punpcklbw m0, m2
+ punpcklbw m2, m4
+ pmaddubsw m0, filter_y_a
+ pmaddubsw m2, filter_y_a
+ punpcklbw m3, m5
+ paddw m2, filter_rnd
+ paddw m0, filter_rnd
+%else
+ punpcklbw m0, m5
+ punpcklbw m2, m5
+ punpcklbw m4, m5
+ pmullw m0, filter_y_a
+ pmullw m1, m2, filter_y_b
+ punpcklbw m3, m5
+ paddw m0, filter_rnd
+ pmullw m2, filter_y_a
+ pmullw m4, filter_y_b
+ paddw m0, m1
+ paddw m2, filter_rnd
+ movh m1, [dstq]
+ paddw m2, m4
+%endif
+ psraw m0, 4
+ psraw m2, 4
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline
+ packuswb m0, m2
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%endif
+ punpcklbw m1, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ lea srcq, [srcq+src_strideq*2]
+ lea dstq, [dstq+dst_strideq*2]
+%endif
+%if %2 == 1 ; avg
+ add secq, sec_str
+%endif
+ dec h
+ jg .x_zero_y_other_loop
+%undef filter_y_a
+%undef filter_y_b
+%undef filter_rnd
+ STORE_AND_RET
+
+.x_nonzero:
+ cmp x_offsetd, 8
+ jne .x_nonhalf
+ ; x_offset == 0.5
+ test y_offsetd, y_offsetd
+ jnz .x_half_y_nonzero
+
+ ; x_offset == 0.5 && y_offset == 0
+.x_half_y_zero_loop:
+%if %1 == 16
+ movu m0, [srcq]
+ movu m4, [srcq+1]
+ mova m1, [dstq]
+ pavgb m0, m4
+ punpckhbw m3, m1, m5
+%if %2 == 1 ; avg
+ pavgb m0, [secq]
+%endif
+ punpcklbw m1, m5
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ add srcq, src_strideq
+ add dstq, dst_strideq
+%else ; %1 < 16
+ movh m0, [srcq]
+ movh m4, [srcq+1]
+%if %2 == 1 ; avg
+%if mmsize == 16
+ movhps m0, [srcq+src_strideq]
+ movhps m4, [srcq+src_strideq+1]
+%else ; mmsize == 8
+ punpckldq m0, [srcq+src_strideq]
+ punpckldq m4, [srcq+src_strideq+1]
+%endif
+ movh m1, [dstq]
+ movh m3, [dstq+dst_strideq]
+ pavgb m0, m4
+ punpcklbw m3, m5
+ pavgb m0, [secq]
+ punpcklbw m1, m5
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%else ; !avg
+ movh m2, [srcq+src_strideq]
+ movh m1, [dstq]
+ pavgb m0, m4
+ movh m4, [srcq+src_strideq+1]
+ movh m3, [dstq+dst_strideq]
+ pavgb m2, m4
+ punpcklbw m0, m5
+ punpcklbw m2, m5
+ punpcklbw m3, m5
+ punpcklbw m1, m5
+%endif
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ lea srcq, [srcq+src_strideq*2]
+ lea dstq, [dstq+dst_strideq*2]
+%endif
+%if %2 == 1 ; avg
+ add secq, sec_str
+%endif
+ dec h
+ jg .x_half_y_zero_loop
+ STORE_AND_RET
+
+.x_half_y_nonzero:
+ cmp y_offsetd, 8
+ jne .x_half_y_nonhalf
+
+ ; x_offset == 0.5 && y_offset == 0.5
+%if %1 == 16
+ movu m0, [srcq]
+ movu m3, [srcq+1]
+ add srcq, src_strideq
+ pavgb m0, m3
+.x_half_y_half_loop:
+ movu m4, [srcq]
+ movu m3, [srcq+1]
+ mova m1, [dstq]
+ pavgb m4, m3
+ punpckhbw m3, m1, m5
+ pavgb m0, m4
+%if %2 == 1 ; avg
+ punpcklbw m1, m5
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%else
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+ punpcklbw m1, m5
+%endif
+ SUM_SSE m0, m1, m2, m3, m6, m7
+ mova m0, m4
+
+ add srcq, src_strideq
+ add dstq, dst_strideq
+%else ; %1 < 16
+ movh m0, [srcq]
+ movh m3, [srcq+1]
+ add srcq, src_strideq
+ pavgb m0, m3
+.x_half_y_half_loop:
+ movh m2, [srcq]
+ movh m3, [srcq+1]
+%if %2 == 1 ; avg
+%if mmsize == 16
+ movhps m2, [srcq+src_strideq]
+ movhps m3, [srcq+src_strideq+1]
+%else
+%if %1 == 4
+ movh m1, [srcq+src_strideq]
+ punpckldq m2, m1
+ movh m1, [srcq+src_strideq+1]
+ punpckldq m3, m1
+%else
+ punpckldq m2, [srcq+src_strideq]
+ punpckldq m3, [srcq+src_strideq+1]
+%endif
+%endif
+ pavgb m2, m3
+%if mmsize == 16
+ movlhps m0, m2
+ movhlps m4, m2
+%else ; mmsize == 8
+ punpckldq m0, m2
+ pshufw m4, m2, 0xe
+%endif
+ movh m1, [dstq]
+ pavgb m0, m2
+ movh m3, [dstq+dst_strideq]
+ pavgb m0, [secq]
+ punpcklbw m3, m5
+ punpcklbw m1, m5
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%else ; !avg
+ movh m4, [srcq+src_strideq]
+ movh m1, [srcq+src_strideq+1]
+ pavgb m2, m3
+ pavgb m4, m1
+ pavgb m0, m2
+ pavgb m2, m4
+ movh m1, [dstq]
+ movh m3, [dstq+dst_strideq]
+ punpcklbw m0, m5
+ punpcklbw m2, m5
+ punpcklbw m3, m5
+ punpcklbw m1, m5
+%endif
+ SUM_SSE m0, m1, m2, m3, m6, m7
+ mova m0, m4
+
+ lea srcq, [srcq+src_strideq*2]
+ lea dstq, [dstq+dst_strideq*2]
+%endif
+%if %2 == 1 ; avg
+ add secq, sec_str
+%endif
+ dec h
+ jg .x_half_y_half_loop
+ STORE_AND_RET
+
+.x_half_y_nonhalf:
+ ; x_offset == 0.5 && y_offset == bilin interpolation
+%ifdef PIC
+ lea bilin_filter, [bilin_filter_m]
+%endif
+ shl y_offsetd, filter_idx_shift
+%if ARCH_X86_64 && mmsize == 16
+ mova m8, [bilin_filter+y_offsetq]
+%if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64
+ mova m9, [bilin_filter+y_offsetq+16]
+%endif
+ mova m10, [pw_8]
+%define filter_y_a m8
+%define filter_y_b m9
+%define filter_rnd m10
+%else
+ add y_offsetq, bilin_filter
+%define filter_y_a [y_offsetq]
+%define filter_y_b [y_offsetq+16]
+%define filter_rnd [pw_8]
+%endif
+%if %1 == 16
+ movu m0, [srcq]
+ movu m3, [srcq+1]
+ add srcq, src_strideq
+ pavgb m0, m3
+.x_half_y_other_loop:
+ movu m4, [srcq]
+ movu m2, [srcq+1]
+ mova m1, [dstq]
+ pavgb m4, m2
+%if cpuflag(ssse3)
+ punpckhbw m2, m0, m4
+ punpcklbw m0, m4
+ pmaddubsw m2, filter_y_a
+ pmaddubsw m0, filter_y_a
+ paddw m2, filter_rnd
+ paddw m0, filter_rnd
+ psraw m2, 4
+%else
+ punpckhbw m2, m0, m5
+ punpckhbw m3, m4, m5
+ pmullw m2, filter_y_a
+ pmullw m3, filter_y_b
+ paddw m2, filter_rnd
+ punpcklbw m0, m5
+ paddw m2, m3
+ punpcklbw m3, m4, m5
+ pmullw m0, filter_y_a
+ pmullw m3, filter_y_b
+ paddw m0, filter_rnd
+ psraw m2, 4
+ paddw m0, m3
+%endif
+ punpckhbw m3, m1, m5
+ psraw m0, 4
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline
+ packuswb m0, m2
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%endif
+ punpcklbw m1, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+ mova m0, m4
+
+ add srcq, src_strideq
+ add dstq, dst_strideq
+%else ; %1 < 16
+ movh m0, [srcq]
+ movh m3, [srcq+1]
+ add srcq, src_strideq
+ pavgb m0, m3
+%if notcpuflag(ssse3)
+ punpcklbw m0, m5
+%endif
+.x_half_y_other_loop:
+ movh m2, [srcq]
+ movh m1, [srcq+1]
+ movh m4, [srcq+src_strideq]
+ movh m3, [srcq+src_strideq+1]
+ pavgb m2, m1
+ pavgb m4, m3
+ movh m3, [dstq+dst_strideq]
+%if cpuflag(ssse3)
+ movh m1, [dstq]
+ punpcklbw m0, m2
+ punpcklbw m2, m4
+ pmaddubsw m0, filter_y_a
+ pmaddubsw m2, filter_y_a
+ punpcklbw m3, m5
+ paddw m0, filter_rnd
+ paddw m2, filter_rnd
+%else
+ punpcklbw m2, m5
+ punpcklbw m4, m5
+ pmullw m0, filter_y_a
+ pmullw m1, m2, filter_y_b
+ punpcklbw m3, m5
+ paddw m0, filter_rnd
+ pmullw m2, filter_y_a
+ paddw m0, m1
+ pmullw m1, m4, filter_y_b
+ paddw m2, filter_rnd
+ paddw m2, m1
+ movh m1, [dstq]
+%endif
+ psraw m0, 4
+ psraw m2, 4
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline
+ packuswb m0, m2
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%endif
+ punpcklbw m1, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+ mova m0, m4
+
+ lea srcq, [srcq+src_strideq*2]
+ lea dstq, [dstq+dst_strideq*2]
+%endif
+%if %2 == 1 ; avg
+ add secq, sec_str
+%endif
+ dec h
+ jg .x_half_y_other_loop
+%undef filter_y_a
+%undef filter_y_b
+%undef filter_rnd
+ STORE_AND_RET
+
+.x_nonhalf:
+ test y_offsetd, y_offsetd
+ jnz .x_nonhalf_y_nonzero
+
+ ; x_offset == bilin interpolation && y_offset == 0
+%ifdef PIC
+ lea bilin_filter, [bilin_filter_m]
+%endif
+ shl x_offsetd, filter_idx_shift
+%if ARCH_X86_64 && mmsize == 16
+ mova m8, [bilin_filter+x_offsetq]
+%if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64
+ mova m9, [bilin_filter+x_offsetq+16]
+%endif
+ mova m10, [pw_8]
+%define filter_x_a m8
+%define filter_x_b m9
+%define filter_rnd m10
+%else
+ add x_offsetq, bilin_filter
+%define filter_x_a [x_offsetq]
+%define filter_x_b [x_offsetq+16]
+%define filter_rnd [pw_8]
+%endif
+.x_other_y_zero_loop:
+%if %1 == 16
+ movu m0, [srcq]
+ movu m4, [srcq+1]
+ mova m1, [dstq]
+%if cpuflag(ssse3)
+ punpckhbw m2, m0, m4
+ punpcklbw m0, m4
+ pmaddubsw m2, filter_x_a
+ pmaddubsw m0, filter_x_a
+ paddw m2, filter_rnd
+ paddw m0, filter_rnd
+%else
+ punpckhbw m2, m0, m5
+ punpckhbw m3, m4, m5
+ punpcklbw m0, m5
+ punpcklbw m4, m5
+ pmullw m2, filter_x_a
+ pmullw m3, filter_x_b
+ paddw m2, filter_rnd
+ pmullw m0, filter_x_a
+ pmullw m4, filter_x_b
+ paddw m0, filter_rnd
+ paddw m2, m3
+ paddw m0, m4
+%endif
+ psraw m2, 4
+ psraw m0, 4
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline
+ packuswb m0, m2
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%endif
+ punpckhbw m3, m1, m5
+ punpcklbw m1, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ add srcq, src_strideq
+ add dstq, dst_strideq
+%else ; %1 < 16
+ movh m0, [srcq]
+ movh m1, [srcq+1]
+ movh m2, [srcq+src_strideq]
+ movh m4, [srcq+src_strideq+1]
+ movh m3, [dstq+dst_strideq]
+%if cpuflag(ssse3)
+ punpcklbw m0, m1
+ movh m1, [dstq]
+ punpcklbw m2, m4
+ pmaddubsw m0, filter_x_a
+ pmaddubsw m2, filter_x_a
+ punpcklbw m3, m5
+ paddw m0, filter_rnd
+ paddw m2, filter_rnd
+%else
+ punpcklbw m0, m5
+ punpcklbw m1, m5
+ punpcklbw m2, m5
+ punpcklbw m4, m5
+ pmullw m0, filter_x_a
+ pmullw m1, filter_x_b
+ punpcklbw m3, m5
+ paddw m0, filter_rnd
+ pmullw m2, filter_x_a
+ pmullw m4, filter_x_b
+ paddw m0, m1
+ paddw m2, filter_rnd
+ movh m1, [dstq]
+ paddw m2, m4
+%endif
+ psraw m0, 4
+ psraw m2, 4
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline
+ packuswb m0, m2
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%endif
+ punpcklbw m1, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+
+ lea srcq, [srcq+src_strideq*2]
+ lea dstq, [dstq+dst_strideq*2]
+%endif
+%if %2 == 1 ; avg
+ add secq, sec_str
+%endif
+ dec h
+ jg .x_other_y_zero_loop
+%undef filter_x_a
+%undef filter_x_b
+%undef filter_rnd
+ STORE_AND_RET
+
+.x_nonhalf_y_nonzero:
+ cmp y_offsetd, 8
+ jne .x_nonhalf_y_nonhalf
+
+ ; x_offset == bilin interpolation && y_offset == 0.5
+%ifdef PIC
+ lea bilin_filter, [bilin_filter_m]
+%endif
+ shl x_offsetd, filter_idx_shift
+%if ARCH_X86_64 && mmsize == 16
+ mova m8, [bilin_filter+x_offsetq]
+%if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64
+ mova m9, [bilin_filter+x_offsetq+16]
+%endif
+ mova m10, [pw_8]
+%define filter_x_a m8
+%define filter_x_b m9
+%define filter_rnd m10
+%else
+ add x_offsetq, bilin_filter
+%define filter_x_a [x_offsetq]
+%define filter_x_b [x_offsetq+16]
+%define filter_rnd [pw_8]
+%endif
+%if %1 == 16
+ movu m0, [srcq]
+ movu m1, [srcq+1]
+%if cpuflag(ssse3)
+ punpckhbw m2, m0, m1
+ punpcklbw m0, m1
+ pmaddubsw m2, filter_x_a
+ pmaddubsw m0, filter_x_a
+ paddw m2, filter_rnd
+ paddw m0, filter_rnd
+%else
+ punpckhbw m2, m0, m5
+ punpckhbw m3, m1, m5
+ punpcklbw m0, m5
+ punpcklbw m1, m5
+ pmullw m0, filter_x_a
+ pmullw m1, filter_x_b
+ paddw m0, filter_rnd
+ pmullw m2, filter_x_a
+ pmullw m3, filter_x_b
+ paddw m2, filter_rnd
+ paddw m0, m1
+ paddw m2, m3
+%endif
+ psraw m0, 4
+ psraw m2, 4
+ add srcq, src_strideq
+ packuswb m0, m2
+.x_other_y_half_loop:
+ movu m4, [srcq]
+ movu m3, [srcq+1]
+%if cpuflag(ssse3)
+ mova m1, [dstq]
+ punpckhbw m2, m4, m3
+ punpcklbw m4, m3
+ pmaddubsw m2, filter_x_a
+ pmaddubsw m4, filter_x_a
+ paddw m2, filter_rnd
+ paddw m4, filter_rnd
+ psraw m2, 4
+ psraw m4, 4
+ packuswb m4, m2
+ pavgb m0, m4
+ punpckhbw m3, m1, m5
+ punpcklbw m1, m5
+%else
+ punpckhbw m2, m4, m5
+ punpckhbw m1, m3, m5
+ punpcklbw m4, m5
+ punpcklbw m3, m5
+ pmullw m4, filter_x_a
+ pmullw m3, filter_x_b
+ paddw m4, filter_rnd
+ pmullw m2, filter_x_a
+ pmullw m1, filter_x_b
+ paddw m2, filter_rnd
+ paddw m4, m3
+ paddw m2, m1
+ mova m1, [dstq]
+ psraw m4, 4
+ psraw m2, 4
+ punpckhbw m3, m1, m5
+ ; FIXME(rbultje) the repeated pack/unpack here around m0/m2 is because we
+ ; have a 1-register shortage to be able to store the backup of the bilin
+ ; filtered second line as words as cache for the next line. Packing into
+ ; a byte costs 1 pack and 2 unpacks, but saves a register.
+ packuswb m4, m2
+ punpcklbw m1, m5
+ pavgb m0, m4
+%endif
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline
+ pavgb m0, [secq]
+%endif
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+ mova m0, m4
+
+ add srcq, src_strideq
+ add dstq, dst_strideq
+%else ; %1 < 16
+ movh m0, [srcq]
+ movh m1, [srcq+1]
+%if cpuflag(ssse3)
+ punpcklbw m0, m1
+ pmaddubsw m0, filter_x_a
+ paddw m0, filter_rnd
+%else
+ punpcklbw m0, m5
+ punpcklbw m1, m5
+ pmullw m0, filter_x_a
+ pmullw m1, filter_x_b
+ paddw m0, filter_rnd
+ paddw m0, m1
+%endif
+ add srcq, src_strideq
+ psraw m0, 4
+.x_other_y_half_loop:
+ movh m2, [srcq]
+ movh m1, [srcq+1]
+ movh m4, [srcq+src_strideq]
+ movh m3, [srcq+src_strideq+1]
+%if cpuflag(ssse3)
+ punpcklbw m2, m1
+ punpcklbw m4, m3
+ pmaddubsw m2, filter_x_a
+ pmaddubsw m4, filter_x_a
+ movh m1, [dstq]
+ movh m3, [dstq+dst_strideq]
+ paddw m2, filter_rnd
+ paddw m4, filter_rnd
+%else
+ punpcklbw m2, m5
+ punpcklbw m1, m5
+ punpcklbw m4, m5
+ punpcklbw m3, m5
+ pmullw m2, filter_x_a
+ pmullw m1, filter_x_b
+ paddw m2, filter_rnd
+ pmullw m4, filter_x_a
+ pmullw m3, filter_x_b
+ paddw m4, filter_rnd
+ paddw m2, m1
+ movh m1, [dstq]
+ paddw m4, m3
+ movh m3, [dstq+dst_strideq]
+%endif
+ psraw m2, 4
+ psraw m4, 4
+ pavgw m0, m2
+ pavgw m2, m4
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline - also consider going to bytes here
+ packuswb m0, m2
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%endif
+ punpcklbw m3, m5
+ punpcklbw m1, m5
+ SUM_SSE m0, m1, m2, m3, m6, m7
+ mova m0, m4
+
+ lea srcq, [srcq+src_strideq*2]
+ lea dstq, [dstq+dst_strideq*2]
+%endif
+%if %2 == 1 ; avg
+ add secq, sec_str
+%endif
+ dec h
+ jg .x_other_y_half_loop
+%undef filter_x_a
+%undef filter_x_b
+%undef filter_rnd
+ STORE_AND_RET
+
+.x_nonhalf_y_nonhalf:
+%ifdef PIC
+ lea bilin_filter, [bilin_filter_m]
+%endif
+ shl x_offsetd, filter_idx_shift
+ shl y_offsetd, filter_idx_shift
+%if ARCH_X86_64 && mmsize == 16
+ mova m8, [bilin_filter+x_offsetq]
+%if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64
+ mova m9, [bilin_filter+x_offsetq+16]
+%endif
+ mova m10, [bilin_filter+y_offsetq]
+%if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64
+ mova m11, [bilin_filter+y_offsetq+16]
+%endif
+ mova m12, [pw_8]
+%define filter_x_a m8
+%define filter_x_b m9
+%define filter_y_a m10
+%define filter_y_b m11
+%define filter_rnd m12
+%else
+ add x_offsetq, bilin_filter
+ add y_offsetq, bilin_filter
+%define filter_x_a [x_offsetq]
+%define filter_x_b [x_offsetq+16]
+%define filter_y_a [y_offsetq]
+%define filter_y_b [y_offsetq+16]
+%define filter_rnd [pw_8]
+%endif
+ ; x_offset == bilin interpolation && y_offset == bilin interpolation
+%if %1 == 16
+ movu m0, [srcq]
+ movu m1, [srcq+1]
+%if cpuflag(ssse3)
+ punpckhbw m2, m0, m1
+ punpcklbw m0, m1
+ pmaddubsw m2, filter_x_a
+ pmaddubsw m0, filter_x_a
+ paddw m2, filter_rnd
+ paddw m0, filter_rnd
+%else
+ punpckhbw m2, m0, m5
+ punpckhbw m3, m1, m5
+ punpcklbw m0, m5
+ punpcklbw m1, m5
+ pmullw m0, filter_x_a
+ pmullw m1, filter_x_b
+ paddw m0, filter_rnd
+ pmullw m2, filter_x_a
+ pmullw m3, filter_x_b
+ paddw m2, filter_rnd
+ paddw m0, m1
+ paddw m2, m3
+%endif
+ psraw m0, 4
+ psraw m2, 4
+ add srcq, src_strideq
+ packuswb m0, m2
+.x_other_y_other_loop:
+%if cpuflag(ssse3)
+ movu m4, [srcq]
+ movu m3, [srcq+1]
+ mova m1, [dstq]
+ punpckhbw m2, m4, m3
+ punpcklbw m4, m3
+ pmaddubsw m2, filter_x_a
+ pmaddubsw m4, filter_x_a
+ punpckhbw m3, m1, m5
+ paddw m2, filter_rnd
+ paddw m4, filter_rnd
+ psraw m2, 4
+ psraw m4, 4
+ packuswb m4, m2
+ punpckhbw m2, m0, m4
+ punpcklbw m0, m4
+ pmaddubsw m2, filter_y_a
+ pmaddubsw m0, filter_y_a
+ punpcklbw m1, m5
+ paddw m2, filter_rnd
+ paddw m0, filter_rnd
+ psraw m2, 4
+ psraw m0, 4
+%else
+ movu m3, [srcq]
+ movu m4, [srcq+1]
+ punpckhbw m1, m3, m5
+ punpckhbw m2, m4, m5
+ punpcklbw m3, m5
+ punpcklbw m4, m5
+ pmullw m3, filter_x_a
+ pmullw m4, filter_x_b
+ paddw m3, filter_rnd
+ pmullw m1, filter_x_a
+ pmullw m2, filter_x_b
+ paddw m1, filter_rnd
+ paddw m3, m4
+ paddw m1, m2
+ psraw m3, 4
+ psraw m1, 4
+ packuswb m4, m3, m1
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+ pmullw m2, filter_y_a
+ pmullw m1, filter_y_b
+ paddw m2, filter_rnd
+ pmullw m0, filter_y_a
+ pmullw m3, filter_y_b
+ paddw m2, m1
+ mova m1, [dstq]
+ paddw m0, filter_rnd
+ psraw m2, 4
+ paddw m0, m3
+ punpckhbw m3, m1, m5
+ psraw m0, 4
+ punpcklbw m1, m5
+%endif
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline
+ packuswb m0, m2
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%endif
+ SUM_SSE m0, m1, m2, m3, m6, m7
+ mova m0, m4
+
+ add srcq, src_strideq
+ add dstq, dst_strideq
+%else ; %1 < 16
+ movh m0, [srcq]
+ movh m1, [srcq+1]
+%if cpuflag(ssse3)
+ punpcklbw m0, m1
+ pmaddubsw m0, filter_x_a
+ paddw m0, filter_rnd
+%else
+ punpcklbw m0, m5
+ punpcklbw m1, m5
+ pmullw m0, filter_x_a
+ pmullw m1, filter_x_b
+ paddw m0, filter_rnd
+ paddw m0, m1
+%endif
+ psraw m0, 4
+%if cpuflag(ssse3)
+ packuswb m0, m0
+%endif
+ add srcq, src_strideq
+.x_other_y_other_loop:
+ movh m2, [srcq]
+ movh m1, [srcq+1]
+ movh m4, [srcq+src_strideq]
+ movh m3, [srcq+src_strideq+1]
+%if cpuflag(ssse3)
+ punpcklbw m2, m1
+ punpcklbw m4, m3
+ pmaddubsw m2, filter_x_a
+ pmaddubsw m4, filter_x_a
+ movh m3, [dstq+dst_strideq]
+ movh m1, [dstq]
+ paddw m2, filter_rnd
+ paddw m4, filter_rnd
+ psraw m2, 4
+ psraw m4, 4
+ packuswb m2, m2
+ packuswb m4, m4
+ punpcklbw m0, m2
+ punpcklbw m2, m4
+ pmaddubsw m0, filter_y_a
+ pmaddubsw m2, filter_y_a
+ punpcklbw m3, m5
+ paddw m0, filter_rnd
+ paddw m2, filter_rnd
+ psraw m0, 4
+ psraw m2, 4
+ punpcklbw m1, m5
+%else
+ punpcklbw m2, m5
+ punpcklbw m1, m5
+ punpcklbw m4, m5
+ punpcklbw m3, m5
+ pmullw m2, filter_x_a
+ pmullw m1, filter_x_b
+ paddw m2, filter_rnd
+ pmullw m4, filter_x_a
+ pmullw m3, filter_x_b
+ paddw m4, filter_rnd
+ paddw m2, m1
+ paddw m4, m3
+ psraw m2, 4
+ psraw m4, 4
+ pmullw m0, filter_y_a
+ pmullw m3, m2, filter_y_b
+ paddw m0, filter_rnd
+ pmullw m2, filter_y_a
+ pmullw m1, m4, filter_y_b
+ paddw m2, filter_rnd
+ paddw m0, m3
+ movh m3, [dstq+dst_strideq]
+ paddw m2, m1
+ movh m1, [dstq]
+ psraw m0, 4
+ psraw m2, 4
+ punpcklbw m3, m5
+ punpcklbw m1, m5
+%endif
+%if %2 == 1 ; avg
+ ; FIXME(rbultje) pipeline
+ packuswb m0, m2
+ pavgb m0, [secq]
+ punpckhbw m2, m0, m5
+ punpcklbw m0, m5
+%endif
+ SUM_SSE m0, m1, m2, m3, m6, m7
+ mova m0, m4
+
+ lea srcq, [srcq+src_strideq*2]
+ lea dstq, [dstq+dst_strideq*2]
+%endif
+%if %2 == 1 ; avg
+ add secq, sec_str
+%endif
+ dec h
+ jg .x_other_y_other_loop
+%undef filter_x_a
+%undef filter_x_b
+%undef filter_y_a
+%undef filter_y_b
+%undef filter_rnd
+ STORE_AND_RET
+%endmacro
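The one-multiply bilinear form mentioned in the FIXME inside the macro can be sanity-checked in a few lines of C (a sketch with num = 16 and rnd = 8, matching the 4-bit shift; it assumes arithmetic right shift of negative intermediates, which is what psraw provides):

#include <assert.h>

static int bilin_two_mul(int in1, int in2, int x) {
  return ((16 - x) * in1 + x * in2 + 8) >> 4;   /* current form, 2 muls */
}

static int bilin_one_mul(int in1, int in2, int x) {
  return in1 + ((x * (in2 - in1) + 8) >> 4);    /* proposed form, 1 mul */
}

int main(void) {
  /* exhaustive over 8-bit pixels and the 16 filter phases */
  for (int x = 0; x < 16; x++)
    for (int a = 0; a < 256; a++)
      for (int b = 0; b < 256; b++)
        assert(bilin_two_mul(a, b, x) == bilin_one_mul(a, b, x));
  return 0;
}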
+
+; FIXME(rbultje) the non-bilinear versions (i.e. x=0,8 && y=0,8) are identical
+; between the ssse3 and non-ssse3 versions. It may make sense to merge their
+; code in the sense that the ssse3 version would jump to the appropriate
+; location in the sse/sse2 version, rather than duplicating that code in the
+; binary.
+
+INIT_MMX sse
+SUBPEL_VARIANCE 4
+INIT_XMM sse2
+SUBPEL_VARIANCE 8
+SUBPEL_VARIANCE 16
+
+INIT_MMX ssse3
+SUBPEL_VARIANCE 4
+INIT_XMM ssse3
+SUBPEL_VARIANCE 8
+SUBPEL_VARIANCE 16
+
+INIT_MMX sse
+SUBPEL_VARIANCE 4, 1
+INIT_XMM sse2
+SUBPEL_VARIANCE 8, 1
+SUBPEL_VARIANCE 16, 1
+
+INIT_MMX ssse3
+SUBPEL_VARIANCE 4, 1
+INIT_XMM ssse3
+SUBPEL_VARIANCE 8, 1
+SUBPEL_VARIANCE 16, 1
diff --git a/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm b/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm
new file mode 100644
index 0000000..2ecc23e
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm
@@ -0,0 +1,337 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp9_half_horiz_vert_variance16x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_half_horiz_vert_variance16x_h_sse2) PRIVATE
+sym(vp9_half_horiz_vert_variance16x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ pxor xmm6, xmm6 ; error accumulator
+        pxor            xmm7, xmm7                  ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;src_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+
+ movdqu xmm5, XMMWORD PTR [rsi]
+ movdqu xmm3, XMMWORD PTR [rsi+1]
+        pavgb           xmm5, xmm3                  ; xmm5 = avg(xmm5,xmm3) horizontal line 1
+
+ lea rsi, [rsi + rax]
+
+.half_horiz_vert_variance16x_h_1:
+ movdqu xmm1, XMMWORD PTR [rsi] ;
+ movdqu xmm2, XMMWORD PTR [rsi+1] ;
+        pavgb           xmm1, xmm2                  ; xmm1 = avg(xmm1,xmm2) horizontal line i+1
+
+        pavgb           xmm5, xmm1                  ; xmm5 = vertical average of the above
+
+ movdqa xmm4, xmm5
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+ punpckhbw xmm4, xmm0
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+
+ movq xmm3, QWORD PTR [rdi+8]
+ punpcklbw xmm3, xmm0
+ psubw xmm4, xmm3
+
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ paddw xmm6, xmm4
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ pmaddwd xmm4, xmm4
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+ paddd xmm7, xmm4
+
+ movdqa xmm5, xmm1 ; save xmm1 for use on the next row
+
+ lea rsi, [rsi + rax]
+ lea rdi, [rdi + rdx]
+
+ sub rcx, 1 ;
+ jnz .half_horiz_vert_variance16x_h_1 ;
+
+ pxor xmm1, xmm1
+ pxor xmm5, xmm5
+
+ punpcklwd xmm0, xmm6
+ punpckhwd xmm1, xmm6
+ psrad xmm0, 16
+ psrad xmm1, 16
+ paddd xmm0, xmm1
+ movdqa xmm1, xmm0
+
+ movdqa xmm6, xmm7
+ punpckldq xmm6, xmm5
+ punpckhdq xmm7, xmm5
+ paddd xmm6, xmm7
+
+ punpckldq xmm0, xmm5
+ punpckhdq xmm1, xmm5
+ paddd xmm0, xmm1
+
+ movdqa xmm7, xmm6
+ movdqa xmm1, xmm0
+
+ psrldq xmm7, 8
+ psrldq xmm1, 8
+
+ paddd xmm6, xmm7
+ paddd xmm0, xmm1
+
+ mov rsi, arg(5) ;[Sum]
+ mov rdi, arg(6) ;[SSE]
+
+ movd [rsi], xmm0
+ movd [rdi], xmm6
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
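What the function above computes, expressed in C (an illustrative sketch; note that pavgb rounds each of the two averaging passes separately, which this mirrors):

#include <stdint.h>

/* Sketch of vp9_half_horiz_vert_variance16x_h_sse2: ref is filtered with a
 * half-pel horizontal then half-pel vertical average, and the result is
 * differenced against src to accumulate sum and SSE. */
static void half_horiz_vert_var16_c(const uint8_t *ref, int ref_stride,
                                    const uint8_t *src, int src_stride,
                                    unsigned int height,
                                    int *sum, unsigned int *sse) {
  *sum = 0;
  *sse = 0;
  for (unsigned int r = 0; r < height; r++) {
    for (int c = 0; c < 16; c++) {
      const int h0 = (ref[c] + ref[c + 1] + 1) >> 1;        /* pavgb, row r */
      const int h1 = (ref[ref_stride + c] +
                      ref[ref_stride + c + 1] + 1) >> 1;    /* pavgb, row r+1 */
      const int d = ((h0 + h1 + 1) >> 1) - src[c];          /* vertical pavgb */
      *sum += d;
      *sse += (unsigned int)(d * d);
    }
    ref += ref_stride;
    src += src_stride;
  }
}

The assembly hoists each row's horizontal average into xmm5 so it is computed once per row rather than twice, but the arithmetic is the same.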
+
+;void vp9_half_vert_variance16x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_half_vert_variance16x_h_sse2) PRIVATE
+sym(vp9_half_vert_variance16x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ pxor xmm6, xmm6 ; error accumulator
+        pxor            xmm7, xmm7                  ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr
+
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;src_pixels_per_line
+
+ movdqu xmm5, XMMWORD PTR [rsi]
+ lea rsi, [rsi + rax ]
+ pxor xmm0, xmm0
+
+.half_vert_variance16x_h_1:
+ movdqu xmm3, XMMWORD PTR [rsi]
+
+        pavgb           xmm5, xmm3                  ; xmm5 = avg(xmm5,xmm3)
+ movdqa xmm4, xmm5
+ punpcklbw xmm5, xmm0
+ punpckhbw xmm4, xmm0
+
+ movq xmm2, QWORD PTR [rdi]
+ punpcklbw xmm2, xmm0
+ psubw xmm5, xmm2
+ movq xmm2, QWORD PTR [rdi+8]
+ punpcklbw xmm2, xmm0
+ psubw xmm4, xmm2
+
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ paddw xmm6, xmm4
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ pmaddwd xmm4, xmm4
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+ paddd xmm7, xmm4
+
+ movdqa xmm5, xmm3
+
+ lea rsi, [rsi + rax]
+ lea rdi, [rdi + rdx]
+
+ sub rcx, 1
+ jnz .half_vert_variance16x_h_1
+
+ pxor xmm1, xmm1
+ pxor xmm5, xmm5
+
+ punpcklwd xmm0, xmm6
+ punpckhwd xmm1, xmm6
+ psrad xmm0, 16
+ psrad xmm1, 16
+ paddd xmm0, xmm1
+ movdqa xmm1, xmm0
+
+ movdqa xmm6, xmm7
+ punpckldq xmm6, xmm5
+ punpckhdq xmm7, xmm5
+ paddd xmm6, xmm7
+
+ punpckldq xmm0, xmm5
+ punpckhdq xmm1, xmm5
+ paddd xmm0, xmm1
+
+ movdqa xmm7, xmm6
+ movdqa xmm1, xmm0
+
+ psrldq xmm7, 8
+ psrldq xmm1, 8
+
+ paddd xmm6, xmm7
+ paddd xmm0, xmm1
+
+ mov rsi, arg(5) ;[Sum]
+ mov rdi, arg(6) ;[SSE]
+
+ movd [rsi], xmm0
+ movd [rdi], xmm6
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_half_horiz_variance16x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_half_horiz_variance16x_h_sse2) PRIVATE
+sym(vp9_half_horiz_variance16x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ pxor xmm6, xmm6 ; error accumulator
+        pxor            xmm7, xmm7                  ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;src_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+
+.half_horiz_variance16x_h_1:
+ movdqu xmm5, XMMWORD PTR [rsi] ; xmm5 = s0,s1,s2..s15
+ movdqu xmm3, XMMWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s16
+
+        pavgb           xmm5, xmm3                  ; xmm5 = avg(xmm5,xmm3)
+ movdqa xmm1, xmm5
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+ punpckhbw xmm1, xmm0
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+ movq xmm2, QWORD PTR [rdi+8]
+ punpcklbw xmm2, xmm0
+
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+ psubw xmm1, xmm2
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ paddw xmm6, xmm1
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ pmaddwd xmm1, xmm1
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+ paddd xmm7, xmm1
+
+ lea rsi, [rsi + rax]
+ lea rdi, [rdi + rdx]
+
+ sub rcx, 1 ;
+ jnz .half_horiz_variance16x_h_1 ;
+
+ pxor xmm1, xmm1
+ pxor xmm5, xmm5
+
+ punpcklwd xmm0, xmm6
+ punpckhwd xmm1, xmm6
+ psrad xmm0, 16
+ psrad xmm1, 16
+ paddd xmm0, xmm1
+ movdqa xmm1, xmm0
+
+ movdqa xmm6, xmm7
+ punpckldq xmm6, xmm5
+ punpckhdq xmm7, xmm5
+ paddd xmm6, xmm7
+
+ punpckldq xmm0, xmm5
+ punpckhdq xmm1, xmm5
+ paddd xmm0, xmm1
+
+ movdqa xmm7, xmm6
+ movdqa xmm1, xmm0
+
+ psrldq xmm7, 8
+ psrldq xmm1, 8
+
+ paddd xmm6, xmm7
+ paddd xmm0, xmm1
+
+ mov rsi, arg(5) ;[Sum]
+ mov rdi, arg(6) ;[SSE]
+
+ movd [rsi], xmm0
+ movd [rdi], xmm6
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/libvpx/vp9/encoder/x86/vp9_subtract_sse2.asm b/libvpx/vp9/encoder/x86/vp9_subtract_sse2.asm
new file mode 100644
index 0000000..9824080
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_subtract_sse2.asm
@@ -0,0 +1,127 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
+; void vp9_subtract_block(int rows, int cols,
+; int16_t *diff, ptrdiff_t diff_stride,
+; const uint8_t *src, ptrdiff_t src_stride,
+; const uint8_t *pred, ptrdiff_t pred_stride)
+
+INIT_XMM sse2
+cglobal subtract_block, 7, 7, 8, \
+ rows, cols, diff, diff_stride, src, src_stride, \
+ pred, pred_stride
+%define pred_str colsq
+ pxor m7, m7 ; dedicated zero register
+ cmp colsd, 4
+ je .case_4
+ cmp colsd, 8
+ je .case_8
+ cmp colsd, 16
+ je .case_16
+ cmp colsd, 32
+ je .case_32
+
+%macro loop16 6
+ mova m0, [srcq+%1]
+ mova m4, [srcq+%2]
+ mova m1, [predq+%3]
+ mova m5, [predq+%4]
+ punpckhbw m2, m0, m7
+ punpckhbw m3, m1, m7
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ psubw m2, m3
+ psubw m0, m1
+ punpckhbw m1, m4, m7
+ punpckhbw m3, m5, m7
+ punpcklbw m4, m7
+ punpcklbw m5, m7
+ psubw m1, m3
+ psubw m4, m5
+ mova [diffq+mmsize*0+%5], m0
+ mova [diffq+mmsize*1+%5], m2
+ mova [diffq+mmsize*0+%6], m4
+ mova [diffq+mmsize*1+%6], m1
+%endmacro
+
+ mov pred_str, pred_stridemp
+.loop_64:
+ loop16 0*mmsize, 1*mmsize, 0*mmsize, 1*mmsize, 0*mmsize, 2*mmsize
+ loop16 2*mmsize, 3*mmsize, 2*mmsize, 3*mmsize, 4*mmsize, 6*mmsize
+ lea diffq, [diffq+diff_strideq*2]
+ add predq, pred_str
+ add srcq, src_strideq
+ dec rowsd
+ jg .loop_64
+ RET
+
+.case_32:
+ mov pred_str, pred_stridemp
+.loop_32:
+ loop16 0, mmsize, 0, mmsize, 0, 2*mmsize
+ lea diffq, [diffq+diff_strideq*2]
+ add predq, pred_str
+ add srcq, src_strideq
+ dec rowsd
+ jg .loop_32
+ RET
+
+.case_16:
+ mov pred_str, pred_stridemp
+.loop_16:
+ loop16 0, src_strideq, 0, pred_str, 0, diff_strideq*2
+ lea diffq, [diffq+diff_strideq*4]
+ lea predq, [predq+pred_str*2]
+ lea srcq, [srcq+src_strideq*2]
+ sub rowsd, 2
+ jg .loop_16
+ RET
+
+%macro loop_h 0
+ movh m0, [srcq]
+ movh m2, [srcq+src_strideq]
+ movh m1, [predq]
+ movh m3, [predq+pred_str]
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+ punpcklbw m3, m7
+ psubw m0, m1
+ psubw m2, m3
+ mova [diffq], m0
+ mova [diffq+diff_strideq*2], m2
+%endmacro
+
+.case_8:
+ mov pred_str, pred_stridemp
+.loop_8:
+ loop_h
+ lea diffq, [diffq+diff_strideq*4]
+ lea srcq, [srcq+src_strideq*2]
+ lea predq, [predq+pred_str*2]
+ sub rowsd, 2
+ jg .loop_8
+ RET
+
+INIT_MMX
+.case_4:
+ mov pred_str, pred_stridemp
+.loop_4:
+ loop_h
+ lea diffq, [diffq+diff_strideq*4]
+ lea srcq, [srcq+src_strideq*2]
+ lea predq, [predq+pred_str*2]
+ sub rowsd, 2
+ jg .loop_4
+ RET
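The SIMD routine above implements the usual residual computation; in C it amounts to the following (a sketch mirroring the scalar fallback this dispatch replaces):

#include <stddef.h>
#include <stdint.h>

/* diff = src - pred, widened to 16 bits, over a rows x cols block. */
static void subtract_block_c(int rows, int cols,
                             int16_t *diff, ptrdiff_t diff_stride,
                             const uint8_t *src, ptrdiff_t src_stride,
                             const uint8_t *pred, ptrdiff_t pred_stride) {
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++)
      diff[c] = (int16_t)(src[c] - pred[c]);
    diff += diff_stride;
    src += src_stride;
    pred += pred_stride;
  }
}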
diff --git a/libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm b/libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm
new file mode 100644
index 0000000..d2d13b3
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm
@@ -0,0 +1,207 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+; void vp9_temporal_filter_apply_sse2 | arg
+; (unsigned char *frame1, | 0
+; unsigned int stride, | 1
+; unsigned char *frame2, | 2
+; unsigned int block_size, | 3
+; int strength, | 4
+; int filter_weight, | 5
+; unsigned int *accumulator, | 6
+; unsigned short *count) | 7
+global sym(vp9_temporal_filter_apply_sse2) PRIVATE
+sym(vp9_temporal_filter_apply_sse2):
+
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ALIGN_STACK 16, rax
+ %define block_size 0
+ %define strength 16
+ %define filter_weight 32
+ %define rounding_bit 48
+ %define rbp_backup 64
+ %define stack_size 80
+ sub rsp, stack_size
+ mov [rsp + rbp_backup], rbp
+ ; end prolog
+
+ mov rdx, arg(3)
+ mov [rsp + block_size], rdx
+ movd xmm6, arg(4)
+ movdqa [rsp + strength], xmm6 ; where strength is used, all 16 bytes are read
+
+ ; calculate the rounding bit outside the loop
+ ; 0x8000 >> (16 - strength)
+ mov rdx, 16
+ sub rdx, arg(4) ; 16 - strength
+ movq xmm4, rdx ; can't use rdx w/ shift
+ movdqa xmm5, [GLOBAL(_const_top_bit)]
+ psrlw xmm5, xmm4
+ movdqa [rsp + rounding_bit], xmm5
+
+ mov rsi, arg(0) ; src/frame1
+ mov rdx, arg(2) ; predictor frame
+ mov rdi, arg(6) ; accumulator
+ mov rax, arg(7) ; count
+
+ ; dup the filter weight and store for later
+ movd xmm0, arg(5) ; filter_weight
+ pshuflw xmm0, xmm0, 0
+ punpcklwd xmm0, xmm0
+ movdqa [rsp + filter_weight], xmm0
+
+ mov rbp, arg(1) ; stride
+ pxor xmm7, xmm7 ; zero for extraction
+
+ lea rcx, [rdx + 16*16*1]
+ cmp dword ptr [rsp + block_size], 8
+ jne .temporal_filter_apply_load_16
+ lea rcx, [rdx + 8*8*1]
+
+.temporal_filter_apply_load_8:
+ movq xmm0, [rsi] ; first row
+ lea rsi, [rsi + rbp] ; += stride
+ punpcklbw xmm0, xmm7 ; src[ 0- 7]
+ movq xmm1, [rsi] ; second row
+ lea rsi, [rsi + rbp] ; += stride
+ punpcklbw xmm1, xmm7 ; src[ 8-15]
+ jmp .temporal_filter_apply_load_finished
+
+.temporal_filter_apply_load_16:
+ movdqa xmm0, [rsi] ; src (frame1)
+ lea rsi, [rsi + rbp] ; += stride
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm7 ; src[ 0- 7]
+ punpckhbw xmm1, xmm7 ; src[ 8-15]
+
+.temporal_filter_apply_load_finished:
+ movdqa xmm2, [rdx] ; predictor (frame2)
+ movdqa xmm3, xmm2
+ punpcklbw xmm2, xmm7 ; pred[ 0- 7]
+ punpckhbw xmm3, xmm7 ; pred[ 8-15]
+
+ ; modifier = src_byte - pixel_value
+ psubw xmm0, xmm2 ; src - pred[ 0- 7]
+ psubw xmm1, xmm3 ; src - pred[ 8-15]
+
+ ; modifier *= modifier
+        pmullw  xmm0, xmm0              ; modifier[ 0- 7]^2
+        pmullw  xmm1, xmm1              ; modifier[ 8-15]^2
+
+ ; modifier *= 3
+ pmullw xmm0, [GLOBAL(_const_3w)]
+ pmullw xmm1, [GLOBAL(_const_3w)]
+
+        ; modifier += 0x8000 >> (16 - strength)
+ paddw xmm0, [rsp + rounding_bit]
+ paddw xmm1, [rsp + rounding_bit]
+
+ ; modifier >>= strength
+ psrlw xmm0, [rsp + strength]
+ psrlw xmm1, [rsp + strength]
+
+ ; modifier = 16 - modifier
+ ; saturation takes care of modifier > 16
+ movdqa xmm3, [GLOBAL(_const_16w)]
+ movdqa xmm2, [GLOBAL(_const_16w)]
+ psubusw xmm3, xmm1
+ psubusw xmm2, xmm0
+
+ ; modifier *= filter_weight
+ pmullw xmm2, [rsp + filter_weight]
+ pmullw xmm3, [rsp + filter_weight]
+
+ ; count
+ movdqa xmm4, [rax]
+ movdqa xmm5, [rax+16]
+ ; += modifier
+ paddw xmm4, xmm2
+ paddw xmm5, xmm3
+ ; write back
+ movdqa [rax], xmm4
+ movdqa [rax+16], xmm5
+ lea rax, [rax + 16*2] ; count += 16*(sizeof(short))
+
+ ; load and extract the predictor up to shorts
+ pxor xmm7, xmm7
+ movdqa xmm0, [rdx]
+ lea rdx, [rdx + 16*1] ; pred += 16*(sizeof(char))
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm7 ; pred[ 0- 7]
+ punpckhbw xmm1, xmm7 ; pred[ 8-15]
+
+ ; modifier *= pixel_value
+ pmullw xmm0, xmm2
+ pmullw xmm1, xmm3
+
+ ; expand to double words
+ movdqa xmm2, xmm0
+ punpcklwd xmm0, xmm7 ; [ 0- 3]
+ punpckhwd xmm2, xmm7 ; [ 4- 7]
+ movdqa xmm3, xmm1
+ punpcklwd xmm1, xmm7 ; [ 8-11]
+ punpckhwd xmm3, xmm7 ; [12-15]
+
+ ; accumulator
+ movdqa xmm4, [rdi]
+ movdqa xmm5, [rdi+16]
+ movdqa xmm6, [rdi+32]
+ movdqa xmm7, [rdi+48]
+ ; += modifier
+ paddd xmm4, xmm0
+ paddd xmm5, xmm2
+ paddd xmm6, xmm1
+ paddd xmm7, xmm3
+ ; write back
+ movdqa [rdi], xmm4
+ movdqa [rdi+16], xmm5
+ movdqa [rdi+32], xmm6
+ movdqa [rdi+48], xmm7
+ lea rdi, [rdi + 16*4] ; accumulator += 16*(sizeof(int))
+
+ cmp rdx, rcx
+ je .temporal_filter_apply_epilog
+ pxor xmm7, xmm7 ; zero for extraction
+ cmp dword ptr [rsp + block_size], 16
+ je .temporal_filter_apply_load_16
+ jmp .temporal_filter_apply_load_8
+
+.temporal_filter_apply_epilog:
+ ; begin epilog
+ mov rbp, [rsp + rbp_backup]
+ add rsp, stack_size
+ pop rsp
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+_const_3w:
+ times 8 dw 3
+align 16
+_const_top_bit:
+ times 8 dw 1<<15
+align 16
+_const_16w:
+ times 8 dw 16
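Per pixel, the filter math in the routine above reduces to the following (a sketch of the intended arithmetic; the SIMD code performs it 16 pixels at a time and relies on word saturation for the clamp):

#include <stdint.h>

static void temporal_filter_pixel(uint8_t frame1, uint8_t frame2,
                                  int strength, int filter_weight,
                                  unsigned int *accumulator,
                                  unsigned short *count) {
  const int diff = frame1 - frame2;
  int modifier = diff * diff * 3;         /* modifier = 3 * diff^2 */
  modifier += 0x8000 >> (16 - strength);  /* the precomputed rounding bit */
  modifier >>= strength;                  /* psrlw by strength */
  if (modifier > 16) modifier = 16;       /* psubusw saturation */
  modifier = 16 - modifier;
  modifier *= filter_weight;
  *count += (unsigned short)modifier;
  *accumulator += (unsigned int)(modifier * frame2);  /* weight the predictor */
}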
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm b/libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm
new file mode 100644
index 0000000..3501cf1
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm
@@ -0,0 +1,510 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;unsigned int vp9_get_mb_ss_mmx( short *src_ptr )
+global sym(vp9_get_mb_ss_mmx) PRIVATE
+sym(vp9_get_mb_ss_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ sub rsp, 8
+ ; end prolog
+
+ mov rax, arg(0) ;src_ptr
+ mov rcx, 16
+ pxor mm4, mm4
+
+.NEXTROW:
+ movq mm0, [rax]
+ movq mm1, [rax+8]
+ movq mm2, [rax+16]
+ movq mm3, [rax+24]
+ pmaddwd mm0, mm0
+ pmaddwd mm1, mm1
+ pmaddwd mm2, mm2
+ pmaddwd mm3, mm3
+
+ paddd mm4, mm0
+ paddd mm4, mm1
+ paddd mm4, mm2
+ paddd mm4, mm3
+
+ add rax, 32
+ dec rcx
+ ja .NEXTROW
+ movq QWORD PTR [rsp], mm4
+
+ ;return sum[0]+sum[1];
+ movsxd rax, dword ptr [rsp]
+ movsxd rcx, dword ptr [rsp+4]
+ add rax, rcx
+
+
+ ; begin epilog
+ add rsp, 8
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
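In C, the routine above is simply (an equivalent sketch; the asm, like this loop, accumulates in 32 bits):

#include <stdint.h>

/* Sum of squares over a 16x16 block of 16-bit values (256 entries). */
static unsigned int get_mb_ss_c(const int16_t *src_ptr) {
  unsigned int ss = 0;
  for (int i = 0; i < 256; i++)
    ss += (unsigned int)(src_ptr[i] * src_ptr[i]);
  return ss;
}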
+
+
+;unsigned int vp9_get8x8var_mmx
+;(
+; unsigned char *src_ptr,
+; int source_stride,
+; unsigned char *ref_ptr,
+; int recon_stride,
+; unsigned int *SSE,
+; int *Sum
+;)
+global sym(vp9_get8x8var_mmx) PRIVATE
+sym(vp9_get8x8var_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ push rsi
+ push rdi
+ push rbx
+ sub rsp, 16
+ ; end prolog
+
+
+        pxor        mm5, mm5                    ; Clear mm5 (sum accumulator)
+        pxor        mm6, mm6                    ; Clear mm6 (zero for unpacking)
+        pxor        mm7, mm7                    ; Clear mm7 (sse accumulator)
+
+ mov rax, arg(0) ;[src_ptr] ; Load base addresses
+ mov rbx, arg(2) ;[ref_ptr]
+ movsxd rcx, dword ptr arg(1) ;[source_stride]
+ movsxd rdx, dword ptr arg(3) ;[recon_stride]
+
+ ; Row 1
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+            punpcklbw       mm1, mm6
+            punpckhbw       mm2, mm6                ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+
+ ; Row 2
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+            punpcklbw       mm1, mm6
+            punpckhbw       mm2, mm6                ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 3
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+            punpcklbw       mm1, mm6
+            punpckhbw       mm2, mm6                ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 4
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+            punpcklbw       mm1, mm6
+            punpckhbw       mm2, mm6                ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 5
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+            punpcklbw       mm1, mm6
+            punpckhbw       mm2, mm6                ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ ; movq mm4, [rbx + rdx]
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 6
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+            punpcklbw       mm1, mm6
+            punpckhbw       mm2, mm6                ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 7
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+            punpcklbw       mm1, mm6
+            punpckhbw       mm2, mm6                ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 8
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+            punpcklbw       mm1, mm6
+            punpckhbw       mm2, mm6                ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Now accumulate the final results.
+ movq QWORD PTR [rsp+8], mm5 ; copy back accumulated results into normal memory
+ movq QWORD PTR [rsp], mm7 ; copy back accumulated results into normal memory
+ movsx rdx, WORD PTR [rsp+8]
+ movsx rcx, WORD PTR [rsp+10]
+ movsx rbx, WORD PTR [rsp+12]
+ movsx rax, WORD PTR [rsp+14]
+ add rdx, rcx
+ add rbx, rax
+ add rdx, rbx ;XSum
+ movsxd rax, DWORD PTR [rsp]
+ movsxd rcx, DWORD PTR [rsp+4]
+ add rax, rcx ;XXSum
+ mov rsi, arg(4) ;SSE
+ mov rdi, arg(5) ;Sum
+ mov dword ptr [rsi], eax
+ mov dword ptr [rdi], edx
+ xor rax, rax ; return 0
+
+
+ ; begin epilog
+ add rsp, 16
+ pop rbx
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
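The eight unrolled row blocks above all perform the same step; in C, the whole helper is (a sketch, with n = 8 here and the same shape serving the 4x4 and 16x16 variants in these files):

#include <stdint.h>

static void get_nxn_var_c(const uint8_t *src_ptr, int source_stride,
                          const uint8_t *ref_ptr, int recon_stride,
                          int n, unsigned int *sse, int *sum) {
  int s = 0;
  unsigned int ss = 0;
  for (int r = 0; r < n; r++) {
    for (int c = 0; c < n; c++) {
      const int d = src_ptr[c] - ref_ptr[c];
      s += d;                      /* paddw mm5 */
      ss += (unsigned int)(d * d); /* pmaddwd + paddd mm7 */
    }
    src_ptr += source_stride;
    ref_ptr += recon_stride;
  }
  *sum = s;
  *sse = ss;
}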
+
+
+
+;unsigned int
+;vp9_get4x4var_mmx
+;(
+; unsigned char *src_ptr,
+; int source_stride,
+; unsigned char *ref_ptr,
+; int recon_stride,
+; unsigned int *SSE,
+; int *Sum
+;)
+global sym(vp9_get4x4var_mmx) PRIVATE
+sym(vp9_get4x4var_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ push rsi
+ push rdi
+ push rbx
+ sub rsp, 16
+ ; end prolog
+
+
+        pxor        mm5, mm5                    ; Clear mm5 (sum accumulator)
+        pxor        mm6, mm6                    ; Clear mm6 (zero for unpacking)
+        pxor        mm7, mm7                    ; Clear mm7 (sse accumulator)
+
+ mov rax, arg(0) ;[src_ptr] ; Load base addresses
+ mov rbx, arg(2) ;[ref_ptr]
+ movsxd rcx, dword ptr arg(1) ;[source_stride]
+ movsxd rdx, dword ptr arg(3) ;[recon_stride]
+
+ ; Row 1
+ movd mm0, [rax] ; Copy 4 bytes to mm0
+ movd mm1, [rbx] ; Copy 4 bytes to mm1
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ paddw mm5, mm0 ; accumulate differences in mm5
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movd mm1, [rbx] ; Copy 4 bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+
+ ; Row 2
+ movd mm0, [rax] ; Copy 4 bytes to mm0
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ paddw mm5, mm0 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movd mm1, [rbx] ; Copy 4 bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 3
+ movd mm0, [rax] ; Copy 4 bytes to mm0
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ paddw mm5, mm0 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movd mm1, [rbx] ; Copy 4 bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 4
+ movd mm0, [rax] ; Copy 4 bytes to mm0
+
+            punpcklbw       mm0, mm6                ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ paddd mm7, mm0 ; accumulate in mm7
+
+
+ ; Now accumulate the final results.
+ movq QWORD PTR [rsp+8], mm5 ; copy back accumulated results into normal memory
+ movq QWORD PTR [rsp], mm7 ; copy back accumulated results into normal memory
+ movsx rdx, WORD PTR [rsp+8]
+ movsx rcx, WORD PTR [rsp+10]
+ movsx rbx, WORD PTR [rsp+12]
+ movsx rax, WORD PTR [rsp+14]
+ add rdx, rcx
+ add rbx, rax
+ add rdx, rbx ;XSum
+ movsxd rax, DWORD PTR [rsp]
+ movsxd rcx, DWORD PTR [rsp+4]
+ add rax, rcx ;XXSum
+ mov rsi, arg(4) ;SSE
+ mov rdi, arg(5) ;Sum
+ mov dword ptr [rsi], eax
+ mov dword ptr [rdi], edx
+ xor rax, rax ; return 0
+
+
+ ; begin epilog
+ add rsp, 16
+ pop rbx
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+
+;unsigned int
+;vp9_get4x4sse_cs_mmx
+;(
+; unsigned char *src_ptr,
+; int source_stride,
+; unsigned char *ref_ptr,
+; int recon_stride
+;)
+global sym(vp9_get4x4sse_cs_mmx) PRIVATE
+sym(vp9_get4x4sse_cs_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+
+        pxor        mm6, mm6                    ; Clear mm6 (zero for unpacking)
+        pxor        mm7, mm7                    ; Clear mm7 (sse accumulator)
+
+ mov rax, arg(0) ;[src_ptr] ; Load base addresses
+ mov rbx, arg(2) ;[ref_ptr]
+ movsxd rcx, dword ptr arg(1) ;[source_stride]
+ movsxd rdx, dword ptr arg(3) ;[recon_stride]
+ ; Row 1
+        movd            mm0, [rax]              ; Copy four bytes to mm0
+        movd            mm1, [rbx]              ; Copy four bytes to mm1
+        punpcklbw       mm0, mm6                ; unpack to higher precision
+        punpcklbw       mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+        movd            mm1, [rbx]              ; Copy four bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 2
+        movd            mm0, [rax]              ; Copy four bytes to mm0
+        punpcklbw       mm0, mm6                ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+        movd            mm1, [rbx]              ; Copy four bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 3
+        movd            mm0, [rax]              ; Copy four bytes to mm0
+        punpcklbw       mm1, mm6
+        punpcklbw       mm0, mm6                ; unpack to higher precision
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+        movd            mm1, [rbx]              ; Copy four bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 4
+        movd            mm0, [rax]              ; Copy four bytes to mm0
+        punpcklbw       mm0, mm6                ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ pmaddwd mm0, mm0 ; square and accumulate
+ paddd mm7, mm0 ; accumulate in mm7
+
+ movq mm0, mm7 ;
+ psrlq mm7, 32
+
+ paddd mm0, mm7
+ movq rax, mm0
+
+
+ ; begin epilog
+ pop rbx
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm b/libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm
new file mode 100644
index 0000000..2c50881
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm
@@ -0,0 +1,734 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;unsigned int vp9_get_mb_ss_sse2
+;(
+; short *src_ptr
+;)
+global sym(vp9_get_mb_ss_sse2) PRIVATE
+sym(vp9_get_mb_ss_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 1
+ GET_GOT rbx
+ push rsi
+ push rdi
+ sub rsp, 16
+ ; end prolog
+
+
+ mov rax, arg(0) ;[src_ptr]
+ mov rcx, 8
+ pxor xmm4, xmm4
+
+.NEXTROW:
+ movdqa xmm0, [rax]
+ movdqa xmm1, [rax+16]
+ movdqa xmm2, [rax+32]
+ movdqa xmm3, [rax+48]
+ pmaddwd xmm0, xmm0
+ pmaddwd xmm1, xmm1
+ pmaddwd xmm2, xmm2
+ pmaddwd xmm3, xmm3
+
+ paddd xmm0, xmm1
+ paddd xmm2, xmm3
+ paddd xmm4, xmm0
+ paddd xmm4, xmm2
+
+ add rax, 0x40
+ dec rcx
+ ja .NEXTROW
+
+ movdqa xmm3,xmm4
+ psrldq xmm4,8
+ paddd xmm4,xmm3
+ movdqa xmm3,xmm4
+ psrldq xmm4,4
+ paddd xmm4,xmm3
+ movq rax,xmm4
+
+
+ ; begin epilog
+ add rsp, 16
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_get16x16var_sse2
+;(
+; unsigned char * src_ptr,
+; int source_stride,
+; unsigned char * ref_ptr,
+; int recon_stride,
+; unsigned int * SSE,
+; int * Sum
+;)
+global sym(vp9_get16x16var_sse2) PRIVATE
+sym(vp9_get16x16var_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;[src_ptr]
+ mov rdi, arg(2) ;[ref_ptr]
+
+ movsxd rax, DWORD PTR arg(1) ;[source_stride]
+ movsxd rdx, DWORD PTR arg(3) ;[recon_stride]
+
+ ; Prefetch data
+ lea rcx, [rax+rax*2]
+ prefetcht0 [rsi]
+ prefetcht0 [rsi+rax]
+ prefetcht0 [rsi+rax*2]
+ prefetcht0 [rsi+rcx]
+ lea rbx, [rsi+rax*4]
+ prefetcht0 [rbx]
+ prefetcht0 [rbx+rax]
+ prefetcht0 [rbx+rax*2]
+ prefetcht0 [rbx+rcx]
+
+ lea rcx, [rdx+rdx*2]
+ prefetcht0 [rdi]
+ prefetcht0 [rdi+rdx]
+ prefetcht0 [rdi+rdx*2]
+ prefetcht0 [rdi+rcx]
+ lea rbx, [rdi+rdx*4]
+ prefetcht0 [rbx]
+ prefetcht0 [rbx+rdx]
+ prefetcht0 [rbx+rdx*2]
+ prefetcht0 [rbx+rcx]
+
+ pxor xmm0, xmm0 ; clear xmm0 for unpack
+ pxor xmm7, xmm7 ; clear xmm7 for accumulating diffs
+
+ pxor xmm6, xmm6 ; clear xmm6 for accumulating sse
+ mov rcx, 16
+
+.var16loop:
+ movdqu xmm1, XMMWORD PTR [rsi]
+ movdqu xmm2, XMMWORD PTR [rdi]
+
+ prefetcht0 [rsi+rax*8]
+ prefetcht0 [rdi+rdx*8]
+
+ movdqa xmm3, xmm1
+ movdqa xmm4, xmm2
+
+
+ punpcklbw xmm1, xmm0
+ punpckhbw xmm3, xmm0
+
+ punpcklbw xmm2, xmm0
+ punpckhbw xmm4, xmm0
+
+
+ psubw xmm1, xmm2
+ psubw xmm3, xmm4
+
+ paddw xmm7, xmm1
+ pmaddwd xmm1, xmm1
+
+ paddw xmm7, xmm3
+ pmaddwd xmm3, xmm3
+
+ paddd xmm6, xmm1
+ paddd xmm6, xmm3
+
+ add rsi, rax
+ add rdi, rdx
+
+ sub rcx, 1
+ jnz .var16loop
+
+
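+ ; xmm7 now holds eight signed 16-bit partial difference sums and xmm6
+ ; four 32-bit partial sums of squares. Sign-extend the word sums (unpack
+ ; against zero, then arithmetic shift right by 16) and fold both
+ ; accumulators down to scalars for the output pointers.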
+ movdqa xmm1, xmm6
+ pxor xmm6, xmm6
+
+ pxor xmm5, xmm5
+ punpcklwd xmm6, xmm7
+
+ punpckhwd xmm5, xmm7
+ psrad xmm5, 16
+
+ psrad xmm6, 16
+ paddd xmm6, xmm5
+
+ movdqa xmm2, xmm1
+ punpckldq xmm1, xmm0
+
+ punpckhdq xmm2, xmm0
+ movdqa xmm7, xmm6
+
+ paddd xmm1, xmm2
+ punpckldq xmm6, xmm0
+
+ punpckhdq xmm7, xmm0
+ paddd xmm6, xmm7
+
+ movdqa xmm2, xmm1
+ movdqa xmm7, xmm6
+
+ psrldq xmm1, 8
+ psrldq xmm6, 8
+
+ paddd xmm7, xmm6
+ paddd xmm1, xmm2
+
+ mov rax, arg(5) ;[Sum]
+ mov rdi, arg(4) ;[SSE]
+
+ movd DWORD PTR [rax], xmm7
+ movd DWORD PTR [rdi], xmm1
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ pop rbx
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+
+
+;unsigned int vp9_get8x8var_sse2
+;(
+; unsigned char * src_ptr,
+; int source_stride,
+; unsigned char * ref_ptr,
+; int recon_stride,
+; unsigned int * SSE,
+; int * Sum
+;)
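+;
+; 8x8 variant of the above: the eight rows are processed fully unrolled,
+; accumulating word difference sums in xmm7 and squared sums in xmm1.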
+global sym(vp9_get8x8var_sse2) PRIVATE
+sym(vp9_get8x8var_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ sub rsp, 16
+ ; end prolog
+
+ mov rsi, arg(0) ;[src_ptr]
+ mov rdi, arg(2) ;[ref_ptr]
+
+ movsxd rax, DWORD PTR arg(1) ;[source_stride]
+ movsxd rdx, DWORD PTR arg(3) ;[recon_stride]
+
+ pxor xmm0, xmm0 ; clear xmm0 for unpack
+ pxor xmm7, xmm7 ; clear xmm7 for accumulating diffs
+
+ movq xmm1, QWORD PTR [rsi]
+ movq xmm2, QWORD PTR [rdi]
+
+ punpcklbw xmm1, xmm0
+ punpcklbw xmm2, xmm0
+
+ psubsw xmm1, xmm2
+ paddw xmm7, xmm1
+
+ pmaddwd xmm1, xmm1
+
+ movq xmm2, QWORD PTR[rsi + rax]
+ movq xmm3, QWORD PTR[rdi + rdx]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ movq xmm2, QWORD PTR[rsi + rax * 2]
+ movq xmm3, QWORD PTR[rdi + rdx * 2]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ lea rsi, [rsi + rax * 2]
+ lea rdi, [rdi + rdx * 2]
+ movq xmm2, QWORD PTR[rsi + rax]
+ movq xmm3, QWORD PTR[rdi + rdx]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+ movq xmm2, QWORD PTR[rsi + rax *2]
+ movq xmm3, QWORD PTR[rdi + rdx *2]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ lea rsi, [rsi + rax * 2]
+ lea rdi, [rdi + rdx * 2]
+
+
+ movq xmm2, QWORD PTR[rsi + rax]
+ movq xmm3, QWORD PTR[rdi + rdx]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+ movq xmm2, QWORD PTR[rsi + rax *2]
+ movq xmm3, QWORD PTR[rdi + rdx *2]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ lea rsi, [rsi + rax * 2]
+ lea rdi, [rdi + rdx * 2]
+
+ movq xmm2, QWORD PTR[rsi + rax]
+ movq xmm3, QWORD PTR[rdi + rdx]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ movdqa xmm6, xmm7
+ punpcklwd xmm6, xmm0
+
+ punpckhwd xmm7, xmm0
+ movdqa xmm2, xmm1
+
+ paddw xmm6, xmm7
+ punpckldq xmm1, xmm0
+
+ punpckhdq xmm2, xmm0
+ movdqa xmm7, xmm6
+
+ paddd xmm1, xmm2
+ punpckldq xmm6, xmm0
+
+ punpckhdq xmm7, xmm0
+ paddw xmm6, xmm7
+
+ movdqa xmm2, xmm1
+ movdqa xmm7, xmm6
+
+ psrldq xmm1, 8
+ psrldq xmm6, 8
+
+ paddw xmm7, xmm6
+ paddd xmm1, xmm2
+
+ mov rax, arg(5) ;[Sum]
+ mov rdi, arg(4) ;[SSE]
+
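+ ; For an 8x8 block |Sum| <= 64 * 255 = 16320, so the reduced sum fits in
+ ; the low 16 bits of xmm7; sign-extend that word for the output.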
+ movq rdx, xmm7
+ movsx rcx, dx
+
+ mov dword ptr [rax], ecx
+ movd DWORD PTR [rdi], xmm1
+
+ ; begin epilog
+ add rsp, 16
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_half_horiz_vert_variance8x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
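+;
+; Half-pel (horizontal and vertical) variant: each ref row is averaged with
+; its right neighbour via pavgb ((a + b + 1) >> 1), then with the averaged
+; row below it, before the usual sum/SSE accumulation against src_ptr.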
+global sym(vp9_half_horiz_vert_variance8x_h_sse2) PRIVATE
+sym(vp9_half_horiz_vert_variance8x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+%endif
+
+ pxor xmm6, xmm6 ; error accumulator
+ pxor xmm7, xmm7 ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+
+ movq xmm5, QWORD PTR [rsi] ; xmm5 = s0,s1,s2..s7
+ movq xmm3, QWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s8
+ pavgb xmm5, xmm3 ; xmm5 = avg(xmm5,xmm3) horizontal line 1
+
+%if ABI_IS_32BIT
+ add rsi, dword ptr arg(1) ;ref_pixels_per_line ; next source
+%else
+ add rsi, r8
+%endif
+
+.half_horiz_vert_variance8x_h_1:
+
+ movq xmm1, QWORD PTR [rsi] ;
+ movq xmm2, QWORD PTR [rsi+1] ;
+ pavgb xmm1, xmm2 ; xmm1 = avg(xmm1,xmm2) horizontal line i+1
+
+ pavgb xmm5, xmm1 ; xmm5 = vertical average of the above
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+
+ movdqa xmm5, xmm1 ; save xmm1 for use on the next row
+
+%if ABI_IS_32BIT
+ add esi, dword ptr arg(1) ;ref_pixels_per_line ; next source
+ add edi, dword ptr arg(3) ;src_pixels_per_line ; next destination
+%else
+ add rsi, r8
+ add rdi, r9
+%endif
+
+ sub rcx, 1 ;
+ jnz .half_horiz_vert_variance8x_h_1 ;
+
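+ ; Reduction: fold each 128-bit accumulator to 64 bits via MMX, then
+ ; sign-extend the word difference sums and collapse both to scalars.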
+ movdq2q mm6, xmm6 ;
+ movdq2q mm7, xmm7 ;
+
+ psrldq xmm6, 8
+ psrldq xmm7, 8
+
+ movdq2q mm2, xmm6
+ movdq2q mm3, xmm7
+
+ paddw mm6, mm2
+ paddd mm7, mm3
+
+ pxor mm3, mm3 ;
+ pxor mm2, mm2 ;
+
+ punpcklwd mm2, mm6 ;
+ punpckhwd mm3, mm6 ;
+
+ paddd mm2, mm3 ;
+ movq mm6, mm2 ;
+
+ psrlq mm6, 32 ;
+ paddd mm2, mm6 ;
+
+ psrad mm2, 16 ;
+ movq mm4, mm7 ;
+
+ psrlq mm4, 32 ;
+ paddd mm4, mm7 ;
+
+ mov rsi, arg(5) ; sum
+ mov rdi, arg(6) ; sumsquared
+
+ movd [rsi], mm2 ;
+ movd [rdi], mm4 ;
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_half_vert_variance8x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
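+;
+; Vertical-only half-pel variant: each ref row is averaged with the row
+; below it ([rsi] with [rsi+rax]) instead of with its right neighbour.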
+global sym(vp9_half_vert_variance8x_h_sse2) PRIVATE
+sym(vp9_half_vert_variance8x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+%endif
+
+ pxor xmm6, xmm6 ; error accumulator
+ pxor xmm7, xmm7 ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+.half_vert_variance8x_h_1:
+ movq xmm5, QWORD PTR [rsi] ; xmm5 = s0,s1,s2..s7 of row i
+ movq xmm3, QWORD PTR [rsi+rax] ; xmm3 = s0,s1,s2..s7 of row i+1
+
+ pavgb xmm5, xmm3 ; xmm5 = avg(xmm5,xmm3)
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+
+%if ABI_IS_32BIT
+ add esi, dword ptr arg(1) ;ref_pixels_per_line ; next source
+ add edi, dword ptr arg(3) ;src_pixels_per_line ; next destination
+%else
+ add rsi, r8
+ add rdi, r9
+%endif
+
+ sub rcx, 1 ;
+ jnz .half_vert_variance8x_h_1 ;
+
+ movdq2q mm6, xmm6 ;
+ movdq2q mm7, xmm7 ;
+
+ psrldq xmm6, 8
+ psrldq xmm7, 8
+
+ movdq2q mm2, xmm6
+ movdq2q mm3, xmm7
+
+ paddw mm6, mm2
+ paddd mm7, mm3
+
+ pxor mm3, mm3 ;
+ pxor mm2, mm2 ;
+
+ punpcklwd mm2, mm6 ;
+ punpckhwd mm3, mm6 ;
+
+ paddd mm2, mm3 ;
+ movq mm6, mm2 ;
+
+ psrlq mm6, 32 ;
+ paddd mm2, mm6 ;
+
+ psrad mm2, 16 ;
+ movq mm4, mm7 ;
+
+ psrlq mm4, 32 ;
+ paddd mm4, mm7 ;
+
+ mov rsi, arg(5) ; sum
+ mov rdi, arg(6) ; sumsquared
+
+ movd [rsi], mm2 ;
+ movd [rdi], mm4 ;
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_half_horiz_variance8x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
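+;
+; Horizontal-only half-pel variant: each ref row is averaged with its
+; right neighbour ([rsi] with [rsi+1]); no vertical averaging is done.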
+global sym(vp9_half_horiz_variance8x_h_sse2) PRIVATE
+sym(vp9_half_horiz_variance8x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+%endif
+
+ pxor xmm6, xmm6 ; error accumulator
+ pxor xmm7, xmm7 ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+
+ pxor xmm0, xmm0 ;
+.half_horiz_variance8x_h_1:
+ movq xmm5, QWORD PTR [rsi] ; xmm5 = s0,s1,s2..s7
+ movq xmm3, QWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s8
+
+ pavgb xmm5, xmm3 ; xmm5 = avg(xmm5,xmm3)
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+
+%if ABI_IS_32BIT
+ add esi, dword ptr arg(1) ;ref_pixels_per_line ; next source
+ add edi, dword ptr arg(3) ;src_pixels_per_line ; next destination
+%else
+ add rsi, r8
+ add rdi, r9
+%endif
+ sub rcx, 1 ;
+ jnz .half_horiz_variance8x_h_1 ;
+
+ movdq2q mm6, xmm6 ;
+ movdq2q mm7, xmm7 ;
+
+ psrldq xmm6, 8
+ psrldq xmm7, 8
+
+ movdq2q mm2, xmm6
+ movdq2q mm3, xmm7
+
+ paddw mm6, mm2
+ paddd mm7, mm3
+
+ pxor mm3, mm3 ;
+ pxor mm2, mm2 ;
+
+ punpcklwd mm2, mm6 ;
+ punpckhwd mm3, mm6 ;
+
+ paddd mm2, mm3 ;
+ movq mm6, mm2 ;
+
+ psrlq mm6, 32 ;
+ paddd mm2, mm6 ;
+
+ psrad mm2, 16 ;
+ movq mm4, mm7 ;
+
+ psrlq mm4, 32 ;
+ paddd mm4, mm7 ;
+
+ mov rsi, arg(5) ; sum
+ mov rdi, arg(6) ; sumsquared
+
+ movd [rsi], mm2 ;
+ movd [rdi], mm4 ;
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_mmx.c b/libvpx/vp9/encoder/x86/vp9_variance_mmx.c
new file mode 100644
index 0000000..d141560
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_mmx.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/common/vp9_pragmas.h"
+#include "vpx_ports/mem.h"
+
+extern unsigned int vp9_get_mb_ss_mmx(const short *src_ptr);
+extern unsigned int vp9_get8x8var_mmx
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+extern unsigned int vp9_get4x4var_mmx
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+
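+/*
+ * The wrappers below apply the identity
+ *
+ *   variance = SSE - Sum^2 / N
+ *
+ * where N = w * h is the pixel count of the block, so the final right
+ * shift is log2(w * h): >> 4 for 4x4, >> 6 for 8x8, >> 7 for 16x8 and
+ * 8x16, >> 8 for 16x16.
+ */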
+unsigned int vp9_variance4x4_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ vp9_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 4));
+
+}
+
+unsigned int vp9_variance8x8_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+ *sse = var;
+
+ return (var - (((unsigned int)avg * avg) >> 6));
+
+}
+
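+/* MSE needs no mean correction: the 16x16 SSE is simply assembled from
+ * the four 8x8 quadrants. */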
+unsigned int vp9_mse16x16_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, sse2, sse3, var;
+ int sum0, sum1, sum2, sum3;
+
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
+
+ var = sse0 + sse1 + sse2 + sse3;
+ *sse = var;
+ return var;
+}
+
+
+unsigned int vp9_variance16x16_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, sse2, sse3, var;
+ int sum0, sum1, sum2, sum3, avg;
+
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
+
+ var = sse0 + sse1 + sse2 + sse3;
+ avg = sum0 + sum1 + sum2 + sum3;
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 8));
+}
+
+unsigned int vp9_variance16x8_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, var;
+ int sum0, sum1, avg;
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+
+ var = sse0 + sse1;
+ avg = sum0 + sum1;
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+
+}
+
+
+unsigned int vp9_variance8x16_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, var;
+ int sum0, sum1, avg;
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
+
+ var = sse0 + sse1;
+ avg = sum0 + sum1;
+ *sse = var;
+
+ return (var - (((unsigned int)avg * avg) >> 7));
+
+}
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_sse2.c b/libvpx/vp9/encoder/x86/vp9_variance_sse2.c
new file mode 100644
index 0000000..cea934d
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_sse2.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/common/vp9_pragmas.h"
+#include "vpx_ports/mem.h"
+
+extern unsigned int vp9_get4x4var_mmx
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+
+unsigned int vp9_get_mb_ss_sse2
+(
+ const short *src_ptr
+);
+unsigned int vp9_get16x16var_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+unsigned int vp9_get8x8var_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+void vp9_half_horiz_vert_variance8x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_horiz_vert_variance16x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_horiz_variance8x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_horiz_variance16x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_vert_variance8x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_vert_variance16x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+
+typedef unsigned int (*get_var_sse2) (
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+
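+/* Generic tiler: covers a w x h block with block_size x block_size calls
+ * to var_fn, accumulating the per-tile SSE and Sum into the outputs. */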
+static void variance_sse2(const unsigned char *src_ptr, int source_stride,
+ const unsigned char *ref_ptr, int recon_stride,
+ int w, int h, unsigned int *sse, int *sum,
+ get_var_sse2 var_fn, int block_size) {
+ unsigned int sse0;
+ int sum0;
+ int i, j;
+
+ *sse = 0;
+ *sum = 0;
+
+ for (i = 0; i < h; i += block_size) {
+ for (j = 0; j < w; j += block_size) {
+ var_fn(src_ptr + source_stride * i + j, source_stride,
+ ref_ptr + recon_stride * i + j, recon_stride, &sse0, &sum0);
+ *sse += sse0;
+ *sum += sum0;
+ }
+ }
+}
+
+unsigned int vp9_variance4x4_sse2(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4,
+ &var, &avg, vp9_get4x4var_mmx, 4);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 4));
+}
+
+unsigned int vp9_variance8x4_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 8, 4,
+ &var, &avg, vp9_get4x4var_mmx, 4);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 5));
+}
+
+unsigned int vp9_variance4x8_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 4, 8,
+ &var, &avg, vp9_get4x4var_mmx, 4);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 5));
+}
+
+unsigned int vp9_variance8x8_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8,
+ &var, &avg, vp9_get8x8var_sse2, 8);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 6));
+}
+
+unsigned int vp9_variance16x8_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8,
+ &var, &avg, vp9_get8x8var_sse2, 8);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+}
+
+unsigned int vp9_variance8x16_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16,
+ &var, &avg, vp9_get8x8var_sse2, 8);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+}
+
+unsigned int vp9_variance16x16_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 8));
+}
+
+unsigned int vp9_mse16x16_sse2(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+
+ unsigned int sse0;
+ int sum0;
+ vp9_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,
+ &sum0);
+ *sse = sse0;
+ return sse0;
+}
+
+unsigned int vp9_variance32x32_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 32,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 10));
+}
+
+unsigned int vp9_variance32x16_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 16,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_variance16x32_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 32,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_variance64x64_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 64, 64,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 12));
+}
+
+unsigned int vp9_variance64x32_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 64, 32,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 11));
+}
+
+unsigned int vp9_variance32x64_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 64,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 11));
+}
+
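+/*
+ * The DECL/FN macros below stitch W x H sub-pixel variance out of asm
+ * kernels that handle a fixed-width column (wf pixels) of arbitrary
+ * height; wider blocks add extra kernel calls at +16, +32 and +48. The
+ * `cast` argument widens se * se where it could overflow: for 64x64,
+ * |se| can reach 255 * 4096, so the square needs int64_t, while for
+ * 16x16 an unsigned int suffices and smaller blocks need no cast.
+ */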
+#define DECL(w, opt) \
+int vp9_sub_pixel_variance##w##xh_##opt(const uint8_t *src, \
+ ptrdiff_t src_stride, \
+ int x_offset, int y_offset, \
+ const uint8_t *dst, \
+ ptrdiff_t dst_stride, \
+ int height, unsigned int *sse)
+#define DECLS(opt1, opt2) \
+DECL(4, opt2); \
+DECL(8, opt1); \
+DECL(16, opt1)
+
+DECLS(sse2, sse);
+DECLS(ssse3, ssse3);
+#undef DECLS
+#undef DECL
+
+#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
+unsigned int vp9_sub_pixel_variance##w##x##h##_##opt(const uint8_t *src, \
+ int src_stride, \
+ int x_offset, \
+ int y_offset, \
+ const uint8_t *dst, \
+ int dst_stride, \
+ unsigned int *sse_ptr) { \
+ unsigned int sse; \
+ int se = vp9_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset, \
+ y_offset, dst, dst_stride, \
+ h, &sse); \
+ if (w > wf) { \
+ unsigned int sse2; \
+ int se2 = vp9_sub_pixel_variance##wf##xh_##opt(src + 16, src_stride, \
+ x_offset, y_offset, \
+ dst + 16, dst_stride, \
+ h, &sse2); \
+ se += se2; \
+ sse += sse2; \
+ if (w > wf * 2) { \
+ se2 = vp9_sub_pixel_variance##wf##xh_##opt(src + 32, src_stride, \
+ x_offset, y_offset, \
+ dst + 32, dst_stride, \
+ h, &sse2); \
+ se += se2; \
+ sse += sse2; \
+ se2 = vp9_sub_pixel_variance##wf##xh_##opt(src + 48, src_stride, \
+ x_offset, y_offset, \
+ dst + 48, dst_stride, \
+ h, &sse2); \
+ se += se2; \
+ sse += sse2; \
+ } \
+ } \
+ *sse_ptr = sse; \
+ return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+}
+
+#define FNS(opt1, opt2) \
+FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
+FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
+FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
+FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
+FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
+FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
+FN(16, 16, 16, 4, 4, opt1, (unsigned int)); \
+FN(16, 8, 16, 4, 3, opt1,); \
+FN(8, 16, 8, 3, 4, opt1,); \
+FN(8, 8, 8, 3, 3, opt1,); \
+FN(8, 4, 8, 3, 2, opt1,); \
+FN(4, 8, 4, 2, 3, opt2,); \
+FN(4, 4, 4, 2, 2, opt2,)
+
+FNS(sse2, sse);
+FNS(ssse3, ssse3);
+
+#undef FNS
+#undef FN
+
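+/* Same construction for the compound-prediction path: these kernels also
+ * average the second predictor `sec` into the filtered prediction before
+ * taking differences. */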
+#define DECL(w, opt) \
+int vp9_sub_pixel_avg_variance##w##xh_##opt(const uint8_t *src, \
+ ptrdiff_t src_stride, \
+ int x_offset, int y_offset, \
+ const uint8_t *dst, \
+ ptrdiff_t dst_stride, \
+ const uint8_t *sec, \
+ ptrdiff_t sec_stride, \
+ int height, unsigned int *sse)
+#define DECLS(opt1, opt2) \
+DECL(4, opt2); \
+DECL(8, opt1); \
+DECL(16, opt1)
+
+DECLS(sse2, sse);
+DECLS(ssse3, ssse3);
+#undef DECL
+#undef DECLS
+
+#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
+unsigned int vp9_sub_pixel_avg_variance##w##x##h##_##opt(const uint8_t *src, \
+ int src_stride, \
+ int x_offset, \
+ int y_offset, \
+ const uint8_t *dst, \
+ int dst_stride, \
+ unsigned int *sseptr, \
+ const uint8_t *sec) { \
+ unsigned int sse; \
+ int se = vp9_sub_pixel_avg_variance##wf##xh_##opt(src, src_stride, x_offset, \
+ y_offset, dst, dst_stride, \
+ sec, w, h, &sse); \
+ if (w > wf) { \
+ unsigned int sse2; \
+ int se2 = vp9_sub_pixel_avg_variance##wf##xh_##opt(src + 16, src_stride, \
+ x_offset, y_offset, \
+ dst + 16, dst_stride, \
+ sec + 16, w, h, &sse2); \
+ se += se2; \
+ sse += sse2; \
+ if (w > wf * 2) { \
+ se2 = vp9_sub_pixel_avg_variance##wf##xh_##opt(src + 32, src_stride, \
+ x_offset, y_offset, \
+ dst + 32, dst_stride, \
+ sec + 32, w, h, &sse2); \
+ se += se2; \
+ sse += sse2; \
+ se2 = vp9_sub_pixel_avg_variance##wf##xh_##opt(src + 48, src_stride, \
+ x_offset, y_offset, \
+ dst + 48, dst_stride, \
+ sec + 48, w, h, &sse2); \
+ se += se2; \
+ sse += sse2; \
+ } \
+ } \
+ *sseptr = sse; \
+ return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+}
+
+#define FNS(opt1, opt2) \
+FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
+FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
+FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
+FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
+FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
+FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
+FN(16, 16, 16, 4, 4, opt1, (unsigned int)); \
+FN(16, 8, 16, 4, 3, opt1,); \
+FN(8, 16, 8, 3, 4, opt1,); \
+FN(8, 8, 8, 3, 3, opt1,); \
+FN(8, 4, 8, 3, 2, opt1,); \
+FN(4, 8, 4, 2, 3, opt2,); \
+FN(4, 4, 4, 2, 2, opt2,)
+
+FNS(sse2, sse);
+FNS(ssse3, ssse3);
+
+#undef FNS
+#undef FN
+
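+/* Half-pel fast paths: thin wrappers over the specialized asm kernels for
+ * the 16x16 case (N = 256, hence the >> 8). */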
+unsigned int vp9_variance_halfpixvar16x16_h_sse2(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ int xsum0;
+ unsigned int xxsum0;
+
+ vp9_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
+}
+
+
+unsigned int vp9_variance_halfpixvar16x16_v_sse2(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ int xsum0;
+ unsigned int xxsum0;
+ vp9_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
+}
+
+
+unsigned int vp9_variance_halfpixvar16x16_hv_sse2(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ int xsum0;
+ unsigned int xxsum0;
+
+ vp9_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
+}
diff --git a/libvpx/vp9/exports_dec b/libvpx/vp9/exports_dec
new file mode 100644
index 0000000..0a61fde
--- /dev/null
+++ b/libvpx/vp9/exports_dec
@@ -0,0 +1,2 @@
+data vpx_codec_vp9_dx_algo
+text vpx_codec_vp9_dx
diff --git a/libvpx/vp9/exports_enc b/libvpx/vp9/exports_enc
new file mode 100644
index 0000000..25156e8
--- /dev/null
+++ b/libvpx/vp9/exports_enc
@@ -0,0 +1,4 @@
+data vpx_codec_vp9_cx_algo
+text vpx_codec_vp9_cx
+data vpx_codec_vp9x_cx_algo
+text vpx_codec_vp9x_cx
diff --git a/libvpx/vp9/vp9_common.mk b/libvpx/vp9/vp9_common.mk
new file mode 100644
index 0000000..687fb48
--- /dev/null
+++ b/libvpx/vp9/vp9_common.mk
@@ -0,0 +1,113 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+VP9_COMMON_SRCS-yes += vp9_common.mk
+VP9_COMMON_SRCS-yes += vp9_iface_common.h
+VP9_COMMON_SRCS-yes += common/vp9_pragmas.h
+VP9_COMMON_SRCS-yes += common/vp9_ppflags.h
+VP9_COMMON_SRCS-yes += common/vp9_onyx.h
+VP9_COMMON_SRCS-yes += common/vp9_alloccommon.c
+VP9_COMMON_SRCS-yes += common/vp9_convolve.c
+VP9_COMMON_SRCS-yes += common/vp9_convolve.h
+VP9_COMMON_SRCS-yes += common/vp9_debugmodes.c
+VP9_COMMON_SRCS-yes += common/vp9_default_coef_probs.h
+VP9_COMMON_SRCS-yes += common/vp9_entropy.c
+VP9_COMMON_SRCS-yes += common/vp9_entropymode.c
+VP9_COMMON_SRCS-yes += common/vp9_entropymv.c
+VP9_COMMON_SRCS-yes += common/vp9_extend.c
+VP9_COMMON_SRCS-yes += common/vp9_filter.c
+VP9_COMMON_SRCS-yes += common/vp9_filter.h
+VP9_COMMON_SRCS-yes += common/vp9_findnearmv.c
+VP9_COMMON_SRCS-yes += common/generic/vp9_systemdependent.c
+VP9_COMMON_SRCS-yes += common/vp9_idct.c
+VP9_COMMON_SRCS-yes += common/vp9_alloccommon.h
+VP9_COMMON_SRCS-yes += common/vp9_blockd.h
+VP9_COMMON_SRCS-yes += common/vp9_common.h
+VP9_COMMON_SRCS-yes += common/vp9_entropy.h
+VP9_COMMON_SRCS-yes += common/vp9_entropymode.h
+VP9_COMMON_SRCS-yes += common/vp9_entropymv.h
+VP9_COMMON_SRCS-yes += common/vp9_enums.h
+VP9_COMMON_SRCS-yes += common/vp9_extend.h
+VP9_COMMON_SRCS-yes += common/vp9_findnearmv.h
+VP9_COMMON_SRCS-yes += common/vp9_idct.h
+VP9_COMMON_SRCS-yes += common/vp9_loopfilter.h
+VP9_COMMON_SRCS-yes += common/vp9_mv.h
+VP9_COMMON_SRCS-yes += common/vp9_onyxc_int.h
+VP9_COMMON_SRCS-yes += common/vp9_pred_common.h
+VP9_COMMON_SRCS-yes += common/vp9_pred_common.c
+VP9_COMMON_SRCS-yes += common/vp9_quant_common.h
+VP9_COMMON_SRCS-yes += common/vp9_reconinter.h
+VP9_COMMON_SRCS-yes += common/vp9_reconintra.h
+VP9_COMMON_SRCS-yes += common/vp9_rtcd.c
+VP9_COMMON_SRCS-yes += common/vp9_rtcd_defs.sh
+VP9_COMMON_SRCS-yes += common/vp9_sadmxn.h
+VP9_COMMON_SRCS-yes += common/vp9_subpelvar.h
+VP9_COMMON_SRCS-yes += common/vp9_scale.h
+VP9_COMMON_SRCS-yes += common/vp9_scale.c
+VP9_COMMON_SRCS-yes += common/vp9_seg_common.h
+VP9_COMMON_SRCS-yes += common/vp9_seg_common.c
+VP9_COMMON_SRCS-yes += common/vp9_systemdependent.h
+VP9_COMMON_SRCS-yes += common/vp9_textblit.h
+VP9_COMMON_SRCS-yes += common/vp9_tile_common.h
+VP9_COMMON_SRCS-yes += common/vp9_tile_common.c
+VP9_COMMON_SRCS-yes += common/vp9_treecoder.h
+VP9_COMMON_SRCS-yes += common/vp9_loopfilter.c
+VP9_COMMON_SRCS-yes += common/vp9_loopfilter_filters.c
+VP9_COMMON_SRCS-yes += common/vp9_mvref_common.c
+VP9_COMMON_SRCS-yes += common/vp9_mvref_common.h
+VP9_COMMON_SRCS-yes += common/vp9_quant_common.c
+VP9_COMMON_SRCS-yes += common/vp9_reconinter.c
+VP9_COMMON_SRCS-yes += common/vp9_reconintra.c
+VP9_COMMON_SRCS-$(CONFIG_POSTPROC_VISUALIZER) += common/vp9_textblit.c
+VP9_COMMON_SRCS-yes += common/vp9_treecoder.c
+VP9_COMMON_SRCS-yes += common/vp9_common_data.c
+VP9_COMMON_SRCS-yes += common/vp9_common_data.h
+
+VP9_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/vp9_postproc_x86.h
+VP9_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/vp9_asm_stubs.c
+VP9_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/vp9_loopfilter_intrin_sse2.c
+VP9_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/vp9_postproc.h
+VP9_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/vp9_postproc.c
+VP9_COMMON_SRCS-$(HAVE_MMX) += common/x86/vp9_loopfilter_mmx.asm
+VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_subpixel_8t_ssse3.asm
+ifeq ($(CONFIG_VP9_POSTPROC),yes)
+VP9_COMMON_SRCS-$(HAVE_MMX) += common/x86/vp9_postproc_mmx.asm
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_postproc_sse2.asm
+endif
+
+ifeq ($(USE_X86INC),yes)
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_copy_sse2.asm
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_intrapred_sse2.asm
+VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_intrapred_ssse3.asm
+endif
+
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_idct_intrin_sse2.c
+
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_convolve_neon.c
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct16x16_neon.c
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_idct32x32_neon.c
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_convolve8_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_convolve8_avg_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_loopfilter_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_dc_only_idct_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct4x4_1_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct4x4_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct8x8_1_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct8x8_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct16x16_1_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct16x16_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_idct32x32_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_iht4x4_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_short_iht8x8_add_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_mb_lpf_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_copy_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_avg_neon$(ASM)
+
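+# Generate the vp9_rtcd.h runtime CPU dispatch header from the defs script.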
+$(eval $(call rtcd_h_template,vp9_rtcd,vp9/common/vp9_rtcd_defs.sh))
diff --git a/libvpx/vp9/vp9_cx_iface.c b/libvpx/vp9/vp9_cx_iface.c
new file mode 100644
index 0000000..48866d2
--- /dev/null
+++ b/libvpx/vp9/vp9_cx_iface.c
@@ -0,0 +1,1245 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx/vpx_codec.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx_version.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vpx/vp8cx.h"
+#include "vp9/encoder/vp9_firstpass.h"
+#include "vp9/common/vp9_onyx.h"
+#include "vp9/vp9_iface_common.h"
+#include <stdlib.h>
+#include <string.h>
+
+struct vp9_extracfg {
+ struct vpx_codec_pkt_list *pkt_list;
+ int cpu_used; /* available cpu percentage in 1/16ths */
+ unsigned int enable_auto_alt_ref; /* whether the encoder may use an alternate reference frame */
+ unsigned int noise_sensitivity;
+ unsigned int Sharpness;
+ unsigned int static_thresh;
+ unsigned int tile_columns;
+ unsigned int tile_rows;
+ unsigned int arnr_max_frames; /* alt_ref Noise Reduction Max Frame Count */
+ unsigned int arnr_strength; /* alt_ref Noise Reduction Strength */
+ unsigned int arnr_type; /* alt_ref filter type */
+ unsigned int experimental;
+ vp8e_tuning tuning;
+ unsigned int cq_level; /* constrained quality level */
+ unsigned int rc_max_intra_bitrate_pct;
+ unsigned int lossless;
+ unsigned int frame_parallel_decoding_mode;
+};
+
+struct extraconfig_map {
+ int usage;
+ struct vp9_extracfg cfg;
+};
+
+static const struct extraconfig_map extracfg_map[] = {
+ {
+ 0,
+ {
+ NULL,
+ 0, /* cpu_used */
+ 1, /* enable_auto_alt_ref */
+ 0, /* noise_sensitivity */
+ 0, /* Sharpness */
+ 0, /* static_thresh */
+ 0, /* tile_columns */
+ 0, /* tile_rows */
+ 7, /* arnr_max_frames */
+ 5, /* arnr_strength */
+ 3, /* arnr_type*/
+ 0, /* experimental mode */
+ 0, /* tuning*/
+ 10, /* cq_level */
+ 0, /* rc_max_intra_bitrate_pct */
+ 0, /* lossless */
+ 0, /* frame_parallel_decoding_mode */
+ }
+ }
+};
+
+struct vpx_codec_alg_priv {
+ vpx_codec_priv_t base;
+ vpx_codec_enc_cfg_t cfg;
+ struct vp9_extracfg vp8_cfg;
+ VP9_CONFIG oxcf;
+ VP9_PTR cpi;
+ unsigned char *cx_data;
+ unsigned int cx_data_sz;
+ unsigned char *pending_cx_data;
+ unsigned int pending_cx_data_sz;
+ int pending_frame_count;
+ uint32_t pending_frame_sizes[8];
+ uint32_t pending_frame_magnitude;
+ vpx_image_t preview_img;
+ vp8_postproc_cfg_t preview_ppcfg;
+ vpx_codec_pkt_list_decl(64) pkt_list; // changed to accommodate the maximum number of lagged frames allowed
+ unsigned int fixed_kf_cntr;
+};
+
+static const VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) {
+ switch (frame) {
+ case VP8_LAST_FRAME:
+ return VP9_LAST_FLAG;
+ case VP8_GOLD_FRAME:
+ return VP9_GOLD_FLAG;
+ case VP8_ALTR_FRAME:
+ return VP9_ALT_FLAG;
+ }
+ assert(!"Invalid Reference Frame");
+ return VP9_LAST_FLAG;
+}
+
+static vpx_codec_err_t
+update_error_state(vpx_codec_alg_priv_t *ctx,
+ const struct vpx_internal_error_info *error) {
+ vpx_codec_err_t res;
+
+ if ((res = error->error_code))
+ ctx->base.err_detail = error->has_detail
+ ? error->detail
+ : NULL;
+
+ return res;
+}
+
+
+#undef ERROR
+#define ERROR(str) do {\
+ ctx->base.err_detail = str;\
+ return VPX_CODEC_INVALID_PARAM;\
+ } while(0)
+
+#define RANGE_CHECK(p,memb,lo,hi) do {\
+ if(!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
+ ERROR(#memb " out of range ["#lo".."#hi"]");\
+ } while(0)
+
+#define RANGE_CHECK_HI(p,memb,hi) do {\
+ if(!((p)->memb <= (hi))) \
+ ERROR(#memb " out of range [.."#hi"]");\
+ } while(0)
+
+#define RANGE_CHECK_LO(p,memb,lo) do {\
+ if(!((p)->memb >= (lo))) \
+ ERROR(#memb " out of range ["#lo"..]");\
+ } while(0)
+
+#define RANGE_CHECK_BOOL(p,memb) do {\
+ if(!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean");\
+ } while(0)
+
+static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
+ const vpx_codec_enc_cfg_t *cfg,
+ const struct vp9_extracfg *vp8_cfg) {
+ RANGE_CHECK(cfg, g_w, 1, 65535); /* 16 bits available */
+ RANGE_CHECK(cfg, g_h, 1, 65535); /* 16 bits available */
+ RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
+ RANGE_CHECK(cfg, g_timebase.num, 1, cfg->g_timebase.den);
+ RANGE_CHECK_HI(cfg, g_profile, 3);
+
+ RANGE_CHECK_HI(cfg, rc_max_quantizer, 63);
+ RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer);
+ RANGE_CHECK_BOOL(vp8_cfg, lossless);
+ if (vp8_cfg->lossless) {
+ RANGE_CHECK_HI(cfg, rc_max_quantizer, 0);
+ RANGE_CHECK_HI(cfg, rc_min_quantizer, 0);
+ }
+
+ RANGE_CHECK_HI(cfg, g_threads, 64);
+ RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS);
+ RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
+ RANGE_CHECK_HI(cfg, rc_undershoot_pct, 1000);
+ RANGE_CHECK_HI(cfg, rc_overshoot_pct, 1000);
+ RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
+ RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
+ // RANGE_CHECK_BOOL(cfg, g_delete_firstpassfile);
+ RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
+ RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
+ RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
+ RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100);
+ RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
+
+ RANGE_CHECK(cfg, ss_number_layers, 1,
+ VPX_SS_MAX_LAYERS); /*Spatial layers max */
+ /* VP9 does not support a lower bound on the keyframe interval in
+ * automatic keyframe placement mode.
+ */
+ if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist
+ && cfg->kf_min_dist > 0)
+ ERROR("kf_min_dist not supported in auto mode, use 0 "
+ "or kf_max_dist instead.");
+
+ RANGE_CHECK_BOOL(vp8_cfg, enable_auto_alt_ref);
+ RANGE_CHECK(vp8_cfg, cpu_used, -16, 16);
+
+ RANGE_CHECK_HI(vp8_cfg, noise_sensitivity, 6);
+
+ RANGE_CHECK(vp8_cfg, tile_columns, 0, 6);
+ RANGE_CHECK(vp8_cfg, tile_rows, 0, 2);
+ RANGE_CHECK_HI(vp8_cfg, Sharpness, 7);
+ RANGE_CHECK(vp8_cfg, arnr_max_frames, 0, 15);
+ RANGE_CHECK_HI(vp8_cfg, arnr_strength, 6);
+ RANGE_CHECK(vp8_cfg, arnr_type, 1, 3);
+ RANGE_CHECK(vp8_cfg, cq_level, 0, 63);
+
+ if (cfg->g_pass == VPX_RC_LAST_PASS) {
+ size_t packet_sz = sizeof(FIRSTPASS_STATS);
+ int n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz);
+ FIRSTPASS_STATS *stats;
+
+ if (!cfg->rc_twopass_stats_in.buf)
+ ERROR("rc_twopass_stats_in.buf not set.");
+
+ if (cfg->rc_twopass_stats_in.sz % packet_sz)
+ ERROR("rc_twopass_stats_in.sz indicates truncated packet.");
+
+ if (cfg->rc_twopass_stats_in.sz < 2 * packet_sz)
+ ERROR("rc_twopass_stats_in requires at least two packets.");
+
+ stats = (void *)((char *)cfg->rc_twopass_stats_in.buf
+ + (n_packets - 1) * packet_sz);
+
+ if ((int)(stats->count + 0.5) != n_packets - 1)
+ ERROR("rc_twopass_stats_in missing EOS stats packet");
+ }
+
+ return VPX_CODEC_OK;
+}
+
+
+static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
+ const vpx_image_t *img) {
+ switch (img->fmt) {
+ case VPX_IMG_FMT_YV12:
+ case VPX_IMG_FMT_I420:
+ case VPX_IMG_FMT_I422:
+ case VPX_IMG_FMT_I444:
+ break;
+ default:
+ ERROR("Invalid image format. Only YV12, I420, I422, I444 images are "
+ "supported.");
+ }
+
+ if ((img->d_w != ctx->cfg.g_w) || (img->d_h != ctx->cfg.g_h))
+ ERROR("Image size must match encoder init configuration size");
+
+ return VPX_CODEC_OK;
+}
+
+
+static vpx_codec_err_t set_vp9e_config(VP9_CONFIG *oxcf,
+ vpx_codec_enc_cfg_t cfg,
+ struct vp9_extracfg vp8_cfg) {
+ oxcf->version = cfg.g_profile | (vp8_cfg.experimental ? 0x4 : 0);
+ oxcf->width = cfg.g_w;
+ oxcf->height = cfg.g_h;
+ /* If the timebase implies an implausible frame rate (> 180 fps), assume 30. */
+ oxcf->framerate = (double)(cfg.g_timebase.den) / (double)(cfg.g_timebase.num);
+
+ if (oxcf->framerate > 180) {
+ oxcf->framerate = 30;
+ }
+
+ switch (cfg.g_pass) {
+ case VPX_RC_ONE_PASS:
+ oxcf->Mode = MODE_BESTQUALITY;
+ break;
+ case VPX_RC_FIRST_PASS:
+ oxcf->Mode = MODE_FIRSTPASS;
+ break;
+ case VPX_RC_LAST_PASS:
+ oxcf->Mode = MODE_SECONDPASS_BEST;
+ break;
+ }
+
+ if (cfg.g_pass == VPX_RC_FIRST_PASS) {
+ oxcf->allow_lag = 0;
+ oxcf->lag_in_frames = 0;
+ } else {
+ oxcf->allow_lag = (cfg.g_lag_in_frames) > 0;
+ oxcf->lag_in_frames = cfg.g_lag_in_frames;
+ }
+
+ // Only VBR is supported for now.
+ // CBR support has been deprecated for the experimental phase.
+ // CQ mode is not yet tested.
+ oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK;
+ /*
+ if (cfg.rc_end_usage == VPX_CQ)
+ oxcf->end_usage = USAGE_CONSTRAINED_QUALITY;
+ */
+ if (cfg.rc_end_usage == VPX_Q)
+ oxcf->end_usage = USAGE_CONSTANT_QUALITY;
+
+ oxcf->target_bandwidth = cfg.rc_target_bitrate;
+ oxcf->rc_max_intra_bitrate_pct = vp8_cfg.rc_max_intra_bitrate_pct;
+
+ oxcf->best_allowed_q = cfg.rc_min_quantizer;
+ oxcf->worst_allowed_q = cfg.rc_max_quantizer;
+ oxcf->cq_level = vp8_cfg.cq_level;
+ oxcf->fixed_q = -1;
+
+ oxcf->under_shoot_pct = cfg.rc_undershoot_pct;
+ oxcf->over_shoot_pct = cfg.rc_overshoot_pct;
+
+ oxcf->maximum_buffer_size = cfg.rc_buf_sz;
+ oxcf->starting_buffer_level = cfg.rc_buf_initial_sz;
+ oxcf->optimal_buffer_level = cfg.rc_buf_optimal_sz;
+
+ oxcf->two_pass_vbrbias = cfg.rc_2pass_vbr_bias_pct;
+ oxcf->two_pass_vbrmin_section = cfg.rc_2pass_vbr_minsection_pct;
+ oxcf->two_pass_vbrmax_section = cfg.rc_2pass_vbr_maxsection_pct;
+
+ oxcf->auto_key = cfg.kf_mode == VPX_KF_AUTO
+ && cfg.kf_min_dist != cfg.kf_max_dist;
+ // oxcf->kf_min_dist = cfg.kf_min_dis;
+ oxcf->key_freq = cfg.kf_max_dist;
+
+ // oxcf->delete_first_pass_file = cfg.g_delete_firstpassfile;
+ // strcpy(oxcf->first_pass_file, cfg.g_firstpass_file);
+
+ oxcf->cpu_used = vp8_cfg.cpu_used;
+ oxcf->encode_breakout = vp8_cfg.static_thresh;
+ oxcf->play_alternate = vp8_cfg.enable_auto_alt_ref;
+ oxcf->noise_sensitivity = vp8_cfg.noise_sensitivity;
+ oxcf->Sharpness = vp8_cfg.Sharpness;
+
+ oxcf->two_pass_stats_in = cfg.rc_twopass_stats_in;
+ oxcf->output_pkt_list = vp8_cfg.pkt_list;
+
+ oxcf->arnr_max_frames = vp8_cfg.arnr_max_frames;
+ oxcf->arnr_strength = vp8_cfg.arnr_strength;
+ oxcf->arnr_type = vp8_cfg.arnr_type;
+
+ oxcf->tuning = vp8_cfg.tuning;
+
+ oxcf->tile_columns = vp8_cfg.tile_columns;
+ oxcf->tile_rows = vp8_cfg.tile_rows;
+
+ oxcf->lossless = vp8_cfg.lossless;
+
+ oxcf->error_resilient_mode = cfg.g_error_resilient;
+ oxcf->frame_parallel_decoding_mode = vp8_cfg.frame_parallel_decoding_mode;
+
+ oxcf->ss_number_layers = cfg.ss_number_layers;
+ /*
+ printf("Current VP9 Settings: \n");
+ printf("target_bandwidth: %d\n", oxcf->target_bandwidth);
+ printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity);
+ printf("Sharpness: %d\n", oxcf->Sharpness);
+ printf("cpu_used: %d\n", oxcf->cpu_used);
+ printf("Mode: %d\n", oxcf->Mode);
+ // printf("delete_first_pass_file: %d\n", oxcf->delete_first_pass_file);
+ printf("auto_key: %d\n", oxcf->auto_key);
+ printf("key_freq: %d\n", oxcf->key_freq);
+ printf("end_usage: %d\n", oxcf->end_usage);
+ printf("under_shoot_pct: %d\n", oxcf->under_shoot_pct);
+ printf("over_shoot_pct: %d\n", oxcf->over_shoot_pct);
+ printf("starting_buffer_level: %d\n", oxcf->starting_buffer_level);
+ printf("optimal_buffer_level: %d\n", oxcf->optimal_buffer_level);
+ printf("maximum_buffer_size: %d\n", oxcf->maximum_buffer_size);
+ printf("fixed_q: %d\n", oxcf->fixed_q);
+ printf("worst_allowed_q: %d\n", oxcf->worst_allowed_q);
+ printf("best_allowed_q: %d\n", oxcf->best_allowed_q);
+ printf("two_pass_vbrbias: %d\n", oxcf->two_pass_vbrbias);
+ printf("two_pass_vbrmin_section: %d\n", oxcf->two_pass_vbrmin_section);
+ printf("two_pass_vbrmax_section: %d\n", oxcf->two_pass_vbrmax_section);
+ printf("allow_lag: %d\n", oxcf->allow_lag);
+ printf("lag_in_frames: %d\n", oxcf->lag_in_frames);
+ printf("play_alternate: %d\n", oxcf->play_alternate);
+ printf("Version: %d\n", oxcf->Version);
+ printf("encode_breakout: %d\n", oxcf->encode_breakout);
+ printf("error resilient: %d\n", oxcf->error_resilient_mode);
+ printf("frame parallel detokenization: %d\n",
+ oxcf->frame_parallel_decoding_mode);
+ */
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp9e_set_config(vpx_codec_alg_priv_t *ctx,
+ const vpx_codec_enc_cfg_t *cfg) {
+ vpx_codec_err_t res;
+
+ if ((cfg->g_w != ctx->cfg.g_w) || (cfg->g_h != ctx->cfg.g_h))
+ ERROR("Cannot change width or height after initialization");
+
+ /* Prevent increasing lag_in_frames. This check is stricter than it needs
+ * to be -- the limit is not increasing past the first lag_in_frames
+ * value, but we don't track the initial config, only the last successful
+ * config.
+ */
+ if ((cfg->g_lag_in_frames > ctx->cfg.g_lag_in_frames))
+ ERROR("Cannot increase lag_in_frames");
+
+ res = validate_config(ctx, cfg, &ctx->vp8_cfg);
+
+ if (!res) {
+ ctx->cfg = *cfg;
+ set_vp9e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
+ vp9_change_config(ctx->cpi, &ctx->oxcf);
+ }
+
+ return res;
+}
+
+
+int vp9_reverse_trans(int q);
+
+
+static vpx_codec_err_t get_param(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+ void *arg = va_arg(args, void *);
+
+#define MAP(id, var) case id: *(RECAST(id, arg)) = var; break
+
+ if (!arg)
+ return VPX_CODEC_INVALID_PARAM;
+
+ switch (ctrl_id) {
+ MAP(VP8E_GET_LAST_QUANTIZER, vp9_get_quantizer(ctx->cpi));
+ MAP(VP8E_GET_LAST_QUANTIZER_64,
+ vp9_reverse_trans(vp9_get_quantizer(ctx->cpi)));
+ }
+
+ return VPX_CODEC_OK;
+#undef MAP
+}
+
+
+static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ struct vp9_extracfg xcfg = ctx->vp8_cfg;
+
+#define MAP(id, var) case id: var = CAST(id, args); break;
+
+ switch (ctrl_id) {
+ MAP(VP8E_SET_CPUUSED, xcfg.cpu_used);
+ MAP(VP8E_SET_ENABLEAUTOALTREF, xcfg.enable_auto_alt_ref);
+ MAP(VP8E_SET_NOISE_SENSITIVITY, xcfg.noise_sensitivity);
+ MAP(VP8E_SET_SHARPNESS, xcfg.Sharpness);
+ MAP(VP8E_SET_STATIC_THRESHOLD, xcfg.static_thresh);
+ MAP(VP9E_SET_TILE_COLUMNS, xcfg.tile_columns);
+ MAP(VP9E_SET_TILE_ROWS, xcfg.tile_rows);
+ MAP(VP8E_SET_ARNR_MAXFRAMES, xcfg.arnr_max_frames);
+ MAP(VP8E_SET_ARNR_STRENGTH, xcfg.arnr_strength);
+ MAP(VP8E_SET_ARNR_TYPE, xcfg.arnr_type);
+ MAP(VP8E_SET_TUNING, xcfg.tuning);
+ MAP(VP8E_SET_CQ_LEVEL, xcfg.cq_level);
+ MAP(VP9E_SET_MAX_Q, ctx->cfg.rc_max_quantizer);
+ MAP(VP9E_SET_MIN_Q, ctx->cfg.rc_min_quantizer);
+ MAP(VP8E_SET_MAX_INTRA_BITRATE_PCT, xcfg.rc_max_intra_bitrate_pct);
+ MAP(VP9E_SET_LOSSLESS, xcfg.lossless);
+ MAP(VP9E_SET_FRAME_PARALLEL_DECODING, xcfg.frame_parallel_decoding_mode);
+ }
+
+ res = validate_config(ctx, &ctx->cfg, &xcfg);
+
+ if (!res) {
+ ctx->vp8_cfg = xcfg;
+ set_vp9e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
+ vp9_change_config(ctx->cpi, &ctx->oxcf);
+ }
+
+ return res;
+#undef MAP
+}
+
+
+static vpx_codec_err_t vp9e_common_init(vpx_codec_ctx_t *ctx,
+ int experimental) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ struct vpx_codec_alg_priv *priv;
+ vpx_codec_enc_cfg_t *cfg;
+ unsigned int i;
+
+ VP9_PTR optr;
+
+ if (!ctx->priv) {
+ priv = calloc(1, sizeof(struct vpx_codec_alg_priv));
+
+ if (!priv) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ ctx->priv = &priv->base;
+ ctx->priv->sz = sizeof(*ctx->priv);
+ ctx->priv->iface = ctx->iface;
+ ctx->priv->alg_priv = priv;
+ ctx->priv->init_flags = ctx->init_flags;
+ ctx->priv->enc.total_encoders = 1;
+
+ if (ctx->config.enc) {
+ /* Update the reference to the config structure to an
+ * internal copy.
+ */
+ ctx->priv->alg_priv->cfg = *ctx->config.enc;
+ ctx->config.enc = &ctx->priv->alg_priv->cfg;
+ }
+
+ cfg = &ctx->priv->alg_priv->cfg;
+
+ /* Select the extra vp9 configuration table based on the current
+ * usage value. If the current usage value isn't found, use the
+ * values for usage case 0.
+ */
+ for (i = 0;
+ extracfg_map[i].usage && extracfg_map[i].usage != cfg->g_usage;
+ i++);
+
+ priv->vp8_cfg = extracfg_map[i].cfg;
+ priv->vp8_cfg.pkt_list = &priv->pkt_list.head;
+ priv->vp8_cfg.experimental = experimental;
+
+ // TODO(agrange) Check the limits set on this buffer, or the check that is
+ // applied in vp9e_encode.
+ priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 8;
+// priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2;
+
+ if (priv->cx_data_sz < 4096) priv->cx_data_sz = 4096;
+
+ priv->cx_data = malloc(priv->cx_data_sz);
+
+ if (!priv->cx_data) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ vp9_initialize_enc();
+
+ res = validate_config(priv, &priv->cfg, &priv->vp8_cfg);
+
+ if (!res) {
+ set_vp9e_config(&ctx->priv->alg_priv->oxcf,
+ ctx->priv->alg_priv->cfg,
+ ctx->priv->alg_priv->vp8_cfg);
+ optr = vp9_create_compressor(&ctx->priv->alg_priv->oxcf);
+
+ if (!optr)
+ res = VPX_CODEC_MEM_ERROR;
+ else
+ ctx->priv->alg_priv->cpi = optr;
+ }
+ }
+
+ return res;
+}
+
+
+static vpx_codec_err_t vp9e_init(vpx_codec_ctx_t *ctx,
+ vpx_codec_priv_enc_mr_cfg_t *data) {
+ return vp9e_common_init(ctx, 0);
+}
+
+
+#if CONFIG_EXPERIMENTAL
+static vpx_codec_err_t vp9e_exp_init(vpx_codec_ctx_t *ctx,
+ vpx_codec_priv_enc_mr_cfg_t *data) {
+ return vp9e_common_init(ctx, 1);
+}
+#endif
+
+
+static vpx_codec_err_t vp9e_destroy(vpx_codec_alg_priv_t *ctx) {
+
+ free(ctx->cx_data);
+ vp9_remove_compressor(&ctx->cpi);
+ free(ctx);
+ return VPX_CODEC_OK;
+}
+
+static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
+ unsigned long duration,
+ unsigned long deadline) {
+ unsigned int new_qc;
+
+ /* Use best quality mode if no deadline is given. */
+ if (deadline)
+ new_qc = MODE_GOODQUALITY;
+ else
+ new_qc = MODE_BESTQUALITY;
+
+ if (ctx->cfg.g_pass == VPX_RC_FIRST_PASS)
+ new_qc = MODE_FIRSTPASS;
+ else if (ctx->cfg.g_pass == VPX_RC_LAST_PASS)
+ new_qc = (new_qc == MODE_BESTQUALITY)
+ ? MODE_SECONDPASS_BEST
+ : MODE_SECONDPASS;
+
+ if (ctx->oxcf.Mode != new_qc) {
+ ctx->oxcf.Mode = new_qc;
+ vp9_change_config(ctx->cpi, &ctx->oxcf);
+ }
+}
+
+
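+/*
+ * A VP9 superframe ends with an index: a marker byte (0b110 in the top
+ * bits, a 2-bit size magnitude, and a 3-bit frame count minus one), the
+ * per-frame sizes in little-endian order using magnitude + 1 bytes each,
+ * and the marker byte repeated. This appends that index after the pending
+ * invisible frames.
+ */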
+static int write_superframe_index(vpx_codec_alg_priv_t *ctx) {
+ uint8_t marker = 0xc0;
+ int mag, mask, index_sz;
+
+ assert(ctx->pending_frame_count);
+ assert(ctx->pending_frame_count <= 8);
+
+ /* Add the number of frames to the marker byte */
+ marker |= ctx->pending_frame_count - 1;
+
+ /* Choose the magnitude */
+ for (mag = 0, mask = 0xff; mag < 4; mag++) {
+ if (ctx->pending_frame_magnitude < mask)
+ break;
+ mask <<= 8;
+ mask |= 0xff;
+ }
+ marker |= mag << 3;
+
+ /* Write the index */
+ index_sz = 2 + (mag + 1) * ctx->pending_frame_count;
+ if (ctx->pending_cx_data_sz + index_sz < ctx->cx_data_sz) {
+ uint8_t *x = ctx->pending_cx_data + ctx->pending_cx_data_sz;
+ int i, j;
+
+ *x++ = marker;
+ for (i = 0; i < ctx->pending_frame_count; i++) {
+ int this_sz = ctx->pending_frame_sizes[i];
+
+ for (j = 0; j <= mag; j++) {
+ *x++ = this_sz & 0xff;
+ this_sz >>= 8;
+ }
+ }
+ *x++ = marker;
+ ctx->pending_cx_data_sz += index_sz;
+ }
+ return index_sz;
+}
+
+static vpx_codec_err_t vp9e_encode(vpx_codec_alg_priv_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned long duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned long deadline) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ if (img)
+ res = validate_img(ctx, img);
+
+ pick_quickcompress_mode(ctx, duration, deadline);
+ vpx_codec_pkt_list_init(&ctx->pkt_list);
+
+ /* Handle Flags */
+ if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF))
+ || ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
+ ctx->base.err_detail = "Conflicting flags.";
+ return VPX_CODEC_INVALID_PARAM;
+ }
+
+ if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF
+ | VP8_EFLAG_NO_REF_ARF)) {
+ int ref = 7;
+
+ if (flags & VP8_EFLAG_NO_REF_LAST)
+ ref ^= VP9_LAST_FLAG;
+
+ if (flags & VP8_EFLAG_NO_REF_GF)
+ ref ^= VP9_GOLD_FLAG;
+
+ if (flags & VP8_EFLAG_NO_REF_ARF)
+ ref ^= VP9_ALT_FLAG;
+
+ vp9_use_as_reference(ctx->cpi, ref);
+ }
+
+ if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF
+ | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF
+ | VP8_EFLAG_FORCE_ARF)) {
+ int upd = 7;
+
+ if (flags & VP8_EFLAG_NO_UPD_LAST)
+ upd ^= VP9_LAST_FLAG;
+
+ if (flags & VP8_EFLAG_NO_UPD_GF)
+ upd ^= VP9_GOLD_FLAG;
+
+ if (flags & VP8_EFLAG_NO_UPD_ARF)
+ upd ^= VP9_ALT_FLAG;
+
+ vp9_update_reference(ctx->cpi, upd);
+ }
+
+ if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
+ vp9_update_entropy(ctx->cpi, 0);
+ }
+
+ /* Handle fixed keyframe intervals */
+ if (ctx->cfg.kf_mode == VPX_KF_AUTO
+ && ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) {
+ if (++ctx->fixed_kf_cntr > ctx->cfg.kf_min_dist) {
+ flags |= VPX_EFLAG_FORCE_KF;
+ ctx->fixed_kf_cntr = 1;
+ }
+ }
+
+ /* Proceed with encoding only if no error occurred and the encoder instance exists */
+ if (!res && ctx->cpi) {
+ unsigned int lib_flags;
+ YV12_BUFFER_CONFIG sd;
+ int64_t dst_time_stamp, dst_end_time_stamp;
+ unsigned long size, cx_data_sz;
+ unsigned char *cx_data;
+
+ /* Set up internal flags */
+ if (ctx->base.init_flags & VPX_CODEC_USE_PSNR)
+ ((VP9_COMP *)ctx->cpi)->b_calculate_psnr = 1;
+
+ // if (ctx->base.init_flags & VPX_CODEC_USE_OUTPUT_PARTITION)
+ // ((VP9_COMP *)ctx->cpi)->output_partition = 1;
+
+ /* Convert API flags to internal codec lib flags */
+ lib_flags = (flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
+
+ /* The codec library uses 10,000,000 ticks/second as its timestamp unit */
+ dst_time_stamp = pts * 10000000 * ctx->cfg.g_timebase.num / ctx->cfg.g_timebase.den;
+ dst_end_time_stamp = (pts + duration) * 10000000 * ctx->cfg.g_timebase.num / ctx->cfg.g_timebase.den;
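+ /* Worked example (illustrative): with g_timebase = 1/30 and pts == 30,
+  * dst_time_stamp == 30 * 10000000 * 1 / 30 == 10000000 ticks, i.e. exactly
+  * one second on the 10 MHz internal clock. */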
+
+ if (img != NULL) {
+ res = image2yuvconfig(img, &sd);
+
+ if (vp9_receive_raw_frame(ctx->cpi, lib_flags,
+ &sd, dst_time_stamp, dst_end_time_stamp)) {
+ VP9_COMP *cpi = (VP9_COMP *)ctx->cpi;
+ res = update_error_state(ctx, &cpi->common.error);
+ }
+ }
+
+ cx_data = ctx->cx_data;
+ cx_data_sz = ctx->cx_data_sz;
+ lib_flags = 0;
+
+ /* Any pending invisible frames? */
+ if (ctx->pending_cx_data) {
+ memmove(cx_data, ctx->pending_cx_data, ctx->pending_cx_data_sz);
+ ctx->pending_cx_data = cx_data;
+ cx_data += ctx->pending_cx_data_sz;
+ cx_data_sz -= ctx->pending_cx_data_sz;
+
+ /* TODO: this is a minimal check, the underlying codec doesn't respect
+ * the buffer size anyway.
+ */
+ if (cx_data_sz < ctx->cx_data_sz / 2) {
+ ctx->base.err_detail = "Compressed data buffer too small";
+ return VPX_CODEC_ERROR;
+ }
+ }
+
+ while (cx_data_sz >= ctx->cx_data_sz / 2 &&
+ -1 != vp9_get_compressed_data(ctx->cpi, &lib_flags, &size,
+ cx_data, &dst_time_stamp,
+ &dst_end_time_stamp, !img)) {
+ if (size) {
+ vpx_codec_pts_t round, delta;
+ vpx_codec_cx_pkt_t pkt;
+ VP9_COMP *cpi = (VP9_COMP *)ctx->cpi;
+
+ /* Pack invisible frames with the next visible frame */
+ if (!cpi->common.show_frame) {
+ if (!ctx->pending_cx_data)
+ ctx->pending_cx_data = cx_data;
+ ctx->pending_cx_data_sz += size;
+ ctx->pending_frame_sizes[ctx->pending_frame_count++] = size;
+ ctx->pending_frame_magnitude |= size;
+ cx_data += size;
+ cx_data_sz -= size;
+ continue;
+ }
+
+ /* Add the frame packet to the list of returned packets. */
+ round = 1000000 * ctx->cfg.g_timebase.num / 2 - 1;
+ delta = (dst_end_time_stamp - dst_time_stamp);
+ pkt.kind = VPX_CODEC_CX_FRAME_PKT;
+ pkt.data.frame.pts =
+ (dst_time_stamp * ctx->cfg.g_timebase.den + round)
+ / ctx->cfg.g_timebase.num / 10000000;
+ pkt.data.frame.duration = (unsigned long)
+ ((delta * ctx->cfg.g_timebase.den + round)
+ / ctx->cfg.g_timebase.num / 10000000);
+ pkt.data.frame.flags = lib_flags << 16;
+
+ if (lib_flags & FRAMEFLAGS_KEY)
+ pkt.data.frame.flags |= VPX_FRAME_IS_KEY;
+
+ if (!cpi->common.show_frame) {
+ pkt.data.frame.flags |= VPX_FRAME_IS_INVISIBLE;
+
+ // This timestamp should be as close as possible to the
+ // prior PTS so that if a decoder uses pts to schedule when
+ // to do this, we start right after last frame was decoded.
+ // Invisible frames have no duration.
+ pkt.data.frame.pts = ((cpi->last_time_stamp_seen
+ * ctx->cfg.g_timebase.den + round)
+ / ctx->cfg.g_timebase.num / 10000000) + 1;
+ pkt.data.frame.duration = 0;
+ }
+
+ if (cpi->droppable)
+ pkt.data.frame.flags |= VPX_FRAME_IS_DROPPABLE;
+
+ /*if (cpi->output_partition)
+ {
+ int i;
+ const int num_partitions = 1;
+
+ pkt.data.frame.flags |= VPX_FRAME_IS_FRAGMENT;
+
+ for (i = 0; i < num_partitions; ++i)
+ {
+ pkt.data.frame.buf = cx_data;
+ pkt.data.frame.sz = cpi->partition_sz[i];
+ pkt.data.frame.partition_id = i;
+ // don't set the fragment bit for the last partition
+ if (i == (num_partitions - 1))
+ pkt.data.frame.flags &= ~VPX_FRAME_IS_FRAGMENT;
+ vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
+ cx_data += cpi->partition_sz[i];
+ cx_data_sz -= cpi->partition_sz[i];
+ }
+ }
+ else*/
+ {
+ if (ctx->pending_cx_data) {
+ ctx->pending_frame_sizes[ctx->pending_frame_count++] = size;
+ ctx->pending_frame_magnitude |= size;
+ ctx->pending_cx_data_sz += size;
+ size += write_superframe_index(ctx);
+ pkt.data.frame.buf = ctx->pending_cx_data;
+ pkt.data.frame.sz = ctx->pending_cx_data_sz;
+ ctx->pending_cx_data = NULL;
+ ctx->pending_cx_data_sz = 0;
+ ctx->pending_frame_count = 0;
+ ctx->pending_frame_magnitude = 0;
+ } else {
+ pkt.data.frame.buf = cx_data;
+ pkt.data.frame.sz = size;
+ }
+ pkt.data.frame.partition_id = -1;
+ vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
+ cx_data += size;
+ cx_data_sz -= size;
+ }
+
+ // printf("timestamp: %lld, duration: %d\n", pkt->data.frame.pts, pkt->data.frame.duration);
+ }
+ }
+ }
+
+ return res;
+}
+
+
+static const vpx_codec_cx_pkt_t *vp9e_get_cxdata(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter) {
+ return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter);
+}
+
+static vpx_codec_err_t vp9e_set_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+ vp9_set_reference_enc(ctx->cpi, ref_frame_to_vp9_reframe(frame->frame_type),
+ &sd);
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+
+}
+
+static vpx_codec_err_t vp9e_copy_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+ vp9_copy_reference_enc(ctx->cpi,
+ ref_frame_to_vp9_reframe(frame->frame_type), &sd);
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);
+
+ if (data) {
+ YV12_BUFFER_CONFIG* fb;
+
+ vp9_get_reference_enc(ctx->cpi, data->idx, &fb);
+ yuvconfig2image(&data->img, fb, NULL);
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp9e_set_previewpp(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+#if CONFIG_VP9_POSTPROC
+ vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *);
+ (void)ctr_id;
+
+ if (data) {
+ ctx->preview_ppcfg = *((vp8_postproc_cfg_t *)data);
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+#else
+ (void)ctx;
+ (void)ctr_id;
+ (void)args;
+ return VPX_CODEC_INCAPABLE;
+#endif
+}
+
+
+static vpx_image_t *vp9e_get_preview(vpx_codec_alg_priv_t *ctx) {
+
+ YV12_BUFFER_CONFIG sd;
+ vp9_ppflags_t flags = {0};
+
+ if (ctx->preview_ppcfg.post_proc_flag) {
+ flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag;
+ flags.deblocking_level = ctx->preview_ppcfg.deblocking_level;
+ flags.noise_level = ctx->preview_ppcfg.noise_level;
+ }
+
+ if (0 == vp9_get_preview_raw_frame(ctx->cpi, &sd, &flags)) {
+ yuvconfig2image(&ctx->preview_img, &sd, NULL);
+ return &ctx->preview_img;
+ } else
+ return NULL;
+}
+
+static vpx_codec_err_t vp9e_update_entropy(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ int update = va_arg(args, int);
+ vp9_update_entropy(ctx->cpi, update);
+ return VPX_CODEC_OK;
+
+}
+
+static vpx_codec_err_t vp9e_update_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ int update = va_arg(args, int);
+ vp9_update_reference(ctx->cpi, update);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp9e_use_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ int reference_flag = va_arg(args, int);
+ vp9_use_as_reference(ctx->cpi, reference_flag);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp9e_set_roi_map(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vpx_roi_map_t *data = va_arg(args, vpx_roi_map_t *);
+
+ if (data) {
+ vpx_roi_map_t *roi = (vpx_roi_map_t *)data;
+
+ if (!vp9_set_roimap(ctx->cpi, roi->roi_map, roi->rows, roi->cols,
+ roi->delta_q, roi->delta_lf, roi->static_threshold))
+ return VPX_CODEC_OK;
+ else
+ return VPX_CODEC_INVALID_PARAM;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+
+static vpx_codec_err_t vp9e_set_activemap(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vpx_active_map_t *data = va_arg(args, vpx_active_map_t *);
+
+ if (data) {
+
+ vpx_active_map_t *map = (vpx_active_map_t *)data;
+
+ if (!vp9_set_active_map(ctx->cpi, map->active_map, map->rows, map->cols))
+ return VPX_CODEC_OK;
+ else
+ return VPX_CODEC_INVALID_PARAM;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+static vpx_codec_err_t vp9e_set_scalemode(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+
+ vpx_scaling_mode_t *data = va_arg(args, vpx_scaling_mode_t *);
+
+ if (data) {
+ int res;
+ vpx_scaling_mode_t scalemode = *(vpx_scaling_mode_t *)data;
+ res = vp9_set_internal_size(ctx->cpi, scalemode.h_scaling_mode,
+ scalemode.v_scaling_mode);
+
+ if (!res) {
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+static vpx_codec_err_t vp9e_set_width(vpx_codec_alg_priv_t *ctx, int ctr_id,
+ va_list args) {
+ unsigned int *data = va_arg(args, unsigned int *);
+ if (data) {
+ int res;
+ res = vp9_set_size_literal(ctx->cpi, *data, 0);
+ if (!res) {
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp9e_set_height(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ unsigned int *data = va_arg(args, unsigned int *);
+
+ if (data) {
+ int res;
+ res = vp9_set_size_literal(ctx->cpi, 0, *data);
+
+ if (!res) {
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp9e_set_layer(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ unsigned int *data = va_arg(args, unsigned int *);
+
+ if (data) {
+ const int res = vp9_switch_layer(ctx->cpi, *data);
+
+ if (!res) {
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp9e_set_svc(vpx_codec_alg_priv_t *ctx, int ctr_id,
+ va_list args) {
+ int data = va_arg(args, int);
+ vp9_set_svc(ctx->cpi, data);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_ctrl_fn_map_t vp9e_ctf_maps[] = {
+ {VP8_SET_REFERENCE, vp9e_set_reference},
+ {VP8_COPY_REFERENCE, vp9e_copy_reference},
+ {VP8_SET_POSTPROC, vp9e_set_previewpp},
+ {VP8E_UPD_ENTROPY, vp9e_update_entropy},
+ {VP8E_UPD_REFERENCE, vp9e_update_reference},
+ {VP8E_USE_REFERENCE, vp9e_use_reference},
+ {VP8E_SET_ROI_MAP, vp9e_set_roi_map},
+ {VP8E_SET_ACTIVEMAP, vp9e_set_activemap},
+ {VP8E_SET_SCALEMODE, vp9e_set_scalemode},
+ {VP8E_SET_CPUUSED, set_param},
+ {VP8E_SET_NOISE_SENSITIVITY, set_param},
+ {VP8E_SET_ENABLEAUTOALTREF, set_param},
+ {VP8E_SET_SHARPNESS, set_param},
+ {VP8E_SET_STATIC_THRESHOLD, set_param},
+ {VP9E_SET_TILE_COLUMNS, set_param},
+ {VP9E_SET_TILE_ROWS, set_param},
+ {VP8E_GET_LAST_QUANTIZER, get_param},
+ {VP8E_GET_LAST_QUANTIZER_64, get_param},
+ {VP8E_SET_ARNR_MAXFRAMES, set_param},
+ {VP8E_SET_ARNR_STRENGTH, set_param},
+ {VP8E_SET_ARNR_TYPE, set_param},
+ {VP8E_SET_TUNING, set_param},
+ {VP8E_SET_CQ_LEVEL, set_param},
+ {VP9E_SET_MAX_Q, set_param},
+ {VP9E_SET_MIN_Q, set_param},
+ {VP8E_SET_MAX_INTRA_BITRATE_PCT, set_param},
+ {VP9E_SET_LOSSLESS, set_param},
+ {VP9E_SET_FRAME_PARALLEL_DECODING, set_param},
+ {VP9_GET_REFERENCE, get_reference},
+ {VP9E_SET_WIDTH, vp9e_set_width},
+ {VP9E_SET_HEIGHT, vp9e_set_height},
+ {VP9E_SET_LAYER, vp9e_set_layer},
+ {VP9E_SET_SVC, vp9e_set_svc},
+ { -1, NULL},
+};
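+
+/* Usage sketch (illustrative; enc and cfg are hypothetical caller objects):
+ * vpx_codec_control() looks the control id up in this table and forwards the
+ * va_list to the mapped handler, e.g.
+ *
+ *   vpx_codec_ctx_t enc;
+ *   vpx_codec_enc_init(&enc, vpx_codec_vp9_cx(), &cfg, 0);
+ *   vpx_codec_control(&enc, VP8E_SET_CPUUSED, 2);  // dispatches to set_param
+ */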
+
+static vpx_codec_enc_cfg_map_t vp9e_usage_cfg_map[] = {
+ {
+ 0,
+ {
+ 0, /* g_usage */
+ 0, /* g_threads */
+ 0, /* g_profile */
+
+ 320, /* g_width */
+ 240, /* g_height */
+ {1, 30}, /* g_timebase */
+
+ 0, /* g_error_resilient */
+
+ VPX_RC_ONE_PASS, /* g_pass */
+
+ 25, /* g_lag_in_frames */
+
+ 0, /* rc_dropframe_thresh */
+ 0, /* rc_resize_allowed */
+ 60, /* rc_resize_down_thresh */
+ 30, /* rc_resize_up_thresh */
+
+ VPX_VBR, /* rc_end_usage */
+#if VPX_ENCODER_ABI_VERSION > (1 + VPX_CODEC_ABI_VERSION)
+ {0}, /* rc_twopass_stats_in */
+#endif
+ 256, /* rc_target_bandwidth */
+ 0, /* rc_min_quantizer */
+ 63, /* rc_max_quantizer */
+ 100, /* rc_undershoot_pct */
+ 100, /* rc_overshoot_pct */
+
+ 6000, /* rc_max_buffer_size */
+ 4000, /* rc_buffer_initial_size */
+ 5000, /* rc_buffer_optimal_size */
+
+ 50, /* rc_two_pass_vbrbias */
+ 0, /* rc_two_pass_vbrmin_section */
+ 2000, /* rc_two_pass_vbrmax_section */
+
+ /* keyframing settings (kf) */
+ VPX_KF_AUTO, /* g_kfmode */
+ 0, /* kf_min_dist */
+ 9999, /* kf_max_dist */
+
+ VPX_SS_DEFAULT_LAYERS, /* ss_number_layers */
+
+#if VPX_ENCODER_ABI_VERSION == (1 + VPX_CODEC_ABI_VERSION)
+ 1, /* g_delete_first_pass_file */
+ "vp8.fpf" /* first pass filename */
+#endif
+ }
+ },
+ { -1, {NOT_IMPLEMENTED}}
+};
+
+
+#ifndef VERSION_STRING
+#define VERSION_STRING
+#endif
+CODEC_INTERFACE(vpx_codec_vp9_cx) = {
+ "WebM Project VP9 Encoder" VERSION_STRING,
+ VPX_CODEC_INTERNAL_ABI_VERSION,
+ VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR |
+ VPX_CODEC_CAP_OUTPUT_PARTITION,
+ /* vpx_codec_caps_t caps; */
+ vp9e_init, /* vpx_codec_init_fn_t init; */
+ vp9e_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ vp9e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ NOT_IMPLEMENTED, /* vpx_codec_get_mmap_fn_t get_mmap; */
+ NOT_IMPLEMENTED, /* vpx_codec_set_mmap_fn_t set_mmap; */
+ {
+ NOT_IMPLEMENTED, /* vpx_codec_peek_si_fn_t peek_si; */
+ NOT_IMPLEMENTED, /* vpx_codec_get_si_fn_t get_si; */
+ NOT_IMPLEMENTED, /* vpx_codec_decode_fn_t decode; */
+ NOT_IMPLEMENTED, /* vpx_codec_frame_get_fn_t frame_get; */
+ },
+ {
+ vp9e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t *cfg_maps; */
+ vp9e_encode, /* vpx_codec_encode_fn_t encode; */
+ vp9e_get_cxdata, /* vpx_codec_get_cx_data_fn_t get_cx_data; */
+ vp9e_set_config,
+ NOT_IMPLEMENTED,
+ vp9e_get_preview,
+ } /* encoder functions */
+};
+
+
+#if CONFIG_EXPERIMENTAL
+
+CODEC_INTERFACE(vpx_codec_vp9x_cx) = {
+ "VP8 Experimental Encoder" VERSION_STRING,
+ VPX_CODEC_INTERNAL_ABI_VERSION,
+ VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR,
+ /* vpx_codec_caps_t caps; */
+ vp9e_exp_init, /* vpx_codec_init_fn_t init; */
+ vp9e_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ vp9e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ NOT_IMPLEMENTED, /* vpx_codec_get_mmap_fn_t get_mmap; */
+ NOT_IMPLEMENTED, /* vpx_codec_set_mmap_fn_t set_mmap; */
+ {
+ NOT_IMPLEMENTED, /* vpx_codec_peek_si_fn_t peek_si; */
+ NOT_IMPLEMENTED, /* vpx_codec_get_si_fn_t get_si; */
+ NOT_IMPLEMENTED, /* vpx_codec_decode_fn_t decode; */
+ NOT_IMPLEMENTED, /* vpx_codec_frame_get_fn_t frame_get; */
+ },
+ {
+ vp9e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t *cfg_maps; */
+ vp9e_encode, /* vpx_codec_encode_fn_t encode; */
+ vp9e_get_cxdata, /* vpx_codec_get_cx_data_fn_t get_cx_data; */
+ vp9e_set_config,
+ NOT_IMPLEMENTED,
+ vp9e_get_preview,
+ } /* encoder functions */
+};
+#endif
diff --git a/libvpx/vp9/vp9_dx_iface.c b/libvpx/vp9/vp9_dx_iface.c
new file mode 100644
index 0000000..10b3238
--- /dev/null
+++ b/libvpx/vp9/vp9_dx_iface.c
@@ -0,0 +1,717 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdlib.h>
+#include <string.h>
+#include "vpx/vpx_decoder.h"
+#include "vpx/vp8dx.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx_version.h"
+#include "vp9/decoder/vp9_onyxd.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_read_bit_buffer.h"
+#include "vp9/vp9_iface_common.h"
+
+#define VP9_CAP_POSTPROC (CONFIG_VP9_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0)
+typedef vpx_codec_stream_info_t vp9_stream_info_t;
+
+/* Structures for handling memory allocations */
+typedef enum {
+ VP9_SEG_ALG_PRIV = 256,
+ VP9_SEG_MAX
+} mem_seg_id_t;
+#define NELEMENTS(x) ((int)(sizeof(x)/sizeof(x[0])))
+
+static unsigned long priv_sz(const vpx_codec_dec_cfg_t *si,
+ vpx_codec_flags_t flags);
+
+static const mem_req_t vp9_mem_req_segs[] = {
+ {VP9_SEG_ALG_PRIV, 0, 8, VPX_CODEC_MEM_ZERO, priv_sz},
+ {VP9_SEG_MAX, 0, 0, 0, NULL}
+};
+
+struct vpx_codec_alg_priv {
+ vpx_codec_priv_t base;
+ vpx_codec_mmap_t mmaps[NELEMENTS(vp9_mem_req_segs) - 1];
+ vpx_codec_dec_cfg_t cfg;
+ vp9_stream_info_t si;
+ int defer_alloc;
+ int decoder_init;
+ VP9D_PTR pbi;
+ int postproc_cfg_set;
+ vp8_postproc_cfg_t postproc_cfg;
+#if CONFIG_POSTPROC_VISUALIZER
+ unsigned int dbg_postproc_flag;
+ int dbg_color_ref_frame_flag;
+ int dbg_color_mb_modes_flag;
+ int dbg_color_b_modes_flag;
+ int dbg_display_mv_flag;
+#endif
+ vpx_image_t img;
+ int img_setup;
+ int img_avail;
+ int invert_tile_order;
+};
+
+static unsigned long priv_sz(const vpx_codec_dec_cfg_t *si,
+ vpx_codec_flags_t flags) {
+ /* Although this declaration is constant, we can't use it in the requested
+ * segments list because we want to define the requested segments list
+ * before defining the private type (so that the number of memory maps is
+ * known)
+ */
+ (void)si;
+ return sizeof(vpx_codec_alg_priv_t);
+}
+
+static void vp9_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap) {
+ int i;
+
+ ctx->priv = mmap->base;
+ ctx->priv->sz = sizeof(*ctx->priv);
+ ctx->priv->iface = ctx->iface;
+ ctx->priv->alg_priv = mmap->base;
+
+ for (i = 0; i < NELEMENTS(ctx->priv->alg_priv->mmaps); i++)
+ ctx->priv->alg_priv->mmaps[i].id = vp9_mem_req_segs[i].id;
+
+ ctx->priv->alg_priv->mmaps[0] = *mmap;
+ ctx->priv->alg_priv->si.sz = sizeof(ctx->priv->alg_priv->si);
+ ctx->priv->init_flags = ctx->init_flags;
+
+ if (ctx->config.dec) {
+ /* Update the reference to the config structure to an internal copy. */
+ ctx->priv->alg_priv->cfg = *ctx->config.dec;
+ ctx->config.dec = &ctx->priv->alg_priv->cfg;
+ }
+}
+
+static void vp9_finalize_mmaps(vpx_codec_alg_priv_t *ctx) {
+ /* nothing to clean up */
+}
+
+static vpx_codec_err_t vp9_init(vpx_codec_ctx_t *ctx,
+ vpx_codec_priv_enc_mr_cfg_t *data) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ /* This function only allocates space for the vpx_codec_alg_priv_t
+ * structure. More memory may be required at the time the stream
+ * information becomes known.
+ */
+ if (!ctx->priv) {
+ vpx_codec_mmap_t mmap;
+
+ mmap.id = vp9_mem_req_segs[0].id;
+ mmap.sz = sizeof(vpx_codec_alg_priv_t);
+ mmap.align = vp9_mem_req_segs[0].align;
+ mmap.flags = vp9_mem_req_segs[0].flags;
+
+ res = vpx_mmap_alloc(&mmap);
+
+ if (!res) {
+ vp9_init_ctx(ctx, &mmap);
+
+ ctx->priv->alg_priv->defer_alloc = 1;
+ /* post-processing level initialized to do nothing */
+ }
+ }
+
+ return res;
+}
+
+static vpx_codec_err_t vp9_destroy(vpx_codec_alg_priv_t *ctx) {
+ int i;
+
+ vp9_remove_decompressor(ctx->pbi);
+
+ for (i = NELEMENTS(ctx->mmaps) - 1; i >= 0; i--) {
+ if (ctx->mmaps[i].dtor)
+ ctx->mmaps[i].dtor(&ctx->mmaps[i]);
+ }
+
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp9_peek_si(const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si) {
+ if (data_sz <= 8) return VPX_CODEC_UNSUP_BITSTREAM;
+ if (data + data_sz <= data) return VPX_CODEC_INVALID_PARAM;
+
+ si->is_kf = 0;
+ si->w = si->h = 0;
+
+ {
+ struct vp9_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
+ const int frame_marker = vp9_rb_read_literal(&rb, 2);
+ const int version = vp9_rb_read_bit(&rb) | (vp9_rb_read_bit(&rb) << 1);
+ if (frame_marker != 0x2) return VPX_CODEC_UNSUP_BITSTREAM;
+#if CONFIG_NON420
+ if (version > 1) return VPX_CODEC_UNSUP_BITSTREAM;
+#else
+ if (version != 0) return VPX_CODEC_UNSUP_BITSTREAM;
+#endif
+
+ if (vp9_rb_read_bit(&rb)) { // show an existing frame
+ return VPX_CODEC_OK;
+ }
+
+ si->is_kf = !vp9_rb_read_bit(&rb);
+ if (si->is_kf) {
+ const int sRGB = 7;
+ int colorspace;
+
+ rb.bit_offset += 1; // show frame
+ rb.bit_offset += 1; // error resilient
+
+ if (vp9_rb_read_literal(&rb, 8) != SYNC_CODE_0 ||
+ vp9_rb_read_literal(&rb, 8) != SYNC_CODE_1 ||
+ vp9_rb_read_literal(&rb, 8) != SYNC_CODE_2) {
+ return VPX_CODEC_UNSUP_BITSTREAM;
+ }
+
+ colorspace = vp9_rb_read_literal(&rb, 3);
+ if (colorspace != sRGB) {
+ rb.bit_offset += 1; // [16,235] (including xvycc) vs [0,255] range
+ if (version == 1) {
+ rb.bit_offset += 2; // subsampling x/y
+ rb.bit_offset += 1; // has extra plane
+ }
+ } else {
+ if (version == 1) {
+ rb.bit_offset += 1; // has extra plane
+ } else {
+ // RGB is only available in version 1
+ return VPX_CODEC_UNSUP_BITSTREAM;
+ }
+ }
+
+ // TODO(jzern): these are available on non-keyframes in intra only mode.
+ si->w = vp9_rb_read_literal(&rb, 16) + 1;
+ si->h = vp9_rb_read_literal(&rb, 16) + 1;
+ }
+ }
+
+ return VPX_CODEC_OK;
+}
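+
+/* Worked example (illustrative): a keyframe header starts with the 2-bit
+ * frame marker 10, two version bits, a zero show-existing-frame bit and a
+ * zero frame-type bit (key), then -- after the show-frame and error-resilient
+ * bits -- the sync code 0x49 0x83 0x42 checked above. Width and height are
+ * coded minus one in 16-bit fields after the colorspace bits, so a stored
+ * value of 0x04ff decodes to 1280. */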
+
+static vpx_codec_err_t vp9_get_si(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_stream_info_t *si) {
+
+ unsigned int sz;
+
+ if (si->sz >= sizeof(vp9_stream_info_t))
+ sz = sizeof(vp9_stream_info_t);
+ else
+ sz = sizeof(vpx_codec_stream_info_t);
+
+ memcpy(si, &ctx->si, sz);
+ si->sz = sz;
+
+ return VPX_CODEC_OK;
+}
+
+
+static vpx_codec_err_t
+update_error_state(vpx_codec_alg_priv_t *ctx,
+ const struct vpx_internal_error_info *error) {
+ vpx_codec_err_t res;
+
+ if ((res = error->error_code))
+ ctx->base.err_detail = error->has_detail
+ ? error->detail
+ : NULL;
+
+ return res;
+}
+
+static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
+ const uint8_t **data,
+ unsigned int data_sz,
+ void *user_priv,
+ long deadline) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ ctx->img_avail = 0;
+
+ /* Determine the stream parameters. Note that we rely on peek_si to
+ * validate that we have a buffer that does not wrap around the top
+ * of the heap.
+ */
+ if (!ctx->si.h)
+ res = ctx->base.iface->dec.peek_si(*data, data_sz, &ctx->si);
+
+
+ /* Perform deferred allocations, if required */
+ if (!res && ctx->defer_alloc) {
+ int i;
+
+ for (i = 1; !res && i < NELEMENTS(ctx->mmaps); i++) {
+ vpx_codec_dec_cfg_t cfg;
+
+ cfg.w = ctx->si.w;
+ cfg.h = ctx->si.h;
+ ctx->mmaps[i].id = vp9_mem_req_segs[i].id;
+ ctx->mmaps[i].sz = vp9_mem_req_segs[i].sz;
+ ctx->mmaps[i].align = vp9_mem_req_segs[i].align;
+ ctx->mmaps[i].flags = vp9_mem_req_segs[i].flags;
+
+ if (!ctx->mmaps[i].sz)
+ ctx->mmaps[i].sz = vp9_mem_req_segs[i].calc_sz(&cfg,
+ ctx->base.init_flags);
+
+ res = vpx_mmap_alloc(&ctx->mmaps[i]);
+ }
+
+ if (!res)
+ vp9_finalize_mmaps(ctx);
+
+ ctx->defer_alloc = 0;
+ }
+
+ /* Initialize the decoder instance on the first frame */
+ if (!res && !ctx->decoder_init) {
+ res = vpx_validate_mmaps(&ctx->si, ctx->mmaps,
+ vp9_mem_req_segs, NELEMENTS(vp9_mem_req_segs),
+ ctx->base.init_flags);
+
+ if (!res) {
+ VP9D_CONFIG oxcf;
+ VP9D_PTR optr;
+
+ vp9_initialize_dec();
+
+ oxcf.width = ctx->si.w;
+ oxcf.height = ctx->si.h;
+ oxcf.version = 9;
+ oxcf.postprocess = 0;
+ oxcf.max_threads = ctx->cfg.threads;
+ oxcf.inv_tile_order = ctx->invert_tile_order;
+ optr = vp9_create_decompressor(&oxcf);
+
+ /* If postprocessing was enabled by the application and a
+ * configuration has not been provided, default it.
+ */
+ if (!ctx->postproc_cfg_set
+ && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) {
+ ctx->postproc_cfg.post_proc_flag =
+ VP8_DEBLOCK | VP8_DEMACROBLOCK;
+ ctx->postproc_cfg.deblocking_level = 4;
+ ctx->postproc_cfg.noise_level = 0;
+ }
+
+ if (!optr)
+ res = VPX_CODEC_ERROR;
+ else
+ ctx->pbi = optr;
+ }
+
+ ctx->decoder_init = 1;
+ }
+
+ if (!res && ctx->pbi) {
+ YV12_BUFFER_CONFIG sd;
+ int64_t time_stamp = 0, time_end_stamp = 0;
+ vp9_ppflags_t flags = {0};
+
+ if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC) {
+ flags.post_proc_flag = ctx->postproc_cfg.post_proc_flag
+#if CONFIG_POSTPROC_VISUALIZER
+
+ | ((ctx->dbg_color_ref_frame_flag != 0) ? VP9D_DEBUG_CLR_FRM_REF_BLKS : 0)
+ | ((ctx->dbg_color_mb_modes_flag != 0) ? VP9D_DEBUG_CLR_BLK_MODES : 0)
+ | ((ctx->dbg_color_b_modes_flag != 0) ? VP9D_DEBUG_CLR_BLK_MODES : 0)
+ | ((ctx->dbg_display_mv_flag != 0) ? VP9D_DEBUG_DRAW_MV : 0)
+#endif
+;
+ flags.deblocking_level = ctx->postproc_cfg.deblocking_level;
+ flags.noise_level = ctx->postproc_cfg.noise_level;
+#if CONFIG_POSTPROC_VISUALIZER
+ flags.display_ref_frame_flag = ctx->dbg_color_ref_frame_flag;
+ flags.display_mb_modes_flag = ctx->dbg_color_mb_modes_flag;
+ flags.display_b_modes_flag = ctx->dbg_color_b_modes_flag;
+ flags.display_mv_flag = ctx->dbg_display_mv_flag;
+#endif
+ }
+
+ if (vp9_receive_compressed_data(ctx->pbi, data_sz, data, deadline)) {
+ VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi;
+ res = update_error_state(ctx, &pbi->common.error);
+ }
+
+ if (!res && 0 == vp9_get_raw_frame(ctx->pbi, &sd, &time_stamp,
+ &time_end_stamp, &flags)) {
+ yuvconfig2image(&ctx->img, &sd, user_priv);
+ ctx->img_avail = 1;
+ }
+ }
+
+ return res;
+}
+
+static void parse_superframe_index(const uint8_t *data,
+ size_t data_sz,
+ uint32_t sizes[8],
+ int *count) {
+ uint8_t marker;
+
+ assert(data_sz);
+ marker = data[data_sz - 1];
+ *count = 0;
+
+ if ((marker & 0xe0) == 0xc0) {
+ const uint32_t frames = (marker & 0x7) + 1;
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const size_t index_sz = 2 + mag * frames;
+
+ if (data_sz >= index_sz && data[data_sz - index_sz] == marker) {
+ // found a valid superframe index
+ uint32_t i, j;
+ const uint8_t *x = data + data_sz - index_sz + 1;
+
+ for (i = 0; i < frames; i++) {
+ uint32_t this_sz = 0;
+
+ for (j = 0; j < mag; j++)
+ this_sz |= (*x++) << (j * 8);
+ sizes[i] = this_sz;
+ }
+
+ *count = frames;
+ }
+ }
+}
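+
+/* Illustrative check against the encoder side: for trailing bytes
+ * c9 12 00 34 12 c9, marker & 0xe0 == 0xc0, frames == 2, mag == 2 bytes,
+ * index_sz == 6, and the recovered sizes are 0x0012 and 0x1234. */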
+
+static vpx_codec_err_t vp9_decode(vpx_codec_alg_priv_t *ctx,
+ const uint8_t *data,
+ unsigned int data_sz,
+ void *user_priv,
+ long deadline) {
+ const uint8_t *data_start = data;
+ const uint8_t *data_end = data + data_sz;
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ uint32_t sizes[8];
+ int frames_this_pts, frame_count = 0;
+
+ if (data == NULL || data_sz == 0) return VPX_CODEC_INVALID_PARAM;
+
+ parse_superframe_index(data, data_sz, sizes, &frames_this_pts);
+
+ do {
+ // Skip over the superframe index, if present
+ if (data_sz && (*data_start & 0xe0) == 0xc0) {
+ const uint8_t marker = *data_start;
+ const uint32_t frames = (marker & 0x7) + 1;
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const uint32_t index_sz = 2 + mag * frames;
+
+ if (data_sz >= index_sz && data_start[index_sz - 1] == marker) {
+ data_start += index_sz;
+ data_sz -= index_sz;
+ if (data_start < data_end)
+ continue;
+ else
+ break;
+ }
+ }
+
+ // Use the correct size for this frame, if an index is present.
+ if (frames_this_pts) {
+ uint32_t this_sz = sizes[frame_count];
+
+ if (data_sz < this_sz) {
+ ctx->base.err_detail = "Invalid frame size in index";
+ return VPX_CODEC_CORRUPT_FRAME;
+ }
+
+ data_sz = this_sz;
+ frame_count++;
+ }
+
+ res = decode_one(ctx, &data_start, data_sz, user_priv, deadline);
+ assert(data_start >= data);
+ assert(data_start <= data_end);
+
+ /* Early exit if there was a decode error */
+ if (res)
+ break;
+
+ /* Account for suboptimal termination by the encoder. */
+ while (data_start < data_end && *data_start == 0)
+ data_start++;
+
+ data_sz = data_end - data_start;
+ } while (data_start < data_end);
+ return res;
+}
+
+static vpx_image_t *vp9_get_frame(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter) {
+ vpx_image_t *img = NULL;
+
+ if (ctx->img_avail) {
+ /* iter acts as a flip flop, so an image is only returned on the first
+ * call to get_frame.
+ */
+ if (!(*iter)) {
+ img = &ctx->img;
+ *iter = img;
+ }
+ }
+ ctx->img_avail = 0;
+
+ return img;
+}
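+
+/* Caller-side sketch (illustrative; "show" is a hypothetical consumer): the
+ * flip-flop iterator means at most one image is returned per decode call,
+ * e.g.
+ *
+ *   vpx_codec_iter_t iter = NULL;
+ *   vpx_image_t *img;
+ *   while ((img = vpx_codec_get_frame(&decoder, &iter)) != NULL)
+ *     show(img);
+ */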
+
+static vpx_codec_err_t vp9_xma_get_mmap(const vpx_codec_ctx_t *ctx,
+ vpx_codec_mmap_t *mmap,
+ vpx_codec_iter_t *iter) {
+ vpx_codec_err_t res;
+ const mem_req_t *seg_iter = *iter;
+
+ /* Get address of next segment request */
+ do {
+ if (!seg_iter)
+ seg_iter = vp9_mem_req_segs;
+ else if (seg_iter->id != VP9_SEG_MAX)
+ seg_iter++;
+
+ *iter = (vpx_codec_iter_t)seg_iter;
+
+ if (seg_iter->id != VP9_SEG_MAX) {
+ mmap->id = seg_iter->id;
+ mmap->sz = seg_iter->sz;
+ mmap->align = seg_iter->align;
+ mmap->flags = seg_iter->flags;
+
+ if (!seg_iter->sz)
+ mmap->sz = seg_iter->calc_sz(ctx->config.dec, ctx->init_flags);
+
+ res = VPX_CODEC_OK;
+ } else
+ res = VPX_CODEC_LIST_END;
+ } while (!mmap->sz && res != VPX_CODEC_LIST_END);
+
+ return res;
+}
+
+static vpx_codec_err_t vp9_xma_set_mmap(vpx_codec_ctx_t *ctx,
+ const vpx_codec_mmap_t *mmap) {
+ vpx_codec_err_t res = VPX_CODEC_MEM_ERROR;
+ int i, done;
+
+ if (!ctx->priv) {
+ if (mmap->id == VP9_SEG_ALG_PRIV) {
+ if (!ctx->priv) {
+ vp9_init_ctx(ctx, mmap);
+ res = VPX_CODEC_OK;
+ }
+ }
+ }
+
+ done = 1;
+
+ if (!res && ctx->priv->alg_priv) {
+ for (i = 0; i < NELEMENTS(ctx->priv->alg_priv->mmaps); i++) {
+ if (ctx->priv->alg_priv->mmaps[i].id == mmap->id)
+ if (!ctx->priv->alg_priv->mmaps[i].base) {
+ ctx->priv->alg_priv->mmaps[i] = *mmap;
+ res = VPX_CODEC_OK;
+ }
+
+ done &= (ctx->priv->alg_priv->mmaps[i].base != NULL);
+ }
+ }
+
+ if (done && !res) {
+ vp9_finalize_mmaps(ctx->priv->alg_priv);
+ res = ctx->iface->init(ctx, NULL);
+ }
+
+ return res;
+}
+
+static vpx_codec_err_t set_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+
+ return vp9_set_reference_dec(ctx->pbi,
+ (VP9_REFFRAME)frame->frame_type, &sd);
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+
+}
+
+static vpx_codec_err_t copy_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+
+ return vp9_copy_reference_dec(ctx->pbi,
+ (VP9_REFFRAME)frame->frame_type, &sd);
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+
+}
+
+static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);
+
+ if (data) {
+ YV12_BUFFER_CONFIG* fb;
+
+ vp9_get_reference_dec(ctx->pbi, data->idx, &fb);
+ yuvconfig2image(&data->img, fb, NULL);
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t set_postproc(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+#if CONFIG_VP9_POSTPROC
+ vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *);
+
+ if (data) {
+ ctx->postproc_cfg_set = 1;
+ ctx->postproc_cfg = *((vp8_postproc_cfg_t *)data);
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+
+#else
+ return VPX_CODEC_INCAPABLE;
+#endif
+}
+
+static vpx_codec_err_t set_dbg_options(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+#if CONFIG_POSTPROC_VISUALIZER && CONFIG_POSTPROC
+ int data = va_arg(args, int);
+
+#define MAP(id, var) case id: var = data; break;
+
+ switch (ctrl_id) {
+ MAP(VP8_SET_DBG_COLOR_REF_FRAME, ctx->dbg_color_ref_frame_flag);
+ MAP(VP8_SET_DBG_COLOR_MB_MODES, ctx->dbg_color_mb_modes_flag);
+ MAP(VP8_SET_DBG_COLOR_B_MODES, ctx->dbg_color_b_modes_flag);
+ MAP(VP8_SET_DBG_DISPLAY_MV, ctx->dbg_display_mv_flag);
+ }
+
+ return VPX_CODEC_OK;
+#else
+ return VPX_CODEC_INCAPABLE;
+#endif
+}
+
+static vpx_codec_err_t get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+ int *update_info = va_arg(args, int *);
+ VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi;
+
+ if (update_info) {
+ *update_info = pbi->refresh_frame_flags;
+
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+
+static vpx_codec_err_t get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+
+ int *corrupted = va_arg(args, int *);
+
+ if (corrupted) {
+ VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi;
+ *corrupted = pbi->common.frame_to_show->corrupted;
+
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+
+}
+
+static vpx_codec_err_t set_invert_tile_order(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ ctx->invert_tile_order = va_arg(args, int);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_ctrl_fn_map_t ctf_maps[] = {
+ {VP8_SET_REFERENCE, set_reference},
+ {VP8_COPY_REFERENCE, copy_reference},
+ {VP8_SET_POSTPROC, set_postproc},
+ {VP8_SET_DBG_COLOR_REF_FRAME, set_dbg_options},
+ {VP8_SET_DBG_COLOR_MB_MODES, set_dbg_options},
+ {VP8_SET_DBG_COLOR_B_MODES, set_dbg_options},
+ {VP8_SET_DBG_DISPLAY_MV, set_dbg_options},
+ {VP8D_GET_LAST_REF_UPDATES, get_last_ref_updates},
+ {VP8D_GET_FRAME_CORRUPTED, get_frame_corrupted},
+ {VP9_GET_REFERENCE, get_reference},
+ {VP9_INVERT_TILE_DECODE_ORDER, set_invert_tile_order},
+ { -1, NULL},
+};
+
+
+#ifndef VERSION_STRING
+#define VERSION_STRING
+#endif
+CODEC_INTERFACE(vpx_codec_vp9_dx) = {
+ "WebM Project VP9 Decoder" VERSION_STRING,
+ VPX_CODEC_INTERNAL_ABI_VERSION,
+ VPX_CODEC_CAP_DECODER | VP9_CAP_POSTPROC,
+ /* vpx_codec_caps_t caps; */
+ vp9_init, /* vpx_codec_init_fn_t init; */
+ vp9_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ vp9_xma_get_mmap, /* vpx_codec_get_mmap_fn_t get_mmap; */
+ vp9_xma_set_mmap, /* vpx_codec_set_mmap_fn_t set_mmap; */
+ {
+ vp9_peek_si, /* vpx_codec_peek_si_fn_t peek_si; */
+ vp9_get_si, /* vpx_codec_get_si_fn_t get_si; */
+ vp9_decode, /* vpx_codec_decode_fn_t decode; */
+ vp9_get_frame, /* vpx_codec_frame_get_fn_t frame_get; */
+ },
+ {
+ /* encoder functions */
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED
+ }
+};
diff --git a/libvpx/vp9/vp9_iface_common.h b/libvpx/vp9/vp9_iface_common.h
new file mode 100644
index 0000000..ed0122c
--- /dev/null
+++ b/libvpx/vp9/vp9_iface_common.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VP9_VP9_IFACE_COMMON_H_
+#define VP9_VP9_IFACE_COMMON_H_
+
+static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
+ void *user_priv) {
+ /** vpx_img_wrap() doesn't allow specifying independent strides for
+ * the Y, U, and V planes, nor other alignment adjustments that
+ * might be representable by a YV12_BUFFER_CONFIG, so we just
+ * initialize all the fields. */
+ int bps = 12;
+ if (yv12->uv_height == yv12->y_height) {
+ if (yv12->uv_width == yv12->y_width) {
+ img->fmt = VPX_IMG_FMT_I444;
+ bps = 24;
+ } else {
+ img->fmt = VPX_IMG_FMT_I422;
+ bps = 16;
+ }
+ } else {
+ img->fmt = VPX_IMG_FMT_I420;
+ }
+ img->w = yv12->y_stride;
+ img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * VP9BORDERINPIXELS, 3);
+ img->d_w = yv12->y_crop_width;
+ img->d_h = yv12->y_crop_height;
+ img->x_chroma_shift = yv12->uv_width < yv12->y_width;
+ img->y_chroma_shift = yv12->uv_height < yv12->y_height;
+ img->planes[VPX_PLANE_Y] = yv12->y_buffer;
+ img->planes[VPX_PLANE_U] = yv12->u_buffer;
+ img->planes[VPX_PLANE_V] = yv12->v_buffer;
+ img->planes[VPX_PLANE_ALPHA] = yv12->alpha_buffer;
+ img->stride[VPX_PLANE_Y] = yv12->y_stride;
+ img->stride[VPX_PLANE_U] = yv12->uv_stride;
+ img->stride[VPX_PLANE_V] = yv12->uv_stride;
+ img->stride[VPX_PLANE_ALPHA] = yv12->alpha_stride;
+ img->bps = bps;
+ img->user_priv = user_priv;
+ img->img_data = yv12->buffer_alloc;
+ img->img_data_owner = 0;
+ img->self_allocd = 0;
+}
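+
+/* Mapping summary (illustrative): equal uv/y width and height selects I444
+ * (24 bits per pixel), equal height only selects I422 (16 bpp), and halved
+ * chroma in both dimensions selects I420 (12 bpp); the chroma shifts set
+ * above are then 0/0, 1/0 and 1/1 respectively. */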
+
+static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
+ YV12_BUFFER_CONFIG *yv12) {
+ yv12->y_buffer = img->planes[VPX_PLANE_Y];
+ yv12->u_buffer = img->planes[VPX_PLANE_U];
+ yv12->v_buffer = img->planes[VPX_PLANE_V];
+ yv12->alpha_buffer = img->planes[VPX_PLANE_ALPHA];
+
+ yv12->y_crop_width = img->d_w;
+ yv12->y_crop_height = img->d_h;
+ yv12->y_width = img->d_w;
+ yv12->y_height = img->d_h;
+
+ yv12->uv_width = img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2
+ : yv12->y_width;
+ yv12->uv_height = img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2
+ : yv12->y_height;
+
+ yv12->alpha_width = yv12->alpha_buffer ? img->d_w : 0;
+ yv12->alpha_height = yv12->alpha_buffer ? img->d_h : 0;
+
+ yv12->y_stride = img->stride[VPX_PLANE_Y];
+ yv12->uv_stride = img->stride[VPX_PLANE_U];
+ yv12->alpha_stride = yv12->alpha_buffer ? img->stride[VPX_PLANE_ALPHA] : 0;
+
+ yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
+#if CONFIG_ALPHA
+ // For development purposes, force alpha to hold the same data as Y for now.
+ yv12->alpha_buffer = yv12->y_buffer;
+ yv12->alpha_width = yv12->y_width;
+ yv12->alpha_height = yv12->y_height;
+ yv12->alpha_stride = yv12->y_stride;
+#endif
+ return VPX_CODEC_OK;
+}
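+
+/* Example (illustrative): wrapping a 1280-wide image whose Y stride is 1344
+ * yields border == (1344 - 1280) / 2 == 32 pixels on each side. */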
+
+#endif // VP9_VP9_IFACE_COMMON_H_
diff --git a/libvpx/vp9/vp9cx.mk b/libvpx/vp9/vp9cx.mk
new file mode 100644
index 0000000..9fbf100
--- /dev/null
+++ b/libvpx/vp9/vp9cx.mk
@@ -0,0 +1,105 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+VP9_CX_EXPORTS += exports_enc
+
+VP9_CX_SRCS-yes += $(VP9_COMMON_SRCS-yes)
+VP9_CX_SRCS-no += $(VP9_COMMON_SRCS-no)
+VP9_CX_SRCS_REMOVE-yes += $(VP9_COMMON_SRCS_REMOVE-yes)
+VP9_CX_SRCS_REMOVE-no += $(VP9_COMMON_SRCS_REMOVE-no)
+
+VP9_CX_SRCS-yes += vp9_cx_iface.c
+
+VP9_CX_SRCS-yes += encoder/vp9_bitstream.c
+VP9_CX_SRCS-yes += encoder/vp9_boolhuff.c
+VP9_CX_SRCS-yes += encoder/vp9_dct.c
+VP9_CX_SRCS-yes += encoder/vp9_encodeframe.c
+VP9_CX_SRCS-yes += encoder/vp9_encodeframe.h
+VP9_CX_SRCS-yes += encoder/vp9_encodeintra.c
+VP9_CX_SRCS-yes += encoder/vp9_encodemb.c
+VP9_CX_SRCS-yes += encoder/vp9_encodemv.c
+VP9_CX_SRCS-yes += encoder/vp9_firstpass.c
+VP9_CX_SRCS-yes += encoder/vp9_block.h
+VP9_CX_SRCS-yes += encoder/vp9_boolhuff.h
+VP9_CX_SRCS-yes += encoder/vp9_write_bit_buffer.h
+VP9_CX_SRCS-yes += encoder/vp9_bitstream.h
+VP9_CX_SRCS-yes += encoder/vp9_encodeintra.h
+VP9_CX_SRCS-yes += encoder/vp9_encodemb.h
+VP9_CX_SRCS-yes += encoder/vp9_encodemv.h
+VP9_CX_SRCS-yes += encoder/vp9_firstpass.h
+VP9_CX_SRCS-yes += encoder/vp9_lookahead.c
+VP9_CX_SRCS-yes += encoder/vp9_lookahead.h
+VP9_CX_SRCS-yes += encoder/vp9_mcomp.h
+VP9_CX_SRCS-yes += encoder/vp9_modecosts.h
+VP9_CX_SRCS-yes += encoder/vp9_onyx_int.h
+VP9_CX_SRCS-yes += encoder/vp9_psnr.h
+VP9_CX_SRCS-yes += encoder/vp9_quantize.h
+VP9_CX_SRCS-yes += encoder/vp9_ratectrl.h
+VP9_CX_SRCS-yes += encoder/vp9_rdopt.h
+VP9_CX_SRCS-yes += encoder/vp9_tokenize.h
+VP9_CX_SRCS-yes += encoder/vp9_treewriter.h
+VP9_CX_SRCS-yes += encoder/vp9_variance.h
+VP9_CX_SRCS-yes += encoder/vp9_mcomp.c
+VP9_CX_SRCS-yes += encoder/vp9_modecosts.c
+VP9_CX_SRCS-yes += encoder/vp9_onyx_if.c
+VP9_CX_SRCS-yes += encoder/vp9_picklpf.c
+VP9_CX_SRCS-yes += encoder/vp9_picklpf.h
+VP9_CX_SRCS-yes += encoder/vp9_psnr.c
+VP9_CX_SRCS-yes += encoder/vp9_quantize.c
+VP9_CX_SRCS-yes += encoder/vp9_ratectrl.c
+VP9_CX_SRCS-yes += encoder/vp9_rdopt.c
+VP9_CX_SRCS-yes += encoder/vp9_sad_c.c
+VP9_CX_SRCS-yes += encoder/vp9_segmentation.c
+VP9_CX_SRCS-yes += encoder/vp9_segmentation.h
+VP9_CX_SRCS-yes += encoder/vp9_subexp.c
+VP9_CX_SRCS-yes += encoder/vp9_subexp.h
+VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/vp9_ssim.c
+VP9_CX_SRCS-yes += encoder/vp9_tokenize.c
+VP9_CX_SRCS-yes += encoder/vp9_treewriter.c
+VP9_CX_SRCS-yes += encoder/vp9_variance_c.c
+ifeq ($(CONFIG_VP9_POSTPROC),yes)
+VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/vp9_postproc.h
+VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/vp9_postproc.c
+endif
+VP9_CX_SRCS-yes += encoder/vp9_temporal_filter.c
+VP9_CX_SRCS-yes += encoder/vp9_temporal_filter.h
+VP9_CX_SRCS-yes += encoder/vp9_mbgraph.c
+VP9_CX_SRCS-yes += encoder/vp9_mbgraph.h
+
+
+VP9_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/vp9_mcomp_x86.h
+VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_variance_mmx.c
+VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_variance_impl_mmx.asm
+VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_sad_mmx.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_variance_impl_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad4d_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subpel_variance_impl_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_temporal_filter_apply_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE3) += encoder/x86/vp9_sad_sse3.asm
+
+ifeq ($(USE_X86INC),yes)
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_error_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subtract_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_variance_sse2.c
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subpel_variance.asm
+endif
+
+ifeq ($(ARCH_X86_64),yes)
+VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_quantize_ssse3.asm
+endif
+VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_sad_ssse3.asm
+VP9_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp9_sad_sse4.asm
+VP9_CX_SRCS-$(ARCH_X86_64) += encoder/x86/vp9_ssim_opt.asm
+
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct_sse2.c
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct32x32_sse2.c
+
+VP9_CX_SRCS-yes := $(filter-out $(VP9_CX_SRCS_REMOVE-yes),$(VP9_CX_SRCS-yes))
diff --git a/libvpx/vp9/vp9dx.mk b/libvpx/vp9/vp9dx.mk
new file mode 100644
index 0000000..be3afe8
--- /dev/null
+++ b/libvpx/vp9/vp9dx.mk
@@ -0,0 +1,43 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+VP9_DX_EXPORTS += exports_dec
+
+VP9_DX_SRCS-yes += $(VP9_COMMON_SRCS-yes)
+VP9_DX_SRCS-no += $(VP9_COMMON_SRCS-no)
+VP9_DX_SRCS_REMOVE-yes += $(VP9_COMMON_SRCS_REMOVE-yes)
+VP9_DX_SRCS_REMOVE-no += $(VP9_COMMON_SRCS_REMOVE-no)
+
+VP9_DX_SRCS-yes += vp9_dx_iface.c
+
+VP9_DX_SRCS-yes += decoder/vp9_dboolhuff.c
+VP9_DX_SRCS-yes += decoder/vp9_decodemv.c
+VP9_DX_SRCS-yes += decoder/vp9_decodframe.c
+VP9_DX_SRCS-yes += decoder/vp9_decodframe.h
+VP9_DX_SRCS-yes += decoder/vp9_detokenize.c
+VP9_DX_SRCS-yes += decoder/vp9_dboolhuff.h
+VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.h
+VP9_DX_SRCS-yes += decoder/vp9_decodemv.h
+VP9_DX_SRCS-yes += decoder/vp9_detokenize.h
+VP9_DX_SRCS-yes += decoder/vp9_onyxd.h
+VP9_DX_SRCS-yes += decoder/vp9_onyxd_int.h
+VP9_DX_SRCS-yes += decoder/vp9_thread.c
+VP9_DX_SRCS-yes += decoder/vp9_thread.h
+VP9_DX_SRCS-yes += decoder/vp9_treereader.h
+VP9_DX_SRCS-yes += decoder/vp9_onyxd_if.c
+VP9_DX_SRCS-yes += decoder/vp9_idct_blk.c
+VP9_DX_SRCS-yes += decoder/vp9_idct_blk.h
+VP9_DX_SRCS-yes += decoder/vp9_dsubexp.c
+VP9_DX_SRCS-yes += decoder/vp9_dsubexp.h
+
+VP9_DX_SRCS-yes := $(filter-out $(VP9_DX_SRCS_REMOVE-yes),$(VP9_DX_SRCS-yes))
+
+VP9_DX_SRCS-$(HAVE_SSE2) += decoder/x86/vp9_dequantize_sse2.c
+VP9_DX_SRCS-$(HAVE_NEON) += decoder/arm/neon/vp9_add_constant_residual_neon$(ASM)
diff --git a/libvpx/vp9_spatial_scalable_encoder.c b/libvpx/vp9_spatial_scalable_encoder.c
new file mode 100644
index 0000000..8bb582f
--- /dev/null
+++ b/libvpx/vp9_spatial_scalable_encoder.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This is an example demonstrating how to implement a multi-layer
+ * VP9 encoding scheme based on spatial scalability for video applications
+ * that benefit from a scalable bitstream.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <time.h>
+#include <string.h>
+#include <unistd.h>
+#include <libgen.h>
+#define VPX_CODEC_DISABLE_COMPAT 1
+#include "vpx/vpx_encoder.h"
+#include "vpx/vp8cx.h"
+#define interface (vpx_codec_vp9_cx())
+#define fourcc 0x30395056
+#define IVF_FILE_HDR_SZ (32)
+#define IVF_FRAME_HDR_SZ (12)
+#define NUM_BUFFERS 8
+
+char *input_filename;
+char *output_filename;
+unsigned int number_frames_to_code = 60 * 60;
+unsigned int number_frames_to_skip = 0;
+unsigned int number_spatial_layers = 5;
+unsigned int key_period = 100;
+
+typedef enum ENCODING_MODE {
+ INTER_LAYER_PREDICTION_I,
+ INTER_LAYER_PREDICTION_IP,
+ USE_GOLDEN_FRAME
+} ENCODING_MODE;
+
+static void mem_put_le16(char *mem, unsigned int val) {
+ mem[0] = val;
+ mem[1] = val >> 8;
+}
+
+static void mem_put_le32(char *mem, unsigned int val) {
+ mem[0] = val;
+ mem[1] = val >> 8;
+ mem[2] = val >> 16;
+ mem[3] = val >> 24;
+}
+
+static void usage(char *program_name) {
+ printf(
+ "Usage: %s [-f frames] [-s skip_frames] [-w width] [-h height] \n\t"
+ "[-n rate_num] [-d rate_den] [-b bitrate] [-l layers] "
+ "<input_filename> <output_filename>\n",
+ basename(program_name));
+ exit(EXIT_FAILURE);
+}
+
+static void die(const char *fmt, ...) {
+ va_list ap;
+
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ if (fmt[strlen(fmt) - 1] != '\n') printf("\n");
+ exit(EXIT_FAILURE);
+}
+
+static void die_codec(vpx_codec_ctx_t *ctx, const char *s) {
+ const char *detail = vpx_codec_error_detail(ctx);
+
+ printf("%s: %s\n", s, vpx_codec_error(ctx));
+ if (detail) printf(" %s\n", detail);
+ exit(EXIT_FAILURE);
+}
+
+static int read_frame(FILE *f, vpx_image_t *img) {
+ size_t nbytes, to_read;
+ int res = 1;
+
+ to_read = img->w * img->h * 3 / 2;
+ nbytes = fread(img->planes[0], 1, to_read, f);
+ if (nbytes != to_read) {
+ res = 0;
+ if (nbytes > 0)
+ printf("Warning: Read partial frame. Check your width & height!\n");
+ }
+ return res;
+}
+
+static int read_dummy_frame(vpx_image_t *img) {
+ size_t to_read;
+
+ to_read = img->w * img->h * 3 / 2;
+ memset(img->planes[0], 129, to_read);
+ return 1;
+}
+
+static void write_ivf_file_header(FILE *outfile, const vpx_codec_enc_cfg_t *cfg,
+ int frame_cnt) {
+ char header[32];
+
+ if (cfg->g_pass != VPX_RC_ONE_PASS && cfg->g_pass != VPX_RC_LAST_PASS) return;
+ header[0] = 'D';
+ header[1] = 'K';
+ header[2] = 'I';
+ header[3] = 'F';
+ mem_put_le16(header + 4, 0); /* version */
+ mem_put_le16(header + 6, 32); /* headersize */
+ mem_put_le32(header + 8, fourcc); /* fourcc */
+ mem_put_le16(header + 12, cfg->g_w); /* width */
+ mem_put_le16(header + 14, cfg->g_h); /* height */
+ mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */
+ mem_put_le32(header + 20, cfg->g_timebase.num); /* scale */
+ mem_put_le32(header + 24, frame_cnt); /* length */
+ mem_put_le32(header + 28, 0); /* unused */
+
+ (void)fwrite(header, 1, 32, outfile);
+}
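+
+/* Layout sketch (illustrative): the 32-byte IVF file header written above is
+ *   0..3   "DKIF"          8..11  fourcc "VP90" (0x30395056, little-endian)
+ *   4..5   version (0)     12..13 width    14..15 height
+ *   6..7   header size     16..19 timebase den    20..23 timebase num
+ *   24..27 frame count     28..31 unused   -- all fields little-endian. */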
+
+static void write_ivf_frame_header(FILE *outfile,
+ const vpx_codec_cx_pkt_t *pkt) {
+ char header[12];
+ vpx_codec_pts_t pts;
+
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return;
+
+ pts = pkt->data.frame.pts;
+ mem_put_le32(header, pkt->data.frame.sz);
+ mem_put_le32(header + 4, pts & 0xFFFFFFFF);
+ mem_put_le32(header + 8, pts >> 32);
+
+ (void)fwrite(header, 1, 12, outfile);
+}
+
+static void check_parameters(void) {
+ if (number_spatial_layers > 5) die("Cannot support more than 5 layers");
+}
+
+static void parse_command_line(int argc, char **argv,
+ vpx_codec_enc_cfg_t *cfg) {
+ unsigned int width = 1920;
+ unsigned int height = 1080;
+ unsigned int timebase_num = 1;
+ unsigned int timebase_den = 60;
+ unsigned int bitrate = 1000;
+ int c;
+ vpx_codec_err_t res;
+
+ opterr = 0;
+ while ((c = getopt(argc, argv, "f:w:h:n:d:b:s:l:p:")) != -1) switch (c) {
+ case 'f':
+ number_frames_to_code = atoi(optarg);
+ break;
+ case 'w':
+ width = atoi(optarg);
+ break;
+ case 'h':
+ height = atoi(optarg);
+ break;
+ case 'n':
+ timebase_num = atoi(optarg);
+ break;
+ case 'd':
+ timebase_den = atoi(optarg);
+ break;
+ case 'b':
+ bitrate = atoi(optarg);
+ break;
+ case 's':
+ number_frames_to_skip = atoi(optarg);
+ break;
+ case 'l':
+ number_spatial_layers = atoi(optarg);
+ break;
+ case 'p':
+ key_period = atoi(optarg);
+ break;
+ case '?':
+ usage(argv[0]);
+ }
+
+ // Parse required parameters
+ if (argc - optind != 2) {
+ usage(argv[0]);
+ }
+
+ input_filename = argv[optind];
+ output_filename = argv[optind + 1];
+
+ if (width < 16 || width % 2 || height < 16 || height % 2)
+ die("Invalid resolution: %d x %d", width, height);
+
+ /* Populate encoder configuration */
+ res = vpx_codec_enc_config_default(interface, cfg, 0);
+ if (res) {
+ die("Failed to get config: %s\n", vpx_codec_err_to_string(res));
+ }
+ printf(
+ "Codec %s\nframes: %d, skip: %d, layers: %d\n"
+ "width %d, height: %d, \n"
+ "num: %d, den: %d, bitrate: %d, \n"
+ "key period: %d \n",
+ vpx_codec_iface_name(interface), number_frames_to_code,
+ number_frames_to_skip, number_spatial_layers, width, height, timebase_num,
+ timebase_den, bitrate, key_period);
+
+ // Do minimal check at the application level. Encoder parameters will be
+ // checked internally
+ check_parameters();
+
+ cfg->rc_target_bitrate = bitrate;
+ cfg->g_w = width;
+ cfg->g_h = height;
+ cfg->g_timebase.num = timebase_num;
+ cfg->g_timebase.den = timebase_den;
+ cfg->ss_number_layers = number_spatial_layers;
+}
+
+static void set_default_configuration(vpx_codec_enc_cfg_t *cfg) {
+ /* Real time parameters */
+ cfg->rc_dropframe_thresh = 0;
+ cfg->rc_end_usage = VPX_CBR;
+ cfg->rc_resize_allowed = 0;
+ cfg->rc_min_quantizer = 33;
+ cfg->rc_max_quantizer = 33;
+ cfg->rc_undershoot_pct = 100;
+ cfg->rc_overshoot_pct = 15;
+ cfg->rc_buf_initial_sz = 500;
+ cfg->rc_buf_optimal_sz = 600;
+ cfg->rc_buf_sz = 1000;
+
+ /* Enable error resilient mode */
+ cfg->g_error_resilient = 1;
+ cfg->g_lag_in_frames = 0;
+
+ /* Disable automatic keyframe placement */
+ cfg->kf_mode = VPX_KF_DISABLED;
+ cfg->kf_min_dist = cfg->kf_max_dist = 3000;
+}
+
+static void initialize_codec(vpx_codec_ctx_t *codec, vpx_codec_enc_cfg_t *cfg) {
+ int max_intra_size_pct;
+
+ /* Initialize codec */
+ if (vpx_codec_enc_init(codec, interface, cfg, VPX_CODEC_USE_PSNR))
+ die_codec(codec, "Failed to initialize encoder");
+
+ vpx_codec_control(codec, VP9E_SET_SVC, 1);
+ /* Cap CPU & first I-frame size */
+ vpx_codec_control(codec, VP8E_SET_CPUUSED, 1);
+ vpx_codec_control(codec, VP8E_SET_STATIC_THRESHOLD, 1);
+ vpx_codec_control(codec, VP8E_SET_NOISE_SENSITIVITY, 1);
+ vpx_codec_control(codec, VP8E_SET_TOKEN_PARTITIONS, 1);
+
+ max_intra_size_pct =
+ (int)(((double)cfg->rc_buf_optimal_sz * 0.5) *
+ ((double)cfg->g_timebase.den / cfg->g_timebase.num) / 10.0);
+ /* printf ("max_intra_size_pct=%d\n", max_intra_size_pct); */
+
+ vpx_codec_control(codec, VP8E_SET_MAX_INTRA_BITRATE_PCT, max_intra_size_pct);
+}
+
+static int calculate_layer(int frame_cnt, int number_spatial_layers) {
+ if (frame_cnt == 0)
+ return 0;
+ else
+ return (frame_cnt + number_spatial_layers - 1) % number_spatial_layers;
+}
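+
+/* Schedule example (illustrative) for number_spatial_layers == 3: frame 0 is
+ * the dummy keyframe (layer 0), then frames 1,2,3 encode the first source
+ * frame as layers 0,1,2, frames 4,5,6 the second, and so on -- each input
+ * frame (read only when layer == 0) is coded once per layer, lowest
+ * resolution first. */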
+
+static void switch_to_layer(int layer, unsigned int initial_width,
+ unsigned int initial_height,
+ vpx_codec_ctx_t *codec) {
+ // Set layer size
+ int scaling_factor_num[MAX_LAYERS] = {2, 1, 4, 2, 1};
+ int scaling_factor_den[MAX_LAYERS] = {9, 3, 9, 3, 1};
+
+ int quantizer[MAX_LAYERS] = {60, 53, 39, 33, 27};
+
+ unsigned int current_width;
+ unsigned int current_height;
+
+ current_width = initial_width *
+ scaling_factor_num[layer + 5 - number_spatial_layers] /
+ scaling_factor_den[layer + 5 - number_spatial_layers];
+ current_height = initial_height *
+ scaling_factor_num[layer + 5 - number_spatial_layers] /
+ scaling_factor_den[layer + 5 - number_spatial_layers];
+
+ current_width += current_width % 2;
+ current_height += current_height % 2;
+
+ vpx_codec_control(codec, VP9E_SET_WIDTH, &current_width);
+ vpx_codec_control(codec, VP9E_SET_HEIGHT, &current_height);
+
+ // Set layer context
+ vpx_codec_control(codec, VP9E_SET_LAYER, &layer);
+ vpx_codec_control(codec, VP9E_SET_MAX_Q,
+ quantizer[layer + 5 - number_spatial_layers]);
+ vpx_codec_control(codec, VP9E_SET_MIN_Q,
+ quantizer[layer + 5 - number_spatial_layers]);
+}
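+
+/* Worked sizes (illustrative) for 5 layers at 1920x1080: the 2/9, 1/3, 4/9,
+ * 2/3 and 1/1 factors above give 426x240, 640x360, 854x480 (853 rounded up
+ * to even), 1280x720 and 1920x1080 for layers 0..4. */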
+
+static int get_flag(int is_I_frame_in_layer, int layer, ENCODING_MODE mode) {
+ // First layer
+ switch (mode) {
+ case INTER_LAYER_PREDICTION_I:
+ if (is_I_frame_in_layer && layer == 0) return VPX_EFLAG_FORCE_KF;
+ if (layer == 0)
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+ else if (is_I_frame_in_layer)
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST;
+ else
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+ break;
+
+ case INTER_LAYER_PREDICTION_IP:
+ if (is_I_frame_in_layer && layer == 0) return VPX_EFLAG_FORCE_KF;
+ if (layer == 0)
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+ else if (is_I_frame_in_layer)
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST;
+ else
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF;
+ break;
+
+ case USE_GOLDEN_FRAME:
+ if (is_I_frame_in_layer && layer == 0) return VPX_EFLAG_FORCE_KF;
+ if (2 * number_spatial_layers - NUM_BUFFERS <= layer) {
+ if (layer == 0)
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_ARF;
+ else if (is_I_frame_in_layer)
+ return VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_REF_LAST;
+ else
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+ } else {
+ if (layer == 0)
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+ else if (is_I_frame_in_layer)
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST;
+ else
+ return VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+ }
+ break;
+ default:
+ return VPX_EFLAG_FORCE_KF;
+ }
+}
+
+int main(int argc, char **argv) {
+ FILE *infile, *outfile[MAX_LAYERS];
+ vpx_codec_ctx_t codec;
+ vpx_codec_enc_cfg_t cfg;
+ int frame_cnt = 0;
+ vpx_image_t raw;
+ int frame_avail = 1;
+ int got_data = 0;
+ int i;
+ int frames_in_layer[MAX_LAYERS] = {0};
+ clock_t before;
+ clock_t after;
+ int pts = 0; /* PTS starts at 0 */
+ int frame_duration = 1; /* 1 timebase tick per frame */
+
+ parse_command_line(argc, argv, &cfg);
+
+ // Allocate image buffer
+ if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, cfg.g_w, cfg.g_h, 32))
+ die("Failed to allocate image", cfg.g_w, cfg.g_h);
+
+ set_default_configuration(&cfg);
+
+ /* Open input file */
+ if (!(infile = fopen(input_filename, "rb")))
+ die("Failed to open %s for reading", argv[1]);
+
+ /* Open output file */
+ for (i = 0; i < number_spatial_layers; i++) {
+ char file_name[512];
+ snprintf(file_name, sizeof(file_name), "%s_%d.ivf", output_filename, i);
+ if (!(outfile[i] = fopen(file_name, "wb")))
+ die("Failed to open %s for writing", file_name);
+ write_ivf_file_header(outfile[i], &cfg, 0);
+ }
+
+ initialize_codec(&codec, &cfg);
+
+ // skip initial frames
+ for (i = 0; i < number_frames_to_skip; i++) {
+ read_frame(infile, &raw);
+ }
+
+ before = clock();
+ // Encoding frames
+ while ((frame_avail || got_data) &&
+ frame_cnt <= number_frames_to_code * number_spatial_layers) {
+ int flags = 0;
+ vpx_codec_iter_t iter = NULL;
+ const vpx_codec_cx_pkt_t *pkt;
+
+ int layer = calculate_layer(frame_cnt, number_spatial_layers);
+ int is_I_frame_in_layer =
+ (((frame_cnt - 1) / number_spatial_layers % key_period) == 0);
+ int is_dummy = (frame_cnt == 0);
+
+ if (is_dummy) { // Dummy frame
+ flags = VPX_EFLAG_FORCE_KF;
+ frame_avail = read_dummy_frame(&raw);
+
+ } else { // Regular frame
+ // Read a new frame only at the base layer
+ if (layer == 0) frame_avail = read_frame(infile, &raw);
+ switch_to_layer(layer, cfg.g_w, cfg.g_h, &codec);
+ flags = get_flag(is_I_frame_in_layer, layer, INTER_LAYER_PREDICTION_I);
+ }
+
+ // Actual Encoding
+ if (vpx_codec_encode(&codec, frame_avail ? &raw : NULL, pts, 1, flags,
+ VPX_DL_REALTIME))
+ die_codec(&codec, "Failed to encode frame");
+
+ got_data = 0;
+ // Process data / Get PSNR statistics
+ while ((pkt = vpx_codec_get_cx_data(&codec, &iter))) {
+ got_data = 1;
+ switch (pkt->kind) {
+ case VPX_CODEC_CX_FRAME_PKT:
+ for (i = layer; i < number_spatial_layers; i++) {
+ write_ivf_frame_header(outfile[i], pkt);
+ (void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz,
+ outfile[i]);
+ frames_in_layer[i]++;
+ }
+ break;
+ case VPX_CODEC_PSNR_PKT:
+ if (frame_cnt != 0)
+ printf(
+ "Processed Frame %d, layer %d, PSNR(Total/Y/U/V): "
+ "%2.3f %2.3f %2.3f %2.3f \n",
+ (frame_cnt - 1) / number_spatial_layers + 1, layer,
+ pkt->data.psnr.psnr[0], pkt->data.psnr.psnr[1],
+ pkt->data.psnr.psnr[2], pkt->data.psnr.psnr[3]);
+ break;
+ default:
+ break;
+ }
+ }
+ frame_cnt++;
+ // TODO(ivan): Modify ts later if(!layer)
+ pts += frame_duration;
+ }
+ // end while
+
+ after = clock();
+ printf("Processed %d frames in different resolutions in %ld ms.\n",
+ frame_cnt - 1, (int)(after - before) / (CLOCKS_PER_SEC / 1000));
+
+ fclose(infile);
+
+ if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec");
+
+ /* Try to rewrite the output file headers with the actual frame count */
+ for (i = 0; i < number_spatial_layers; i++) {
+ if (!fseek(outfile[i], 0, SEEK_SET)) {
+ write_ivf_file_header(outfile[i], &cfg, frames_in_layer[i]);
+ }
+ fclose(outfile[i]);
+ }
+
+ return EXIT_SUCCESS;
+}
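
The example above writes each per-layer IVF header with a frame count of zero, encodes, then seeks back and patches in frames_in_layer[i]. For reference, a sketch of what such a header writer looks like, assuming the standard 32-byte IVF layout; the helper names and body are illustrative, not the patch's actual write_ivf_file_header():

#include <stdio.h>

static void put_le16(unsigned char *p, unsigned int v) { p[0] = v; p[1] = v >> 8; }
static void put_le32(unsigned char *p, unsigned int v) {
  put_le16(p, v & 0xffff);
  put_le16(p + 2, v >> 16);
}

/* Illustrative IVF file header writer (standard 32-byte layout). */
static void write_ivf_header_sketch(FILE *f, const vpx_codec_enc_cfg_t *cfg,
                                    int frame_cnt) {
  unsigned char h[32] = { 'D', 'K', 'I', 'F' };
  put_le16(h + 4, 0);                   /* version */
  put_le16(h + 6, 32);                  /* header size */
  put_le32(h + 8, 0x30385056);          /* fourcc "VP80" */
  put_le16(h + 12, cfg->g_w);
  put_le16(h + 14, cfg->g_h);
  put_le32(h + 16, cfg->g_timebase.den);
  put_le32(h + 20, cfg->g_timebase.num);
  put_le32(h + 24, frame_cnt);          /* zero at first; patched after encoding */
  (void)fwrite(h, 1, 32, f);
}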
diff --git a/libvpx/vpx/internal/vpx_codec_internal.h b/libvpx/vpx/internal/vpx_codec_internal.h
index 4474331..05fed97 100644
--- a/libvpx/vpx/internal/vpx_codec_internal.h
+++ b/libvpx/vpx/internal/vpx_codec_internal.h
@@ -75,7 +75,7 @@ typedef struct vpx_codec_priv_enc_mr_cfg vpx_codec_priv_enc_mr_cfg_t;
* Memory operation failed.
*/
typedef vpx_codec_err_t (*vpx_codec_init_fn_t)(vpx_codec_ctx_t *ctx,
- vpx_codec_priv_enc_mr_cfg_t *data);
+ vpx_codec_priv_enc_mr_cfg_t *data);
/*!\brief destroy function pointer prototype
*
@@ -94,9 +94,10 @@ typedef vpx_codec_err_t (*vpx_codec_destroy_fn_t)(vpx_codec_alg_priv_t *ctx);
/*!\brief parse stream info function pointer prototype
*
- * Performs high level parsing of the bitstream. This function is called by
- * the generic vpx_codec_parse_stream() wrapper function, so plugins implementing
- * this interface may trust the input parameters to be properly initialized.
+ * Performs high level parsing of the bitstream. This function is called by the
+ * generic vpx_codec_peek_stream_info() wrapper function, so plugins
+ * implementing this interface may trust the input parameters to be properly
+ * initialized.
*
* \param[in] data Pointer to a block of data to parse
* \param[in] data_sz Size of the data buffer
@@ -109,8 +110,8 @@ typedef vpx_codec_err_t (*vpx_codec_destroy_fn_t)(vpx_codec_alg_priv_t *ctx);
* Bitstream is parsable and stream information updated
*/
typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si);
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si);
/*!\brief Return information about the current stream.
*
@@ -126,7 +127,7 @@ typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data,
* Bitstream is parsable and stream information updated
*/
typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx,
- vpx_codec_stream_info_t *si);
+ vpx_codec_stream_info_t *si);
/*!\brief control function pointer prototype
*
@@ -151,8 +152,8 @@ typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx,
* The internal state data was deserialized.
*/
typedef vpx_codec_err_t (*vpx_codec_control_fn_t)(vpx_codec_alg_priv_t *ctx,
- int ctrl_id,
- va_list ap);
+ int ctrl_id,
+ va_list ap);
/*!\brief control function pointer mapping
*
@@ -165,10 +166,9 @@ typedef vpx_codec_err_t (*vpx_codec_control_fn_t)(vpx_codec_alg_priv_t *ctx,
* mapping. This implies that ctrl_id values chosen by the algorithm
* \ref MUST be non-zero.
*/
-typedef const struct vpx_codec_ctrl_fn_map
-{
- int ctrl_id;
- vpx_codec_control_fn_t fn;
+typedef const struct vpx_codec_ctrl_fn_map {
+ int ctrl_id;
+ vpx_codec_control_fn_t fn;
} vpx_codec_ctrl_fn_map_t;
/*!\brief decode data function pointer prototype
@@ -192,10 +192,10 @@ typedef const struct vpx_codec_ctrl_fn_map
* for recoverability capabilities.
*/
typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx,
- const uint8_t *data,
- unsigned int data_sz,
- void *user_priv,
- long deadline);
+ const uint8_t *data,
+ unsigned int data_sz,
+ void *user_priv,
+ long deadline);
/*!\brief Decoded frames iterator
*
@@ -212,8 +212,8 @@ typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx,
* \return Returns a pointer to an image, if one is ready for display. Frames
* produced will always be in PTS (presentation time stamp) order.
*/
-typedef vpx_image_t*(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx,
- vpx_codec_iter_t *iter);
+typedef vpx_image_t *(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter);
/*\brief eXternal Memory Allocation memory map get iterator
@@ -228,8 +228,8 @@ typedef vpx_image_t*(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx,
* indicate end-of-list.
*/
typedef vpx_codec_err_t (*vpx_codec_get_mmap_fn_t)(const vpx_codec_ctx_t *ctx,
- vpx_codec_mmap_t *mmap,
- vpx_codec_iter_t *iter);
+ vpx_codec_mmap_t *mmap,
+ vpx_codec_iter_t *iter);
/*\brief eXternal Memory Allocation memory map set iterator
@@ -245,17 +245,17 @@ typedef vpx_codec_err_t (*vpx_codec_get_mmap_fn_t)(const vpx_codec_ctx_t *c
* The memory map was rejected.
*/
typedef vpx_codec_err_t (*vpx_codec_set_mmap_fn_t)(vpx_codec_ctx_t *ctx,
- const vpx_codec_mmap_t *mmap);
+ const vpx_codec_mmap_t *mmap);
typedef vpx_codec_err_t (*vpx_codec_encode_fn_t)(vpx_codec_alg_priv_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned long duration,
- vpx_enc_frame_flags_t flags,
- unsigned long deadline);
-typedef const vpx_codec_cx_pkt_t*(*vpx_codec_get_cx_data_fn_t)(vpx_codec_alg_priv_t *ctx,
- vpx_codec_iter_t *iter);
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned long duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned long deadline);
+typedef const vpx_codec_cx_pkt_t *(*vpx_codec_get_cx_data_fn_t)(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter);
typedef vpx_codec_err_t
(*vpx_codec_enc_config_set_fn_t)(vpx_codec_alg_priv_t *ctx,
@@ -268,7 +268,7 @@ typedef vpx_image_t *
typedef vpx_codec_err_t
(*vpx_codec_enc_mr_get_mem_loc_fn_t)(const vpx_codec_enc_cfg_t *cfg,
- void **mem_loc);
+ void **mem_loc);
/*!\brief usage configuration mapping
*
@@ -280,10 +280,9 @@ typedef vpx_codec_err_t
* one mapping must be present, in addition to the end-of-list.
*
*/
-typedef const struct vpx_codec_enc_cfg_map
-{
- int usage;
- vpx_codec_enc_cfg_t cfg;
+typedef const struct vpx_codec_enc_cfg_map {
+ int usage;
+ vpx_codec_enc_cfg_t cfg;
} vpx_codec_enc_cfg_map_t;
#define NOT_IMPLEMENTED 0
@@ -292,44 +291,39 @@ typedef const struct vpx_codec_enc_cfg_map
*
* All decoders \ref MUST expose a variable of this type.
*/
-struct vpx_codec_iface
-{
- const char *name; /**< Identification String */
- int abi_version; /**< Implemented ABI version */
- vpx_codec_caps_t caps; /**< Decoder capabilities */
- vpx_codec_init_fn_t init; /**< \copydoc ::vpx_codec_init_fn_t */
- vpx_codec_destroy_fn_t destroy; /**< \copydoc ::vpx_codec_destroy_fn_t */
- vpx_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::vpx_codec_ctrl_fn_map_t */
- vpx_codec_get_mmap_fn_t get_mmap; /**< \copydoc ::vpx_codec_get_mmap_fn_t */
- vpx_codec_set_mmap_fn_t set_mmap; /**< \copydoc ::vpx_codec_set_mmap_fn_t */
- struct vpx_codec_dec_iface
- {
- vpx_codec_peek_si_fn_t peek_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */
- vpx_codec_get_si_fn_t get_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */
- vpx_codec_decode_fn_t decode; /**< \copydoc ::vpx_codec_decode_fn_t */
- vpx_codec_get_frame_fn_t get_frame; /**< \copydoc ::vpx_codec_get_frame_fn_t */
- } dec;
- struct vpx_codec_enc_iface
- {
- vpx_codec_enc_cfg_map_t *cfg_maps; /**< \copydoc ::vpx_codec_enc_cfg_map_t */
- vpx_codec_encode_fn_t encode; /**< \copydoc ::vpx_codec_encode_fn_t */
- vpx_codec_get_cx_data_fn_t get_cx_data; /**< \copydoc ::vpx_codec_get_cx_data_fn_t */
- vpx_codec_enc_config_set_fn_t cfg_set; /**< \copydoc ::vpx_codec_enc_config_set_fn_t */
- vpx_codec_get_global_headers_fn_t get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */
- vpx_codec_get_preview_frame_fn_t get_preview; /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */
- vpx_codec_enc_mr_get_mem_loc_fn_t mr_get_mem_loc; /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */
- } enc;
+struct vpx_codec_iface {
+ const char *name; /**< Identification String */
+ int abi_version; /**< Implemented ABI version */
+ vpx_codec_caps_t caps; /**< Decoder capabilities */
+ vpx_codec_init_fn_t init; /**< \copydoc ::vpx_codec_init_fn_t */
+ vpx_codec_destroy_fn_t destroy; /**< \copydoc ::vpx_codec_destroy_fn_t */
+ vpx_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::vpx_codec_ctrl_fn_map_t */
+ vpx_codec_get_mmap_fn_t get_mmap; /**< \copydoc ::vpx_codec_get_mmap_fn_t */
+ vpx_codec_set_mmap_fn_t set_mmap; /**< \copydoc ::vpx_codec_set_mmap_fn_t */
+ struct vpx_codec_dec_iface {
+ vpx_codec_peek_si_fn_t peek_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */
+ vpx_codec_get_si_fn_t get_si; /**< \copydoc ::vpx_codec_get_si_fn_t */
+ vpx_codec_decode_fn_t decode; /**< \copydoc ::vpx_codec_decode_fn_t */
+ vpx_codec_get_frame_fn_t get_frame; /**< \copydoc ::vpx_codec_get_frame_fn_t */
+ } dec;
+ struct vpx_codec_enc_iface {
+ vpx_codec_enc_cfg_map_t *cfg_maps; /**< \copydoc ::vpx_codec_enc_cfg_map_t */
+ vpx_codec_encode_fn_t encode; /**< \copydoc ::vpx_codec_encode_fn_t */
+ vpx_codec_get_cx_data_fn_t get_cx_data; /**< \copydoc ::vpx_codec_get_cx_data_fn_t */
+ vpx_codec_enc_config_set_fn_t cfg_set; /**< \copydoc ::vpx_codec_enc_config_set_fn_t */
+ vpx_codec_get_global_headers_fn_t get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */
+ vpx_codec_get_preview_frame_fn_t get_preview; /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */
+ vpx_codec_enc_mr_get_mem_loc_fn_t mr_get_mem_loc; /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */
+ } enc;
};
/*!\brief Callback function pointer / user data pair storage */
-typedef struct vpx_codec_priv_cb_pair
-{
- union
- {
- vpx_codec_put_frame_cb_fn_t put_frame;
- vpx_codec_put_slice_cb_fn_t put_slice;
- } u;
- void *user_priv;
+typedef struct vpx_codec_priv_cb_pair {
+ union {
+ vpx_codec_put_frame_cb_fn_t put_frame;
+ vpx_codec_put_slice_cb_fn_t put_slice;
+ } u;
+ void *user_priv;
} vpx_codec_priv_cb_pair_t;
@@ -341,27 +335,24 @@ typedef struct vpx_codec_priv_cb_pair
* structure can be made the first member of the algorithm specific structure,
* and the pointer cast to the proper type.
*/
-struct vpx_codec_priv
-{
- unsigned int sz;
- vpx_codec_iface_t *iface;
- struct vpx_codec_alg_priv *alg_priv;
- const char *err_detail;
- vpx_codec_flags_t init_flags;
- struct
- {
- vpx_codec_priv_cb_pair_t put_frame_cb;
- vpx_codec_priv_cb_pair_t put_slice_cb;
- } dec;
- struct
- {
- int tbd;
- struct vpx_fixed_buf cx_data_dst_buf;
- unsigned int cx_data_pad_before;
- unsigned int cx_data_pad_after;
- vpx_codec_cx_pkt_t cx_data_pkt;
- unsigned int total_encoders;
- } enc;
+struct vpx_codec_priv {
+ unsigned int sz;
+ vpx_codec_iface_t *iface;
+ struct vpx_codec_alg_priv *alg_priv;
+ const char *err_detail;
+ vpx_codec_flags_t init_flags;
+ struct {
+ vpx_codec_priv_cb_pair_t put_frame_cb;
+ vpx_codec_priv_cb_pair_t put_slice_cb;
+ } dec;
+ struct {
+ int tbd;
+ struct vpx_fixed_buf cx_data_dst_buf;
+ unsigned int cx_data_pad_before;
+ unsigned int cx_data_pad_after;
+ vpx_codec_cx_pkt_t cx_data_pkt;
+ unsigned int total_encoders;
+ } enc;
};
/*
@@ -377,32 +368,32 @@ struct vpx_codec_priv_enc_mr_cfg
#undef VPX_CTRL_USE_TYPE
#define VPX_CTRL_USE_TYPE(id, typ) \
- static typ id##__value(va_list args) {return va_arg(args, typ);} \
- static typ id##__convert(void *x)\
+ static typ id##__value(va_list args) {return va_arg(args, typ);} \
+ static typ id##__convert(void *x)\
+ {\
+ union\
{\
- union\
- {\
- void *x;\
- typ d;\
- } u;\
- u.x = x;\
- return u.d;\
- }
+ void *x;\
+ typ d;\
+ } u;\
+ u.x = x;\
+ return u.d;\
+ }
#undef VPX_CTRL_USE_TYPE_DEPRECATED
#define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \
- static typ id##__value(va_list args) {return va_arg(args, typ);} \
- static typ id##__convert(void *x)\
+ static typ id##__value(va_list args) {return va_arg(args, typ);} \
+ static typ id##__convert(void *x)\
+ {\
+ union\
{\
- union\
- {\
- void *x;\
- typ d;\
- } u;\
- u.x = x;\
- return u.d;\
- }
+ void *x;\
+ typ d;\
+ } u;\
+ u.x = x;\
+ return u.d;\
+ }
#define CAST(id, arg) id##__value(arg)
#define RECAST(id, x) id##__convert(x)
@@ -418,8 +409,8 @@ struct vpx_codec_priv_enc_mr_cfg
* macro is provided to define this getter function automatically.
*/
#define CODEC_INTERFACE(id)\
-vpx_codec_iface_t* id(void) { return &id##_algo; }\
-vpx_codec_iface_t id##_algo
+ vpx_codec_iface_t* id(void) { return &id##_algo; }\
+ vpx_codec_iface_t id##_algo
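
To make the pattern concrete, a hypothetical expansion of the macro above (the codec name is invented):

/* CODEC_INTERFACE(vpx_codec_foo_cx) expands, roughly, to: */
vpx_codec_iface_t *vpx_codec_foo_cx(void) { return &vpx_codec_foo_cx_algo; }
vpx_codec_iface_t vpx_codec_foo_cx_algo    /* the codec then appends = { ... }; */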
/* Internal Utility Functions
@@ -427,64 +418,86 @@ vpx_codec_iface_t id##_algo
* The following functions are intended to be used inside algorithms as
* utilities for manipulating vpx_codec_* data structures.
*/
-struct vpx_codec_pkt_list
-{
- unsigned int cnt;
- unsigned int max;
- struct vpx_codec_cx_pkt pkts[1];
+struct vpx_codec_pkt_list {
+ unsigned int cnt;
+ unsigned int max;
+ struct vpx_codec_cx_pkt pkts[1];
};
#define vpx_codec_pkt_list_decl(n)\
- union {struct vpx_codec_pkt_list head;\
- struct {struct vpx_codec_pkt_list head;\
- struct vpx_codec_cx_pkt pkts[n];} alloc;}
+ union {struct vpx_codec_pkt_list head;\
+ struct {struct vpx_codec_pkt_list head;\
+ struct vpx_codec_cx_pkt pkts[n];} alloc;}
#define vpx_codec_pkt_list_init(m)\
- (m)->alloc.head.cnt = 0,\
- (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0])
+ (m)->alloc.head.cnt = 0,\
+ (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0])
int
vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *,
const struct vpx_codec_cx_pkt *);
-const vpx_codec_cx_pkt_t*
+const vpx_codec_cx_pkt_t *
vpx_codec_pkt_list_get(struct vpx_codec_pkt_list *list,
vpx_codec_iter_t *iter);
#include <stdio.h>
#include <setjmp.h>
-struct vpx_internal_error_info
-{
- vpx_codec_err_t error_code;
- int has_detail;
- char detail[80];
- int setjmp;
- jmp_buf jmp;
+struct vpx_internal_error_info {
+ vpx_codec_err_t error_code;
+ int has_detail;
+ char detail[80];
+ int setjmp;
+ jmp_buf jmp;
};
static void vpx_internal_error(struct vpx_internal_error_info *info,
vpx_codec_err_t error,
const char *fmt,
- ...)
-{
- va_list ap;
+ ...) {
+ va_list ap;
- info->error_code = error;
- info->has_detail = 0;
+ info->error_code = error;
+ info->has_detail = 0;
- if (fmt)
- {
- size_t sz = sizeof(info->detail);
+ if (fmt) {
+ size_t sz = sizeof(info->detail);
- info->has_detail = 1;
- va_start(ap, fmt);
- vsnprintf(info->detail, sz - 1, fmt, ap);
- va_end(ap);
- info->detail[sz-1] = '\0';
- }
+ info->has_detail = 1;
+ va_start(ap, fmt);
+ vsnprintf(info->detail, sz - 1, fmt, ap);
+ va_end(ap);
+ info->detail[sz - 1] = '\0';
+ }
- if (info->setjmp)
- longjmp(info->jmp, info->error_code);
+ if (info->setjmp)
+ longjmp(info->jmp, info->error_code);
}
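
vpx_internal_error() longjmps back to whoever armed info->setjmp, turning deep internal failures into an ordinary error return. A minimal sketch of the guarding side, under the assumption that the caller owns the error-info instance (names invented):

#include <setjmp.h>
#include "vpx/internal/vpx_codec_internal.h"

static struct vpx_internal_error_info err_info;

/* Illustrative guard around code that may call vpx_internal_error(). */
static vpx_codec_err_t run_guarded(void (*work)(void)) {
  if (setjmp(err_info.jmp)) {
    err_info.setjmp = 0;
    return err_info.error_code;  /* re-entered here via longjmp */
  }
  err_info.setjmp = 1;
  work();                        /* deep code may longjmp out */
  err_info.setjmp = 0;
  return VPX_CODEC_OK;
}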
+
+//------------------------------------------------------------------------------
+// mmap interface
+
+typedef struct {
+ unsigned int id;
+ unsigned long sz;
+ unsigned int align;
+ unsigned int flags;
+ unsigned long (*calc_sz)(const vpx_codec_dec_cfg_t *, vpx_codec_flags_t);
+} mem_req_t;
+
+// Allocates mmap.priv and sets mmap.base based on mmap.sz/align/flags
+// requirements.
+// Returns #VPX_CODEC_OK on success, #VPX_CODEC_MEM_ERROR otherwise.
+vpx_codec_err_t vpx_mmap_alloc(vpx_codec_mmap_t *mmap);
+
+// Frees mmap.base allocated by a call to vpx_mmap_alloc().
+void vpx_mmap_dtor(vpx_codec_mmap_t *mmap);
+
+// Checks that each mmap meets the size requirement specified by mem_reqs.
+// Returns #VPX_CODEC_OK on success, #VPX_CODEC_MEM_ERROR otherwise.
+vpx_codec_err_t vpx_validate_mmaps(const vpx_codec_stream_info_t *si,
+ const vpx_codec_mmap_t *mmaps,
+ const mem_req_t *mem_reqs, int nreqs,
+ vpx_codec_flags_t init_flags);
#endif
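
vpx_validate_mmaps() iterates only to nreqs - 1, which suggests the mem_req_t table is terminated by a sentinel entry counted in nreqs. A hypothetical table, for illustration only (the id, alignment, and size callback are invented):

/* Variable-size segment: one I420 frame for the current stream size. */
static unsigned long calc_frame_sz(const vpx_codec_dec_cfg_t *cfg,
                                   vpx_codec_flags_t init_flags) {
  (void)init_flags;
  return (unsigned long)cfg->w * cfg->h * 3 / 2;
}

static const mem_req_t mem_reqs[] = {
  { 1, 0, 32, 0, calc_frame_sz },   /* variable-size frame buffer */
  { 0, 0, 0, 0, NULL },             /* end-of-list sentinel */
};
/* vpx_validate_mmaps(si, mmaps, mem_reqs, 2, flags) then checks only
 * mmaps[0], since the loop runs to nreqs - 1. */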
diff --git a/libvpx/vpx/src/vpx_codec.c b/libvpx/vpx/src/vpx_codec.c
index f1a8b67..1f664ae 100644
--- a/libvpx/vpx/src/vpx_codec.c
+++ b/libvpx/vpx/src/vpx_codec.c
@@ -14,137 +14,171 @@
*
*/
#include <stdarg.h>
+#include <stdlib.h>
#include "vpx/vpx_integer.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "vpx_version.h"
#define SAVE_STATUS(ctx,var) (ctx?(ctx->err = var):var)
-int vpx_codec_version(void)
-{
- return VERSION_PACKED;
+int vpx_codec_version(void) {
+ return VERSION_PACKED;
}
-const char *vpx_codec_version_str(void)
-{
- return VERSION_STRING_NOSP;
+const char *vpx_codec_version_str(void) {
+ return VERSION_STRING_NOSP;
}
-const char *vpx_codec_version_extra_str(void)
-{
- return VERSION_EXTRA;
+const char *vpx_codec_version_extra_str(void) {
+ return VERSION_EXTRA;
}
-const char *vpx_codec_iface_name(vpx_codec_iface_t *iface)
-{
- return iface ? iface->name : "<invalid interface>";
+const char *vpx_codec_iface_name(vpx_codec_iface_t *iface) {
+ return iface ? iface->name : "<invalid interface>";
}
-const char *vpx_codec_err_to_string(vpx_codec_err_t err)
-{
- switch (err)
- {
+const char *vpx_codec_err_to_string(vpx_codec_err_t err) {
+ switch (err) {
case VPX_CODEC_OK:
- return "Success";
+ return "Success";
case VPX_CODEC_ERROR:
- return "Unspecified internal error";
+ return "Unspecified internal error";
case VPX_CODEC_MEM_ERROR:
- return "Memory allocation error";
+ return "Memory allocation error";
case VPX_CODEC_ABI_MISMATCH:
- return "ABI version mismatch";
+ return "ABI version mismatch";
case VPX_CODEC_INCAPABLE:
- return "Codec does not implement requested capability";
+ return "Codec does not implement requested capability";
case VPX_CODEC_UNSUP_BITSTREAM:
- return "Bitstream not supported by this decoder";
+ return "Bitstream not supported by this decoder";
case VPX_CODEC_UNSUP_FEATURE:
- return "Bitstream required feature not supported by this decoder";
+ return "Bitstream required feature not supported by this decoder";
case VPX_CODEC_CORRUPT_FRAME:
- return "Corrupt frame detected";
+ return "Corrupt frame detected";
case VPX_CODEC_INVALID_PARAM:
- return "Invalid parameter";
+ return "Invalid parameter";
case VPX_CODEC_LIST_END:
- return "End of iterated list";
- }
+ return "End of iterated list";
+ }
- return "Unrecognized error code";
+ return "Unrecognized error code";
}
-const char *vpx_codec_error(vpx_codec_ctx_t *ctx)
-{
- return (ctx) ? vpx_codec_err_to_string(ctx->err)
- : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM);
+const char *vpx_codec_error(vpx_codec_ctx_t *ctx) {
+ return (ctx) ? vpx_codec_err_to_string(ctx->err)
+ : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM);
}
-const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx)
-{
- if (ctx && ctx->err)
- return ctx->priv ? ctx->priv->err_detail : ctx->err_detail;
+const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx) {
+ if (ctx && ctx->err)
+ return ctx->priv ? ctx->priv->err_detail : ctx->err_detail;
- return NULL;
+ return NULL;
}
-vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx)
-{
- vpx_codec_err_t res;
+vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx) {
+ vpx_codec_err_t res;
- if (!ctx)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- res = VPX_CODEC_ERROR;
- else
- {
- if (ctx->priv->alg_priv)
- ctx->iface->destroy(ctx->priv->alg_priv);
+ if (!ctx)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ res = VPX_CODEC_ERROR;
+ else {
+ if (ctx->priv->alg_priv)
+ ctx->iface->destroy(ctx->priv->alg_priv);
- ctx->iface = NULL;
- ctx->name = NULL;
- ctx->priv = NULL;
- res = VPX_CODEC_OK;
- }
+ ctx->iface = NULL;
+ ctx->name = NULL;
+ ctx->priv = NULL;
+ res = VPX_CODEC_OK;
+ }
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
-vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface)
-{
- return (iface) ? iface->caps : 0;
+vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface) {
+ return (iface) ? iface->caps : 0;
}
vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx,
int ctrl_id,
- ...)
-{
- vpx_codec_err_t res;
-
- if (!ctx || !ctrl_id)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv || !ctx->iface->ctrl_maps)
- res = VPX_CODEC_ERROR;
- else
- {
- vpx_codec_ctrl_fn_map_t *entry;
-
- res = VPX_CODEC_ERROR;
-
- for (entry = ctx->iface->ctrl_maps; entry && entry->fn; entry++)
- {
- if (!entry->ctrl_id || entry->ctrl_id == ctrl_id)
- {
- va_list ap;
-
- va_start(ap, ctrl_id);
- res = entry->fn(ctx->priv->alg_priv, ctrl_id, ap);
- va_end(ap);
- break;
- }
- }
+ ...) {
+ vpx_codec_err_t res;
+
+ if (!ctx || !ctrl_id)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv || !ctx->iface->ctrl_maps)
+ res = VPX_CODEC_ERROR;
+ else {
+ vpx_codec_ctrl_fn_map_t *entry;
+
+ res = VPX_CODEC_ERROR;
+
+ for (entry = ctx->iface->ctrl_maps; entry && entry->fn; entry++) {
+ if (!entry->ctrl_id || entry->ctrl_id == ctrl_id) {
+ va_list ap;
+
+ va_start(ap, ctrl_id);
+ res = entry->fn(ctx->priv->alg_priv, ctrl_id, ap);
+ va_end(ap);
+ break;
+ }
}
+ }
+
+ return SAVE_STATUS(ctx, res);
+}
+
+//------------------------------------------------------------------------------
+// mmap interface
+
+vpx_codec_err_t vpx_mmap_alloc(vpx_codec_mmap_t *mmap) {
+ unsigned int align = mmap->align ? mmap->align - 1 : 0;
+
+ if (mmap->flags & VPX_CODEC_MEM_ZERO)
+ mmap->priv = calloc(1, mmap->sz + align);
+ else
+ mmap->priv = malloc(mmap->sz + align);
+
+ if (mmap->priv == NULL) return VPX_CODEC_MEM_ERROR;
+ mmap->base = (void *)((((uintptr_t)mmap->priv) + align) & ~(uintptr_t)align);
+ mmap->dtor = vpx_mmap_dtor;
+ return VPX_CODEC_OK;
+}
- return SAVE_STATUS(ctx, res);
+void vpx_mmap_dtor(vpx_codec_mmap_t *mmap) {
+ free(mmap->priv);
+}
+
+vpx_codec_err_t vpx_validate_mmaps(const vpx_codec_stream_info_t *si,
+ const vpx_codec_mmap_t *mmaps,
+ const mem_req_t *mem_reqs, int nreqs,
+ vpx_codec_flags_t init_flags) {
+ int i;
+
+ for (i = 0; i < nreqs - 1; ++i) {
+ /* Ensure the segment has been allocated */
+ if (mmaps[i].base == NULL) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ /* Verify variable size segment is big enough for the current si. */
+ if (mem_reqs[i].calc_sz != NULL) {
+ vpx_codec_dec_cfg_t cfg;
+
+ cfg.w = si->w;
+ cfg.h = si->h;
+
+ if (mmaps[i].sz < mem_reqs[i].calc_sz(&cfg, init_flags)) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+ }
+ }
+ return VPX_CODEC_OK;
}
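
vpx_mmap_alloc() above over-allocates by align - 1 bytes and rounds the raw pointer up to the requested boundary. A self-contained check of that arithmetic, with values chosen purely for illustration:

#include <assert.h>
#include <stdint.h>

int main(void) {
  const uintptr_t raw = 0x1004;    /* pretend malloc() result */
  const uintptr_t align = 32 - 1;  /* mmap->align == 32 */
  const uintptr_t base = (raw + align) & ~align;
  assert(base == 0x1020 && base % 32 == 0);  /* rounded up and aligned */
  return 0;
}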
diff --git a/libvpx/vpx/src/vpx_decoder.c b/libvpx/vpx/src/vpx_decoder.c
index 59a783d..1f575e0 100644
--- a/libvpx/vpx/src/vpx_decoder.c
+++ b/libvpx/vpx/src/vpx_decoder.c
@@ -22,99 +22,91 @@ vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
vpx_codec_iface_t *iface,
vpx_codec_dec_cfg_t *cfg,
vpx_codec_flags_t flags,
- int ver)
-{
- vpx_codec_err_t res;
-
- if (ver != VPX_DECODER_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!ctx || !iface)
- res = VPX_CODEC_INVALID_PARAM;
- else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_POSTPROC) && !(iface->caps & VPX_CODEC_CAP_POSTPROC))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_ERROR_CONCEALMENT) &&
- !(iface->caps & VPX_CODEC_CAP_ERROR_CONCEALMENT))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_INPUT_FRAGMENTS) &&
- !(iface->caps & VPX_CODEC_CAP_INPUT_FRAGMENTS))
- res = VPX_CODEC_INCAPABLE;
- else if (!(iface->caps & VPX_CODEC_CAP_DECODER))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- memset(ctx, 0, sizeof(*ctx));
- ctx->iface = iface;
- ctx->name = iface->name;
- ctx->priv = NULL;
- ctx->init_flags = flags;
- ctx->config.dec = cfg;
- res = VPX_CODEC_OK;
-
- if (!(flags & VPX_CODEC_USE_XMA))
- {
- res = ctx->iface->init(ctx, NULL);
-
- if (res)
- {
- ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
- vpx_codec_destroy(ctx);
- }
-
- if (ctx->priv)
- ctx->priv->iface = ctx->iface;
- }
+ int ver) {
+ vpx_codec_err_t res;
+
+ if (ver != VPX_DECODER_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!ctx || !iface)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_POSTPROC) && !(iface->caps & VPX_CODEC_CAP_POSTPROC))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_ERROR_CONCEALMENT) &&
+ !(iface->caps & VPX_CODEC_CAP_ERROR_CONCEALMENT))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_INPUT_FRAGMENTS) &&
+ !(iface->caps & VPX_CODEC_CAP_INPUT_FRAGMENTS))
+ res = VPX_CODEC_INCAPABLE;
+ else if (!(iface->caps & VPX_CODEC_CAP_DECODER))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->iface = iface;
+ ctx->name = iface->name;
+ ctx->priv = NULL;
+ ctx->init_flags = flags;
+ ctx->config.dec = cfg;
+ res = VPX_CODEC_OK;
+
+ if (!(flags & VPX_CODEC_USE_XMA)) {
+ res = ctx->iface->init(ctx, NULL);
+
+ if (res) {
+ ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
+ vpx_codec_destroy(ctx);
+ }
+
+ if (ctx->priv)
+ ctx->priv->iface = ctx->iface;
}
+ }
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
- const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si)
-{
- vpx_codec_err_t res;
-
- if (!iface || !data || !data_sz || !si
- || si->sz < sizeof(vpx_codec_stream_info_t))
- res = VPX_CODEC_INVALID_PARAM;
- else
- {
- /* Set default/unknown values */
- si->w = 0;
- si->h = 0;
-
- res = iface->dec.peek_si(data, data_sz, si);
- }
-
- return res;
+ const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si) {
+ vpx_codec_err_t res;
+
+ if (!iface || !data || !data_sz || !si
+ || si->sz < sizeof(vpx_codec_stream_info_t))
+ res = VPX_CODEC_INVALID_PARAM;
+ else {
+ /* Set default/unknown values */
+ si->w = 0;
+ si->h = 0;
+
+ res = iface->dec.peek_si(data, data_sz, si);
+ }
+
+ return res;
}
vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
- vpx_codec_stream_info_t *si)
-{
- vpx_codec_err_t res;
-
- if (!ctx || !si || si->sz < sizeof(vpx_codec_stream_info_t))
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- res = VPX_CODEC_ERROR;
- else
- {
- /* Set default/unknown values */
- si->w = 0;
- si->h = 0;
-
- res = ctx->iface->dec.get_si(ctx->priv->alg_priv, si);
- }
-
- return SAVE_STATUS(ctx, res);
+ vpx_codec_stream_info_t *si) {
+ vpx_codec_err_t res;
+
+ if (!ctx || !si || si->sz < sizeof(vpx_codec_stream_info_t))
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ res = VPX_CODEC_ERROR;
+ else {
+ /* Set default/unknown values */
+ si->w = 0;
+ si->h = 0;
+
+ res = ctx->iface->dec.get_si(ctx->priv->alg_priv, si);
+ }
+
+ return SAVE_STATUS(ctx, res);
}
@@ -122,126 +114,115 @@ vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx,
const uint8_t *data,
unsigned int data_sz,
void *user_priv,
- long deadline)
-{
- vpx_codec_err_t res;
-
- /* Sanity checks */
- /* NULL data ptr allowed if data_sz is 0 too */
- if (!ctx || (!data && data_sz))
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- res = VPX_CODEC_ERROR;
- else
- {
- res = ctx->iface->dec.decode(ctx->priv->alg_priv, data, data_sz,
- user_priv, deadline);
- }
-
- return SAVE_STATUS(ctx, res);
+ long deadline) {
+ vpx_codec_err_t res;
+
+ /* Sanity checks */
+ /* NULL data ptr allowed if data_sz is 0 too */
+ if (!ctx || (!data && data_sz))
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ res = VPX_CODEC_ERROR;
+ else {
+ res = ctx->iface->dec.decode(ctx->priv->alg_priv, data, data_sz,
+ user_priv, deadline);
+ }
+
+ return SAVE_STATUS(ctx, res);
}
vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx,
- vpx_codec_iter_t *iter)
-{
- vpx_image_t *img;
+ vpx_codec_iter_t *iter) {
+ vpx_image_t *img;
- if (!ctx || !iter || !ctx->iface || !ctx->priv)
- img = NULL;
- else
- img = ctx->iface->dec.get_frame(ctx->priv->alg_priv, iter);
+ if (!ctx || !iter || !ctx->iface || !ctx->priv)
+ img = NULL;
+ else
+ img = ctx->iface->dec.get_frame(ctx->priv->alg_priv, iter);
- return img;
+ return img;
}
vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
- vpx_codec_put_frame_cb_fn_t cb,
- void *user_priv)
-{
- vpx_codec_err_t res;
-
- if (!ctx || !cb)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv
- || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
- res = VPX_CODEC_ERROR;
- else
- {
- ctx->priv->dec.put_frame_cb.u.put_frame = cb;
- ctx->priv->dec.put_frame_cb.user_priv = user_priv;
- res = VPX_CODEC_OK;
- }
-
- return SAVE_STATUS(ctx, res);
+ vpx_codec_put_frame_cb_fn_t cb,
+ void *user_priv) {
+ vpx_codec_err_t res;
+
+ if (!ctx || !cb)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv
+ || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
+ res = VPX_CODEC_ERROR;
+ else {
+ ctx->priv->dec.put_frame_cb.u.put_frame = cb;
+ ctx->priv->dec.put_frame_cb.user_priv = user_priv;
+ res = VPX_CODEC_OK;
+ }
+
+ return SAVE_STATUS(ctx, res);
}
vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
- vpx_codec_put_slice_cb_fn_t cb,
- void *user_priv)
-{
- vpx_codec_err_t res;
-
- if (!ctx || !cb)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv
- || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
- res = VPX_CODEC_ERROR;
- else
- {
- ctx->priv->dec.put_slice_cb.u.put_slice = cb;
- ctx->priv->dec.put_slice_cb.user_priv = user_priv;
- res = VPX_CODEC_OK;
- }
-
- return SAVE_STATUS(ctx, res);
+ vpx_codec_put_slice_cb_fn_t cb,
+ void *user_priv) {
+ vpx_codec_err_t res;
+
+ if (!ctx || !cb)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv
+ || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
+ res = VPX_CODEC_ERROR;
+ else {
+ ctx->priv->dec.put_slice_cb.u.put_slice = cb;
+ ctx->priv->dec.put_slice_cb.user_priv = user_priv;
+ res = VPX_CODEC_OK;
+ }
+
+ return SAVE_STATUS(ctx, res);
}
vpx_codec_err_t vpx_codec_get_mem_map(vpx_codec_ctx_t *ctx,
vpx_codec_mmap_t *mmap,
- vpx_codec_iter_t *iter)
-{
- vpx_codec_err_t res = VPX_CODEC_OK;
-
- if (!ctx || !mmap || !iter || !ctx->iface)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_ERROR;
- else
- res = ctx->iface->get_mmap(ctx, mmap, iter);
-
- return SAVE_STATUS(ctx, res);
+ vpx_codec_iter_t *iter) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ if (!ctx || !mmap || !iter || !ctx->iface)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_ERROR;
+ else
+ res = ctx->iface->get_mmap(ctx, mmap, iter);
+
+ return SAVE_STATUS(ctx, res);
}
vpx_codec_err_t vpx_codec_set_mem_map(vpx_codec_ctx_t *ctx,
vpx_codec_mmap_t *mmap,
- unsigned int num_maps)
-{
- vpx_codec_err_t res = VPX_CODEC_MEM_ERROR;
-
- if (!ctx || !mmap || !ctx->iface)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_ERROR;
- else
- {
- unsigned int i;
-
- for (i = 0; i < num_maps; i++, mmap++)
- {
- if (!mmap->base)
- break;
-
- /* Everything look ok, set the mmap in the decoder */
- res = ctx->iface->set_mmap(ctx, mmap);
-
- if (res)
- break;
- }
+ unsigned int num_maps) {
+ vpx_codec_err_t res = VPX_CODEC_MEM_ERROR;
+
+ if (!ctx || !mmap || !ctx->iface)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_ERROR;
+ else {
+ unsigned int i;
+
+ for (i = 0; i < num_maps; i++, mmap++) {
+ if (!mmap->base)
+ break;
+
+      /* Everything looks OK; set the mmap in the decoder */
+ res = ctx->iface->set_mmap(ctx, mmap);
+
+ if (res)
+ break;
}
+ }
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
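
The decoder wrappers above all follow the same shape: validate ctx/iface/priv, then dispatch through ctx->iface->dec. A minimal decode sequence built on the public entry points, assuming the VP8 interface and omitting error handling:

#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"

void decode_buf(const uint8_t *buf, unsigned int sz) {
  vpx_codec_ctx_t ctx;
  vpx_codec_iter_t iter = NULL;
  vpx_image_t *img;

  vpx_codec_dec_init(&ctx, vpx_codec_vp8_dx(), NULL, 0);
  vpx_codec_decode(&ctx, buf, sz, NULL, 0);
  while ((img = vpx_codec_get_frame(&ctx, &iter))) {
    /* consume img->planes[] / img->stride[] here */
  }
  vpx_codec_destroy(&ctx);
}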
diff --git a/libvpx/vpx/src/vpx_encoder.c b/libvpx/vpx/src/vpx_encoder.c
index 73c1c66..3cec895 100644
--- a/libvpx/vpx/src/vpx_encoder.c
+++ b/libvpx/vpx/src/vpx_encoder.c
@@ -24,46 +24,43 @@ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
vpx_codec_iface_t *iface,
vpx_codec_enc_cfg_t *cfg,
vpx_codec_flags_t flags,
- int ver)
-{
- vpx_codec_err_t res;
-
- if (ver != VPX_ENCODER_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!ctx || !iface || !cfg)
- res = VPX_CODEC_INVALID_PARAM;
- else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_PSNR)
- && !(iface->caps & VPX_CODEC_CAP_PSNR))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
- && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- ctx->iface = iface;
- ctx->name = iface->name;
- ctx->priv = NULL;
- ctx->init_flags = flags;
- ctx->config.enc = cfg;
- res = ctx->iface->init(ctx, NULL);
-
- if (res)
- {
- ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
- vpx_codec_destroy(ctx);
- }
-
- if (ctx->priv)
- ctx->priv->iface = ctx->iface;
+ int ver) {
+ vpx_codec_err_t res;
+
+ if (ver != VPX_ENCODER_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!ctx || !iface || !cfg)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_PSNR)
+ && !(iface->caps & VPX_CODEC_CAP_PSNR))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
+ && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ ctx->iface = iface;
+ ctx->name = iface->name;
+ ctx->priv = NULL;
+ ctx->init_flags = flags;
+ ctx->config.enc = cfg;
+ res = ctx->iface->init(ctx, NULL);
+
+ if (res) {
+ ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
+ vpx_codec_destroy(ctx);
}
- return SAVE_STATUS(ctx, res);
+ if (ctx->priv)
+ ctx->priv->iface = ctx->iface;
+ }
+
+ return SAVE_STATUS(ctx, res);
}
vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx,
@@ -72,128 +69,117 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx,
int num_enc,
vpx_codec_flags_t flags,
vpx_rational_t *dsf,
- int ver)
-{
- vpx_codec_err_t res = 0;
-
- if (ver != VPX_ENCODER_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!ctx || !iface || !cfg || (num_enc > 16 || num_enc < 1))
- res = VPX_CODEC_INVALID_PARAM;
- else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_PSNR)
- && !(iface->caps & VPX_CODEC_CAP_PSNR))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
- && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- int i;
- void *mem_loc = NULL;
-
- if(!(res = iface->enc.mr_get_mem_loc(cfg, &mem_loc)))
- {
- for (i = 0; i < num_enc; i++)
- {
- vpx_codec_priv_enc_mr_cfg_t mr_cfg;
-
- /* Validate down-sampling factor. */
- if(dsf->num < 1 || dsf->num > 4096 || dsf->den < 1 ||
- dsf->den > dsf->num)
- {
- res = VPX_CODEC_INVALID_PARAM;
- break;
- }
-
- mr_cfg.mr_low_res_mode_info = mem_loc;
- mr_cfg.mr_total_resolutions = num_enc;
- mr_cfg.mr_encoder_id = num_enc-1-i;
- mr_cfg.mr_down_sampling_factor.num = dsf->num;
- mr_cfg.mr_down_sampling_factor.den = dsf->den;
-
- /* Force Key-frame synchronization. Namely, encoder at higher
- * resolution always use the same frame_type chosen by the
- * lowest-resolution encoder.
- */
- if(mr_cfg.mr_encoder_id)
- cfg->kf_mode = VPX_KF_DISABLED;
-
- ctx->iface = iface;
- ctx->name = iface->name;
- ctx->priv = NULL;
- ctx->init_flags = flags;
- ctx->config.enc = cfg;
- res = ctx->iface->init(ctx, &mr_cfg);
-
- if (res)
- {
- const char *error_detail =
- ctx->priv ? ctx->priv->err_detail : NULL;
- /* Destroy current ctx */
- ctx->err_detail = error_detail;
- vpx_codec_destroy(ctx);
-
- /* Destroy already allocated high-level ctx */
- while (i)
- {
- ctx--;
- ctx->err_detail = error_detail;
- vpx_codec_destroy(ctx);
- i--;
- }
- }
-
- if (ctx->priv)
- ctx->priv->iface = ctx->iface;
-
- if (res)
- break;
-
- ctx++;
- cfg++;
- dsf++;
- }
+ int ver) {
+ vpx_codec_err_t res = 0;
+
+ if (ver != VPX_ENCODER_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!ctx || !iface || !cfg || (num_enc > 16 || num_enc < 1))
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_PSNR)
+ && !(iface->caps & VPX_CODEC_CAP_PSNR))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
+ && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ int i;
+ void *mem_loc = NULL;
+
+ if (!(res = iface->enc.mr_get_mem_loc(cfg, &mem_loc))) {
+ for (i = 0; i < num_enc; i++) {
+ vpx_codec_priv_enc_mr_cfg_t mr_cfg;
+
+ /* Validate down-sampling factor. */
+ if (dsf->num < 1 || dsf->num > 4096 || dsf->den < 1 ||
+ dsf->den > dsf->num) {
+ res = VPX_CODEC_INVALID_PARAM;
+ break;
}
+
+ mr_cfg.mr_low_res_mode_info = mem_loc;
+ mr_cfg.mr_total_resolutions = num_enc;
+ mr_cfg.mr_encoder_id = num_enc - 1 - i;
+ mr_cfg.mr_down_sampling_factor.num = dsf->num;
+ mr_cfg.mr_down_sampling_factor.den = dsf->den;
+
+      /* Force key-frame synchronization: encoders at higher
+       * resolutions always use the same frame_type chosen by the
+       * lowest-resolution encoder.
+ */
+ if (mr_cfg.mr_encoder_id)
+ cfg->kf_mode = VPX_KF_DISABLED;
+
+ ctx->iface = iface;
+ ctx->name = iface->name;
+ ctx->priv = NULL;
+ ctx->init_flags = flags;
+ ctx->config.enc = cfg;
+ res = ctx->iface->init(ctx, &mr_cfg);
+
+ if (res) {
+ const char *error_detail =
+ ctx->priv ? ctx->priv->err_detail : NULL;
+ /* Destroy current ctx */
+ ctx->err_detail = error_detail;
+ vpx_codec_destroy(ctx);
+
+ /* Destroy already allocated high-level ctx */
+ while (i) {
+ ctx--;
+ ctx->err_detail = error_detail;
+ vpx_codec_destroy(ctx);
+ i--;
+ }
+ }
+
+ if (ctx->priv)
+ ctx->priv->iface = ctx->iface;
+
+ if (res)
+ break;
+
+ ctx++;
+ cfg++;
+ dsf++;
+ }
}
+ }
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
- vpx_codec_enc_cfg_t *cfg,
- unsigned int usage)
-{
- vpx_codec_err_t res;
- vpx_codec_enc_cfg_map_t *map;
-
- if (!iface || !cfg || usage > INT_MAX)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- res = VPX_CODEC_INVALID_PARAM;
-
- for (map = iface->enc.cfg_maps; map->usage >= 0; map++)
- {
- if (map->usage == (int)usage)
- {
- *cfg = map->cfg;
- cfg->g_usage = usage;
- res = VPX_CODEC_OK;
- break;
- }
- }
+ vpx_codec_enc_cfg_t *cfg,
+ unsigned int usage) {
+ vpx_codec_err_t res;
+ vpx_codec_enc_cfg_map_t *map;
+
+ if (!iface || !cfg || usage > INT_MAX)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ res = VPX_CODEC_INVALID_PARAM;
+
+ for (map = iface->enc.cfg_maps; map->usage >= 0; map++) {
+ if (map->usage == (int)usage) {
+ *cfg = map->cfg;
+ cfg->g_usage = usage;
+ res = VPX_CODEC_OK;
+ break;
+ }
}
+ }
- return res;
+ return res;
}
@@ -203,9 +189,9 @@ vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
*/
#include "vpx_ports/x86.h"
#define FLOATING_POINT_INIT() do {\
- unsigned short x87_orig_mode = x87_set_double_precision();
+ unsigned short x87_orig_mode = x87_set_double_precision();
#define FLOATING_POINT_RESTORE() \
- x87_set_control_word(x87_orig_mode); }while(0)
+ x87_set_control_word(x87_orig_mode); }while(0)
#else
@@ -219,224 +205,202 @@ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx,
vpx_codec_pts_t pts,
unsigned long duration,
vpx_enc_frame_flags_t flags,
- unsigned long deadline)
-{
- vpx_codec_err_t res = 0;
-
- if (!ctx || (img && !duration))
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- res = VPX_CODEC_ERROR;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- /* Execute in a normalized floating point environment, if the platform
- * requires it.
- */
- unsigned int num_enc =ctx->priv->enc.total_encoders;
-
- FLOATING_POINT_INIT();
-
- if (num_enc == 1)
- res = ctx->iface->enc.encode(ctx->priv->alg_priv, img, pts,
- duration, flags, deadline);
- else
- {
- /* Multi-resolution encoding:
- * Encode multi-levels in reverse order. For example,
- * if mr_total_resolutions = 3, first encode level 2,
- * then encode level 1, and finally encode level 0.
- */
- int i;
-
- ctx += num_enc - 1;
- if (img) img += num_enc - 1;
-
- for (i = num_enc-1; i >= 0; i--)
- {
- if ((res = ctx->iface->enc.encode(ctx->priv->alg_priv, img, pts,
- duration, flags, deadline)))
- break;
-
- ctx--;
- if (img) img--;
- }
- ctx++;
- }
-
- FLOATING_POINT_RESTORE();
+ unsigned long deadline) {
+ vpx_codec_err_t res = 0;
+
+ if (!ctx || (img && !duration))
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ res = VPX_CODEC_ERROR;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ /* Execute in a normalized floating point environment, if the platform
+ * requires it.
+ */
+ unsigned int num_enc = ctx->priv->enc.total_encoders;
+
+ FLOATING_POINT_INIT();
+
+ if (num_enc == 1)
+ res = ctx->iface->enc.encode(ctx->priv->alg_priv, img, pts,
+ duration, flags, deadline);
+ else {
+ /* Multi-resolution encoding:
+       * Encode the levels in reverse order. For example,
+ * if mr_total_resolutions = 3, first encode level 2,
+ * then encode level 1, and finally encode level 0.
+ */
+ int i;
+
+ ctx += num_enc - 1;
+ if (img) img += num_enc - 1;
+
+ for (i = num_enc - 1; i >= 0; i--) {
+ if ((res = ctx->iface->enc.encode(ctx->priv->alg_priv, img, pts,
+ duration, flags, deadline)))
+ break;
+
+ ctx--;
+ if (img) img--;
+ }
+ ctx++;
}
- return SAVE_STATUS(ctx, res);
+ FLOATING_POINT_RESTORE();
+ }
+
+ return SAVE_STATUS(ctx, res);
}
const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
- vpx_codec_iter_t *iter)
-{
- const vpx_codec_cx_pkt_t *pkt = NULL;
-
- if (ctx)
- {
- if (!iter)
- ctx->err = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- ctx->err = VPX_CODEC_ERROR;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- ctx->err = VPX_CODEC_INCAPABLE;
- else
- pkt = ctx->iface->enc.get_cx_data(ctx->priv->alg_priv, iter);
- }
+ vpx_codec_iter_t *iter) {
+ const vpx_codec_cx_pkt_t *pkt = NULL;
- if (pkt && pkt->kind == VPX_CODEC_CX_FRAME_PKT)
- {
- /* If the application has specified a destination area for the
- * compressed data, and the codec has not placed the data there,
- * and it fits, copy it.
- */
- char *dst_buf = ctx->priv->enc.cx_data_dst_buf.buf;
-
- if (dst_buf
- && pkt->data.raw.buf != dst_buf
- && pkt->data.raw.sz
- + ctx->priv->enc.cx_data_pad_before
- + ctx->priv->enc.cx_data_pad_after
- <= ctx->priv->enc.cx_data_dst_buf.sz)
- {
- vpx_codec_cx_pkt_t *modified_pkt = &ctx->priv->enc.cx_data_pkt;
-
- memcpy(dst_buf + ctx->priv->enc.cx_data_pad_before,
- pkt->data.raw.buf, pkt->data.raw.sz);
- *modified_pkt = *pkt;
- modified_pkt->data.raw.buf = dst_buf;
- modified_pkt->data.raw.sz += ctx->priv->enc.cx_data_pad_before
- + ctx->priv->enc.cx_data_pad_after;
- pkt = modified_pkt;
- }
+ if (ctx) {
+ if (!iter)
+ ctx->err = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ ctx->err = VPX_CODEC_ERROR;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else
+ pkt = ctx->iface->enc.get_cx_data(ctx->priv->alg_priv, iter);
+ }
+
+ if (pkt && pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ /* If the application has specified a destination area for the
+ * compressed data, and the codec has not placed the data there,
+ * and it fits, copy it.
+ */
+ char *dst_buf = ctx->priv->enc.cx_data_dst_buf.buf;
+
+ if (dst_buf
+ && pkt->data.raw.buf != dst_buf
+ && pkt->data.raw.sz
+ + ctx->priv->enc.cx_data_pad_before
+ + ctx->priv->enc.cx_data_pad_after
+ <= ctx->priv->enc.cx_data_dst_buf.sz) {
+ vpx_codec_cx_pkt_t *modified_pkt = &ctx->priv->enc.cx_data_pkt;
+
+ memcpy(dst_buf + ctx->priv->enc.cx_data_pad_before,
+ pkt->data.raw.buf, pkt->data.raw.sz);
+ *modified_pkt = *pkt;
+ modified_pkt->data.raw.buf = dst_buf;
+ modified_pkt->data.raw.sz += ctx->priv->enc.cx_data_pad_before
+ + ctx->priv->enc.cx_data_pad_after;
+ pkt = modified_pkt;
+ }
- if (dst_buf == pkt->data.raw.buf)
- {
- ctx->priv->enc.cx_data_dst_buf.buf = dst_buf + pkt->data.raw.sz;
- ctx->priv->enc.cx_data_dst_buf.sz -= pkt->data.raw.sz;
- }
+ if (dst_buf == pkt->data.raw.buf) {
+ ctx->priv->enc.cx_data_dst_buf.buf = dst_buf + pkt->data.raw.sz;
+ ctx->priv->enc.cx_data_dst_buf.sz -= pkt->data.raw.sz;
}
+ }
- return pkt;
+ return pkt;
}
vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
- const vpx_fixed_buf_t *buf,
- unsigned int pad_before,
- unsigned int pad_after)
-{
- if (!ctx || !ctx->priv)
- return VPX_CODEC_INVALID_PARAM;
-
- if (buf)
- {
- ctx->priv->enc.cx_data_dst_buf = *buf;
- ctx->priv->enc.cx_data_pad_before = pad_before;
- ctx->priv->enc.cx_data_pad_after = pad_after;
- }
- else
- {
- ctx->priv->enc.cx_data_dst_buf.buf = NULL;
- ctx->priv->enc.cx_data_dst_buf.sz = 0;
- ctx->priv->enc.cx_data_pad_before = 0;
- ctx->priv->enc.cx_data_pad_after = 0;
- }
-
- return VPX_CODEC_OK;
+ const vpx_fixed_buf_t *buf,
+ unsigned int pad_before,
+ unsigned int pad_after) {
+ if (!ctx || !ctx->priv)
+ return VPX_CODEC_INVALID_PARAM;
+
+ if (buf) {
+ ctx->priv->enc.cx_data_dst_buf = *buf;
+ ctx->priv->enc.cx_data_pad_before = pad_before;
+ ctx->priv->enc.cx_data_pad_after = pad_after;
+ } else {
+ ctx->priv->enc.cx_data_dst_buf.buf = NULL;
+ ctx->priv->enc.cx_data_dst_buf.sz = 0;
+ ctx->priv->enc.cx_data_pad_before = 0;
+ ctx->priv->enc.cx_data_pad_after = 0;
+ }
+
+ return VPX_CODEC_OK;
}
-const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx)
-{
- vpx_image_t *img = NULL;
-
- if (ctx)
- {
- if (!ctx->iface || !ctx->priv)
- ctx->err = VPX_CODEC_ERROR;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- ctx->err = VPX_CODEC_INCAPABLE;
- else if (!ctx->iface->enc.get_preview)
- ctx->err = VPX_CODEC_INCAPABLE;
- else
- img = ctx->iface->enc.get_preview(ctx->priv->alg_priv);
- }
+const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx) {
+ vpx_image_t *img = NULL;
- return img;
+ if (ctx) {
+ if (!ctx->iface || !ctx->priv)
+ ctx->err = VPX_CODEC_ERROR;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else if (!ctx->iface->enc.get_preview)
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else
+ img = ctx->iface->enc.get_preview(ctx->priv->alg_priv);
+ }
+
+ return img;
}
-vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx)
-{
- vpx_fixed_buf_t *buf = NULL;
-
- if (ctx)
- {
- if (!ctx->iface || !ctx->priv)
- ctx->err = VPX_CODEC_ERROR;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- ctx->err = VPX_CODEC_INCAPABLE;
- else if (!ctx->iface->enc.get_glob_hdrs)
- ctx->err = VPX_CODEC_INCAPABLE;
- else
- buf = ctx->iface->enc.get_glob_hdrs(ctx->priv->alg_priv);
- }
+vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx) {
+ vpx_fixed_buf_t *buf = NULL;
- return buf;
+ if (ctx) {
+ if (!ctx->iface || !ctx->priv)
+ ctx->err = VPX_CODEC_ERROR;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else if (!ctx->iface->enc.get_glob_hdrs)
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else
+ buf = ctx->iface->enc.get_glob_hdrs(ctx->priv->alg_priv);
+ }
+
+ return buf;
}
vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
- const vpx_codec_enc_cfg_t *cfg)
-{
- vpx_codec_err_t res;
+ const vpx_codec_enc_cfg_t *cfg) {
+ vpx_codec_err_t res;
- if (!ctx || !ctx->iface || !ctx->priv || !cfg)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else
- res = ctx->iface->enc.cfg_set(ctx->priv->alg_priv, cfg);
+ if (!ctx || !ctx->iface || !ctx->priv || !cfg)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else
+ res = ctx->iface->enc.cfg_set(ctx->priv->alg_priv, cfg);
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *list,
- const struct vpx_codec_cx_pkt *pkt)
-{
- if (list->cnt < list->max)
- {
- list->pkts[list->cnt++] = *pkt;
- return 0;
- }
+ const struct vpx_codec_cx_pkt *pkt) {
+ if (list->cnt < list->max) {
+ list->pkts[list->cnt++] = *pkt;
+ return 0;
+ }
- return 1;
+ return 1;
}
const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get(struct vpx_codec_pkt_list *list,
- vpx_codec_iter_t *iter)
-{
- const vpx_codec_cx_pkt_t *pkt;
+ vpx_codec_iter_t *iter) {
+ const vpx_codec_cx_pkt_t *pkt;
- if (!(*iter))
- {
- *iter = list->pkts;
- }
+ if (!(*iter)) {
+ *iter = list->pkts;
+ }
- pkt = (const void *) * iter;
+  pkt = (const void *)*iter;
- if ((size_t)(pkt - list->pkts) < list->cnt)
- *iter = pkt + 1;
- else
- pkt = NULL;
+ if ((size_t)(pkt - list->pkts) < list->cnt)
+ *iter = pkt + 1;
+ else
+ pkt = NULL;
- return pkt;
+ return pkt;
}
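
The encoder side mirrors the decoder: vpx_codec_enc_config_default() fills a template from iface->enc.cfg_maps, and vpx_codec_enc_init() runs the capability checks shown above. A minimal bring-up sketch, assuming the VP8 interface (error handling trimmed):

#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"

int init_encoder(vpx_codec_ctx_t *ctx, unsigned int w, unsigned int h) {
  vpx_codec_enc_cfg_t cfg;
  /* Usage 0 selects the default configuration map entry. */
  if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, 0))
    return -1;
  cfg.g_w = w;
  cfg.g_h = h;
  return vpx_codec_enc_init(ctx, vpx_codec_vp8_cx(), &cfg, 0) ? -1 : 0;
}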
diff --git a/libvpx/vpx/src/vpx_image.c b/libvpx/vpx/src/vpx_image.c
index 336b6e2..36eda95 100644
--- a/libvpx/vpx/src/vpx_image.c
+++ b/libvpx/vpx/src/vpx_image.c
@@ -18,30 +18,26 @@
#define align_addr(addr,align) (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align))
/* Memalign code is copied from vpx_mem.c */
-static void *img_buf_memalign(size_t align, size_t size)
-{
- void *addr,
- * x = NULL;
-
- addr = malloc(size + align - 1 + ADDRESS_STORAGE_SIZE);
-
- if (addr)
- {
- x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
- /* save the actual malloc address */
- ((size_t *)x)[-1] = (size_t)addr;
- }
+static void *img_buf_memalign(size_t align, size_t size) {
+  void *addr, *x = NULL;
+
+ addr = malloc(size + align - 1 + ADDRESS_STORAGE_SIZE);
- return x;
+ if (addr) {
+ x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
+ /* save the actual malloc address */
+ ((size_t *)x)[-1] = (size_t)addr;
+ }
+
+ return x;
}
-static void img_buf_free(void *memblk)
-{
- if (memblk)
- {
- void *addr = (void *)(((size_t *)memblk)[-1]);
- free(addr);
- }
+static void img_buf_free(void *memblk) {
+ if (memblk) {
+ void *addr = (void *)(((size_t *)memblk)[-1]);
+ free(addr);
+ }
}
static vpx_image_t *img_alloc_helper(vpx_image_t *img,
@@ -50,41 +46,39 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
unsigned int d_h,
unsigned int buf_align,
unsigned int stride_align,
- unsigned char *img_data)
-{
+ unsigned char *img_data) {
- unsigned int h, w, s, xcs, ycs, bps;
- int align;
+ unsigned int h, w, s, xcs, ycs, bps;
+ int align;
- /* Treat align==0 like align==1 */
- if (!buf_align)
- buf_align = 1;
+ /* Treat align==0 like align==1 */
+ if (!buf_align)
+ buf_align = 1;
- /* Validate alignment (must be power of 2) */
- if (buf_align & (buf_align - 1))
- goto fail;
+ /* Validate alignment (must be power of 2) */
+ if (buf_align & (buf_align - 1))
+ goto fail;
- /* Treat align==0 like align==1 */
- if (!stride_align)
- stride_align = 1;
+ /* Treat align==0 like align==1 */
+ if (!stride_align)
+ stride_align = 1;
- /* Validate alignment (must be power of 2) */
- if (stride_align & (stride_align - 1))
- goto fail;
+ /* Validate alignment (must be power of 2) */
+ if (stride_align & (stride_align - 1))
+ goto fail;
- /* Get sample size for this format */
- switch (fmt)
- {
+ /* Get sample size for this format */
+ switch (fmt) {
case VPX_IMG_FMT_RGB32:
case VPX_IMG_FMT_RGB32_LE:
case VPX_IMG_FMT_ARGB:
case VPX_IMG_FMT_ARGB_LE:
- bps = 32;
- break;
+ bps = 32;
+ break;
case VPX_IMG_FMT_RGB24:
case VPX_IMG_FMT_BGR24:
- bps = 24;
- break;
+ bps = 24;
+ break;
case VPX_IMG_FMT_RGB565:
case VPX_IMG_FMT_RGB565_LE:
case VPX_IMG_FMT_RGB555:
@@ -92,108 +86,101 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
case VPX_IMG_FMT_UYVY:
case VPX_IMG_FMT_YUY2:
case VPX_IMG_FMT_YVYU:
- bps = 16;
- break;
+ bps = 16;
+ break;
case VPX_IMG_FMT_I420:
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_VPXI420:
case VPX_IMG_FMT_VPXYV12:
- bps = 12;
- break;
+ bps = 12;
+ break;
default:
- bps = 16;
- break;
- }
+ bps = 16;
+ break;
+ }
- /* Get chroma shift values for this format */
- switch (fmt)
- {
+ /* Get chroma shift values for this format */
+ switch (fmt) {
case VPX_IMG_FMT_I420:
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_VPXI420:
case VPX_IMG_FMT_VPXYV12:
- xcs = 1;
- break;
+ xcs = 1;
+ break;
default:
- xcs = 0;
- break;
- }
+ xcs = 0;
+ break;
+ }
- switch (fmt)
- {
+ switch (fmt) {
case VPX_IMG_FMT_I420:
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_VPXI420:
case VPX_IMG_FMT_VPXYV12:
- ycs = 1;
- break;
+ ycs = 1;
+ break;
default:
- ycs = 0;
- break;
- }
+ ycs = 0;
+ break;
+ }
+
+ /* Calculate storage sizes given the chroma subsampling */
+ align = (1 << xcs) - 1;
+ w = (d_w + align) & ~align;
+ align = (1 << ycs) - 1;
+ h = (d_h + align) & ~align;
+ s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
+ s = (s + stride_align - 1) & ~(stride_align - 1);
+
+ /* Allocate the new image */
+ if (!img) {
+ img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));
- /* Calculate storage sizes given the chroma subsampling */
- align = (1 << xcs) - 1;
- w = (d_w + align) & ~align;
- align = (1 << ycs) - 1;
- h = (d_h + align) & ~align;
- s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
- s = (s + stride_align - 1) & ~(stride_align - 1);
-
- /* Allocate the new image */
if (!img)
- {
- img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));
+ goto fail;
- if (!img)
- goto fail;
+ img->self_allocd = 1;
+ } else {
+ memset(img, 0, sizeof(vpx_image_t));
+ }
- img->self_allocd = 1;
- }
- else
- {
- memset(img, 0, sizeof(vpx_image_t));
- }
+ img->img_data = img_data;
- img->img_data = img_data;
+ if (!img_data) {
+ img->img_data = img_buf_memalign(buf_align, ((fmt & VPX_IMG_FMT_PLANAR) ?
+ h * s * bps / 8 : h * s));
+ img->img_data_owner = 1;
+ }
- if (!img_data)
- {
- img->img_data = img_buf_memalign(buf_align, ((fmt & VPX_IMG_FMT_PLANAR)?
- h * s * bps / 8 : h * s));
- img->img_data_owner = 1;
- }
+ if (!img->img_data)
+ goto fail;
- if (!img->img_data)
- goto fail;
+ img->fmt = fmt;
+ img->w = w;
+ img->h = h;
+ img->x_chroma_shift = xcs;
+ img->y_chroma_shift = ycs;
+ img->bps = bps;
- img->fmt = fmt;
- img->w = w;
- img->h = h;
- img->x_chroma_shift = xcs;
- img->y_chroma_shift = ycs;
- img->bps = bps;
+ /* Calculate strides */
+ img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = s;
+ img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = s >> xcs;
- /* Calculate strides */
- img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = s;
- img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = s >> xcs;
-
- /* Default viewport to entire image */
- if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
- return img;
+ /* Default viewport to entire image */
+ if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
+ return img;
fail:
- vpx_img_free(img);
- return NULL;
+ vpx_img_free(img);
+ return NULL;
}
vpx_image_t *vpx_img_alloc(vpx_image_t *img,
vpx_img_fmt_t fmt,
unsigned int d_w,
unsigned int d_h,
- unsigned int align)
-{
- return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
+ unsigned int align) {
+ return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
}
vpx_image_t *vpx_img_wrap(vpx_image_t *img,
@@ -201,105 +188,92 @@ vpx_image_t *vpx_img_wrap(vpx_image_t *img,
unsigned int d_w,
unsigned int d_h,
unsigned int stride_align,
- unsigned char *img_data)
-{
- /* By setting buf_align = 1, we don't change buffer alignment in this
- * function. */
- return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
+ unsigned char *img_data) {
+ /* By setting buf_align = 1, we don't change buffer alignment in this
+ * function. */
+ return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
}
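
For orientation, a minimal sketch of how a caller uses the two entry points just reformatted; the sizes and alignment values are illustrative, not part of this change:

    #include "vpx/vpx_image.h"

    /* Two ways to obtain a vpx_image_t: let libvpx allocate the pixel buffer,
     * or wrap a buffer the caller already owns. */
    static void image_setup_example(unsigned char *existing_buf) {
      vpx_image_t img;

      /* libvpx allocates; rows are 32-byte aligned, buffer freed by vpx_img_free. */
      if (vpx_img_alloc(&img, VPX_IMG_FMT_I420, 640, 480, 32)) {
        /* ... write via img.planes[VPX_PLANE_Y] etc., honoring img.stride ... */
        vpx_img_free(&img);
      }

      /* Wrap instead: no allocation, and vpx_img_free() will not release
       * existing_buf (img_data_owner stays 0). */
      vpx_img_wrap(&img, VPX_IMG_FMT_I420, 640, 480, 1, existing_buf);
    }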
int vpx_img_set_rect(vpx_image_t *img,
unsigned int x,
unsigned int y,
unsigned int w,
- unsigned int h)
-{
- unsigned char *data;
-
- if (x + w <= img->w && y + h <= img->h)
- {
- img->d_w = w;
- img->d_h = h;
-
- /* Calculate plane pointers */
- if (!(img->fmt & VPX_IMG_FMT_PLANAR))
- {
- img->planes[VPX_PLANE_PACKED] =
- img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
- }
- else
- {
- data = img->img_data;
-
- if (img->fmt & VPX_IMG_FMT_HAS_ALPHA)
- {
- img->planes[VPX_PLANE_ALPHA] =
- data + x + y * img->stride[VPX_PLANE_ALPHA];
- data += img->h * img->stride[VPX_PLANE_ALPHA];
- }
-
- img->planes[VPX_PLANE_Y] = data + x + y * img->stride[VPX_PLANE_Y];
- data += img->h * img->stride[VPX_PLANE_Y];
-
- if (!(img->fmt & VPX_IMG_FMT_UV_FLIP))
- {
- img->planes[VPX_PLANE_U] = data
- + (x >> img->x_chroma_shift)
- + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
- data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
- img->planes[VPX_PLANE_V] = data
- + (x >> img->x_chroma_shift)
- + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
- }
- else
- {
- img->planes[VPX_PLANE_V] = data
- + (x >> img->x_chroma_shift)
- + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
- data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
- img->planes[VPX_PLANE_U] = data
- + (x >> img->x_chroma_shift)
- + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
- }
- }
-
- return 0;
+ unsigned int h) {
+ unsigned char *data;
+
+ if (x + w <= img->w && y + h <= img->h) {
+ img->d_w = w;
+ img->d_h = h;
+
+ /* Calculate plane pointers */
+ if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
+ img->planes[VPX_PLANE_PACKED] =
+ img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
+ } else {
+ data = img->img_data;
+
+ if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
+ img->planes[VPX_PLANE_ALPHA] =
+ data + x + y * img->stride[VPX_PLANE_ALPHA];
+ data += img->h * img->stride[VPX_PLANE_ALPHA];
+ }
+
+ img->planes[VPX_PLANE_Y] = data + x + y * img->stride[VPX_PLANE_Y];
+ data += img->h * img->stride[VPX_PLANE_Y];
+
+ if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
+ img->planes[VPX_PLANE_U] = data
+ + (x >> img->x_chroma_shift)
+ + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
+ data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
+ img->planes[VPX_PLANE_V] = data
+ + (x >> img->x_chroma_shift)
+ + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
+ } else {
+ img->planes[VPX_PLANE_V] = data
+ + (x >> img->x_chroma_shift)
+ + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
+ data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
+ img->planes[VPX_PLANE_U] = data
+ + (x >> img->x_chroma_shift)
+ + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
+ }
}
- return -1;
+ return 0;
+ }
+
+ return -1;
}
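
A usage sketch of vpx_img_set_rect(): only the plane pointers and the display size change, no pixels are copied. The 16-pixel border is an arbitrary example value:

    #include "vpx/vpx_image.h"

    /* Crop a 16-pixel border off an allocated image. Returns 0 on success,
     * -1 if the requested rectangle does not fit inside the buffer. */
    static int crop_border(vpx_image_t *img) {
      return vpx_img_set_rect(img, 16, 16, img->d_w - 32, img->d_h - 32);
    }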
-void vpx_img_flip(vpx_image_t *img)
-{
- /* Note: In the calculation pointer adjustment calculation, we want the
- * rhs to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
- * standard indicates that if the adjustment parameter is unsigned, the
- * stride parameter will be promoted to unsigned, causing errors when
- * the lhs is a larger type than the rhs.
- */
- img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
- img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];
-
- img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
- * img->stride[VPX_PLANE_U];
- img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];
-
- img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
- * img->stride[VPX_PLANE_V];
- img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];
-
- img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
- img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
+void vpx_img_flip(vpx_image_t *img) {
+  /* Note: In the pointer adjustment calculation, we want the
+ * rhs to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
+ * standard indicates that if the adjustment parameter is unsigned, the
+ * stride parameter will be promoted to unsigned, causing errors when
+ * the lhs is a larger type than the rhs.
+ */
+ img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
+ img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];
+
+ img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
+ * img->stride[VPX_PLANE_U];
+ img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];
+
+ img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
+ * img->stride[VPX_PLANE_V];
+ img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];
+
+ img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
+ img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
}
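
Since the flip only repoints each plane at its last row and negates the stride, iterating with the (now negative) stride walks the buffer bottom-up. A sketch, assuming an image that has already been through vpx_img_flip():

    #include "vpx/vpx_image.h"

    /* Walk the luma plane of a flipped image; stride[VPX_PLANE_Y] is negative
     * after vpx_img_flip(), so advancing by it moves upward in the buffer. */
    static void scan_flipped_luma(const vpx_image_t *img) {
      const unsigned char *row = img->planes[VPX_PLANE_Y];
      unsigned int y;
      for (y = 0; y < img->d_h; y++) {
        /* row[0] .. row[img->d_w - 1] is one row, bottom of the frame first */
        row += img->stride[VPX_PLANE_Y];
      }
    }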
-void vpx_img_free(vpx_image_t *img)
-{
-  if (img)
-  {
-    if (img->img_data && img->img_data_owner)
-      img_buf_free(img->img_data);
-
-    if (img->self_allocd)
-      free(img);
-  }
+void vpx_img_free(vpx_image_t *img) {
+  if (img) {
+    if (img->img_data && img->img_data_owner)
+      img_buf_free(img->img_data);
+
+    if (img->self_allocd)
+      free(img);
+  }
}
diff --git a/libvpx/vpx/vp8.h b/libvpx/vpx/vp8.h
index 2952203..0b4cb1b 100644
--- a/libvpx/vpx/vp8.h
+++ b/libvpx/vpx/vp8.h
@@ -36,34 +36,38 @@
*
* The set of macros define the control functions of VP8 interface
*/
-enum vp8_com_control_id
-{
- VP8_SET_REFERENCE = 1, /**< pass in an external frame into decoder to be used as reference frame */
- VP8_COPY_REFERENCE = 2, /**< get a copy of reference frame from the decoder */
- VP8_SET_POSTPROC = 3, /**< set the decoder's post processing settings */
- VP8_SET_DBG_COLOR_REF_FRAME = 4, /**< set the reference frames to color for each macroblock */
- VP8_SET_DBG_COLOR_MB_MODES = 5, /**< set which macro block modes to color */
- VP8_SET_DBG_COLOR_B_MODES = 6, /**< set which blocks modes to color */
- VP8_SET_DBG_DISPLAY_MV = 7, /**< set which motion vector modes to draw */
- VP8_COMMON_CTRL_ID_MAX,
- VP8_DECODER_CTRL_ID_START = 256
+enum vp8_com_control_id {
+  VP8_SET_REFERENCE = 1, /**< pass an external frame into the decoder to be used as a reference frame */
+ VP8_COPY_REFERENCE = 2, /**< get a copy of reference frame from the decoder */
+ VP8_SET_POSTPROC = 3, /**< set the decoder's post processing settings */
+ VP8_SET_DBG_COLOR_REF_FRAME = 4, /**< set the reference frames to color for each macroblock */
+ VP8_SET_DBG_COLOR_MB_MODES = 5, /**< set which macro block modes to color */
+ VP8_SET_DBG_COLOR_B_MODES = 6, /**< set which blocks modes to color */
+ VP8_SET_DBG_DISPLAY_MV = 7, /**< set which motion vector modes to draw */
+
+ /* TODO(jkoleszar): The encoder incorrectly reuses some of these values (5+)
+ * for its control ids. These should be migrated to something like the
+ * VP8_DECODER_CTRL_ID_START range next time we're ready to break the ABI.
+ */
+ VP9_GET_REFERENCE = 128, /**< get a pointer to a reference frame */
+ VP8_COMMON_CTRL_ID_MAX,
+ VP8_DECODER_CTRL_ID_START = 256
};
/*!\brief post process flags
*
* The set of macros define VP8 decoder post processing flags
*/
-enum vp8_postproc_level
-{
- VP8_NOFILTERING = 0,
- VP8_DEBLOCK = 1<<0,
- VP8_DEMACROBLOCK = 1<<1,
- VP8_ADDNOISE = 1<<2,
- VP8_DEBUG_TXT_FRAME_INFO = 1<<3, /**< print frame information */
- VP8_DEBUG_TXT_MBLK_MODES = 1<<4, /**< print macro block modes over each macro block */
- VP8_DEBUG_TXT_DC_DIFF = 1<<5, /**< print dc diff for each macro block */
- VP8_DEBUG_TXT_RATE_INFO = 1<<6, /**< print video rate info (encoder only) */
- VP8_MFQE = 1<<10
+enum vp8_postproc_level {
+ VP8_NOFILTERING = 0,
+ VP8_DEBLOCK = 1 << 0,
+ VP8_DEMACROBLOCK = 1 << 1,
+ VP8_ADDNOISE = 1 << 2,
+ VP8_DEBUG_TXT_FRAME_INFO = 1 << 3, /**< print frame information */
+ VP8_DEBUG_TXT_MBLK_MODES = 1 << 4, /**< print macro block modes over each macro block */
+ VP8_DEBUG_TXT_DC_DIFF = 1 << 5, /**< print dc diff for each macro block */
+ VP8_DEBUG_TXT_RATE_INFO = 1 << 6, /**< print video rate info (encoder only) */
+ VP8_MFQE = 1 << 10
};
/*!\brief post process flags
@@ -73,22 +77,20 @@ enum vp8_postproc_level
* to VP8_DEBLOCK and deblocking_level to 1.
*/
-typedef struct vp8_postproc_cfg
-{
- int post_proc_flag; /**< the types of post processing to be done, should be combination of "vp8_postproc_level" */
- int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */
- int noise_level; /**< the strength of additive noise, valid range [0, 16] */
+typedef struct vp8_postproc_cfg {
+  int post_proc_flag; /**< the types of post processing to be done, should be a combination of "vp8_postproc_level" */
+ int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */
+ int noise_level; /**< the strength of additive noise, valid range [0, 16] */
} vp8_postproc_cfg_t;
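
As a usage sketch, assuming a decoder opened with the VPX_CODEC_USE_POSTPROC init flag and the vp8_postproc_cfg_t-pointer argument type this control is declared with elsewhere in the header:

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static void enable_deblocking(vpx_codec_ctx_t *decoder) {
      vp8_postproc_cfg_t pp;
      pp.post_proc_flag = VP8_DEBLOCK | VP8_DEMACROBLOCK;
      pp.deblocking_level = 4;  /* valid range [0, 16] */
      pp.noise_level = 0;       /* valid range [0, 16] */
      vpx_codec_control(decoder, VP8_SET_POSTPROC, &pp);
    }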
/*!\brief reference frame type
*
* The set of macros define the type of VP8 reference frames
*/
-typedef enum vpx_ref_frame_type
-{
- VP8_LAST_FRAME = 1,
- VP8_GOLD_FRAME = 2,
- VP8_ALTR_FRAME = 4
+typedef enum vpx_ref_frame_type {
+ VP8_LAST_FRAME = 1,
+ VP8_GOLD_FRAME = 2,
+ VP8_ALTR_FRAME = 4
} vpx_ref_frame_type_t;
/*!\brief reference frame data struct
@@ -96,12 +98,15 @@ typedef enum vpx_ref_frame_type
* define the data struct to access vp8 reference frames
*/
-typedef struct vpx_ref_frame
-{
- vpx_ref_frame_type_t frame_type; /**< which reference frame */
- vpx_image_t img; /**< reference frame data in image format */
+typedef struct vpx_ref_frame {
+ vpx_ref_frame_type_t frame_type; /**< which reference frame */
+ vpx_image_t img; /**< reference frame data in image format */
} vpx_ref_frame_t;
+typedef struct vp9_ref_frame {
+ int idx; /**< frame index to get (input) */
+ vpx_image_t img; /**< img structure to populate (output) */
+} vp9_ref_frame_t;
/*!\brief vp8 decoder control function parameter type
*
@@ -115,6 +120,7 @@ VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_REF_FRAME, int)
VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_MB_MODES, int)
VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_B_MODES, int)
VPX_CTRL_USE_TYPE(VP8_SET_DBG_DISPLAY_MV, int)
+VPX_CTRL_USE_TYPE(VP9_GET_REFERENCE, vp9_ref_frame_t *)
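
A sketch of the new control from the application side; unlike VP8_COPY_REFERENCE it hands back a pointer rather than a copy, and the decoder context name here is illustrative:

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static void peek_reference(vpx_codec_ctx_t *decoder) {
      vp9_ref_frame_t ref;
      ref.idx = 0;  /* frame index to get (input) */
      if (vpx_codec_control(decoder, VP9_GET_REFERENCE, &ref) == VPX_CODEC_OK) {
        /* ref.img now aliases decoder-owned memory: treat it as read-only
         * and do not use it after further decode calls. */
      }
    }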
/*! @} - end defgroup vp8 */
diff --git a/libvpx/vpx/vp8cx.h b/libvpx/vpx/vp8cx.h
index a3c95d2..f3ea6d3 100644
--- a/libvpx/vpx/vp8cx.h
+++ b/libvpx/vpx/vp8cx.h
@@ -31,7 +31,14 @@
* @{
*/
extern vpx_codec_iface_t vpx_codec_vp8_cx_algo;
-extern vpx_codec_iface_t* vpx_codec_vp8_cx(void);
+extern vpx_codec_iface_t *vpx_codec_vp8_cx(void);
+
+/* TODO(jkoleszar): These move to VP9 in a later patch set. */
+extern vpx_codec_iface_t vpx_codec_vp9_cx_algo;
+extern vpx_codec_iface_t *vpx_codec_vp9_cx(void);
+extern vpx_codec_iface_t vpx_codec_vp9x_cx_algo;
+extern vpx_codec_iface_t *vpx_codec_vp9x_cx(void);
+
/*!@} - end algorithm interface member group*/
@@ -121,75 +128,88 @@ extern vpx_codec_iface_t* vpx_codec_vp8_cx(void);
*
* \sa #vpx_codec_control
*/
-enum vp8e_enc_control_id
-{
- VP8E_UPD_ENTROPY = 5, /**< control function to set mode of entropy update in encoder */
- VP8E_UPD_REFERENCE, /**< control function to set reference update mode in encoder */
- VP8E_USE_REFERENCE, /**< control function to set which reference frame encoder can use */
- VP8E_SET_ROI_MAP, /**< control function to pass an ROI map to encoder */
- VP8E_SET_ACTIVEMAP, /**< control function to pass an Active map to encoder */
- VP8E_SET_SCALEMODE = 11, /**< control function to set encoder scaling mode */
- /*!\brief control function to set vp8 encoder cpuused
- *
- * Changes in this value influences, among others, the encoder's selection
- * of motion estimation methods. Values greater than 0 will increase encoder
- * speed at the expense of quality.
- * The full set of adjustments can be found in
- * onyx_if.c:vp8_set_speed_features().
- * \todo List highlights of the changes at various levels.
- *
- * \note Valid range: -16..16
- */
- VP8E_SET_CPUUSED = 13,
- VP8E_SET_ENABLEAUTOALTREF, /**< control function to enable vp8 to automatic set and use altref frame */
- VP8E_SET_NOISE_SENSITIVITY, /**< control function to set noise sensitivity */
- VP8E_SET_SHARPNESS, /**< control function to set sharpness */
- VP8E_SET_STATIC_THRESHOLD, /**< control function to set the threshold for macroblocks treated static */
- VP8E_SET_TOKEN_PARTITIONS, /**< control function to set the number of token partitions */
- VP8E_GET_LAST_QUANTIZER, /**< return the quantizer chosen by the
+enum vp8e_enc_control_id {
+ VP8E_UPD_ENTROPY = 5, /**< control function to set mode of entropy update in encoder */
+ VP8E_UPD_REFERENCE, /**< control function to set reference update mode in encoder */
+ VP8E_USE_REFERENCE, /**< control function to set which reference frame encoder can use */
+ VP8E_SET_ROI_MAP, /**< control function to pass an ROI map to encoder */
+ VP8E_SET_ACTIVEMAP, /**< control function to pass an Active map to encoder */
+ VP8E_SET_SCALEMODE = 11, /**< control function to set encoder scaling mode */
+ /*!\brief control function to set vp8 encoder cpuused
+ *
+   * Changes in this value influence, among other things, the encoder's selection
+ * of motion estimation methods. Values greater than 0 will increase encoder
+ * speed at the expense of quality.
+ * The full set of adjustments can be found in
+ * onyx_if.c:vp8_set_speed_features().
+ * \todo List highlights of the changes at various levels.
+ *
+ * \note Valid range: -16..16
+ */
+ VP8E_SET_CPUUSED = 13,
+  VP8E_SET_ENABLEAUTOALTREF, /**< control function to enable vp8 to automatically set and use the altref frame */
+ VP8E_SET_NOISE_SENSITIVITY, /**< control function to set noise sensitivity */
+ VP8E_SET_SHARPNESS, /**< control function to set sharpness */
+ VP8E_SET_STATIC_THRESHOLD, /**< control function to set the threshold for macroblocks treated static */
+ VP8E_SET_TOKEN_PARTITIONS, /**< control function to set the number of token partitions */
+ VP8E_GET_LAST_QUANTIZER, /**< return the quantizer chosen by the
encoder for the last frame using the internal
scale */
- VP8E_GET_LAST_QUANTIZER_64, /**< return the quantizer chosen by the
+ VP8E_GET_LAST_QUANTIZER_64, /**< return the quantizer chosen by the
encoder for the last frame, using the 0..63
scale as used by the rc_*_quantizer config
parameters */
- VP8E_SET_ARNR_MAXFRAMES, /**< control function to set the max number of frames blurred creating arf*/
- VP8E_SET_ARNR_STRENGTH , /**< control function to set the filter strength for the arf */
- VP8E_SET_ARNR_TYPE , /**< control function to set the type of filter to use for the arf*/
- VP8E_SET_TUNING, /**< control function to set visual tuning */
- /*!\brief control function to set constrained quality level
- *
- * \attention For this value to be used vpx_codec_enc_cfg_t::g_usage must be
- * set to #VPX_CQ.
- * \note Valid range: 0..63
- */
- VP8E_SET_CQ_LEVEL,
-
- /*!\brief Max data rate for Intra frames
- *
- * This value controls additional clamping on the maximum size of a
- * keyframe. It is expressed as a percentage of the average
- * per-frame bitrate, with the special (and default) value 0 meaning
- * unlimited, or no additional clamping beyond the codec's built-in
- * algorithm.
- *
- * For example, to allocate no more than 4.5 frames worth of bitrate
- * to a keyframe, set this to 450.
- *
- */
- VP8E_SET_MAX_INTRA_BITRATE_PCT
+ VP8E_SET_ARNR_MAXFRAMES, /**< control function to set the max number of frames blurred creating arf*/
+ VP8E_SET_ARNR_STRENGTH, /**< control function to set the filter strength for the arf */
+ VP8E_SET_ARNR_TYPE, /**< control function to set the type of filter to use for the arf*/
+ VP8E_SET_TUNING, /**< control function to set visual tuning */
+ /*!\brief control function to set constrained quality level
+ *
+ * \attention For this value to be used vpx_codec_enc_cfg_t::g_usage must be
+ * set to #VPX_CQ.
+ * \note Valid range: 0..63
+ */
+ VP8E_SET_CQ_LEVEL,
+
+ /*!\brief Max data rate for Intra frames
+ *
+ * This value controls additional clamping on the maximum size of a
+ * keyframe. It is expressed as a percentage of the average
+ * per-frame bitrate, with the special (and default) value 0 meaning
+ * unlimited, or no additional clamping beyond the codec's built-in
+ * algorithm.
+ *
+ * For example, to allocate no more than 4.5 frames worth of bitrate
+ * to a keyframe, set this to 450.
+ *
+ */
+ VP8E_SET_MAX_INTRA_BITRATE_PCT,
+
+
+ /* TODO(jkoleszar): Move to vp9cx.h */
+ VP9E_SET_LOSSLESS,
+ VP9E_SET_TILE_COLUMNS,
+ VP9E_SET_TILE_ROWS,
+ VP9E_SET_FRAME_PARALLEL_DECODING,
+
+ VP9E_SET_WIDTH = 99,
+ VP9E_SET_HEIGHT,
+ VP9E_SET_LAYER,
+ VP9E_SET_SVC,
+
+ VP9E_SET_MAX_Q,
+ VP9E_SET_MIN_Q
};
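
A sketch of driving a few of these controls from application code; the specific values are illustrative only (450 matches the 4.5-frames example in the comment above), and the argument types come from the VPX_CTRL_USE_TYPE() declarations further down:

    #include "vpx/vpx_encoder.h"
    #include "vpx/vp8cx.h"

    /* encoder is assumed to be a context already opened with
     * vpx_codec_enc_init(). */
    static void tune_encoder(vpx_codec_ctx_t *encoder) {
      vpx_codec_control(encoder, VP8E_SET_CPUUSED, 4);    /* valid range -16..16 */
      vpx_codec_control(encoder, VP8E_SET_CQ_LEVEL, 20u); /* needs g_usage == VPX_CQ */
      /* Cap keyframes at 4.5x the average per-frame bitrate. */
      vpx_codec_control(encoder, VP8E_SET_MAX_INTRA_BITRATE_PCT, 450u);
    }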
/*!\brief vpx 1-D scaling mode
*
* This set of constants define 1-D vpx scaling modes
*/
-typedef enum vpx_scaling_mode_1d
-{
- VP8E_NORMAL = 0,
- VP8E_FOURFIVE = 1,
- VP8E_THREEFIVE = 2,
- VP8E_ONETWO = 3
+typedef enum vpx_scaling_mode_1d {
+ VP8E_NORMAL = 0,
+ VP8E_FOURFIVE = 1,
+ VP8E_THREEFIVE = 2,
+ VP8E_ONETWO = 3
} VPX_SCALING_MODE;
@@ -199,14 +219,17 @@ typedef enum vpx_scaling_mode_1d
*
*/
-typedef struct vpx_roi_map
-{
- unsigned char *roi_map; /**< specify an id between 0 and 3 for each 16x16 region within a frame */
- unsigned int rows; /**< number of rows */
- unsigned int cols; /**< number of cols */
- int delta_q[4]; /**< quantizer delta [-63, 63] off baseline for regions with id between 0 and 3*/
- int delta_lf[4]; /**< loop filter strength delta [-63, 63] for regions with id between 0 and 3 */
- unsigned int static_threshold[4];/**< threshold for region to be treated as static */
+typedef struct vpx_roi_map {
+ unsigned char *roi_map; /**< specify an id between 0 and 3 for each 16x16 region within a frame */
+ unsigned int rows; /**< number of rows */
+ unsigned int cols; /**< number of cols */
+ // TODO(paulwilkins): broken for VP9 which has 8 segments
+ // q and loop filter deltas for each segment
+ // (see MAX_MB_SEGMENTS)
+ int delta_q[4];
+ int delta_lf[4];
+ // Static breakout threshold for each segment
+ unsigned int static_threshold[4];
} vpx_roi_map_t;
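
A sketch of populating the map: roi_map holds one byte (segment id 0..3) per 16x16 macroblock, rows * cols entries in raster order. The 640x480 stream (40x30 macroblocks) is hypothetical, and the encoder is assumed to copy the map during the control call:

    #include <stdlib.h>
    #include <string.h>
    #include "vpx/vpx_encoder.h"
    #include "vpx/vp8cx.h"

    static void set_roi(vpx_codec_ctx_t *encoder) {
      vpx_roi_map_t roi;
      memset(&roi, 0, sizeof(roi));
      roi.rows = 30;
      roi.cols = 40;
      roi.roi_map = (unsigned char *)calloc(roi.rows * roi.cols, 1);
      if (!roi.roi_map) return;
      roi.delta_q[1] = -16;  /* segment 1 gets a lower (better) quantizer */
      /* ... set roi.roi_map[r * roi.cols + c] = 1 for blocks of interest ... */
      vpx_codec_control(encoder, VP8E_SET_ROI_MAP, &roi);
      free(roi.roi_map);
    }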
/*!\brief vpx active region map
@@ -216,11 +239,10 @@ typedef struct vpx_roi_map
*/
-typedef struct vpx_active_map
-{
- unsigned char *active_map; /**< specify an on (1) or off (0) each 16x16 region within a frame */
- unsigned int rows; /**< number of rows */
- unsigned int cols; /**< number of cols */
+typedef struct vpx_active_map {
+  unsigned char *active_map; /**< specify on (1) or off (0) for each 16x16 region within a frame */
+ unsigned int rows; /**< number of rows */
+ unsigned int cols; /**< number of cols */
} vpx_active_map_t;
/*!\brief vpx image scaling mode
@@ -228,10 +250,9 @@ typedef struct vpx_active_map
* This defines the data structure for image scaling mode
*
*/
-typedef struct vpx_scaling_mode
-{
- VPX_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */
- VPX_SCALING_MODE v_scaling_mode; /**< vertical scaling mode */
+typedef struct vpx_scaling_mode {
+ VPX_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */
+ VPX_SCALING_MODE v_scaling_mode; /**< vertical scaling mode */
} vpx_scaling_mode_t;
/*!\brief VP8 token partition mode
@@ -241,12 +262,11 @@ typedef struct vpx_scaling_mode
*
*/
-typedef enum
-{
- VP8_ONE_TOKENPARTITION = 0,
- VP8_TWO_TOKENPARTITION = 1,
- VP8_FOUR_TOKENPARTITION = 2,
- VP8_EIGHT_TOKENPARTITION = 3
+typedef enum {
+ VP8_ONE_TOKENPARTITION = 0,
+ VP8_TWO_TOKENPARTITION = 1,
+ VP8_FOUR_TOKENPARTITION = 2,
+ VP8_EIGHT_TOKENPARTITION = 3
} vp8e_token_partitions;
@@ -255,10 +275,9 @@ typedef enum
* Changes the encoder to tune for certain types of input material.
*
*/
-typedef enum
-{
- VP8_TUNE_PSNR,
- VP8_TUNE_SSIM
+typedef enum {
+ VP8_TUNE_PSNR,
+ VP8_TUNE_SSIM
} vp8e_tuning;
@@ -281,6 +300,12 @@ VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP, vpx_roi_map_t *)
VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP, vpx_active_map_t *)
VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE, vpx_scaling_mode_t *)
+VPX_CTRL_USE_TYPE(VP9E_SET_LAYER, int *)
+VPX_CTRL_USE_TYPE(VP9E_SET_SVC, int)
+
+VPX_CTRL_USE_TYPE(VP9E_SET_WIDTH, unsigned int *)
+VPX_CTRL_USE_TYPE(VP9E_SET_HEIGHT, unsigned int *)
+
VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED, int)
VPX_CTRL_USE_TYPE(VP8E_SET_ENABLEAUTOALTREF, unsigned int)
VPX_CTRL_USE_TYPE(VP8E_SET_NOISE_SENSITIVITY, unsigned int)
@@ -289,17 +314,25 @@ VPX_CTRL_USE_TYPE(VP8E_SET_STATIC_THRESHOLD, unsigned int)
VPX_CTRL_USE_TYPE(VP8E_SET_TOKEN_PARTITIONS, int) /* vp8e_token_partitions */
VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_MAXFRAMES, unsigned int)
-VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH , unsigned int)
-VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_TYPE , unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH, unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_TYPE, unsigned int)
VPX_CTRL_USE_TYPE(VP8E_SET_TUNING, int) /* vp8e_tuning */
-VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL , unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL, unsigned int)
+
+VPX_CTRL_USE_TYPE(VP9E_SET_TILE_COLUMNS, int)
+VPX_CTRL_USE_TYPE(VP9E_SET_TILE_ROWS, int)
VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER, int *)
VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER_64, int *)
VPX_CTRL_USE_TYPE(VP8E_SET_MAX_INTRA_BITRATE_PCT, unsigned int)
+VPX_CTRL_USE_TYPE(VP9E_SET_LOSSLESS, unsigned int)
+
+VPX_CTRL_USE_TYPE(VP9E_SET_FRAME_PARALLEL_DECODING, unsigned int)
+VPX_CTRL_USE_TYPE(VP9E_SET_MAX_Q, unsigned int)
+VPX_CTRL_USE_TYPE(VP9E_SET_MIN_Q, unsigned int)
/*! @} - end defgroup vp8_encoder */
#include "vpx_codec_impl_bottom.h"
#endif
diff --git a/libvpx/vpx/vp8dx.h b/libvpx/vpx/vp8dx.h
index 8661035..7d250cc 100644
--- a/libvpx/vpx/vp8dx.h
+++ b/libvpx/vpx/vp8dx.h
@@ -31,7 +31,11 @@
* @{
*/
extern vpx_codec_iface_t vpx_codec_vp8_dx_algo;
-extern vpx_codec_iface_t* vpx_codec_vp8_dx(void);
+extern vpx_codec_iface_t *vpx_codec_vp8_dx(void);
+
+/* TODO(jkoleszar): These move to VP9 in a later patch set. */
+extern vpx_codec_iface_t vpx_codec_vp9_dx_algo;
+extern vpx_codec_iface_t *vpx_codec_vp9_dx(void);
/*!@} - end algorithm interface member group*/
/* Include controls common to both the encoder and decoder */
@@ -45,24 +49,40 @@ extern vpx_codec_iface_t* vpx_codec_vp8_dx(void);
*
* \sa #vpx_codec_control
*/
-enum vp8_dec_control_id
-{
- /** control function to get info on which reference frames were updated
- * by the last decode
- */
- VP8D_GET_LAST_REF_UPDATES = VP8_DECODER_CTRL_ID_START,
-
- /** check if the indicated frame is corrupted */
- VP8D_GET_FRAME_CORRUPTED,
-
- /** control function to get info on which reference frames were used
- * by the last decode
+enum vp8_dec_control_id {
+ /** control function to get info on which reference frames were updated
+ * by the last decode
+ */
+ VP8D_GET_LAST_REF_UPDATES = VP8_DECODER_CTRL_ID_START,
+
+ /** check if the indicated frame is corrupted */
+ VP8D_GET_FRAME_CORRUPTED,
+
+ /** control function to get info on which reference frames were used
+ * by the last decode
+ */
+ VP8D_GET_LAST_REF_USED,
+
+ /** decryption function to decrypt encoded buffer data immediately
+ * before decoding. Takes a vp8_decrypt_init, which contains
+   * a callback function and an opaque context pointer.
+ */
+ VP8D_SET_DECRYPTOR,
+
+ /** For testing. */
+ VP9_INVERT_TILE_DECODE_ORDER,
+
+ VP8_DECODER_CTRL_ID_MAX
+};
+
+typedef struct vp8_decrypt_init {
+ /** Decrypt n bytes of data from input -> output, using the decrypt_state
+ * passed in VP8D_SET_DECRYPTOR.
*/
- VP8D_GET_LAST_REF_USED,
-
- VP8_DECODER_CTRL_ID_MAX
-} ;
-
+ void (*decrypt_cb)(void *decrypt_state, const unsigned char *input,
+ unsigned char *output, int count);
+ void *decrypt_state;
+} vp8_decrypt_init;
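
A sketch of the callback contract; the XOR transform is purely illustrative, not a real cipher, and decrypt_state must stay valid for as long as decoding continues:

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    /* Decrypt count bytes from input to output using the state registered
     * through VP8D_SET_DECRYPTOR. */
    static void xor_decrypt(void *decrypt_state, const unsigned char *input,
                            unsigned char *output, int count) {
      const unsigned char key = *(const unsigned char *)decrypt_state;
      int i;
      for (i = 0; i < count; i++)
        output[i] = input[i] ^ key;
    }

    static void install_decryptor(vpx_codec_ctx_t *decoder, unsigned char *key) {
      vp8_decrypt_init di;
      di.decrypt_cb = xor_decrypt;
      di.decrypt_state = key;
      vpx_codec_control(decoder, VP8D_SET_DECRYPTOR, &di);
    }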
/*!\brief VP8 decoder control function parameter type
*
@@ -75,6 +95,8 @@ enum vp8_dec_control_id
VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_UPDATES, int *)
VPX_CTRL_USE_TYPE(VP8D_GET_FRAME_CORRUPTED, int *)
VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_USED, int *)
+VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR, vp8_decrypt_init *)
+VPX_CTRL_USE_TYPE(VP9_INVERT_TILE_DECODE_ORDER, int)
/*! @} - end defgroup vp8_decoder */
diff --git a/libvpx/vpx/vpx_codec.h b/libvpx/vpx/vpx_codec.h
index d92e165..2e6f1e7 100644
--- a/libvpx/vpx/vpx_codec.h
+++ b/libvpx/vpx/vpx_codec.h
@@ -45,21 +45,28 @@ extern "C" {
#include "vpx_integer.h"
#include "vpx_image.h"
- /*!\brief Decorator indicating a function is deprecated */
+ /*!\brief Decorator indicating a function is deprecated */
#ifndef DEPRECATED
#if defined(__GNUC__) && __GNUC__
#define DEPRECATED __attribute__ ((deprecated))
-#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
#elif defined(_MSC_VER)
#define DEPRECATED
-#define DECLSPEC_DEPRECATED __declspec(deprecated) /**< \copydoc #DEPRECATED */
#else
#define DEPRECATED
-#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
#endif
+#endif /* DEPRECATED */
+
+#ifndef DECLSPEC_DEPRECATED
+#if defined(__GNUC__) && __GNUC__
+#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
+#elif defined(_MSC_VER)
+#define DECLSPEC_DEPRECATED __declspec(deprecated) /**< \copydoc #DEPRECATED */
+#else
+#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
#endif
+#endif /* DECLSPEC_DEPRECATED */
- /*!\brief Decorator indicating a function is potentially unused */
+ /*!\brief Decorator indicating a function is potentially unused */
#ifdef UNUSED
#elif __GNUC__
#define UNUSED __attribute__ ((unused))
@@ -67,312 +74,310 @@ extern "C" {
#define UNUSED
#endif
- /*!\brief Current ABI version number
- *
- * \internal
- * If this file is altered in any way that changes the ABI, this value
- * must be bumped. Examples include, but are not limited to, changing
- * types, removing or reassigning enums, adding/removing/rearranging
- * fields to structures
- */
+ /*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped. Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
#define VPX_CODEC_ABI_VERSION (2 + VPX_IMAGE_ABI_VERSION) /**<\hideinitializer*/
- /*!\brief Algorithm return codes */
- typedef enum {
- /*!\brief Operation completed without error */
- VPX_CODEC_OK,
-
- /*!\brief Unspecified error */
- VPX_CODEC_ERROR,
-
- /*!\brief Memory operation failed */
- VPX_CODEC_MEM_ERROR,
-
- /*!\brief ABI version mismatch */
- VPX_CODEC_ABI_MISMATCH,
-
- /*!\brief Algorithm does not have required capability */
- VPX_CODEC_INCAPABLE,
-
- /*!\brief The given bitstream is not supported.
- *
- * The bitstream was unable to be parsed at the highest level. The decoder
- * is unable to proceed. This error \ref SHOULD be treated as fatal to the
- * stream. */
- VPX_CODEC_UNSUP_BITSTREAM,
-
- /*!\brief Encoded bitstream uses an unsupported feature
- *
- * The decoder does not implement a feature required by the encoder. This
- * return code should only be used for features that prevent future
- * pictures from being properly decoded. This error \ref MAY be treated as
- * fatal to the stream or \ref MAY be treated as fatal to the current GOP.
- */
- VPX_CODEC_UNSUP_FEATURE,
-
- /*!\brief The coded data for this stream is corrupt or incomplete
- *
- * There was a problem decoding the current frame. This return code
- * should only be used for failures that prevent future pictures from
- * being properly decoded. This error \ref MAY be treated as fatal to the
- * stream or \ref MAY be treated as fatal to the current GOP. If decoding
- * is continued for the current GOP, artifacts may be present.
- */
- VPX_CODEC_CORRUPT_FRAME,
-
- /*!\brief An application-supplied parameter is not valid.
- *
- */
- VPX_CODEC_INVALID_PARAM,
-
- /*!\brief An iterator reached the end of list.
- *
- */
- VPX_CODEC_LIST_END
-
- }
- vpx_codec_err_t;
-
-
- /*! \brief Codec capabilities bitfield
- *
- * Each codec advertises the capabilities it supports as part of its
- * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
- * or functionality, and are not required to be supported.
- *
- * The available flags are specified by VPX_CODEC_CAP_* defines.
- */
- typedef long vpx_codec_caps_t;
-#define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */
-#define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */
-#define VPX_CODEC_CAP_XMA 0x4 /**< Supports eXternal Memory Allocation */
+ /*!\brief Algorithm return codes */
+ typedef enum {
+ /*!\brief Operation completed without error */
+ VPX_CODEC_OK,
+ /*!\brief Unspecified error */
+ VPX_CODEC_ERROR,
- /*! \brief Initialization-time Feature Enabling
- *
- * Certain codec features must be known at initialization time, to allow for
- * proper memory allocation.
- *
- * The available flags are specified by VPX_CODEC_USE_* defines.
- */
- typedef long vpx_codec_flags_t;
-#define VPX_CODEC_USE_XMA 0x00000001 /**< Use eXternal Memory Allocation mode */
+ /*!\brief Memory operation failed */
+ VPX_CODEC_MEM_ERROR,
+ /*!\brief ABI version mismatch */
+ VPX_CODEC_ABI_MISMATCH,
- /*!\brief Codec interface structure.
- *
- * Contains function pointers and other data private to the codec
- * implementation. This structure is opaque to the application.
- */
- typedef const struct vpx_codec_iface vpx_codec_iface_t;
-
+ /*!\brief Algorithm does not have required capability */
+ VPX_CODEC_INCAPABLE,
- /*!\brief Codec private data structure.
+ /*!\brief The given bitstream is not supported.
*
- * Contains data private to the codec implementation. This structure is opaque
- * to the application.
- */
- typedef struct vpx_codec_priv vpx_codec_priv_t;
-
+ * The bitstream was unable to be parsed at the highest level. The decoder
+ * is unable to proceed. This error \ref SHOULD be treated as fatal to the
+ * stream. */
+ VPX_CODEC_UNSUP_BITSTREAM,
- /*!\brief Iterator
+ /*!\brief Encoded bitstream uses an unsupported feature
*
- * Opaque storage used for iterating over lists.
+ * The decoder does not implement a feature required by the encoder. This
+ * return code should only be used for features that prevent future
+ * pictures from being properly decoded. This error \ref MAY be treated as
+ * fatal to the stream or \ref MAY be treated as fatal to the current GOP.
*/
- typedef const void *vpx_codec_iter_t;
+ VPX_CODEC_UNSUP_FEATURE,
-
- /*!\brief Codec context structure
+ /*!\brief The coded data for this stream is corrupt or incomplete
*
- * All codecs \ref MUST support this context structure fully. In general,
- * this data should be considered private to the codec algorithm, and
- * not be manipulated or examined by the calling application. Applications
- * may reference the 'name' member to get a printable description of the
- * algorithm.
+ * There was a problem decoding the current frame. This return code
+ * should only be used for failures that prevent future pictures from
+ * being properly decoded. This error \ref MAY be treated as fatal to the
+ * stream or \ref MAY be treated as fatal to the current GOP. If decoding
+ * is continued for the current GOP, artifacts may be present.
*/
- typedef struct vpx_codec_ctx
- {
- const char *name; /**< Printable interface name */
- vpx_codec_iface_t *iface; /**< Interface pointers */
- vpx_codec_err_t err; /**< Last returned error */
- const char *err_detail; /**< Detailed info, if available */
- vpx_codec_flags_t init_flags; /**< Flags passed at init time */
- union
- {
- struct vpx_codec_dec_cfg *dec; /**< Decoder Configuration Pointer */
- struct vpx_codec_enc_cfg *enc; /**< Encoder Configuration Pointer */
- void *raw;
- } config; /**< Configuration pointer aliasing union */
- vpx_codec_priv_t *priv; /**< Algorithm private storage */
- } vpx_codec_ctx_t;
-
+ VPX_CODEC_CORRUPT_FRAME,
- /*
- * Library Version Number Interface
+ /*!\brief An application-supplied parameter is not valid.
*
- * For example, see the following sample return values:
- * vpx_codec_version() (1<<16 | 2<<8 | 3)
- * vpx_codec_version_str() "v1.2.3-rc1-16-gec6a1ba"
- * vpx_codec_version_extra_str() "rc1-16-gec6a1ba"
*/
+ VPX_CODEC_INVALID_PARAM,
- /*!\brief Return the version information (as an integer)
- *
- * Returns a packed encoding of the library version number. This will only include
- * the major.minor.patch component of the version number. Note that this encoded
- * value should be accessed through the macros provided, as the encoding may change
- * in the future.
+ /*!\brief An iterator reached the end of list.
*
*/
- int vpx_codec_version(void);
-#define VPX_VERSION_MAJOR(v) ((v>>16)&0xff) /**< extract major from packed version */
-#define VPX_VERSION_MINOR(v) ((v>>8)&0xff) /**< extract minor from packed version */
-#define VPX_VERSION_PATCH(v) ((v>>0)&0xff) /**< extract patch from packed version */
+ VPX_CODEC_LIST_END
- /*!\brief Return the version major number */
-#define vpx_codec_version_major() ((vpx_codec_version()>>16)&0xff)
-
- /*!\brief Return the version minor number */
-#define vpx_codec_version_minor() ((vpx_codec_version()>>8)&0xff)
-
- /*!\brief Return the version patch number */
-#define vpx_codec_version_patch() ((vpx_codec_version()>>0)&0xff)
-
-
- /*!\brief Return the version information (as a string)
- *
- * Returns a printable string containing the full library version number. This may
- * contain additional text following the three digit version number, as to indicate
- * release candidates, prerelease versions, etc.
- *
- */
- const char *vpx_codec_version_str(void);
-
-
- /*!\brief Return the version information (as a string)
- *
- * Returns a printable "extra string". This is the component of the string returned
- * by vpx_codec_version_str() following the three digit version number.
- *
- */
- const char *vpx_codec_version_extra_str(void);
-
-
- /*!\brief Return the build configuration
- *
- * Returns a printable string containing an encoded version of the build
- * configuration. This may be useful to vpx support.
- *
- */
- const char *vpx_codec_build_config(void);
+ }
+ vpx_codec_err_t;
- /*!\brief Return the name for a given interface
- *
- * Returns a human readable string for name of the given codec interface.
- *
- * \param[in] iface Interface pointer
- *
- */
- const char *vpx_codec_iface_name(vpx_codec_iface_t *iface);
-
-
- /*!\brief Convert error number to printable string
- *
- * Returns a human readable string for the last error returned by the
- * algorithm. The returned error will be one line and will not contain
- * any newline characters.
- *
- *
- * \param[in] err Error number.
- *
- */
- const char *vpx_codec_err_to_string(vpx_codec_err_t err);
-
-
- /*!\brief Retrieve error synopsis for codec context
- *
- * Returns a human readable string for the last error returned by the
- * algorithm. The returned error will be one line and will not contain
- * any newline characters.
- *
- *
- * \param[in] ctx Pointer to this instance's context.
- *
- */
- const char *vpx_codec_error(vpx_codec_ctx_t *ctx);
+ /*! \brief Codec capabilities bitfield
+ *
+ * Each codec advertises the capabilities it supports as part of its
+ * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
+ * or functionality, and are not required to be supported.
+ *
+ * The available flags are specified by VPX_CODEC_CAP_* defines.
+ */
+ typedef long vpx_codec_caps_t;
+#define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */
+#define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */
+#define VPX_CODEC_CAP_XMA 0x4 /**< Supports eXternal Memory Allocation */
- /*!\brief Retrieve detailed error information for codec context
- *
- * Returns a human readable string providing detailed information about
- * the last error.
- *
- * \param[in] ctx Pointer to this instance's context.
- *
- * \retval NULL
- * No detailed information is available.
- */
- const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx);
+ /*! \brief Initialization-time Feature Enabling
+ *
+ * Certain codec features must be known at initialization time, to allow for
+ * proper memory allocation.
+ *
+ * The available flags are specified by VPX_CODEC_USE_* defines.
+ */
+ typedef long vpx_codec_flags_t;
+#define VPX_CODEC_USE_XMA 0x00000001 /**< Use eXternal Memory Allocation mode */
- /* REQUIRED FUNCTIONS
- *
- * The following functions are required to be implemented for all codecs.
- * They represent the base case functionality expected of all codecs.
- */
+ /*!\brief Codec interface structure.
+ *
+ * Contains function pointers and other data private to the codec
+ * implementation. This structure is opaque to the application.
+ */
+ typedef const struct vpx_codec_iface vpx_codec_iface_t;
+
+
+ /*!\brief Codec private data structure.
+ *
+ * Contains data private to the codec implementation. This structure is opaque
+ * to the application.
+ */
+ typedef struct vpx_codec_priv vpx_codec_priv_t;
+
+
+ /*!\brief Iterator
+ *
+ * Opaque storage used for iterating over lists.
+ */
+ typedef const void *vpx_codec_iter_t;
+
+
+ /*!\brief Codec context structure
+ *
+ * All codecs \ref MUST support this context structure fully. In general,
+ * this data should be considered private to the codec algorithm, and
+ * not be manipulated or examined by the calling application. Applications
+ * may reference the 'name' member to get a printable description of the
+ * algorithm.
+ */
+ typedef struct vpx_codec_ctx {
+ const char *name; /**< Printable interface name */
+ vpx_codec_iface_t *iface; /**< Interface pointers */
+ vpx_codec_err_t err; /**< Last returned error */
+ const char *err_detail; /**< Detailed info, if available */
+ vpx_codec_flags_t init_flags; /**< Flags passed at init time */
+ union {
+ struct vpx_codec_dec_cfg *dec; /**< Decoder Configuration Pointer */
+ struct vpx_codec_enc_cfg *enc; /**< Encoder Configuration Pointer */
+ void *raw;
+ } config; /**< Configuration pointer aliasing union */
+ vpx_codec_priv_t *priv; /**< Algorithm private storage */
+ } vpx_codec_ctx_t;
+
+
+ /*
+ * Library Version Number Interface
+ *
+ * For example, see the following sample return values:
+ * vpx_codec_version() (1<<16 | 2<<8 | 3)
+ * vpx_codec_version_str() "v1.2.3-rc1-16-gec6a1ba"
+ * vpx_codec_version_extra_str() "rc1-16-gec6a1ba"
+ */
+
+ /*!\brief Return the version information (as an integer)
+ *
+ * Returns a packed encoding of the library version number. This will only include
+ * the major.minor.patch component of the version number. Note that this encoded
+ * value should be accessed through the macros provided, as the encoding may change
+ * in the future.
+ *
+ */
+ int vpx_codec_version(void);
+#define VPX_VERSION_MAJOR(v) ((v>>16)&0xff) /**< extract major from packed version */
+#define VPX_VERSION_MINOR(v) ((v>>8)&0xff) /**< extract minor from packed version */
+#define VPX_VERSION_PATCH(v) ((v>>0)&0xff) /**< extract patch from packed version */
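
A minimal sketch of unpacking the packed version number with these macros:

    #include <stdio.h>
    #include "vpx/vpx_codec.h"

    int main(void) {
      const int v = vpx_codec_version();
      printf("libvpx %d.%d.%d (%s)\n",
             VPX_VERSION_MAJOR(v), VPX_VERSION_MINOR(v), VPX_VERSION_PATCH(v),
             vpx_codec_version_str());
      return 0;
    }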
- /*!\brief Destroy a codec instance
- *
- * Destroys a codec context, freeing any associated memory buffers.
- *
- * \param[in] ctx Pointer to this instance's context
- *
- * \retval #VPX_CODEC_OK
- * The codec algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
- * Memory allocation failed.
- */
- vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx);
+ /*!\brief Return the version major number */
+#define vpx_codec_version_major() ((vpx_codec_version()>>16)&0xff)
+ /*!\brief Return the version minor number */
+#define vpx_codec_version_minor() ((vpx_codec_version()>>8)&0xff)
- /*!\brief Get the capabilities of an algorithm.
- *
- * Retrieves the capabilities bitfield from the algorithm's interface.
- *
- * \param[in] iface Pointer to the algorithm interface
- *
- */
- vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface);
+ /*!\brief Return the version patch number */
+#define vpx_codec_version_patch() ((vpx_codec_version()>>0)&0xff)
- /*!\brief Control algorithm
- *
- * This function is used to exchange algorithm specific data with the codec
- * instance. This can be used to implement features specific to a particular
- * algorithm.
- *
- * This wrapper function dispatches the request to the helper function
- * associated with the given ctrl_id. It tries to call this function
- * transparently, but will return #VPX_CODEC_ERROR if the request could not
- * be dispatched.
- *
- * Note that this function should not be used directly. Call the
- * #vpx_codec_control wrapper macro instead.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] ctrl_id Algorithm specific control identifier
- *
- * \retval #VPX_CODEC_OK
- * The control request was processed.
- * \retval #VPX_CODEC_ERROR
- * The control request was not processed.
- * \retval #VPX_CODEC_INVALID_PARAM
- * The data was not valid.
- */
- vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx,
- int ctrl_id,
- ...);
+ /*!\brief Return the version information (as a string)
+ *
+ * Returns a printable string containing the full library version number. This may
+   * contain additional text following the three digit version number, to indicate
+ * release candidates, prerelease versions, etc.
+ *
+ */
+ const char *vpx_codec_version_str(void);
+
+
+ /*!\brief Return the version information (as a string)
+ *
+ * Returns a printable "extra string". This is the component of the string returned
+ * by vpx_codec_version_str() following the three digit version number.
+ *
+ */
+ const char *vpx_codec_version_extra_str(void);
+
+
+ /*!\brief Return the build configuration
+ *
+ * Returns a printable string containing an encoded version of the build
+ * configuration. This may be useful to vpx support.
+ *
+ */
+ const char *vpx_codec_build_config(void);
+
+
+ /*!\brief Return the name for a given interface
+ *
+   * Returns a human readable string for the name of the given codec interface.
+ *
+ * \param[in] iface Interface pointer
+ *
+ */
+ const char *vpx_codec_iface_name(vpx_codec_iface_t *iface);
+
+
+ /*!\brief Convert error number to printable string
+ *
+ * Returns a human readable string for the last error returned by the
+ * algorithm. The returned error will be one line and will not contain
+ * any newline characters.
+ *
+ *
+ * \param[in] err Error number.
+ *
+ */
+ const char *vpx_codec_err_to_string(vpx_codec_err_t err);
+
+
+ /*!\brief Retrieve error synopsis for codec context
+ *
+ * Returns a human readable string for the last error returned by the
+ * algorithm. The returned error will be one line and will not contain
+ * any newline characters.
+ *
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ *
+ */
+ const char *vpx_codec_error(vpx_codec_ctx_t *ctx);
+
+
+ /*!\brief Retrieve detailed error information for codec context
+ *
+ * Returns a human readable string providing detailed information about
+ * the last error.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ *
+ * \retval NULL
+ * No detailed information is available.
+ */
+ const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx);
+
+
+ /* REQUIRED FUNCTIONS
+ *
+ * The following functions are required to be implemented for all codecs.
+ * They represent the base case functionality expected of all codecs.
+ */
+
+ /*!\brief Destroy a codec instance
+ *
+ * Destroys a codec context, freeing any associated memory buffers.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ *
+ * \retval #VPX_CODEC_OK
+ * The codec algorithm initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Memory allocation failed.
+ */
+ vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx);
+
+
+ /*!\brief Get the capabilities of an algorithm.
+ *
+ * Retrieves the capabilities bitfield from the algorithm's interface.
+ *
+ * \param[in] iface Pointer to the algorithm interface
+ *
+ */
+ vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface);
+
+
+ /*!\brief Control algorithm
+ *
+ * This function is used to exchange algorithm specific data with the codec
+ * instance. This can be used to implement features specific to a particular
+ * algorithm.
+ *
+ * This wrapper function dispatches the request to the helper function
+ * associated with the given ctrl_id. It tries to call this function
+ * transparently, but will return #VPX_CODEC_ERROR if the request could not
+ * be dispatched.
+ *
+ * Note that this function should not be used directly. Call the
+ * #vpx_codec_control wrapper macro instead.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] ctrl_id Algorithm specific control identifier
+ *
+ * \retval #VPX_CODEC_OK
+ * The control request was processed.
+ * \retval #VPX_CODEC_ERROR
+ * The control request was not processed.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * The data was not valid.
+ */
+ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx,
+ int ctrl_id,
+ ...);
#if defined(VPX_DISABLE_CTRL_TYPECHECKS) && VPX_DISABLE_CTRL_TYPECHECKS
# define vpx_codec_control(ctx,id,data) vpx_codec_control_(ctx,id,data)
# define VPX_CTRL_USE_TYPE(id, typ)
@@ -380,172 +385,171 @@ extern "C" {
# define VPX_CTRL_VOID(id, typ)
#else
- /*!\brief vpx_codec_control wrapper macro
- *
- * This macro allows for type safe conversions across the variadic parameter
- * to vpx_codec_control_().
- *
- * \internal
- * It works by dispatching the call to the control function through a wrapper
- * function named with the id parameter.
- */
+ /*!\brief vpx_codec_control wrapper macro
+ *
+ * This macro allows for type safe conversions across the variadic parameter
+ * to vpx_codec_control_().
+ *
+ * \internal
+ * It works by dispatching the call to the control function through a wrapper
+ * function named with the id parameter.
+ */
# define vpx_codec_control(ctx,id,data) vpx_codec_control_##id(ctx,id,data)\
- /**<\hideinitializer*/
-
-
- /*!\brief vpx_codec_control type definition macro
- *
- * This macro allows for type safe conversions across the variadic parameter
- * to vpx_codec_control_(). It defines the type of the argument for a given
- * control identifier.
- *
- * \internal
- * It defines a static function with
- * the correctly typed arguments as a wrapper to the type-unsafe internal
- * function.
- */
+ /**<\hideinitializer*/
+
+
+ /*!\brief vpx_codec_control type definition macro
+ *
+ * This macro allows for type safe conversions across the variadic parameter
+ * to vpx_codec_control_(). It defines the type of the argument for a given
+ * control identifier.
+ *
+ * \internal
+ * It defines a static function with
+ * the correctly typed arguments as a wrapper to the type-unsafe internal
+ * function.
+ */
# define VPX_CTRL_USE_TYPE(id, typ) \
- static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) UNUSED;\
- \
- static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
- return vpx_codec_control_(ctx, ctrl_id, data);\
- } /**<\hideinitializer*/
-
-
- /*!\brief vpx_codec_control deprecated type definition macro
- *
- * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is
- * deprecated and should not be used. Consult the documentation for your
- * codec for more information.
- *
- * \internal
- * It defines a static function with the correctly typed arguments as a
- * wrapper to the type-unsafe internal function.
- */
+ static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) UNUSED;\
+ \
+ static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
+ return vpx_codec_control_(ctx, ctrl_id, data);\
+ } /**<\hideinitializer*/
+
+
+ /*!\brief vpx_codec_control deprecated type definition macro
+ *
+ * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is
+ * deprecated and should not be used. Consult the documentation for your
+ * codec for more information.
+ *
+ * \internal
+ * It defines a static function with the correctly typed arguments as a
+ * wrapper to the type-unsafe internal function.
+ */
# define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \
- DECLSPEC_DEPRECATED static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) DEPRECATED UNUSED;\
- \
- DECLSPEC_DEPRECATED static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
- return vpx_codec_control_(ctx, ctrl_id, data);\
- } /**<\hideinitializer*/
-
-
- /*!\brief vpx_codec_control void type definition macro
- *
- * This macro allows for type safe conversions across the variadic parameter
- * to vpx_codec_control_(). It indicates that a given control identifier takes
- * no argument.
- *
- * \internal
- * It defines a static function without a data argument as a wrapper to the
- * type-unsafe internal function.
- */
+ DECLSPEC_DEPRECATED static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) DEPRECATED UNUSED;\
+ \
+ DECLSPEC_DEPRECATED static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
+ return vpx_codec_control_(ctx, ctrl_id, data);\
+ } /**<\hideinitializer*/
+
+
+ /*!\brief vpx_codec_control void type definition macro
+ *
+ * This macro allows for type safe conversions across the variadic parameter
+ * to vpx_codec_control_(). It indicates that a given control identifier takes
+ * no argument.
+ *
+ * \internal
+ * It defines a static function without a data argument as a wrapper to the
+ * type-unsafe internal function.
+ */
# define VPX_CTRL_VOID(id) \
- static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t*, int) UNUSED;\
- \
- static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id) {\
- return vpx_codec_control_(ctx, ctrl_id);\
- } /**<\hideinitializer*/
+ static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t*, int) UNUSED;\
+ \
+ static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id) {\
+ return vpx_codec_control_(ctx, ctrl_id);\
+ } /**<\hideinitializer*/
#endif
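
To make the mechanism concrete: for a control declared with VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED, int), a call such as vpx_codec_control(&ctx, VP8E_SET_CPUUSED, 4) resolves to a generated static wrapper roughly equivalent to:

    static vpx_codec_err_t
    vpx_codec_control_VP8E_SET_CPUUSED(vpx_codec_ctx_t *ctx, int ctrl_id,
                                       int data) {
      return vpx_codec_control_(ctx, ctrl_id, data);
    }
    /* A mistyped third argument therefore becomes a compile-time error
     * instead of a silent variadic mismatch. */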
- /*!\defgroup cap_xma External Memory Allocation Functions
- *
- * The following functions are required to be implemented for all codecs
- * that advertise the VPX_CODEC_CAP_XMA capability. Calling these functions
- * for codecs that don't advertise this capability will result in an error
- * code being returned, usually VPX_CODEC_INCAPABLE
- * @{
- */
+ /*!\defgroup cap_xma External Memory Allocation Functions
+ *
+ * The following functions are required to be implemented for all codecs
+ * that advertise the VPX_CODEC_CAP_XMA capability. Calling these functions
+ * for codecs that don't advertise this capability will result in an error
+ * code being returned, usually VPX_CODEC_INCAPABLE
+ * @{
+ */
- /*!\brief Memory Map Entry
- *
- * This structure is used to contain the properties of a memory segment. It
- * is populated by the codec in the request phase, and by the calling
- * application once the requested allocation has been performed.
+ /*!\brief Memory Map Entry
+ *
+ * This structure is used to contain the properties of a memory segment. It
+ * is populated by the codec in the request phase, and by the calling
+ * application once the requested allocation has been performed.
+ */
+ typedef struct vpx_codec_mmap {
+ /*
+ * The following members are set by the codec when requesting a segment
*/
- typedef struct vpx_codec_mmap
- {
- /*
- * The following members are set by the codec when requesting a segment
- */
- unsigned int id; /**< identifier for the segment's contents */
- unsigned long sz; /**< size of the segment, in bytes */
- unsigned int align; /**< required alignment of the segment, in bytes */
- unsigned int flags; /**< bitfield containing segment properties */
+ unsigned int id; /**< identifier for the segment's contents */
+ unsigned long sz; /**< size of the segment, in bytes */
+ unsigned int align; /**< required alignment of the segment, in bytes */
+ unsigned int flags; /**< bitfield containing segment properties */
#define VPX_CODEC_MEM_ZERO 0x1 /**< Segment must be zeroed by allocation */
#define VPX_CODEC_MEM_WRONLY 0x2 /**< Segment need not be readable */
#define VPX_CODEC_MEM_FAST 0x4 /**< Place in fast memory, if available */
- /* The following members are to be filled in by the allocation function */
- void *base; /**< pointer to the allocated segment */
- void (*dtor)(struct vpx_codec_mmap *map); /**< destructor to call */
- void *priv; /**< allocator private storage */
- } vpx_codec_mmap_t; /**< alias for struct vpx_codec_mmap */
-
-
- /*!\brief Iterate over the list of segments to allocate.
- *
- * Iterates over a list of the segments to allocate. The iterator storage
- * should be initialized to NULL to start the iteration. Iteration is complete
- * when this function returns VPX_CODEC_LIST_END. The amount of memory needed to
- * allocate is dependent upon the size of the encoded stream. In cases where the
- * stream is not available at allocation time, a fixed size must be requested.
- * The codec will not be able to operate on streams larger than the size used at
- * allocation time.
- *
- * \param[in] ctx Pointer to this instance's context.
- * \param[out] mmap Pointer to the memory map entry to populate.
- * \param[in,out] iter Iterator storage, initialized to NULL
- *
- * \retval #VPX_CODEC_OK
- * The memory map entry was populated.
- * \retval #VPX_CODEC_ERROR
- * Codec does not support XMA mode.
- * \retval #VPX_CODEC_MEM_ERROR
- * Unable to determine segment size from stream info.
- */
- vpx_codec_err_t vpx_codec_get_mem_map(vpx_codec_ctx_t *ctx,
- vpx_codec_mmap_t *mmap,
- vpx_codec_iter_t *iter);
-
-
- /*!\brief Identify allocated segments to codec instance
- *
- * Stores a list of allocated segments in the codec. Segments \ref MUST be
- * passed in the order they are read from vpx_codec_get_mem_map(), but may be
- * passed in groups of any size. Segments \ref MUST be set only once. The
- * allocation function \ref MUST ensure that the vpx_codec_mmap_t::base member
- * is non-NULL. If the segment requires cleanup handling (e.g., calling free()
- * or close()) then the vpx_codec_mmap_t::dtor member \ref MUST be populated.
- *
- * \param[in] ctx Pointer to this instance's context.
- * \param[in] mmaps Pointer to the first memory map entry in the list.
- * \param[in] num_maps Number of entries being set at this time
- *
- * \retval #VPX_CODEC_OK
- * The segment was stored in the codec context.
- * \retval #VPX_CODEC_INCAPABLE
- * Codec does not support XMA mode.
- * \retval #VPX_CODEC_MEM_ERROR
- * Segment base address was not set, or segment was already stored.
-
- */
- vpx_codec_err_t vpx_codec_set_mem_map(vpx_codec_ctx_t *ctx,
- vpx_codec_mmap_t *mmaps,
- unsigned int num_maps);
-
- /*!@} - end defgroup cap_xma*/
- /*!@} - end defgroup codec*/
+ /* The following members are to be filled in by the allocation function */
+ void *base; /**< pointer to the allocated segment */
+ void (*dtor)(struct vpx_codec_mmap *map); /**< destructor to call */
+ void *priv; /**< allocator private storage */
+ } vpx_codec_mmap_t; /**< alias for struct vpx_codec_mmap */
+
+
+ /*!\brief Iterate over the list of segments to allocate.
+ *
+ * Iterates over a list of the segments to allocate. The iterator storage
+ * should be initialized to NULL to start the iteration. Iteration is complete
+   * when this function returns VPX_CODEC_LIST_END. The amount of memory that
+   * must be allocated depends on the size of the encoded stream. In cases where the
+ * stream is not available at allocation time, a fixed size must be requested.
+ * The codec will not be able to operate on streams larger than the size used at
+ * allocation time.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[out] mmap Pointer to the memory map entry to populate.
+ * \param[in,out] iter Iterator storage, initialized to NULL
+ *
+ * \retval #VPX_CODEC_OK
+ * The memory map entry was populated.
+ * \retval #VPX_CODEC_ERROR
+ * Codec does not support XMA mode.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Unable to determine segment size from stream info.
+ */
+ vpx_codec_err_t vpx_codec_get_mem_map(vpx_codec_ctx_t *ctx,
+ vpx_codec_mmap_t *mmap,
+ vpx_codec_iter_t *iter);
+
+
+ /*!\brief Identify allocated segments to codec instance
+ *
+ * Stores a list of allocated segments in the codec. Segments \ref MUST be
+ * passed in the order they are read from vpx_codec_get_mem_map(), but may be
+ * passed in groups of any size. Segments \ref MUST be set only once. The
+ * allocation function \ref MUST ensure that the vpx_codec_mmap_t::base member
+ * is non-NULL. If the segment requires cleanup handling (e.g., calling free()
+ * or close()) then the vpx_codec_mmap_t::dtor member \ref MUST be populated.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[in] mmaps Pointer to the first memory map entry in the list.
+ * \param[in] num_maps Number of entries being set at this time
+ *
+ * \retval #VPX_CODEC_OK
+ * The segment was stored in the codec context.
+ * \retval #VPX_CODEC_INCAPABLE
+ * Codec does not support XMA mode.
+ * \retval #VPX_CODEC_MEM_ERROR
+ *     Segment base address was not set, or segment was already stored.
+ */
+ vpx_codec_err_t vpx_codec_set_mem_map(vpx_codec_ctx_t *ctx,
+ vpx_codec_mmap_t *mmaps,
+ unsigned int num_maps);
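A minimal sketch of the allocation handshake the two functions above describe:
iterate with vpx_codec_get_mem_map(), allocate each segment, and hand it back
with vpx_codec_set_mem_map(). The segment size is assumed to be carried in the
mmap's sz member (declared with the rest of the struct above); the align member
is ignored here for brevity, though a real allocator must honor it. The helper
names are illustrative.

    #include <stdlib.h>
    #include "vpx/vpx_codec.h"

    static void mmap_dtor(vpx_codec_mmap_t *map) {
      free(map->base);                  /* segment came from malloc() below */
    }

    static vpx_codec_err_t alloc_mem_maps(vpx_codec_ctx_t *ctx) {
      vpx_codec_mmap_t mmap;
      vpx_codec_iter_t iter = NULL;     /* iterator storage starts at NULL */
      vpx_codec_err_t res;

      while ((res = vpx_codec_get_mem_map(ctx, &mmap, &iter)) == VPX_CODEC_OK) {
        mmap.base = malloc(mmap.sz);    /* base MUST be non-NULL */
        if (!mmap.base)
          return VPX_CODEC_MEM_ERROR;
        mmap.dtor = mmap_dtor;          /* free() is needed, so dtor MUST be set */
        /* segments MUST be set in the order they were read; here, one at a time */
        res = vpx_codec_set_mem_map(ctx, &mmap, 1);
        if (res != VPX_CODEC_OK)
          return res;
      }
      return res == VPX_CODEC_LIST_END ? VPX_CODEC_OK : res;
    }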
+
+ /*!@} - end defgroup cap_xma*/
+ /*!@} - end defgroup codec*/
#endif
diff --git a/libvpx/vpx/vpx_decoder.h b/libvpx/vpx/vpx_decoder.h
index 1ccf1c5..e7701e5 100644
--- a/libvpx/vpx/vpx_decoder.h
+++ b/libvpx/vpx/vpx_decoder.h
@@ -32,299 +32,302 @@ extern "C" {
#define VPX_DECODER_H
#include "vpx_codec.h"
- /*!\brief Current ABI version number
- *
- * \internal
- * If this file is altered in any way that changes the ABI, this value
- * must be bumped. Examples include, but are not limited to, changing
- * types, removing or reassigning enums, adding/removing/rearranging
- * fields to structures
- */
+ /*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped. Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
#define VPX_DECODER_ABI_VERSION (2 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
- /*! \brief Decoder capabilities bitfield
- *
- * Each decoder advertises the capabilities it supports as part of its
- * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
- * or functionality, and are not required to be supported by a decoder.
- *
- * The available flags are specified by VPX_CODEC_CAP_* defines.
- */
+ /*! \brief Decoder capabilities bitfield
+ *
+ * Each decoder advertises the capabilities it supports as part of its
+ * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
+ * or functionality, and are not required to be supported by a decoder.
+ *
+ * The available flags are specified by VPX_CODEC_CAP_* defines.
+ */
#define VPX_CODEC_CAP_PUT_SLICE 0x10000 /**< Will issue put_slice callbacks */
#define VPX_CODEC_CAP_PUT_FRAME 0x20000 /**< Will issue put_frame callbacks */
#define VPX_CODEC_CAP_POSTPROC 0x40000 /**< Can postprocess decoded frame */
#define VPX_CODEC_CAP_ERROR_CONCEALMENT 0x80000 /**< Can conceal errors due to
- packet loss */
+ packet loss */
#define VPX_CODEC_CAP_INPUT_FRAGMENTS 0x100000 /**< Can receive encoded frames
- one fragment at a time */
-
- /*! \brief Initialization-time Feature Enabling
- *
- * Certain codec features must be known at initialization time, to allow for
- * proper memory allocation.
- *
- * The available flags are specified by VPX_CODEC_USE_* defines.
- */
+ one fragment at a time */
+
+ /*! \brief Initialization-time Feature Enabling
+ *
+ * Certain codec features must be known at initialization time, to allow for
+ * proper memory allocation.
+ *
+ * The available flags are specified by VPX_CODEC_USE_* defines.
+ */
+#define VPX_CODEC_CAP_FRAME_THREADING 0x200000 /**< Can support frame-based
+ multi-threading */
+
#define VPX_CODEC_USE_POSTPROC 0x10000 /**< Postprocess decoded frame */
#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000 /**< Conceal errors in decoded
- frames */
+ frames */
#define VPX_CODEC_USE_INPUT_FRAGMENTS 0x40000 /**< The input frame should be
- passed to the decoder one
- fragment at a time */
-
- /*!\brief Stream properties
- *
- * This structure is used to query or set properties of the decoded
- * stream. Algorithms may extend this structure with data specific
- * to their bitstream by setting the sz member appropriately.
- */
- typedef struct vpx_codec_stream_info
- {
- unsigned int sz; /**< Size of this structure */
- unsigned int w; /**< Width (or 0 for unknown/default) */
- unsigned int h; /**< Height (or 0 for unknown/default) */
- unsigned int is_kf; /**< Current frame is a keyframe */
- } vpx_codec_stream_info_t;
-
- /* REQUIRED FUNCTIONS
- *
- * The following functions are required to be implemented for all decoders.
- * They represent the base case functionality expected of all decoders.
- */
-
-
- /*!\brief Initialization Configurations
- *
- * This structure is used to pass init time configuration options to the
- * decoder.
- */
- typedef struct vpx_codec_dec_cfg
- {
- unsigned int threads; /**< Maximum number of threads to use, default 1 */
- unsigned int w; /**< Width */
- unsigned int h; /**< Height */
- } vpx_codec_dec_cfg_t; /**< alias for struct vpx_codec_dec_cfg */
-
-
- /*!\brief Initialize a decoder instance
- *
- * Initializes a decoder context using the given interface. Applications
- * should call the vpx_codec_dec_init convenience macro instead of this
- * function directly, to ensure that the ABI version number parameter
- * is properly initialized.
- *
- * If the library was configured with --disable-multithread, this call
- * is not thread safe and should be guarded with a lock if being used
- * in a multithreaded context.
- *
- * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
- * parameter), the storage pointed to by the cfg parameter must be
- * kept readable and stable until all memory maps have been set.
- *
- * \param[in] ctx Pointer to this instance's context.
- * \param[in] iface Pointer to the algorithm interface to use.
- * \param[in] cfg Configuration to use, if known. May be NULL.
- * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
- * \param[in] ver ABI version number. Must be set to
- * VPX_DECODER_ABI_VERSION
- * \retval #VPX_CODEC_OK
- * The decoder algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
- * Memory allocation failed.
- */
- vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
- vpx_codec_iface_t *iface,
- vpx_codec_dec_cfg_t *cfg,
- vpx_codec_flags_t flags,
- int ver);
-
- /*!\brief Convenience macro for vpx_codec_dec_init_ver()
- *
- * Ensures the ABI version parameter is properly set.
- */
+ passed to the decoder one
+ fragment at a time */
+#define VPX_CODEC_USE_FRAME_THREADING 0x80000 /**< Enable frame-based
+ multi-threading */
+
+ /*!\brief Stream properties
+ *
+ * This structure is used to query or set properties of the decoded
+ * stream. Algorithms may extend this structure with data specific
+ * to their bitstream by setting the sz member appropriately.
+ */
+ typedef struct vpx_codec_stream_info {
+ unsigned int sz; /**< Size of this structure */
+ unsigned int w; /**< Width (or 0 for unknown/default) */
+ unsigned int h; /**< Height (or 0 for unknown/default) */
+ unsigned int is_kf; /**< Current frame is a keyframe */
+ } vpx_codec_stream_info_t;
+
+ /* REQUIRED FUNCTIONS
+ *
+ * The following functions are required to be implemented for all decoders.
+ * They represent the base case functionality expected of all decoders.
+ */
+
+
+ /*!\brief Initialization Configurations
+ *
+ * This structure is used to pass init time configuration options to the
+ * decoder.
+ */
+ typedef struct vpx_codec_dec_cfg {
+ unsigned int threads; /**< Maximum number of threads to use, default 1 */
+ unsigned int w; /**< Width */
+ unsigned int h; /**< Height */
+ } vpx_codec_dec_cfg_t; /**< alias for struct vpx_codec_dec_cfg */
+
+
+ /*!\brief Initialize a decoder instance
+ *
+ * Initializes a decoder context using the given interface. Applications
+ * should call the vpx_codec_dec_init convenience macro instead of this
+ * function directly, to ensure that the ABI version number parameter
+ * is properly initialized.
+ *
+ * If the library was configured with --disable-multithread, this call
+ * is not thread safe and should be guarded with a lock if being used
+ * in a multithreaded context.
+ *
+ * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
+ * parameter), the storage pointed to by the cfg parameter must be
+ * kept readable and stable until all memory maps have been set.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[in] iface Pointer to the algorithm interface to use.
+ * \param[in] cfg Configuration to use, if known. May be NULL.
+ * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
+ * \param[in] ver ABI version number. Must be set to
+ * VPX_DECODER_ABI_VERSION
+ * \retval #VPX_CODEC_OK
+ * The decoder algorithm initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Memory allocation failed.
+ */
+ vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
+ vpx_codec_iface_t *iface,
+ vpx_codec_dec_cfg_t *cfg,
+ vpx_codec_flags_t flags,
+ int ver);
+
+ /*!\brief Convenience macro for vpx_codec_dec_init_ver()
+ *
+ * Ensures the ABI version parameter is properly set.
+ */
#define vpx_codec_dec_init(ctx, iface, cfg, flags) \
- vpx_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION)
-
-
- /*!\brief Parse stream info from a buffer
- *
- * Performs high level parsing of the bitstream. Construction of a decoder
- * context is not necessary. Can be used to determine if the bitstream is
- * of the proper format, and to extract information from the stream.
- *
- * \param[in] iface Pointer to the algorithm interface
- * \param[in] data Pointer to a block of data to parse
- * \param[in] data_sz Size of the data buffer
- * \param[in,out] si Pointer to stream info to update. The size member
- * \ref MUST be properly initialized, but \ref MAY be
- * clobbered by the algorithm. This parameter \ref MAY
- * be NULL.
- *
- * \retval #VPX_CODEC_OK
- * Bitstream is parsable and stream information updated
- */
- vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
- const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si);
-
-
- /*!\brief Return information about the current stream.
- *
- * Returns information about the stream that has been parsed during decoding.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in,out] si Pointer to stream info to update. The size member
- * \ref MUST be properly initialized, but \ref MAY be
- * clobbered by the algorithm. This parameter \ref MAY
- * be NULL.
- *
- * \retval #VPX_CODEC_OK
- * Bitstream is parsable and stream information updated
- */
- vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
- vpx_codec_stream_info_t *si);
-
-
- /*!\brief Decode data
- *
- * Processes a buffer of coded data. If the processing results in a new
- * decoded frame becoming available, PUT_SLICE and PUT_FRAME events may be
- * generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode
- * time stamp) order. Frames produced will always be in PTS (presentation
- * time stamp) order.
- * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled,
- * data and data_sz can contain a fragment of the encoded frame. Fragment
- * \#n must contain at least partition \#n, but can also contain subsequent
- * partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must
- * be empty. When no more data is available, this function should be called
- * with NULL as data and 0 as data_sz. The memory passed to this function
- * must be available until the frame has been decoded.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] data Pointer to this block of new coded data. If
- * NULL, a VPX_CODEC_CB_PUT_FRAME event is posted
- * for the previously decoded frame.
- * \param[in] data_sz Size of the coded data, in bytes.
- * \param[in] user_priv Application specific data to associate with
- * this frame.
- * \param[in] deadline Soft deadline the decoder should attempt to meet,
- * in us. Set to zero for unlimited.
- *
- * \return Returns #VPX_CODEC_OK if the coded data was processed completely
- * and future pictures can be decoded without error. Otherwise,
- * see the descriptions of the other error codes in ::vpx_codec_err_t
- * for recoverability capabilities.
- */
- vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx,
- const uint8_t *data,
- unsigned int data_sz,
- void *user_priv,
- long deadline);
-
-
- /*!\brief Decoded frames iterator
- *
- * Iterates over a list of the frames available for display. The iterator
- * storage should be initialized to NULL to start the iteration. Iteration is
- * complete when this function returns NULL.
- *
- * The list of available frames becomes valid upon completion of the
- * vpx_codec_decode call, and remains valid until the next call to vpx_codec_decode.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in,out] iter Iterator storage, initialized to NULL
- *
- * \return Returns a pointer to an image, if one is ready for display. Frames
- * produced will always be in PTS (presentation time stamp) order.
- */
- vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx,
- vpx_codec_iter_t *iter);
-
-
- /*!\defgroup cap_put_frame Frame-Based Decoding Functions
- *
- * The following functions are required to be implemented for all decoders
- * that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these functions
- * for codecs that don't advertise this capability will result in an error
- * code being returned, usually VPX_CODEC_ERROR
- * @{
- */
-
- /*!\brief put frame callback prototype
- *
- * This callback is invoked by the decoder to notify the application of
- * the availability of decoded image data.
- */
- typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv,
- const vpx_image_t *img);
-
-
- /*!\brief Register for notification of frame completion.
- *
- * Registers a given function to be called when a decoded frame is
- * available.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] cb Pointer to the callback function
- * \param[in] user_priv User's private data
- *
- * \retval #VPX_CODEC_OK
- * Callback successfully registered.
- * \retval #VPX_CODEC_ERROR
- * Decoder context not initialized, or algorithm not capable of
- * posting slice completion.
- */
- vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
- vpx_codec_put_frame_cb_fn_t cb,
- void *user_priv);
-
-
- /*!@} - end defgroup cap_put_frame */
-
- /*!\defgroup cap_put_slice Slice-Based Decoding Functions
- *
- * The following functions are required to be implemented for all decoders
- * that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these functions
- * for codecs that don't advertise this capability will result in an error
- * code being returned, usually VPX_CODEC_ERROR
- * @{
- */
-
- /*!\brief put slice callback prototype
- *
- * This callback is invoked by the decoder to notify the application of
- * the availability of partially decoded image data. The
- */
- typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv,
- const vpx_image_t *img,
- const vpx_image_rect_t *valid,
- const vpx_image_rect_t *update);
-
-
- /*!\brief Register for notification of slice completion.
- *
- * Registers a given function to be called when a decoded slice is
- * available.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] cb Pointer to the callback function
- * \param[in] user_priv User's private data
- *
- * \retval #VPX_CODEC_OK
- * Callback successfully registered.
- * \retval #VPX_CODEC_ERROR
- * Decoder context not initialized, or algorithm not capable of
- * posting slice completion.
- */
- vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
- vpx_codec_put_slice_cb_fn_t cb,
- void *user_priv);
-
-
- /*!@} - end defgroup cap_put_slice*/
-
- /*!@} - end defgroup decoder*/
+ vpx_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION)
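A minimal initialization sketch using the macro above, assuming the VP8
decoder interface vpx_codec_vp8_dx() from vpx/vp8dx.h; the helper name is
illustrative.

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static int open_decoder(vpx_codec_ctx_t *codec) {
      vpx_codec_dec_cfg_t cfg = {0};
      cfg.threads = 4;                  /* upper bound; 0 behaves like 1 */
      cfg.w = cfg.h = 0;                /* unknown until the stream is parsed */

      /* the macro supplies VPX_DECODER_ABI_VERSION automatically */
      return vpx_codec_dec_init(codec, vpx_codec_vp8_dx(), &cfg, 0) ? -1 : 0;
    }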
+
+
+ /*!\brief Parse stream info from a buffer
+ *
+ * Performs high level parsing of the bitstream. Construction of a decoder
+ * context is not necessary. Can be used to determine if the bitstream is
+ * of the proper format, and to extract information from the stream.
+ *
+ * \param[in] iface Pointer to the algorithm interface
+ * \param[in] data Pointer to a block of data to parse
+ * \param[in] data_sz Size of the data buffer
+ * \param[in,out] si Pointer to stream info to update. The size member
+ * \ref MUST be properly initialized, but \ref MAY be
+ * clobbered by the algorithm. This parameter \ref MAY
+ * be NULL.
+ *
+ * \retval #VPX_CODEC_OK
+ * Bitstream is parsable and stream information updated
+ */
+ vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
+ const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si);
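For example, a caller might probe a buffer like this (a sketch; the VP8
interface vpx_codec_vp8_dx() from vpx/vp8dx.h is assumed, and the helper name
is illustrative). Note that the sz member must be set before the call, per the
contract above.

    #include <stdio.h>
    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static void report_stream(const uint8_t *data, unsigned int data_sz) {
      vpx_codec_stream_info_t si;
      si.sz = sizeof(si);               /* the size member MUST be initialized */

      if (vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, data_sz, &si)
              == VPX_CODEC_OK)
        printf("stream: %ux%u, keyframe=%u\n", si.w, si.h, si.is_kf);
    }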
+
+
+ /*!\brief Return information about the current stream.
+ *
+ * Returns information about the stream that has been parsed during decoding.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in,out] si Pointer to stream info to update. The size member
+ * \ref MUST be properly initialized, but \ref MAY be
+ * clobbered by the algorithm. This parameter \ref MAY
+ * be NULL.
+ *
+ * \retval #VPX_CODEC_OK
+ * Bitstream is parsable and stream information updated
+ */
+ vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
+ vpx_codec_stream_info_t *si);
+
+
+ /*!\brief Decode data
+ *
+ * Processes a buffer of coded data. If the processing results in a new
+ * decoded frame becoming available, PUT_SLICE and PUT_FRAME events may be
+ * generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode
+ * time stamp) order. Frames produced will always be in PTS (presentation
+ * time stamp) order.
+ * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled,
+ * data and data_sz can contain a fragment of the encoded frame. Fragment
+ * \#n must contain at least partition \#n, but can also contain subsequent
+ * partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must
+ * be empty. When no more data is available, this function should be called
+ * with NULL as data and 0 as data_sz. The memory passed to this function
+ * must be available until the frame has been decoded.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] data Pointer to this block of new coded data. If
+ * NULL, a VPX_CODEC_CB_PUT_FRAME event is posted
+ * for the previously decoded frame.
+ * \param[in] data_sz Size of the coded data, in bytes.
+ * \param[in] user_priv Application specific data to associate with
+ * this frame.
+ * \param[in] deadline Soft deadline the decoder should attempt to meet,
+ * in us. Set to zero for unlimited.
+ *
+ * \return Returns #VPX_CODEC_OK if the coded data was processed completely
+ * and future pictures can be decoded without error. Otherwise,
+ * see the descriptions of the other error codes in ::vpx_codec_err_t
+ *         to determine whether the error is recoverable.
+ */
+ vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx,
+ const uint8_t *data,
+ unsigned int data_sz,
+ void *user_priv,
+ long deadline);
+
+
+ /*!\brief Decoded frames iterator
+ *
+ * Iterates over a list of the frames available for display. The iterator
+ * storage should be initialized to NULL to start the iteration. Iteration is
+ * complete when this function returns NULL.
+ *
+ * The list of available frames becomes valid upon completion of the
+ * vpx_codec_decode call, and remains valid until the next call to vpx_codec_decode.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in,out] iter Iterator storage, initialized to NULL
+ *
+ * \return Returns a pointer to an image, if one is ready for display. Frames
+ * produced will always be in PTS (presentation time stamp) order.
+ */
+ vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx,
+ vpx_codec_iter_t *iter);
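A sketch of the decode/display loop implied by the two functions above: one
vpx_codec_decode() call, then drain every picture that is ready. show_image()
is a hypothetical display hook and decode_one() an illustrative helper name.

    #include "vpx/vpx_decoder.h"

    static void show_image(const vpx_image_t *img);  /* hypothetical hook */

    static int decode_one(vpx_codec_ctx_t *codec,
                          const uint8_t *buf, unsigned int buf_sz) {
      vpx_codec_iter_t iter = NULL;   /* reset for each vpx_codec_decode() call */
      vpx_image_t *img;

      if (vpx_codec_decode(codec, buf, buf_sz, NULL, 0))  /* 0 = unlimited */
        return -1;
      while ((img = vpx_codec_get_frame(codec, &iter)) != NULL)
        show_image(img);              /* frames arrive in PTS order */
      return 0;
    }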
+
+
+ /*!\defgroup cap_put_frame Frame-Based Decoding Functions
+ *
+ * The following functions are required to be implemented for all decoders
+ * that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these functions
+ * for codecs that don't advertise this capability will result in an error
+ * code being returned, usually VPX_CODEC_ERROR.
+ * @{
+ */
+
+ /*!\brief put frame callback prototype
+ *
+ * This callback is invoked by the decoder to notify the application of
+ * the availability of decoded image data.
+ */
+ typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv,
+ const vpx_image_t *img);
+
+
+ /*!\brief Register for notification of frame completion.
+ *
+ * Registers a given function to be called when a decoded frame is
+ * available.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] cb Pointer to the callback function
+ * \param[in] user_priv User's private data
+ *
+ * \retval #VPX_CODEC_OK
+ * Callback successfully registered.
+ * \retval #VPX_CODEC_ERROR
+ * Decoder context not initialized, or algorithm not capable of
+ * posting slice completion.
+ */
+ vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
+ vpx_codec_put_frame_cb_fn_t cb,
+ void *user_priv);
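A registration sketch. The capability check via vpx_codec_get_caps() guards
against decoders that do not advertise VPX_CODEC_CAP_PUT_FRAME; the VP8
interface from vpx/vp8dx.h is assumed, and the helper names are illustrative.

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static void on_frame(void *user_priv, const vpx_image_t *img) {
      (void)user_priv;                /* application state would go here */
      (void)img;                      /* copy out or display the image */
    }

    static void hook_put_frame(vpx_codec_ctx_t *codec, void *app_state) {
      /* only decoders advertising the capability accept the registration */
      if (vpx_codec_get_caps(vpx_codec_vp8_dx()) & VPX_CODEC_CAP_PUT_FRAME)
        vpx_codec_register_put_frame_cb(codec, on_frame, app_state);
    }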
+
+
+ /*!@} - end defgroup cap_put_frame */
+
+ /*!\defgroup cap_put_slice Slice-Based Decoding Functions
+ *
+ * The following functions are required to be implemented for all decoders
+ * that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these functions
+ * for codecs that don't advertise this capability will result in an error
+ * code being returned, usually VPX_CODEC_ERROR.
+ * @{
+ */
+
+ /*!\brief put slice callback prototype
+ *
+ * This callback is invoked by the decoder to notify the application of
+ * the availability of partially decoded image data.
+ */
+ typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv,
+ const vpx_image_t *img,
+ const vpx_image_rect_t *valid,
+ const vpx_image_rect_t *update);
+
+
+ /*!\brief Register for notification of slice completion.
+ *
+ * Registers a given function to be called when a decoded slice is
+ * available.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] cb Pointer to the callback function
+ * \param[in] user_priv User's private data
+ *
+ * \retval #VPX_CODEC_OK
+ * Callback successfully registered.
+ * \retval #VPX_CODEC_ERROR
+ * Decoder context not initialized, or algorithm not capable of
+ * posting slice completion.
+ */
+ vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
+ vpx_codec_put_slice_cb_fn_t cb,
+ void *user_priv);
+
+
+ /*!@} - end defgroup cap_put_slice*/
+
+ /*!@} - end defgroup decoder*/
#endif
diff --git a/libvpx/vpx/vpx_encoder.h b/libvpx/vpx/vpx_encoder.h
index 67d9033..56fd2d9 100644
--- a/libvpx/vpx/vpx_encoder.h
+++ b/libvpx/vpx/vpx_encoder.h
@@ -32,902 +32,904 @@ extern "C" {
#define VPX_ENCODER_H
#include "vpx_codec.h"
-/*! Temporal Scalability: Maximum length of the sequence defining frame
- * layer membership
- */
+ /*! Temporal Scalability: Maximum length of the sequence defining frame
+ * layer membership
+ */
#define VPX_TS_MAX_PERIODICITY 16
-/*! Temporal Scalability: Maximum number of coding layers */
+ /*! Temporal Scalability: Maximum number of coding layers */
#define VPX_TS_MAX_LAYERS 5
-/*!\deprecated Use #VPX_TS_MAX_PERIODICITY instead. */
+ /*!\deprecated Use #VPX_TS_MAX_PERIODICITY instead. */
#define MAX_PERIODICITY VPX_TS_MAX_PERIODICITY
-/*!\deprecated Use #VPX_TS_MAX_LAYERS instead. */
+ /*!\deprecated Use #VPX_TS_MAX_LAYERS instead. */
#define MAX_LAYERS VPX_TS_MAX_LAYERS
- /*!\brief Current ABI version number
- *
- * \internal
- * If this file is altered in any way that changes the ABI, this value
- * must be bumped. Examples include, but are not limited to, changing
- * types, removing or reassigning enums, adding/removing/rearranging
- * fields to structures
- */
+/*! Spatial Scalability: Maximum number of coding layers */
+#define VPX_SS_MAX_LAYERS 5
+
+/*! Spatial Scalability: Default number of coding layers */
+#define VPX_SS_DEFAULT_LAYERS 3
+
+ /*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped. Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
#define VPX_ENCODER_ABI_VERSION (3 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
- /*! \brief Encoder capabilities bitfield
- *
- * Each encoder advertises the capabilities it supports as part of its
- * ::vpx_codec_iface_t interface structure. Capabilities are extra
- * interfaces or functionality, and are not required to be supported
- * by an encoder.
- *
- * The available flags are specified by VPX_CODEC_CAP_* defines.
- */
+ /*! \brief Encoder capabilities bitfield
+ *
+ * Each encoder advertises the capabilities it supports as part of its
+ * ::vpx_codec_iface_t interface structure. Capabilities are extra
+ * interfaces or functionality, and are not required to be supported
+ * by an encoder.
+ *
+ * The available flags are specified by VPX_CODEC_CAP_* defines.
+ */
#define VPX_CODEC_CAP_PSNR 0x10000 /**< Can issue PSNR packets */
- /*! Can output one partition at a time. Each partition is returned in its
- * own VPX_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for
- * every partition but the last. In this mode all frames are always
- * returned partition by partition.
- */
+ /*! Can output one partition at a time. Each partition is returned in its
+ * own VPX_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for
+ * every partition but the last. In this mode all frames are always
+ * returned partition by partition.
+ */
#define VPX_CODEC_CAP_OUTPUT_PARTITION 0x20000
- /*! \brief Initialization-time Feature Enabling
- *
- * Certain codec features must be known at initialization time, to allow
- * for proper memory allocation.
- *
- * The available flags are specified by VPX_CODEC_USE_* defines.
- */
+ /*! \brief Initialization-time Feature Enabling
+ *
+ * Certain codec features must be known at initialization time, to allow
+ * for proper memory allocation.
+ *
+ * The available flags are specified by VPX_CODEC_USE_* defines.
+ */
#define VPX_CODEC_USE_PSNR 0x10000 /**< Calculate PSNR on each frame */
#define VPX_CODEC_USE_OUTPUT_PARTITION 0x20000 /**< Make the encoder output one
- partition at a time. */
-
-
- /*!\brief Generic fixed size buffer structure
- *
- * This structure is able to hold a reference to any fixed size buffer.
- */
- typedef struct vpx_fixed_buf
- {
- void *buf; /**< Pointer to the data */
- size_t sz; /**< Length of the buffer, in chars */
- } vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */
-
-
- /*!\brief Time Stamp Type
- *
- * An integer, which when multiplied by the stream's time base, provides
- * the absolute time of a sample.
- */
- typedef int64_t vpx_codec_pts_t;
-
-
- /*!\brief Compressed Frame Flags
- *
- * This type represents a bitfield containing information about a compressed
- * frame that may be useful to an application. The most significant 16 bits
- * can be used by an algorithm to provide additional detail, for example to
- * support frame types that are codec specific (MPEG-1 D-frames for example)
- */
- typedef uint32_t vpx_codec_frame_flags_t;
+ partition at a time. */
+
+
+ /*!\brief Generic fixed size buffer structure
+ *
+ * This structure is able to hold a reference to any fixed size buffer.
+ */
+ typedef struct vpx_fixed_buf {
+ void *buf; /**< Pointer to the data */
+ size_t sz; /**< Length of the buffer, in chars */
+ } vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */
+
+
+ /*!\brief Time Stamp Type
+ *
+ * An integer, which when multiplied by the stream's time base, provides
+ * the absolute time of a sample.
+ */
+ typedef int64_t vpx_codec_pts_t;
+
+
+ /*!\brief Compressed Frame Flags
+ *
+ * This type represents a bitfield containing information about a compressed
+ * frame that may be useful to an application. The most significant 16 bits
+ * can be used by an algorithm to provide additional detail, for example to
+ * support frame types that are codec specific (MPEG-1 D-frames for example)
+ */
+ typedef uint32_t vpx_codec_frame_flags_t;
#define VPX_FRAME_IS_KEY 0x1 /**< frame is the start of a GOP */
#define VPX_FRAME_IS_DROPPABLE 0x2 /**< frame can be dropped without affecting
- the stream (no future frame depends on
- this one) */
+ the stream (no future frame depends on
+ this one) */
#define VPX_FRAME_IS_INVISIBLE 0x4 /**< frame should be decoded but will not
- be shown */
+ be shown */
#define VPX_FRAME_IS_FRAGMENT 0x8 /**< this is a fragment of the encoded
- frame */
-
- /*!\brief Error Resilient flags
- *
- * These flags define which error resilient features to enable in the
- * encoder. The flags are specified through the
- * vpx_codec_enc_cfg::g_error_resilient variable.
- */
- typedef uint32_t vpx_codec_er_flags_t;
+ frame */
+
+ /*!\brief Error Resilient flags
+ *
+ * These flags define which error resilient features to enable in the
+ * encoder. The flags are specified through the
+ * vpx_codec_enc_cfg::g_error_resilient variable.
+ */
+ typedef uint32_t vpx_codec_er_flags_t;
#define VPX_ERROR_RESILIENT_DEFAULT 0x1 /**< Improve resiliency against
- losses of whole frames */
+ losses of whole frames */
#define VPX_ERROR_RESILIENT_PARTITIONS 0x2 /**< The frame partitions are
- independently decodable by the
- bool decoder, meaning that
- partitions can be decoded even
- though earlier partitions have
- been lost. Note that intra
- predicition is still done over
- the partition boundary. */
-
- /*!\brief Encoder output packet variants
- *
- * This enumeration lists the different kinds of data packets that can be
- * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY
- * extend this list to provide additional functionality.
- */
- enum vpx_codec_cx_pkt_kind
- {
- VPX_CODEC_CX_FRAME_PKT, /**< Compressed video frame */
- VPX_CODEC_STATS_PKT, /**< Two-pass statistics for this frame */
- VPX_CODEC_PSNR_PKT, /**< PSNR statistics for this frame */
- VPX_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions */
- };
-
-
- /*!\brief Encoder output packet
- *
- * This structure contains the different kinds of output data the encoder
- * may produce while compressing a frame.
- */
- typedef struct vpx_codec_cx_pkt
- {
- enum vpx_codec_cx_pkt_kind kind; /**< packet variant */
- union
- {
- struct
- {
- void *buf; /**< compressed data buffer */
- size_t sz; /**< length of compressed data */
- vpx_codec_pts_t pts; /**< time stamp to show frame
+ independently decodable by the
+ bool decoder, meaning that
+ partitions can be decoded even
+ though earlier partitions have
+ been lost. Note that intra
+ prediction is still done over
+ the partition boundary. */
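For instance, an encoder targeting a lossy network might request both
behaviors. A sketch, assuming cfg was first populated with
vpx_codec_enc_config_default(); the helper name is illustrative.

    #include "vpx/vpx_encoder.h"

    static void enable_error_resilience(vpx_codec_enc_cfg_t *cfg) {
      /* survive whole-frame loss and keep partitions independently decodable */
      cfg->g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT |
                               VPX_ERROR_RESILIENT_PARTITIONS;
    }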
+
+ /*!\brief Encoder output packet variants
+ *
+ * This enumeration lists the different kinds of data packets that can be
+ * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY
+ * extend this list to provide additional functionality.
+ */
+ enum vpx_codec_cx_pkt_kind {
+ VPX_CODEC_CX_FRAME_PKT, /**< Compressed video frame */
+ VPX_CODEC_STATS_PKT, /**< Two-pass statistics for this frame */
+ VPX_CODEC_PSNR_PKT, /**< PSNR statistics for this frame */
+ VPX_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions */
+ };
+
+
+ /*!\brief Encoder output packet
+ *
+ * This structure contains the different kinds of output data the encoder
+ * may produce while compressing a frame.
+ */
+ typedef struct vpx_codec_cx_pkt {
+ enum vpx_codec_cx_pkt_kind kind; /**< packet variant */
+ union {
+ struct {
+ void *buf; /**< compressed data buffer */
+ size_t sz; /**< length of compressed data */
+ vpx_codec_pts_t pts; /**< time stamp to show frame
(in timebase units) */
- unsigned long duration; /**< duration to show frame
+ unsigned long duration; /**< duration to show frame
(in timebase units) */
- vpx_codec_frame_flags_t flags; /**< flags for this frame */
- int partition_id; /**< the partition id
+ vpx_codec_frame_flags_t flags; /**< flags for this frame */
+ int partition_id; /**< the partition id
defines the decoding order
of the partitions. Only
applicable when "output partition"
mode is enabled. First partition
has id 0.*/
- } frame; /**< data for compressed frame packet */
- struct vpx_fixed_buf twopass_stats; /**< data for two-pass packet */
- struct vpx_psnr_pkt
- {
- unsigned int samples[4]; /**< Number of samples, total/y/u/v */
- uint64_t sse[4]; /**< sum squared error, total/y/u/v */
- double psnr[4]; /**< PSNR, total/y/u/v */
- } psnr; /**< data for PSNR packet */
- struct vpx_fixed_buf raw; /**< data for arbitrary packets */
+ } frame; /**< data for compressed frame packet */
+ struct vpx_fixed_buf twopass_stats; /**< data for two-pass packet */
+ struct vpx_psnr_pkt {
+ unsigned int samples[4]; /**< Number of samples, total/y/u/v */
+ uint64_t sse[4]; /**< sum squared error, total/y/u/v */
+ double psnr[4]; /**< PSNR, total/y/u/v */
+ } psnr; /**< data for PSNR packet */
+ struct vpx_fixed_buf raw; /**< data for arbitrary packets */
+
+ /* This packet size is fixed to allow codecs to extend this
+ * interface without having to manage storage for raw packets,
+ * i.e., if it's smaller than 128 bytes, you can store it in the
+ * packet list directly.
+ */
+ char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */
+ } data; /**< packet data */
+ } vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */
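A sketch of how an application typically walks these packets after encoding a
frame, using vpx_codec_get_cx_data() (declared later in this header) and the
union members above; the helper name is illustrative.

    #include <stdio.h>
    #include "vpx/vpx_encoder.h"

    static void write_packets(vpx_codec_ctx_t *encoder, FILE *outfile) {
      vpx_codec_iter_t iter = NULL;
      const vpx_codec_cx_pkt_t *pkt;

      while ((pkt = vpx_codec_get_cx_data(encoder, &iter)) != NULL) {
        switch (pkt->kind) {
          case VPX_CODEC_CX_FRAME_PKT:  /* compressed data, maybe a fragment */
            fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile);
            break;
          case VPX_CODEC_PSNR_PKT:      /* only with VPX_CODEC_USE_PSNR */
            printf("PSNR (total): %.2f dB\n", pkt->data.psnr.psnr[0]);
            break;
          default:
            break;
        }
      }
    }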
+
+
+ /*!\brief Rational Number
+ *
+ * This structure holds a fractional value.
+ */
+ typedef struct vpx_rational {
+ int num; /**< fraction numerator */
+ int den; /**< fraction denominator */
+ } vpx_rational_t; /**< alias for struct vpx_rational */
+
+
+ /*!\brief Multi-pass Encoding Pass */
+ enum vpx_enc_pass {
+ VPX_RC_ONE_PASS, /**< Single pass mode */
+ VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */
+ VPX_RC_LAST_PASS /**< Final pass of multi-pass mode */
+ };
+
+
+ /*!\brief Rate control mode */
+ enum vpx_rc_mode {
+ VPX_VBR, /**< Variable Bit Rate (VBR) mode */
+ VPX_CBR, /**< Constant Bit Rate (CBR) mode */
+ VPX_CQ, /**< Constrained Quality (CQ) mode */
+ VPX_Q /**< Constant Quality (Q) mode */
+ };
+
+
+ /*!\brief Keyframe placement mode.
+ *
+ * This enumeration determines whether keyframes are placed automatically by
+ * the encoder or whether this behavior is disabled. Older releases of this
+ * SDK were implemented such that VPX_KF_FIXED meant keyframes were disabled.
+ * This name is confusing for this behavior, so the new symbols to be used
+ * are VPX_KF_AUTO and VPX_KF_DISABLED.
+ */
+ enum vpx_kf_mode {
+ VPX_KF_FIXED, /**< deprecated, implies VPX_KF_DISABLED */
+ VPX_KF_AUTO, /**< Encoder determines optimal placement automatically */
+ VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. */
+ };
+
+
+ /*!\brief Encoded Frame Flags
+ *
+ * This type indicates a bitfield to be passed to vpx_codec_encode(), defining
+ * per-frame boolean values. By convention, bits common to all codecs will be
+ * named VPX_EFLAG_*, and bits specific to an algorithm will be named
+ * /algo/_eflag_*. The lower order 16 bits are reserved for common use.
+ */
+ typedef long vpx_enc_frame_flags_t;
+#define VPX_EFLAG_FORCE_KF (1<<0) /**< Force this frame to be a keyframe */
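A sketch of forcing a keyframe with this flag. vpx_codec_encode() and
VPX_DL_GOOD_QUALITY are declared later in this header; an initialized encoder
and input image are assumed, and the helper name is illustrative.

    #include "vpx/vpx_encoder.h"

    static vpx_codec_err_t encode_keyframe(vpx_codec_ctx_t *enc,
                                           const vpx_image_t *img,
                                           vpx_codec_pts_t pts,
                                           unsigned long duration) {
      /* VPX_EFLAG_FORCE_KF overrides keyframe placement for this one frame */
      return vpx_codec_encode(enc, img, pts, duration,
                              VPX_EFLAG_FORCE_KF, VPX_DL_GOOD_QUALITY);
    }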
- /* This packet size is fixed to allow codecs to extend this
- * interface without having to manage storage for raw packets,
- * i.e., if it's smaller than 128 bytes, you can store in the
- * packet list directly.
- */
- char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */
- } data; /**< packet data */
- } vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */
+ /*!\brief Encoder configuration structure
+ *
+ * This structure contains the encoder settings that have common representations
+ * across all codecs. This doesn't imply that all codecs support all features,
+ * however.
+ */
+ typedef struct vpx_codec_enc_cfg {
+ /*
+ * generic settings (g)
+ */
- /*!\brief Rational Number
+ /*!\brief Algorithm specific "usage" value
*
- * This structure holds a fractional value.
+ * Algorithms may define multiple values for usage, which may convey the
+ * intent of how the application intends to use the stream. If this value
+ * is non-zero, consult the documentation for the codec to determine its
+ * meaning.
*/
- typedef struct vpx_rational
- {
- int num; /**< fraction numerator */
- int den; /**< fraction denominator */
- } vpx_rational_t; /**< alias for struct vpx_rational */
+ unsigned int g_usage;
+
+ /*!\brief Maximum number of threads to use
+ *
+ * For multi-threaded implementations, use no more than this number of
+ * threads. The codec may use fewer threads than allowed. The value
+ * 0 is equivalent to the value 1.
+ */
+ unsigned int g_threads;
- /*!\brief Multi-pass Encoding Pass */
- enum vpx_enc_pass
- {
- VPX_RC_ONE_PASS, /**< Single pass mode */
- VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */
- VPX_RC_LAST_PASS /**< Final pass of multi-pass mode */
- };
+ /*!\brief Bitstream profile to use
+ *
+ * Some codecs support a notion of multiple bitstream profiles. Typically
+ * this maps to a set of features that are turned on or off. Often the
+ * profile to use is determined by the features of the intended decoder.
+ * Consult the documentation for the codec to determine the valid values
+ * for this parameter, or set to zero for a sane default.
+ */
+ unsigned int g_profile; /**< profile of bitstream to use */
- /*!\brief Rate control mode */
- enum vpx_rc_mode
- {
- VPX_VBR, /**< Variable Bit Rate (VBR) mode */
- VPX_CBR, /**< Constant Bit Rate (CBR) mode */
- VPX_CQ /**< Constant Quality (CQ) mode */
- };
- /*!\brief Keyframe placement mode.
+ /*!\brief Width of the frame
*
- * This enumeration determines whether keyframes are placed automatically by
- * the encoder or whether this behavior is disabled. Older releases of this
- * SDK were implemented such that VPX_KF_FIXED meant keyframes were disabled.
- * This name is confusing for this behavior, so the new symbols to be used
- * are VPX_KF_AUTO and VPX_KF_DISABLED.
+ * This value identifies the presentation resolution of the frame,
+ * in pixels. Note that the frames passed as input to the encoder must
+ * have this resolution. Frames will be presented by the decoder in this
+ * resolution, independent of any spatial resampling the encoder may do.
*/
- enum vpx_kf_mode
- {
- VPX_KF_FIXED, /**< deprecated, implies VPX_KF_DISABLED */
- VPX_KF_AUTO, /**< Encoder determines optimal placement automatically */
- VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. */
- };
+ unsigned int g_w;
- /*!\brief Encoded Frame Flags
+ /*!\brief Height of the frame
*
- * This type indicates a bitfield to be passed to vpx_codec_encode(), defining
- * per-frame boolean values. By convention, bits common to all codecs will be
- * named VPX_EFLAG_*, and bits specific to an algorithm will be named
- * /algo/_eflag_*. The lower order 16 bits are reserved for common use.
+ * This value identifies the presentation resolution of the frame,
+ * in pixels. Note that the frames passed as input to the encoder must
+ * have this resolution. Frames will be presented by the decoder in this
+ * resolution, independent of any spatial resampling the encoder may do.
*/
- typedef long vpx_enc_frame_flags_t;
-#define VPX_EFLAG_FORCE_KF (1<<0) /**< Force this frame to be a keyframe */
+ unsigned int g_h;
- /*!\brief Encoder configuration structure
- *
- * This structure contains the encoder settings that have common representations
- * across all codecs. This doesn't imply that all codecs support all features,
- * however.
- */
- typedef struct vpx_codec_enc_cfg
- {
- /*
- * generic settings (g)
- */
-
- /*!\brief Algorithm specific "usage" value
- *
- * Algorithms may define multiple values for usage, which may convey the
- * intent of how the application intends to use the stream. If this value
- * is non-zero, consult the documentation for the codec to determine its
- * meaning.
- */
- unsigned int g_usage;
-
-
- /*!\brief Maximum number of threads to use
- *
- * For multi-threaded implementations, use no more than this number of
- * threads. The codec may use fewer threads than allowed. The value
- * 0 is equivalent to the value 1.
- */
- unsigned int g_threads;
-
-
- /*!\brief Bitstream profile to use
- *
- * Some codecs support a notion of multiple bitstream profiles. Typically
- * this maps to a set of features that are turned on or off. Often the
- * profile to use is determined by the features of the intended decoder.
- * Consult the documentation for the codec to determine the valid values
- * for this parameter, or set to zero for a sane default.
- */
- unsigned int g_profile; /**< profile of bitstream to use */
-
-
-
- /*!\brief Width of the frame
- *
- * This value identifies the presentation resolution of the frame,
- * in pixels. Note that the frames passed as input to the encoder must
- * have this resolution. Frames will be presented by the decoder in this
- * resolution, independent of any spatial resampling the encoder may do.
- */
- unsigned int g_w;
-
-
- /*!\brief Height of the frame
- *
- * This value identifies the presentation resolution of the frame,
- * in pixels. Note that the frames passed as input to the encoder must
- * have this resolution. Frames will be presented by the decoder in this
- * resolution, independent of any spatial resampling the encoder may do.
- */
- unsigned int g_h;
-
-
- /*!\brief Stream timebase units
- *
- * Indicates the smallest interval of time, in seconds, used by the stream.
- * For fixed frame rate material, or variable frame rate material where
- * frames are timed at a multiple of a given clock (ex: video capture),
- * the \ref RECOMMENDED method is to set the timebase to the reciprocal
- * of the frame rate (ex: 1001/30000 for 29.970 Hz NTSC). This allows the
- * pts to correspond to the frame number, which can be handy. For
- * re-encoding video from containers with absolute time timestamps, the
- * \ref RECOMMENDED method is to set the timebase to that of the parent
- * container or multimedia framework (ex: 1/1000 for ms, as in FLV).
- */
- struct vpx_rational g_timebase;
-
-
- /*!\brief Enable error resilient modes.
- *
- * The error resilient bitfield indicates to the encoder which features
- * it should enable to take measures for streaming over lossy or noisy
- * links.
- */
- vpx_codec_er_flags_t g_error_resilient;
-
-
- /*!\brief Multi-pass Encoding Mode
- *
- * This value should be set to the current phase for multi-pass encoding.
- * For single pass, set to #VPX_RC_ONE_PASS.
- */
- enum vpx_enc_pass g_pass;
-
-
- /*!\brief Allow lagged encoding
- *
- * If set, this value allows the encoder to consume a number of input
- * frames before producing output frames. This allows the encoder to
- * base decisions for the current frame on future frames. This does
- * increase the latency of the encoding pipeline, so it is not appropriate
- * in all situations (ex: realtime encoding).
- *
- * Note that this is a maximum value -- the encoder may produce frames
- * sooner than the given limit. Set this value to 0 to disable this
- * feature.
- */
- unsigned int g_lag_in_frames;
-
-
- /*
- * rate control settings (rc)
- */
-
- /*!\brief Temporal resampling configuration, if supported by the codec.
- *
- * Temporal resampling allows the codec to "drop" frames as a strategy to
- * meet its target data rate. This can cause temporal discontinuities in
- * the encoded video, which may appear as stuttering during playback. This
- * trade-off is often acceptable, but for many applications is not. It can
- * be disabled in these cases.
- *
- * Note that not all codecs support this feature. All vpx VPx codecs do.
- * For other codecs, consult the documentation for that algorithm.
- *
- * This threshold is described as a percentage of the target data buffer.
- * When the data buffer falls below this percentage of fullness, a
- * dropped frame is indicated. Set the threshold to zero (0) to disable
- * this feature.
- */
- unsigned int rc_dropframe_thresh;
-
-
- /*!\brief Enable/disable spatial resampling, if supported by the codec.
- *
- * Spatial resampling allows the codec to compress a lower resolution
- * version of the frame, which is then upscaled by the encoder to the
- * correct presentation resolution. This increases visual quality at
- * low data rates, at the expense of CPU time on the encoder/decoder.
- */
- unsigned int rc_resize_allowed;
-
-
- /*!\brief Spatial resampling up watermark.
- *
- * This threshold is described as a percentage of the target data buffer.
- * When the data buffer rises above this percentage of fullness, the
- * encoder will step up to a higher resolution version of the frame.
- */
- unsigned int rc_resize_up_thresh;
-
-
- /*!\brief Spatial resampling down watermark.
- *
- * This threshold is described as a percentage of the target data buffer.
- * When the data buffer falls below this percentage of fullness, the
- * encoder will step down to a lower resolution version of the frame.
- */
- unsigned int rc_resize_down_thresh;
-
-
- /*!\brief Rate control algorithm to use.
- *
- * Indicates whether the end usage of this stream is to be streamed over
- * a bandwidth constrained link, indicating that Constant Bit Rate (CBR)
- * mode should be used, or whether it will be played back on a high
- * bandwidth link, as from a local disk, where higher variations in
- * bitrate are acceptable.
- */
- enum vpx_rc_mode rc_end_usage;
-
-
- /*!\brief Two-pass stats buffer.
- *
- * A buffer containing all of the stats packets produced in the first
- * pass, concatenated.
- */
- struct vpx_fixed_buf rc_twopass_stats_in;
-
-
- /*!\brief Target data rate
- *
- * Target bandwidth to use for this stream, in kilobits per second.
- */
- unsigned int rc_target_bitrate;
-
-
- /*
- * quantizer settings
- */
-
-
- /*!\brief Minimum (Best Quality) Quantizer
- *
- * The quantizer is the most direct control over the quality of the
- * encoded image. The range of valid values for the quantizer is codec
- * specific. Consult the documentation for the codec to determine the
- * values to use. To determine the range programmatically, call
- * vpx_codec_enc_config_default() with a usage value of 0.
- */
- unsigned int rc_min_quantizer;
-
-
- /*!\brief Maximum (Worst Quality) Quantizer
- *
- * The quantizer is the most direct control over the quality of the
- * encoded image. The range of valid values for the quantizer is codec
- * specific. Consult the documentation for the codec to determine the
- * values to use. To determine the range programmatically, call
- * vpx_codec_enc_config_default() with a usage value of 0.
- */
- unsigned int rc_max_quantizer;
-
-
- /*
- * bitrate tolerance
- */
-
-
- /*!\brief Rate control adaptation undershoot control
- *
- * This value, expressed as a percentage of the target bitrate,
- * controls the maximum allowed adaptation speed of the codec.
- * This factor controls the maximum amount of bits that can
- * be subtracted from the target bitrate in order to compensate
- * for prior overshoot.
- *
- * Valid values in the range 0-1000.
- */
- unsigned int rc_undershoot_pct;
-
-
- /*!\brief Rate control adaptation overshoot control
- *
- * This value, expressed as a percentage of the target bitrate,
- * controls the maximum allowed adaptation speed of the codec.
- * This factor controls the maximum amount of bits that can
- * be added to the target bitrate in order to compensate for
- * prior undershoot.
- *
- * Valid values in the range 0-1000.
- */
- unsigned int rc_overshoot_pct;
-
-
- /*
- * decoder buffer model parameters
- */
-
-
- /*!\brief Decoder Buffer Size
- *
- * This value indicates the amount of data that may be buffered by the
- * decoding application. Note that this value is expressed in units of
- * time (milliseconds). For example, a value of 5000 indicates that the
- * client will buffer (at least) 5000ms worth of encoded data. Use the
- * target bitrate (#rc_target_bitrate) to convert to bits/bytes, if
- * necessary.
- */
- unsigned int rc_buf_sz;
-
-
- /*!\brief Decoder Buffer Initial Size
- *
- * This value indicates the amount of data that will be buffered by the
- * decoding application prior to beginning playback. This value is
- * expressed in units of time (milliseconds). Use the target bitrate
- * (#rc_target_bitrate) to convert to bits/bytes, if necessary.
- */
- unsigned int rc_buf_initial_sz;
-
-
- /*!\brief Decoder Buffer Optimal Size
- *
- * This value indicates the amount of data that the encoder should try
- * to maintain in the decoder's buffer. This value is expressed in units
- * of time (milliseconds). Use the target bitrate (#rc_target_bitrate)
- * to convert to bits/bytes, if necessary.
- */
- unsigned int rc_buf_optimal_sz;
-
-
- /*
- * 2 pass rate control parameters
- */
-
-
- /*!\brief Two-pass mode CBR/VBR bias
- *
- * Bias, expressed on a scale of 0 to 100, for determining target size
- * for the current frame. The value 0 indicates the optimal CBR mode
- * value should be used. The value 100 indicates the optimal VBR mode
- * value should be used. Values in between indicate which way the
- * encoder should "lean."
- */
- unsigned int rc_2pass_vbr_bias_pct; /**< RC mode bias between CBR and VBR(0-100: 0->CBR, 100->VBR) */
-
-
- /*!\brief Two-pass mode per-GOP minimum bitrate
- *
- * This value, expressed as a percentage of the target bitrate, indicates
- * the minimum bitrate to be used for a single GOP (aka "section")
- */
- unsigned int rc_2pass_vbr_minsection_pct;
-
-
- /*!\brief Two-pass mode per-GOP maximum bitrate
- *
- * This value, expressed as a percentage of the target bitrate, indicates
- * the maximum bitrate to be used for a single GOP (aka "section")
- */
- unsigned int rc_2pass_vbr_maxsection_pct;
-
-
- /*
- * keyframing settings (kf)
- */
-
- /*!\brief Keyframe placement mode
- *
- * This value indicates whether the encoder should place keyframes at a
- * fixed interval, or determine the optimal placement automatically
- * (as governed by the #kf_min_dist and #kf_max_dist parameters)
- */
- enum vpx_kf_mode kf_mode;
-
-
- /*!\brief Keyframe minimum interval
- *
- * This value, expressed as a number of frames, prevents the encoder from
- * placing a keyframe nearer than kf_min_dist to the previous keyframe. At
- * least kf_min_dist frames non-keyframes will be coded before the next
- * keyframe. Set kf_min_dist equal to kf_max_dist for a fixed interval.
- */
- unsigned int kf_min_dist;
-
-
- /*!\brief Keyframe maximum interval
- *
- * This value, expressed as a number of frames, forces the encoder to code
- * a keyframe if one has not been coded in the last kf_max_dist frames.
- * A value of 0 implies all frames will be keyframes. Set kf_min_dist
- * equal to kf_max_dist for a fixed interval.
- */
- unsigned int kf_max_dist;
-
- /*
- * Temporal scalability settings (ts)
- */
-
- /*!\brief Number of coding layers
- *
- * This value specifies the number of coding layers to be used.
- */
- unsigned int ts_number_layers;
-
- /*!\brief Target bitrate for each layer
- *
- * These values specify the target coding bitrate for each coding layer.
- */
- unsigned int ts_target_bitrate[VPX_TS_MAX_LAYERS];
-
- /*!\brief Frame rate decimation factor for each layer
- *
- * These values specify the frame rate decimation factors to apply
- * to each layer.
- */
- unsigned int ts_rate_decimator[VPX_TS_MAX_LAYERS];
-
- /*!\brief Length of the sequence defining frame layer membership
- *
- * This value specifies the length of the sequence that defines the
- * membership of frames to layers. For example, if ts_periodicity=8 then
- * frames are assigned to coding layers with a repeated sequence of
- * length 8.
- */
- unsigned int ts_periodicity;
-
- /*!\brief Template defining the membership of frames to coding layers
- *
- * This array defines the membership of frames to coding layers. For a
- * 2-layer encoding that assigns even numbered frames to one layer (0)
- * and odd numbered frames to a second layer (1) with ts_periodicity=8,
- * then ts_layer_id = (0,1,0,1,0,1,0,1).
- */
- unsigned int ts_layer_id[VPX_TS_MAX_PERIODICITY];
- } vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */
-
-
- /*!\brief Initialize an encoder instance
- *
- * Initializes a encoder context using the given interface. Applications
- * should call the vpx_codec_enc_init convenience macro instead of this
- * function directly, to ensure that the ABI version number parameter
- * is properly initialized.
- *
- * If the library was configured with --disable-multithread, this call
- * is not thread safe and should be guarded with a lock if being used
- * in a multithreaded context.
- *
- * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
- * parameter), the storage pointed to by the cfg parameter must be
- * kept readable and stable until all memory maps have been set.
- *
- * \param[in] ctx Pointer to this instance's context.
- * \param[in] iface Pointer to the algorithm interface to use.
- * \param[in] cfg Configuration to use, if known. May be NULL.
- * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
- * \param[in] ver ABI version number. Must be set to
- * VPX_ENCODER_ABI_VERSION
- * \retval #VPX_CODEC_OK
- * The decoder algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
- * Memory allocation failed.
- */
- vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
- vpx_codec_iface_t *iface,
- vpx_codec_enc_cfg_t *cfg,
- vpx_codec_flags_t flags,
- int ver);
-
-
- /*!\brief Convenience macro for vpx_codec_enc_init_ver()
- *
- * Ensures the ABI version parameter is properly set.
+ /*!\brief Stream timebase units
+ *
+ * Indicates the smallest interval of time, in seconds, used by the stream.
+ * For fixed frame rate material, or variable frame rate material where
+ * frames are timed at a multiple of a given clock (ex: video capture),
+ * the \ref RECOMMENDED method is to set the timebase to the reciprocal
+ * of the frame rate (ex: 1001/30000 for 29.970 Hz NTSC). This allows the
+ * pts to correspond to the frame number, which can be handy. For
+ * re-encoding video from containers with absolute time timestamps, the
+ * \ref RECOMMENDED method is to set the timebase to that of the parent
+ * container or multimedia framework (ex: 1/1000 for ms, as in FLV).
*/
-#define vpx_codec_enc_init(ctx, iface, cfg, flags) \
- vpx_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION)
+ struct vpx_rational g_timebase;
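A worked example of the recommendation above for 29.970 Hz NTSC material; the
helper name is illustrative.

    #include "vpx/vpx_encoder.h"

    static void set_ntsc_timebase(vpx_codec_enc_cfg_t *cfg) {
      cfg->g_timebase.num = 1001;     /* reciprocal of the 30000/1001 rate */
      cfg->g_timebase.den = 30000;
      /* frame n can then carry pts = n; its time is n * 1001 / 30000 seconds */
    }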
- /*!\brief Initialize multi-encoder instance
+ /*!\brief Enable error resilient modes.
*
- * Initializes multi-encoder context using the given interface.
- * Applications should call the vpx_codec_enc_init_multi convenience macro
- * instead of this function directly, to ensure that the ABI version number
- * parameter is properly initialized.
- *
- * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
- * parameter), the storage pointed to by the cfg parameter must be
- * kept readable and stable until all memory maps have been set.
+ * The error resilient bitfield indicates to the encoder which features
+ * it should enable to take measures for streaming over lossy or noisy
+ * links.
+ */
+ vpx_codec_er_flags_t g_error_resilient;
+
+
+ /*!\brief Multi-pass Encoding Mode
*
- * \param[in] ctx Pointer to this instance's context.
- * \param[in] iface Pointer to the algorithm interface to use.
- * \param[in] cfg Configuration to use, if known. May be NULL.
- * \param[in] num_enc Total number of encoders.
- * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
- * \param[in] dsf Pointer to down-sampling factors.
- * \param[in] ver ABI version number. Must be set to
- * VPX_ENCODER_ABI_VERSION
- * \retval #VPX_CODEC_OK
- * The decoder algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
- * Memory allocation failed.
+ * This value should be set to the current phase for multi-pass encoding.
+ * For single pass, set to #VPX_RC_ONE_PASS.
*/
- vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx,
- vpx_codec_iface_t *iface,
- vpx_codec_enc_cfg_t *cfg,
- int num_enc,
- vpx_codec_flags_t flags,
- vpx_rational_t *dsf,
- int ver);
+ enum vpx_enc_pass g_pass;
- /*!\brief Convenience macro for vpx_codec_enc_init_multi_ver()
+ /*!\brief Allow lagged encoding
*
- * Ensures the ABI version parameter is properly set.
+ * If set, this value allows the encoder to consume a number of input
+ * frames before producing output frames. This allows the encoder to
+ * base decisions for the current frame on future frames. This does
+ * increase the latency of the encoding pipeline, so it is not appropriate
+ * in all situations (ex: realtime encoding).
+ *
+ * Note that this is a maximum value -- the encoder may produce frames
+ * sooner than the given limit. Set this value to 0 to disable this
+ * feature.
*/
-#define vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \
- vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf, \
- VPX_ENCODER_ABI_VERSION)
+ unsigned int g_lag_in_frames;
- /*!\brief Get a default configuration
+ /*
+ * rate control settings (rc)
+ */
+
+ /*!\brief Temporal resampling configuration, if supported by the codec.
*
- * Initializes a encoder configuration structure with default values. Supports
- * the notion of "usages" so that an algorithm may offer different default
- * settings depending on the user's intended goal. This function \ref SHOULD
- * be called by all applications to initialize the configuration structure
- * before specializing the configuration with application specific values.
+ * Temporal resampling allows the codec to "drop" frames as a strategy to
+ * meet its target data rate. This can cause temporal discontinuities in
+ * the encoded video, which may appear as stuttering during playback. This
+ * trade-off is acceptable for many applications, but not for all, and it
+ * can be disabled in these cases.
*
- * \param[in] iface Pointer to the algorithm interface to use.
- * \param[out] cfg Configuration buffer to populate
- * \param[in] usage End usage. Set to 0 or use codec specific values.
+ * Note that not all codecs support this feature. All VPx codecs do.
+ * For other codecs, consult the documentation for that algorithm.
*
- * \retval #VPX_CODEC_OK
- * The configuration was populated.
- * \retval #VPX_CODEC_INCAPABLE
- * Interface is not an encoder interface.
- * \retval #VPX_CODEC_INVALID_PARAM
- * A parameter was NULL, or the usage value was not recognized.
+ * This threshold is described as a percentage of the target data buffer.
+ * When the data buffer falls below this percentage of fullness, a
+ * dropped frame is indicated. Set the threshold to zero (0) to disable
+ * this feature.
*/
- vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
- vpx_codec_enc_cfg_t *cfg,
- unsigned int usage);
+ unsigned int rc_dropframe_thresh;
- /*!\brief Set or change configuration
+ /*!\brief Enable/disable spatial resampling, if supported by the codec.
*
- * Reconfigures an encoder instance according to the given configuration.
+ * Spatial resampling allows the codec to compress a lower resolution
+ * version of the frame, which is then upscaled by the encoder to the
+ * correct presentation resolution. This increases visual quality at
+ * low data rates, at the expense of CPU time on the encoder/decoder.
+ */
+ unsigned int rc_resize_allowed;
+
+
+ /*!\brief Spatial resampling up watermark.
*
- * \param[in] ctx Pointer to this instance's context
- * \param[in] cfg Configuration buffer to use
+ * This threshold is described as a percentage of the target data buffer.
+ * When the data buffer rises above this percentage of fullness, the
+ * encoder will step up to a higher resolution version of the frame.
+ */
+ unsigned int rc_resize_up_thresh;
+
+
+ /*!\brief Spatial resampling down watermark.
*
- * \retval #VPX_CODEC_OK
- * The configuration was populated.
- * \retval #VPX_CODEC_INCAPABLE
- * Interface is not an encoder interface.
- * \retval #VPX_CODEC_INVALID_PARAM
- * A parameter was NULL, or the usage value was not recognized.
+ * This threshold is described as a percentage of the target data buffer.
+ * When the data buffer falls below this percentage of fullness, the
+ * encoder will step down to a lower resolution version of the frame.
*/
- vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
- const vpx_codec_enc_cfg_t *cfg);
+ unsigned int rc_resize_down_thresh;
- /*!\brief Get global stream headers
+ /*!\brief Rate control algorithm to use.
*
- * Retrieves a stream level global header packet, if supported by the codec.
+ * Indicates whether the end usage of this stream is to be streamed over
+ * a bandwidth constrained link, indicating that Constant Bit Rate (CBR)
+ * mode should be used, or whether it will be played back on a high
+ * bandwidth link, as from a local disk, where higher variations in
+ * bitrate are acceptable.
+ */
+ enum vpx_rc_mode rc_end_usage;
+
+
+ /*!\brief Two-pass stats buffer.
*
- * \param[in] ctx Pointer to this instance's context
+ * A buffer containing all of the stats packets produced in the first
+ * pass, concatenated.
+ */
+ struct vpx_fixed_buf rc_twopass_stats_in;
+
+
+ /*!\brief Target data rate
*
- * \retval NULL
- * Encoder does not support global header
- * \retval Non-NULL
- * Pointer to buffer containing global header packet
+ * Target bandwidth to use for this stream, in kilobits per second.
*/
- vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx);
+ unsigned int rc_target_bitrate;
-#define VPX_DL_REALTIME (1) /**< deadline parameter analogous to
- * VPx REALTIME mode. */
-#define VPX_DL_GOOD_QUALITY (1000000) /**< deadline parameter analogous to
- * VPx GOOD QUALITY mode. */
-#define VPX_DL_BEST_QUALITY (0) /**< deadline parameter analogous to
- * VPx BEST QUALITY mode. */
- /*!\brief Encode a frame
- *
- * Encodes a video frame at the given "presentation time." The presentation
- * time stamp (PTS) \ref MUST be strictly increasing.
- *
- * The encoder supports the notion of a soft real-time deadline. Given a
- * non-zero value to the deadline parameter, the encoder will make a "best
- * effort" guarantee to return before the given time slice expires. It is
- * implicit that limiting the available time to encode will degrade the
- * output quality. The encoder can be given an unlimited time to produce the
- * best possible frame by specifying a deadline of '0'. This deadline
- * supercedes the VPx notion of "best quality, good quality, realtime".
- * Applications that wish to map these former settings to the new deadline
- * based system can use the symbols #VPX_DL_REALTIME, #VPX_DL_GOOD_QUALITY,
- * and #VPX_DL_BEST_QUALITY.
- *
- * When the last frame has been passed to the encoder, this function should
- * continue to be called, with the img parameter set to NULL. This will
- * signal the end-of-stream condition to the encoder and allow it to encode
- * any held buffers. Encoding is complete when vpx_codec_encode() is called
- * and vpx_codec_get_cx_data() returns no data.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] img Image data to encode, NULL to flush.
- * \param[in] pts Presentation time stamp, in timebase units.
- * \param[in] duration Duration to show frame, in timebase units.
- * \param[in] flags Flags to use for encoding this frame.
- * \param[in] deadline Time to spend encoding, in microseconds. (0=infinite)
- *
- * \retval #VPX_CODEC_OK
- * The configuration was populated.
- * \retval #VPX_CODEC_INCAPABLE
- * Interface is not an encoder interface.
- * \retval #VPX_CODEC_INVALID_PARAM
- * A parameter was NULL, the image format is unsupported, etc.
+ /*
+ * quantizer settings
+ */
+
+
+ /*!\brief Minimum (Best Quality) Quantizer
+ *
+ * The quantizer is the most direct control over the quality of the
+ * encoded image. The range of valid values for the quantizer is codec
+ * specific. Consult the documentation for the codec to determine the
+ * values to use. To determine the range programmatically, call
+ * vpx_codec_enc_config_default() with a usage value of 0.
*/
- vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned long duration,
- vpx_enc_frame_flags_t flags,
- unsigned long deadline);
+ unsigned int rc_min_quantizer;
- /*!\brief Set compressed data output buffer
+
+ /*!\brief Maximum (Worst Quality) Quantizer
*
- * Sets the buffer that the codec should output the compressed data
- * into. This call effectively sets the buffer pointer returned in the
- * next VPX_CODEC_CX_FRAME_PKT packet. Subsequent packets will be
- * appended into this buffer. The buffer is preserved across frames,
- * so applications must periodically call this function after flushing
- * the accumulated compressed data to disk or to the network to reset
- * the pointer to the buffer's head.
+ * The quantizer is the most direct control over the quality of the
+ * encoded image. The range of valid values for the quantizer is codec
+ * specific. Consult the documentation for the codec to determine the
+ * values to use. To determine the range programmatically, call
+ * vpx_codec_enc_config_default() with a usage value of 0.
+ */
+ unsigned int rc_max_quantizer;
+
+
+ /*
+ * bitrate tolerance
+ */
+
+
+ /*!\brief Rate control adaptation undershoot control
*
- * `pad_before` bytes will be skipped before writing the compressed
- * data, and `pad_after` bytes will be appended to the packet. The size
- * of the packet will be the sum of the size of the actual compressed
- * data, pad_before, and pad_after. The padding bytes will be preserved
- * (not overwritten).
+ * This value, expressed as a percentage of the target bitrate,
+ * controls the maximum allowed adaptation speed of the codec.
+ * This factor controls the maximum amount of bits that can
+ * be subtracted from the target bitrate in order to compensate
+ * for prior overshoot.
*
- * Note that calling this function does not guarantee that the returned
- * compressed data will be placed into the specified buffer. In the
- * event that the encoded data will not fit into the buffer provided,
- * the returned packet \ref MAY point to an internal buffer, as it would
- * if this call were never used. In this event, the output packet will
- * NOT have any padding, and the application must free space and copy it
- * to the proper place. This is of particular note in configurations
- * that may output multiple packets for a single encoded frame (e.g., lagged
- * encoding) or if the application does not reset the buffer periodically.
+ * Valid values are in the range 0-1000.
+ */
+ unsigned int rc_undershoot_pct;
+
+
+ /*!\brief Rate control adaptation overshoot control
*
- * Applications may restore the default behavior of the codec providing
- * the compressed data buffer by calling this function with a NULL
- * buffer.
+ * This value, expressed as a percentage of the target bitrate,
+ * controls the maximum allowed adaptation speed of the codec.
+ * This factor controls the maximum amount of bits that can
+ * be added to the target bitrate in order to compensate for
+ * prior undershoot.
*
- * Applications \ref MUSTNOT call this function during iteration of
- * vpx_codec_get_cx_data().
+ * Valid values are in the range 0-1000.
+ */
+ unsigned int rc_overshoot_pct;
+
+
+ /*
+ * decoder buffer model parameters
+ */
+
+
+ /*!\brief Decoder Buffer Size
*
- * \param[in] ctx Pointer to this instance's context
- * \param[in] buf Buffer to store compressed data into
- * \param[in] pad_before Bytes to skip before writing compressed data
- * \param[in] pad_after Bytes to skip after writing compressed data
+ * This value indicates the amount of data that may be buffered by the
+ * decoding application. Note that this value is expressed in units of
+ * time (milliseconds). For example, a value of 5000 indicates that the
+ * client will buffer (at least) 5000ms worth of encoded data. Use the
+ * target bitrate (#rc_target_bitrate) to convert to bits/bytes, if
+ * necessary.
+ */
+ unsigned int rc_buf_sz;
+
+
+ /*!\brief Decoder Buffer Initial Size
*
- * \retval #VPX_CODEC_OK
- * The buffer was set successfully.
- * \retval #VPX_CODEC_INVALID_PARAM
- * A parameter was NULL, the image format is unsupported, etc.
+ * This value indicates the amount of data that will be buffered by the
+ * decoding application prior to beginning playback. This value is
+ * expressed in units of time (milliseconds). Use the target bitrate
+ * (#rc_target_bitrate) to convert to bits/bytes, if necessary.
*/
- vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
- const vpx_fixed_buf_t *buf,
- unsigned int pad_before,
- unsigned int pad_after);
+ unsigned int rc_buf_initial_sz;
- /*!\brief Encoded data iterator
+ /*!\brief Decoder Buffer Optimal Size
*
- * Iterates over a list of data packets to be passed from the encoder to the
- * application. The different kinds of packets available are enumerated in
- * #vpx_codec_cx_pkt_kind.
+ * This value indicates the amount of data that the encoder should try
+ * to maintain in the decoder's buffer. This value is expressed in units
+ * of time (milliseconds). Use the target bitrate (#rc_target_bitrate)
+ * to convert to bits/bytes, if necessary.
+ */
+ unsigned int rc_buf_optimal_sz;
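As a worked example of the time-to-bits conversion these buffer parameters imply (all numbers illustrative): because rc_target_bitrate is in kilobits per second and the buffer sizes are in milliseconds, the product kbit/s x ms yields bits directly:

    /* 500 kbit/s stream with a 6000 ms decoder buffer:
     * 500 kbit/s * 6000 ms = 500 * 6000 = 3,000,000 bits buffered. */
    unsigned long buf_bits =
        (unsigned long)cfg.rc_target_bitrate * cfg.rc_buf_sz;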
+
+
+ /*
+ * 2 pass rate control parameters
+ */
+
+
+ /*!\brief Two-pass mode CBR/VBR bias
+ *
+ * Bias, expressed on a scale of 0 to 100, for determining target size
+ * for the current frame. The value 0 indicates the optimal CBR mode
+ * value should be used. The value 100 indicates the optimal VBR mode
+ * value should be used. Values in between indicate which way the
+ * encoder should "lean."
+ */
+ unsigned int rc_2pass_vbr_bias_pct; /**< RC mode bias between CBR and VBR (0-100: 0->CBR, 100->VBR) */
+
+
+ /*!\brief Two-pass mode per-GOP minimum bitrate
*
- * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's
- * muxer. Multiple compressed frames may be in the list.
- * #VPX_CODEC_STATS_PKT packets should be appended to a global buffer.
+ * This value, expressed as a percentage of the target bitrate, indicates
+ * the minimum bitrate to be used for a single GOP (aka "section").
+ */
+ unsigned int rc_2pass_vbr_minsection_pct;
+
+
+ /*!\brief Two-pass mode per-GOP maximum bitrate
*
- * The application \ref MUST silently ignore any packet kinds that it does
- * not recognize or support.
+ * This value, expressed as a percentage of the target bitrate, indicates
+ * the maximum bitrate to be used for a single GOP (aka "section").
+ */
+ unsigned int rc_2pass_vbr_maxsection_pct;
+
+
+ /*
+ * keyframing settings (kf)
+ */
+
+ /*!\brief Keyframe placement mode
*
- * The data buffers returned from this function are only guaranteed to be
- * valid until the application makes another call to any vpx_codec_* function.
+ * This value indicates whether the encoder should place keyframes at a
+ * fixed interval, or determine the optimal placement automatically
+ * (as governed by the #kf_min_dist and #kf_max_dist parameters)
+ */
+ enum vpx_kf_mode kf_mode;
+
+
+ /*!\brief Keyframe minimum interval
*
- * \param[in] ctx Pointer to this instance's context
- * \param[in,out] iter Iterator storage, initialized to NULL
+ * This value, expressed as a number of frames, prevents the encoder from
+ * placing a keyframe nearer than kf_min_dist to the previous keyframe. At
+ * least kf_min_dist non-keyframes will be coded before the next
+ * keyframe. Set kf_min_dist equal to kf_max_dist for a fixed interval.
+ */
+ unsigned int kf_min_dist;
+
+
+ /*!\brief Keyframe maximum interval
*
- * \return Returns a pointer to an output data packet (compressed frame data,
- * two-pass statistics, etc.) or NULL to signal end-of-list.
+ * This value, expressed as a number of frames, forces the encoder to code
+ * a keyframe if one has not been coded in the last kf_max_dist frames.
+ * A value of 0 implies all frames will be keyframes. Set kf_min_dist
+ * equal to kf_max_dist for a fixed interval.
+ */
+ unsigned int kf_max_dist;
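A short sketch of the two keyframe policies these fields support, assuming a populated cfg as in the earlier sketches (interval values are arbitrary examples):

    /* Fixed interval: exactly one keyframe every 30 frames. */
    cfg.kf_mode     = VPX_KF_AUTO;
    cfg.kf_min_dist = 30;
    cfg.kf_max_dist = 30;

    /* Automatic placement, at most 120 frames apart. */
    cfg.kf_min_dist = 0;
    cfg.kf_max_dist = 120;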
+
+ /*
+ * Spatial scalability settings (ss)
+ */
+
+ /*!\brief Number of coding layers (spatial)
*
+ * This value specifies the number of coding layers to be used.
*/
- const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
- vpx_codec_iter_t *iter);
+ unsigned int ss_number_layers;
+ /*!\brief Number of coding layers (temporal)
+ *
+ * This value specifies the number of temporal coding layers to be used.
+ */
+ unsigned int ts_number_layers;
- /*!\brief Get Preview Frame
+ /*!\brief Target bitrate for each layer
*
- * Returns an image that can be used as a preview. Shows the image as it would
- * exist at the decompressor. The application \ref MUST NOT write into this
- * image buffer.
+ * These values specify the target coding bitrate for each coding layer.
+ */
+ unsigned int ts_target_bitrate[VPX_TS_MAX_LAYERS];
+
+ /*!\brief Frame rate decimation factor for each layer
*
- * \param[in] ctx Pointer to this instance's context
+ * These values specify the frame rate decimation factors to apply
+ * to each layer.
+ */
+ unsigned int ts_rate_decimator[VPX_TS_MAX_LAYERS];
+
+ /*!\brief Length of the sequence defining frame layer membership
*
- * \return Returns a pointer to a preview image, or NULL if no image is
- * available.
+ * This value specifies the length of the sequence that defines the
+ * membership of frames to layers. For example, if ts_periodicity=8 then
+ * frames are assigned to coding layers with a repeated sequence of
+ * length 8.
+ */
+ unsigned int ts_periodicity;
+
+ /*!\brief Template defining the membership of frames to coding layers
*
+ * This array defines the membership of frames to coding layers. For
+ * example, a 2-layer encoding that assigns even-numbered frames to one
+ * layer (0) and odd-numbered frames to a second layer (1) with
+ * ts_periodicity=8 would use ts_layer_id = (0,1,0,1,0,1,0,1).
*/
- const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx);
+ unsigned int ts_layer_id[VPX_TS_MAX_PERIODICITY];
+ } vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */
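Tying the ts_* fields together, a hedged sketch of the 2-layer arrangement given for ts_layer_id above (bitrate and decimator values are illustrative; consult the codec's documentation for the exact semantics, e.g. whether per-layer bitrates are cumulative):

    int i;
    cfg.ts_number_layers     = 2;
    cfg.ts_periodicity       = 8;
    cfg.ts_rate_decimator[0] = 2;    /* layer 0 alone: half frame rate */
    cfg.ts_rate_decimator[1] = 1;    /* layers 0+1: full frame rate */
    cfg.ts_target_bitrate[0] = 150;  /* kbit/s (example values) */
    cfg.ts_target_bitrate[1] = 250;
    for (i = 0; i < 8; ++i)
      cfg.ts_layer_id[i] = i % 2;    /* (0,1,0,1,0,1,0,1) */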
+
+
+ /*!\brief Initialize an encoder instance
+ *
+ * Initializes an encoder context using the given interface. Applications
+ * should call the vpx_codec_enc_init convenience macro instead of this
+ * function directly, to ensure that the ABI version number parameter
+ * is properly initialized.
+ *
+ * If the library was configured with --disable-multithread, this call
+ * is not thread safe and should be guarded with a lock if being used
+ * in a multithreaded context.
+ *
+ * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
+ * parameter), the storage pointed to by the cfg parameter must be
+ * kept readable and stable until all memory maps have been set.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[in] iface Pointer to the algorithm interface to use.
+ * \param[in] cfg Configuration to use, if known. May be NULL.
+ * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
+ * \param[in] ver ABI version number. Must be set to
+ * VPX_ENCODER_ABI_VERSION
+ * \retval #VPX_CODEC_OK
+ * The encoder algorithm initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Memory allocation failed.
+ */
+ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
+ vpx_codec_iface_t *iface,
+ vpx_codec_enc_cfg_t *cfg,
+ vpx_codec_flags_t flags,
+ int ver);
+
+
+ /*!\brief Convenience macro for vpx_codec_enc_init_ver()
+ *
+ * Ensures the ABI version parameter is properly set.
+ */
+#define vpx_codec_enc_init(ctx, iface, cfg, flags) \
+ vpx_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION)
+
+
+ /*!\brief Initialize multi-encoder instance
+ *
+ * Initializes a multi-encoder context using the given interface.
+ * Applications should call the vpx_codec_enc_init_multi convenience macro
+ * instead of this function directly, to ensure that the ABI version number
+ * parameter is properly initialized.
+ *
+ * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
+ * parameter), the storage pointed to by the cfg parameter must be
+ * kept readable and stable until all memory maps have been set.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[in] iface Pointer to the algorithm interface to use.
+ * \param[in] cfg Configuration to use, if known. May be NULL.
+ * \param[in] num_enc Total number of encoders.
+ * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
+ * \param[in] dsf Pointer to down-sampling factors.
+ * \param[in] ver ABI version number. Must be set to
+ * VPX_ENCODER_ABI_VERSION
+ * \retval #VPX_CODEC_OK
+ * The decoder algorithm initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Memory allocation failed.
+ */
+ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx,
+ vpx_codec_iface_t *iface,
+ vpx_codec_enc_cfg_t *cfg,
+ int num_enc,
+ vpx_codec_flags_t flags,
+ vpx_rational_t *dsf,
+ int ver);
+
+
+ /*!\brief Convenience macro for vpx_codec_enc_init_multi_ver()
+ *
+ * Ensures the ABI version parameter is properly set.
+ */
+#define vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \
+ vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf, \
+ VPX_ENCODER_ABI_VERSION)
+
+
+ /*!\brief Get a default configuration
+ *
+ * Initializes an encoder configuration structure with default values. Supports
+ * the notion of "usages" so that an algorithm may offer different default
+ * settings depending on the user's intended goal. This function \ref SHOULD
+ * be called by all applications to initialize the configuration structure
+ * before specializing the configuration with application specific values.
+ *
+ * \param[in] iface Pointer to the algorithm interface to use.
+ * \param[out] cfg Configuration buffer to populate
+ * \param[in] usage End usage. Set to 0 or use codec specific values.
+ *
+ * \retval #VPX_CODEC_OK
+ * The configuration was populated.
+ * \retval #VPX_CODEC_INCAPABLE
+ * Interface is not an encoder interface.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * A parameter was NULL, or the usage value was not recognized.
+ */
+ vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
+ vpx_codec_enc_cfg_t *cfg,
+ unsigned int usage);
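A minimal end-to-end initialization sketch combining this function with the vpx_codec_enc_init() macro above; the VP8 interface accessor vpx_codec_vp8_cx() is assumed to be available from vpx/vp8cx.h, and error handling is abbreviated:

    vpx_codec_ctx_t ctx;
    vpx_codec_enc_cfg_t cfg;

    /* Start from the codec defaults for usage 0, then specialize. */
    if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, 0))
      return -1;
    cfg.g_w = 640;
    cfg.g_h = 480;
    cfg.rc_target_bitrate = 500;  /* kbit/s */

    if (vpx_codec_enc_init(&ctx, vpx_codec_vp8_cx(), &cfg, 0))
      return -1;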
+
+
+ /*!\brief Set or change configuration
+ *
+ * Reconfigures an encoder instance according to the given configuration.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] cfg Configuration buffer to use
+ *
+ * \retval #VPX_CODEC_OK
+ * The configuration was applied.
+ * \retval #VPX_CODEC_INCAPABLE
+ * Interface is not an encoder interface.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * A parameter was NULL, or the usage value was not recognized.
+ */
+ vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
+ const vpx_codec_enc_cfg_t *cfg);
+
+
+ /*!\brief Get global stream headers
+ *
+ * Retrieves a stream level global header packet, if supported by the codec.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ *
+ * \retval NULL
+ * Encoder does not support global header
+ * \retval Non-NULL
+ * Pointer to buffer containing global header packet
+ */
+ vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx);
- /*!@} - end defgroup encoder*/
+#define VPX_DL_REALTIME (1) /**< deadline parameter analogous to
+ * VPx REALTIME mode. */
+#define VPX_DL_GOOD_QUALITY (1000000) /**< deadline parameter analogous to
+ * VPx GOOD QUALITY mode. */
+#define VPX_DL_BEST_QUALITY (0) /**< deadline parameter analogous to
+ * VPx BEST QUALITY mode. */
+ /*!\brief Encode a frame
+ *
+ * Encodes a video frame at the given "presentation time." The presentation
+ * time stamp (PTS) \ref MUST be strictly increasing.
+ *
+ * The encoder supports the notion of a soft real-time deadline. Given a
+ * non-zero value to the deadline parameter, the encoder will make a "best
+ * effort" guarantee to return before the given time slice expires. It is
+ * implicit that limiting the available time to encode will degrade the
+ * output quality. The encoder can be given an unlimited time to produce the
+ * best possible frame by specifying a deadline of '0'. This deadline
+ * supersedes the VPx notion of "best quality, good quality, realtime".
+ * Applications that wish to map these former settings to the new deadline
+ * based system can use the symbols #VPX_DL_REALTIME, #VPX_DL_GOOD_QUALITY,
+ * and #VPX_DL_BEST_QUALITY.
+ *
+ * When the last frame has been passed to the encoder, this function should
+ * continue to be called, with the img parameter set to NULL. This will
+ * signal the end-of-stream condition to the encoder and allow it to encode
+ * any held buffers. Encoding is complete when vpx_codec_encode() is called
+ * and vpx_codec_get_cx_data() returns no data.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] img Image data to encode, NULL to flush.
+ * \param[in] pts Presentation time stamp, in timebase units.
+ * \param[in] duration Duration to show frame, in timebase units.
+ * \param[in] flags Flags to use for encoding this frame.
+ * \param[in] deadline Time to spend encoding, in microseconds. (0=infinite)
+ *
+ * \retval #VPX_CODEC_OK
+ * The frame was submitted for encoding.
+ * \retval #VPX_CODEC_INCAPABLE
+ * Interface is not an encoder interface.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * A parameter was NULL, the image format is unsupported, etc.
+ */
+ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned long duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned long deadline);
+
+ /*!\brief Set compressed data output buffer
+ *
+ * Sets the buffer that the codec should output the compressed data
+ * into. This call effectively sets the buffer pointer returned in the
+ * next VPX_CODEC_CX_FRAME_PKT packet. Subsequent packets will be
+ * appended into this buffer. The buffer is preserved across frames,
+ * so applications must periodically call this function after flushing
+ * the accumulated compressed data to disk or to the network to reset
+ * the pointer to the buffer's head.
+ *
+ * `pad_before` bytes will be skipped before writing the compressed
+ * data, and `pad_after` bytes will be appended to the packet. The size
+ * of the packet will be the sum of the size of the actual compressed
+ * data, pad_before, and pad_after. The padding bytes will be preserved
+ * (not overwritten).
+ *
+ * Note that calling this function does not guarantee that the returned
+ * compressed data will be placed into the specified buffer. In the
+ * event that the encoded data will not fit into the buffer provided,
+ * the returned packet \ref MAY point to an internal buffer, as it would
+ * if this call were never used. In this event, the output packet will
+ * NOT have any padding, and the application must free space and copy it
+ * to the proper place. This is of particular note in configurations
+ * that may output multiple packets for a single encoded frame (e.g., lagged
+ * encoding) or if the application does not reset the buffer periodically.
+ *
+ * Applications may restore the default behavior of the codec providing
+ * the compressed data buffer by calling this function with a NULL
+ * buffer.
+ *
+ * Applications \ref MUSTNOT call this function during iteration of
+ * vpx_codec_get_cx_data().
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] buf Buffer to store compressed data into
+ * \param[in] pad_before Bytes to skip before writing compressed data
+ * \param[in] pad_after Bytes to skip after writing compressed data
+ *
+ * \retval #VPX_CODEC_OK
+ * The buffer was set successfully.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * A parameter was NULL, the image format is unsupported, etc.
+ */
+ vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
+ const vpx_fixed_buf_t *buf,
+ unsigned int pad_before,
+ unsigned int pad_after);
+
+
+ /*!\brief Encoded data iterator
+ *
+ * Iterates over a list of data packets to be passed from the encoder to the
+ * application. The different kinds of packets available are enumerated in
+ * #vpx_codec_cx_pkt_kind.
+ *
+ * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's
+ * muxer. Multiple compressed frames may be in the list.
+ * #VPX_CODEC_STATS_PKT packets should be appended to a global buffer.
+ *
+ * The application \ref MUST silently ignore any packet kinds that it does
+ * not recognize or support.
+ *
+ * The data buffers returned from this function are only guaranteed to be
+ * valid until the application makes another call to any vpx_codec_* function.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in,out] iter Iterator storage, initialized to NULL
+ *
+ * \return Returns a pointer to an output data packet (compressed frame data,
+ * two-pass statistics, etc.) or NULL to signal end-of-list.
+ *
+ */
+ const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
+ vpx_codec_iter_t *iter);
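Putting vpx_codec_encode() and this iterator together, a sketch of the submit/drain loop described above, including the NULL-image flush; next_frame() and write_frame() are hypothetical application helpers:

    vpx_codec_iter_t iter;
    const vpx_codec_cx_pkt_t *pkt;
    vpx_codec_pts_t pts = 0;
    vpx_image_t *img;

    while ((img = next_frame()) != NULL) {          /* hypothetical source */
      vpx_codec_encode(&ctx, img, pts++, 1, 0, VPX_DL_GOOD_QUALITY);
      iter = NULL;                    /* reset the iterator per encode call */
      while ((pkt = vpx_codec_get_cx_data(&ctx, &iter)) != NULL)
        if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)    /* pass to the muxer */
          write_frame(pkt->data.frame.buf, pkt->data.frame.sz); /* hypothetical */
    }

    /* Flush: keep passing img == NULL until no more data is returned. */
    for (;;) {
      vpx_codec_encode(&ctx, NULL, pts, 1, 0, VPX_DL_GOOD_QUALITY);
      iter = NULL;
      if ((pkt = vpx_codec_get_cx_data(&ctx, &iter)) == NULL)
        break;                                      /* encoding complete */
      do {
        if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)
          write_frame(pkt->data.frame.buf, pkt->data.frame.sz);
      } while ((pkt = vpx_codec_get_cx_data(&ctx, &iter)) != NULL);
    }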
+
+
+ /*!\brief Get Preview Frame
+ *
+ * Returns an image that can be used as a preview. Shows the image as it would
+ * exist at the decompressor. The application \ref MUST NOT write into this
+ * image buffer.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ *
+ * \return Returns a pointer to a preview image, or NULL if no image is
+ * available.
+ *
+ */
+ const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx);
+
+
+ /*!@} - end defgroup encoder*/
#endif
#ifdef __cplusplus
diff --git a/libvpx/vpx/vpx_image.h b/libvpx/vpx/vpx_image.h
index 3e42447..c304bac 100644
--- a/libvpx/vpx/vpx_image.h
+++ b/libvpx/vpx/vpx_image.h
@@ -20,14 +20,14 @@ extern "C" {
#ifndef VPX_IMAGE_H
#define VPX_IMAGE_H
- /*!\brief Current ABI version number
- *
- * \internal
- * If this file is altered in any way that changes the ABI, this value
- * must be bumped. Examples include, but are not limited to, changing
- * types, removing or reassigning enums, adding/removing/rearranging
- * fields to structures
- */
+ /*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped. Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
#define VPX_IMAGE_ABI_VERSION (1) /**<\hideinitializer*/
@@ -36,41 +36,43 @@ extern "C" {
#define VPX_IMG_FMT_HAS_ALPHA 0x400 /**< Image has an alpha channel component */
- /*!\brief List of supported image formats */
- typedef enum vpx_img_fmt {
- VPX_IMG_FMT_NONE,
- VPX_IMG_FMT_RGB24, /**< 24 bit per pixel packed RGB */
- VPX_IMG_FMT_RGB32, /**< 32 bit per pixel packed 0RGB */
- VPX_IMG_FMT_RGB565, /**< 16 bit per pixel, 565 */
- VPX_IMG_FMT_RGB555, /**< 16 bit per pixel, 555 */
- VPX_IMG_FMT_UYVY, /**< UYVY packed YUV */
- VPX_IMG_FMT_YUY2, /**< YUYV packed YUV */
- VPX_IMG_FMT_YVYU, /**< YVYU packed YUV */
- VPX_IMG_FMT_BGR24, /**< 24 bit per pixel packed BGR */
- VPX_IMG_FMT_RGB32_LE, /**< 32 bit packed BGR0 */
- VPX_IMG_FMT_ARGB, /**< 32 bit packed ARGB, alpha=255 */
- VPX_IMG_FMT_ARGB_LE, /**< 32 bit packed BGRA, alpha=255 */
- VPX_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */
- VPX_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
- VPX_IMG_FMT_YV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
- VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2,
- VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 3, /** < planar 4:2:0 format with vpx color space */
- VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4 /** < planar 4:2:0 format with vpx color space */
- }
- vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
+ /*!\brief List of supported image formats */
+ typedef enum vpx_img_fmt {
+ VPX_IMG_FMT_NONE,
+ VPX_IMG_FMT_RGB24, /**< 24 bit per pixel packed RGB */
+ VPX_IMG_FMT_RGB32, /**< 32 bit per pixel packed 0RGB */
+ VPX_IMG_FMT_RGB565, /**< 16 bit per pixel, 565 */
+ VPX_IMG_FMT_RGB555, /**< 16 bit per pixel, 555 */
+ VPX_IMG_FMT_UYVY, /**< UYVY packed YUV */
+ VPX_IMG_FMT_YUY2, /**< YUYV packed YUV */
+ VPX_IMG_FMT_YVYU, /**< YVYU packed YUV */
+ VPX_IMG_FMT_BGR24, /**< 24 bit per pixel packed BGR */
+ VPX_IMG_FMT_RGB32_LE, /**< 32 bit packed BGR0 */
+ VPX_IMG_FMT_ARGB, /**< 32 bit packed ARGB, alpha=255 */
+ VPX_IMG_FMT_ARGB_LE, /**< 32 bit packed BGRA, alpha=255 */
+ VPX_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */
+ VPX_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
+ VPX_IMG_FMT_YV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
+ VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2,
+ VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 3, /**< planar 4:2:0 format with vpx color space */
+ VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4,
+ VPX_IMG_FMT_I422 = VPX_IMG_FMT_PLANAR | 5,
+ VPX_IMG_FMT_I444 = VPX_IMG_FMT_PLANAR | 6,
+ VPX_IMG_FMT_444A = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 7
+ } vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
#if !defined(VPX_CODEC_DISABLE_COMPAT) || !VPX_CODEC_DISABLE_COMPAT
#define IMG_FMT_PLANAR VPX_IMG_FMT_PLANAR /**< \deprecated Use #VPX_IMG_FMT_PLANAR */
#define IMG_FMT_UV_FLIP VPX_IMG_FMT_UV_FLIP /**< \deprecated Use #VPX_IMG_FMT_UV_FLIP */
#define IMG_FMT_HAS_ALPHA VPX_IMG_FMT_HAS_ALPHA /**< \deprecated Use #VPX_IMG_FMT_HAS_ALPHA */
- /*!\brief Deprecated list of supported image formats
- * \deprecated New code should use #vpx_img_fmt
- */
+ /*!\brief Deprecated list of supported image formats
+ * \deprecated New code should use #vpx_img_fmt
+ */
#define img_fmt vpx_img_fmt
- /*!\brief alias for enum img_fmt.
- * \deprecated New code should use #vpx_img_fmt_t
- */
+ /*!\brief alias for enum img_fmt.
+ * \deprecated New code should use #vpx_img_fmt_t
+ */
#define img_fmt_t vpx_img_fmt_t
#define IMG_FMT_NONE VPX_IMG_FMT_NONE /**< \deprecated Use #VPX_IMG_FMT_NONE */
@@ -93,24 +95,23 @@ extern "C" {
#define IMG_FMT_VPXI420 VPX_IMG_FMT_VPXI420 /**< \deprecated Use #VPX_IMG_FMT_VPXI420 */
#endif /* VPX_CODEC_DISABLE_COMPAT */
- /**\brief Image Descriptor */
- typedef struct vpx_image
- {
- vpx_img_fmt_t fmt; /**< Image Format */
+ /**\brief Image Descriptor */
+ typedef struct vpx_image {
+ vpx_img_fmt_t fmt; /**< Image Format */
- /* Image storage dimensions */
- unsigned int w; /**< Stored image width */
- unsigned int h; /**< Stored image height */
+ /* Image storage dimensions */
+ unsigned int w; /**< Stored image width */
+ unsigned int h; /**< Stored image height */
- /* Image display dimensions */
- unsigned int d_w; /**< Displayed image width */
- unsigned int d_h; /**< Displayed image height */
+ /* Image display dimensions */
+ unsigned int d_w; /**< Displayed image width */
+ unsigned int d_h; /**< Displayed image height */
- /* Chroma subsampling info */
- unsigned int x_chroma_shift; /**< subsampling order, X */
- unsigned int y_chroma_shift; /**< subsampling order, Y */
+ /* Chroma subsampling info */
+ unsigned int x_chroma_shift; /**< subsampling order, X */
+ unsigned int y_chroma_shift; /**< subsampling order, Y */
- /* Image data pointers. */
+ /* Image data pointers. */
#define VPX_PLANE_PACKED 0 /**< To be used for all packed formats */
#define VPX_PLANE_Y 0 /**< Y (Luminance) plane */
#define VPX_PLANE_U 1 /**< U (Chroma) plane */
@@ -123,119 +124,118 @@ extern "C" {
#define PLANE_V VPX_PLANE_V
#define PLANE_ALPHA VPX_PLANE_ALPHA
#endif
- unsigned char *planes[4]; /**< pointer to the top left pixel for each plane */
- int stride[4]; /**< stride between rows for each plane */
+ unsigned char *planes[4]; /**< pointer to the top left pixel for each plane */
+ int stride[4]; /**< stride between rows for each plane */
- int bps; /**< bits per sample (for packed formats) */
-
- /* The following member may be set by the application to associate data
- * with this image.
- */
- void *user_priv; /**< may be set by the application to associate data
- * with this image. */
+ int bps; /**< bits per sample (for packed formats) */
- /* The following members should be treated as private. */
- unsigned char *img_data; /**< private */
- int img_data_owner; /**< private */
- int self_allocd; /**< private */
- } vpx_image_t; /**< alias for struct vpx_image */
-
- /**\brief Representation of a rectangle on a surface */
- typedef struct vpx_image_rect
- {
- unsigned int x; /**< leftmost column */
- unsigned int y; /**< topmost row */
- unsigned int w; /**< width */
- unsigned int h; /**< height */
- } vpx_image_rect_t; /**< alias for struct vpx_image_rect */
-
- /*!\brief Open a descriptor, allocating storage for the underlying image
- *
- * Returns a descriptor for storing an image of the given format. The
- * storage for the descriptor is allocated on the heap.
- *
- * \param[in] img Pointer to storage for descriptor. If this parameter
- * is NULL, the storage for the descriptor will be
- * allocated on the heap.
- * \param[in] fmt Format for the image
- * \param[in] d_w Width of the image
- * \param[in] d_h Height of the image
- * \param[in] align Alignment, in bytes, of the image buffer and
- * each row in the image(stride).
- *
- * \return Returns a pointer to the initialized image descriptor. If the img
- * parameter is non-null, the value of the img parameter will be
- * returned.
+ /* The following member may be set by the application to associate data
+ * with this image.
*/
- vpx_image_t *vpx_img_alloc(vpx_image_t *img,
- vpx_img_fmt_t fmt,
- unsigned int d_w,
- unsigned int d_h,
- unsigned int align);
-
- /*!\brief Open a descriptor, using existing storage for the underlying image
- *
- * Returns a descriptor for storing an image of the given format. The
- * storage for descriptor has been allocated elsewhere, and a descriptor is
- * desired to "wrap" that storage.
- *
- * \param[in] img Pointer to storage for descriptor. If this parameter
- * is NULL, the storage for the descriptor will be
- * allocated on the heap.
- * \param[in] fmt Format for the image
- * \param[in] d_w Width of the image
- * \param[in] d_h Height of the image
- * \param[in] align Alignment, in bytes, of each row in the image.
- * \param[in] img_data Storage to use for the image
- *
- * \return Returns a pointer to the initialized image descriptor. If the img
- * parameter is non-null, the value of the img parameter will be
- * returned.
- */
- vpx_image_t *vpx_img_wrap(vpx_image_t *img,
- vpx_img_fmt_t fmt,
- unsigned int d_w,
- unsigned int d_h,
- unsigned int align,
- unsigned char *img_data);
-
-
- /*!\brief Set the rectangle identifying the displayed portion of the image
- *
- * Updates the displayed rectangle (aka viewport) on the image surface to
- * match the specified coordinates and size.
- *
- * \param[in] img Image descriptor
- * \param[in] x leftmost column
- * \param[in] y topmost row
- * \param[in] w width
- * \param[in] h height
- *
- * \return 0 if the requested rectangle is valid, nonzero otherwise.
- */
- int vpx_img_set_rect(vpx_image_t *img,
- unsigned int x,
- unsigned int y,
- unsigned int w,
- unsigned int h);
-
-
- /*!\brief Flip the image vertically (top for bottom)
- *
- * Adjusts the image descriptor's pointers and strides to make the image
- * be referenced upside-down.
- *
- * \param[in] img Image descriptor
- */
- void vpx_img_flip(vpx_image_t *img);
+ void *user_priv; /**< may be set by the application to associate data
+ * with this image. */
- /*!\brief Close an image descriptor
- *
- * Frees all allocated storage associated with an image descriptor.
- *
- * \param[in] img Image descriptor
- */
- void vpx_img_free(vpx_image_t *img);
+ /* The following members should be treated as private. */
+ unsigned char *img_data; /**< private */
+ int img_data_owner; /**< private */
+ int self_allocd; /**< private */
+ } vpx_image_t; /**< alias for struct vpx_image */
+
+ /**\brief Representation of a rectangle on a surface */
+ typedef struct vpx_image_rect {
+ unsigned int x; /**< leftmost column */
+ unsigned int y; /**< topmost row */
+ unsigned int w; /**< width */
+ unsigned int h; /**< height */
+ } vpx_image_rect_t; /**< alias for struct vpx_image_rect */
+
+ /*!\brief Open a descriptor, allocating storage for the underlying image
+ *
+ * Returns a descriptor for storing an image of the given format. The
+ * storage for the descriptor is allocated on the heap.
+ *
+ * \param[in] img Pointer to storage for descriptor. If this parameter
+ * is NULL, the storage for the descriptor will be
+ * allocated on the heap.
+ * \param[in] fmt Format for the image
+ * \param[in] d_w Width of the image
+ * \param[in] d_h Height of the image
+ * \param[in] align Alignment, in bytes, of the image buffer and
+ * each row in the image (stride).
+ *
+ * \return Returns a pointer to the initialized image descriptor. If the img
+ * parameter is non-null, the value of the img parameter will be
+ * returned.
+ */
+ vpx_image_t *vpx_img_alloc(vpx_image_t *img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int align);
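For illustration, a minimal allocate/inspect/free cycle (dimensions and alignment are arbitrary example values; vpx_img_free() is declared below):

    vpx_image_t img;
    /* Heap-backed 640x480 I420 image, rows aligned to 16 bytes. */
    if (!vpx_img_alloc(&img, VPX_IMG_FMT_I420, 640, 480, 16))
      return -1;
    /* img.planes[VPX_PLANE_Y] and img.stride[VPX_PLANE_Y] are now valid. */
    vpx_img_free(&img);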
+
+ /*!\brief Open a descriptor, using existing storage for the underlying image
+ *
+ * Returns a descriptor for storing an image of the given format. The
+ * storage for the image has been allocated elsewhere, and a descriptor is
+ * desired to "wrap" that storage.
+ *
+ * \param[in] img Pointer to storage for descriptor. If this parameter
+ * is NULL, the storage for the descriptor will be
+ * allocated on the heap.
+ * \param[in] fmt Format for the image
+ * \param[in] d_w Width of the image
+ * \param[in] d_h Height of the image
+ * \param[in] align Alignment, in bytes, of each row in the image.
+ * \param[in] img_data Storage to use for the image
+ *
+ * \return Returns a pointer to the initialized image descriptor. If the img
+ * parameter is non-null, the value of the img parameter will be
+ * returned.
+ */
+ vpx_image_t *vpx_img_wrap(vpx_image_t *img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int align,
+ unsigned char *img_data);
+
+
+ /*!\brief Set the rectangle identifying the displayed portion of the image
+ *
+ * Updates the displayed rectangle (aka viewport) on the image surface to
+ * match the specified coordinates and size.
+ *
+ * \param[in] img Image descriptor
+ * \param[in] x leftmost column
+ * \param[in] y topmost row
+ * \param[in] w width
+ * \param[in] h height
+ *
+ * \return 0 if the requested rectangle is valid, nonzero otherwise.
+ */
+ int vpx_img_set_rect(vpx_image_t *img,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h);
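Continuing the allocation example above, a sketch of restricting the viewport to the centered quarter of the 640x480 image:

    /* Display only the centered 320x240 region. */
    if (vpx_img_set_rect(&img, 160, 120, 320, 240))
      return -1;  /* nonzero: rectangle falls outside the image */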
+
+
+ /*!\brief Flip the image vertically (top for bottom)
+ *
+ * Adjusts the image descriptor's pointers and strides so that the image
+ * is referenced upside-down.
+ *
+ * \param[in] img Image descriptor
+ */
+ void vpx_img_flip(vpx_image_t *img);
+
+ /*!\brief Close an image descriptor
+ *
+ * Frees all allocated storage associated with an image descriptor.
+ *
+ * \param[in] img Image descriptor
+ */
+ void vpx_img_free(vpx_image_t *img);
#endif
#ifdef __cplusplus
diff --git a/libvpx/vpx/vpx_integer.h b/libvpx/vpx/vpx_integer.h
index 218bca7..0ccc96c 100644
--- a/libvpx/vpx/vpx_integer.h
+++ b/libvpx/vpx/vpx_integer.h
@@ -27,6 +27,9 @@ typedef unsigned int uint32_t;
#if (defined(_MSC_VER) && (_MSC_VER < 1600))
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
+#define INT64_MAX _I64_MAX
+#define INT16_MAX _I16_MAX
+#define INT16_MIN _I16_MIN
#endif
#ifndef _UINTPTR_T_DEFINED
diff --git a/libvpx/vpx_mem/include/vpx_mem_intrnl.h b/libvpx/vpx_mem/include/vpx_mem_intrnl.h
index 63c6b77..2248ad5 100644
--- a/libvpx/vpx_mem/include/vpx_mem_intrnl.h
+++ b/libvpx/vpx_mem/include/vpx_mem_intrnl.h
@@ -11,7 +11,7 @@
#ifndef __VPX_MEM_INTRNL_H__
#define __VPX_MEM_INTRNL_H__
-#include "vpx_config.h"
+#include "./vpx_config.h"
#ifndef CONFIG_MEM_MANAGER
# if defined(VXWORKS)
@@ -47,22 +47,18 @@ vpx_memcpy, _memset, and _memmove*/
#ifndef DEFAULT_ALIGNMENT
# if defined(VXWORKS)
# define DEFAULT_ALIGNMENT 32 /*default addr alignment to use in
- calls to vpx_* functions other
- than vpx_memalign*/
+calls to vpx_* functions other
+than vpx_memalign*/
# else
-# define DEFAULT_ALIGNMENT 1
+# define DEFAULT_ALIGNMENT (2 * sizeof(void*)) /* NOLINT */
# endif
#endif
-#if DEFAULT_ALIGNMENT < 1
-# error "DEFAULT_ALIGNMENT must be >= 1!"
-#endif
-
#if CONFIG_MEM_TRACKER
# define TRY_BOUNDS_CHECK 1 /*when set to 1 pads each allocation,
- integrity can be checked using
- vpx_memory_tracker_check_integrity
- or on free by defining*/
+integrity can be checked using
+vpx_memory_tracker_check_integrity
+or on free by defining*/
/*TRY_BOUNDS_CHECK_ON_FREE*/
#else
# define TRY_BOUNDS_CHECK 0
@@ -70,13 +66,13 @@ vpx_memcpy, _memset, and _memmove*/
#if TRY_BOUNDS_CHECK
# define TRY_BOUNDS_CHECK_ON_FREE 0 /*checks mem integrity on every
- free, very expensive*/
+free, very expensive*/
# define BOUNDS_CHECK_VALUE 0xdeadbeef /*value stored before/after ea.
- mem addr for bounds checking*/
+mem addr for bounds checking*/
# define BOUNDS_CHECK_PAD_SIZE 32 /*size of the padding before and
- after ea allocation to be filled
- with BOUNDS_CHECK_VALUE.
- this should be a multiple of 4*/
+after ea allocation to be filled
+with BOUNDS_CHECK_VALUE.
+this should be a multiple of 4*/
#else
# define BOUNDS_CHECK_VALUE 0
# define BOUNDS_CHECK_PAD_SIZE 0
diff --git a/libvpx/vpx_mem/include/vpx_mem_tracker.h b/libvpx/vpx_mem/include/vpx_mem_tracker.h
index ef2b29b..3be0d2d 100644
--- a/libvpx/vpx_mem/include/vpx_mem_tracker.h
+++ b/libvpx/vpx_mem/include/vpx_mem_tracker.h
@@ -23,158 +23,157 @@
#include <stdarg.h>
-struct mem_block
-{
- size_t addr;
- unsigned int size,
- line;
- char *file;
- struct mem_block *prev,
- * next;
-
- int padded; // This mem_block has padding for integrity checks.
- // As of right now, this should only be 0 if
- // using vpx_mem_alloc to allocate cache memory.
- // 2005-01-11 tjf
+struct mem_block {
+ size_t addr;
+ unsigned int size,
+ line;
+ char *file;
+ struct mem_block *prev,
+ * next;
+
+ int padded; // This mem_block has padding for integrity checks.
+ // As of right now, this should only be 0 if
+ // using vpx_mem_alloc to allocate cache memory.
+ // 2005-01-11 tjf
};
#if defined(__cplusplus)
extern "C" {
#endif
- /*
- vpx_memory_tracker_init(int padding_size, int pad_value)
- padding_size - the size of the padding before and after each mem addr.
- Values > 0 indicate that integrity checks can be performed
- by inspecting these areas.
- pad_value - the initial value within the padding area before and after
- each mem addr.
-
- Initializes the memory tracker interface. Should be called before any
- other calls to the memory tracker.
- */
- int vpx_memory_tracker_init(int padding_size, int pad_value);
-
- /*
- vpx_memory_tracker_destroy()
- Deinitializes the memory tracker interface
- */
- void vpx_memory_tracker_destroy();
-
- /*
- vpx_memory_tracker_add(size_t addr, unsigned int size,
- char * file, unsigned int line)
- addr - memory address to be added to list
- size - size of addr
- file - the file addr was referenced from
- line - the line in file addr was referenced from
- Adds memory address addr, it's size, file and line it came from
- to the memory tracker allocation table
- */
- void vpx_memory_tracker_add(size_t addr, unsigned int size,
- char *file, unsigned int line,
- int padded);
-
- /*
- vpx_memory_tracker_add(size_t addr, unsigned int size, char * file, unsigned int line)
- addr - memory address to be added to be removed
- padded - if 0, disables bounds checking on this memory block even if bounds
- checking is enabled. (for example, when allocating cache memory, we still want
- to check for memory leaks, but we do not waste cache space for bounds check padding)
- Removes the specified address from the memory tracker's allocation
- table
- Return:
- 0: on success
- -1: if memory allocation table's mutex could not be locked
- -2: if the addr was not found in the list
- */
- int vpx_memory_tracker_remove(size_t addr);
-
- /*
- vpx_memory_tracker_find(unsigned int addr)
- addr - address to be found in the memory tracker's
- allocation table
- Return:
- If found, pointer to the memory block that matches addr
- NULL otherwise
- */
- struct mem_block *vpx_memory_tracker_find(size_t addr);
-
- /*
- vpx_memory_tracker_dump()
- Dumps the current contents of the memory
- tracker allocation table
- */
- void vpx_memory_tracker_dump();
-
- /*
- vpx_memory_tracker_check_integrity()
- If a padding_size was provided to vpx_memory_tracker_init()
- This function will verify that the region before and after each
- memory address contains the specified pad_value. Should the check
- fail, the filename and line of the check will be printed out.
- */
- void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
-
- /*
- vpx_memory_tracker_set_log_type
- type - value representing the logging type to use
- option - type specific option. This will be interpreted differently
- based on the type.
- Sets the logging type for the memory tracker.
- Values currently supported:
- 0: if option is NULL, log to stderr, otherwise interpret option as a
- filename and attempt to open it.
- 1: Use output_debug_string (WIN32 only), option ignored
- Return:
- 0: on success
- -1: if the logging type could not be set, because the value was invalid
- or because a file could not be opened
- */
- int vpx_memory_tracker_set_log_type(int type, char *option);
-
- /*
- vpx_memory_tracker_set_log_func
- userdata - ptr to be passed to the supplied logfunc, can be NULL
- logfunc - the logging function to be used to output data from
- vpx_memory_track_dump/check_integrity
- Sets a logging function to be used by the memory tracker.
- Return:
- 0: on success
- -1: if the logging type could not be set because logfunc was NULL
- */
- int vpx_memory_tracker_set_log_func(void *userdata,
- void(*logfunc)(void *userdata,
- const char *fmt, va_list args));
-
- /* Wrappers to standard library functions. */
- typedef void*(* mem_track_malloc_func)(size_t);
- typedef void*(* mem_track_calloc_func)(size_t, size_t);
- typedef void*(* mem_track_realloc_func)(void *, size_t);
- typedef void (* mem_track_free_func)(void *);
- typedef void*(* mem_track_memcpy_func)(void *, const void *, size_t);
- typedef void*(* mem_track_memset_func)(void *, int, size_t);
- typedef void*(* mem_track_memmove_func)(void *, const void *, size_t);
-
- /*
- vpx_memory_tracker_set_functions
-
- Sets the function pointers for the standard library functions.
-
- Return:
- 0: on success
- -1: if the use global function pointers is not set.
- */
- int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l
- , mem_track_calloc_func g_calloc_l
- , mem_track_realloc_func g_realloc_l
- , mem_track_free_func g_free_l
- , mem_track_memcpy_func g_memcpy_l
- , mem_track_memset_func g_memset_l
- , mem_track_memmove_func g_memmove_l);
+ /*
+ vpx_memory_tracker_init(int padding_size, int pad_value)
+ padding_size - the size of the padding before and after each mem addr.
+ Values > 0 indicate that integrity checks can be performed
+ by inspecting these areas.
+ pad_value - the initial value within the padding area before and after
+ each mem addr.
+
+ Initializes the memory tracker interface. Should be called before any
+ other calls to the memory tracker.
+ */
+ int vpx_memory_tracker_init(int padding_size, int pad_value);
+
+ /*
+ vpx_memory_tracker_destroy()
+ Deinitializes the memory tracker interface
+ */
+ void vpx_memory_tracker_destroy();
+
+ /*
+ vpx_memory_tracker_add(size_t addr, unsigned int size,
+ char * file, unsigned int line)
+ addr - memory address to be added to list
+ size - size of addr
+ file - the file addr was referenced from
+ line - the line in file addr was referenced from
+ Adds memory address addr, its size, and the file and line it came from
+ to the memory tracker allocation table
+ */
+ void vpx_memory_tracker_add(size_t addr, unsigned int size,
+ char *file, unsigned int line,
+ int padded);
+
+ /*
+ vpx_memory_tracker_remove(size_t addr)
+ addr - memory address to be removed
+ padded - if 0, disables bounds checking on this memory block even if bounds
+ checking is enabled. (for example, when allocating cache memory, we still want
+ to check for memory leaks, but we do not waste cache space for bounds check padding)
+ Removes the specified address from the memory tracker's allocation
+ table
+ Return:
+ 0: on success
+ -1: if memory allocation table's mutex could not be locked
+ -2: if the addr was not found in the list
+ */
+ int vpx_memory_tracker_remove(size_t addr);
+
+ /*
+ vpx_memory_tracker_find(size_t addr)
+ addr - address to be found in the memory tracker's
+ allocation table
+ Return:
+ If found, pointer to the memory block that matches addr
+ NULL otherwise
+ */
+ struct mem_block *vpx_memory_tracker_find(size_t addr);
+
+ /*
+ vpx_memory_tracker_dump()
+ Dumps the current contents of the memory
+ tracker allocation table
+ */
+ void vpx_memory_tracker_dump();
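A hedged lifecycle sketch for these declarations; the 0-on-success return convention is assumed for vpx_memory_tracker_init(), and the padding/pad values are arbitrary:

    /* Pad each tracked allocation with 32 bytes of 0xCD on both sides. */
    if (vpx_memory_tracker_init(32, 0xCD))
      return -1;                          /* assumed failure convention */

    void *p = malloc(100);
    vpx_memory_tracker_add((size_t)p, 100, (char *)__FILE__, __LINE__, 1);

    vpx_memory_tracker_check_integrity((char *)__FILE__, __LINE__);
    vpx_memory_tracker_remove((size_t)p);
    free(p);

    vpx_memory_tracker_dump();            /* report outstanding blocks */
    vpx_memory_tracker_destroy();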
+
+ /*
+ vpx_memory_tracker_check_integrity()
+ If a padding_size was provided to vpx_memory_tracker_init()
+ This function will verify that the region before and after each
+ memory address contains the specified pad_value. Should the check
+ fail, the filename and line of the check will be printed out.
+ */
+ void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
+
+ /*
+ vpx_memory_tracker_set_log_type
+ type - value representing the logging type to use
+ option - type specific option. This will be interpreted differently
+ based on the type.
+ Sets the logging type for the memory tracker.
+ Values currently supported:
+ 0: if option is NULL, log to stderr, otherwise interpret option as a
+ filename and attempt to open it.
+ 1: Use output_debug_string (WIN32 only), option ignored
+ Return:
+ 0: on success
+ -1: if the logging type could not be set, because the value was invalid
+ or because a file could not be opened
+ */
+ int vpx_memory_tracker_set_log_type(int type, char *option);
+
+ /*
+ vpx_memory_tracker_set_log_func
+ userdata - ptr to be passed to the supplied logfunc, can be NULL
+ logfunc - the logging function to be used to output data from
+ vpx_memory_track_dump/check_integrity
+ Sets a logging function to be used by the memory tracker.
+ Return:
+ 0: on success
+ -1: if the logging type could not be set because logfunc was NULL
+ */
+ int vpx_memory_tracker_set_log_func(void *userdata,
+ void(*logfunc)(void *userdata,
+ const char *fmt, va_list args));
+
+ /* Wrappers to standard library functions. */
+ typedef void *(* mem_track_malloc_func)(size_t);
+ typedef void *(* mem_track_calloc_func)(size_t, size_t);
+ typedef void *(* mem_track_realloc_func)(void *, size_t);
+ typedef void (* mem_track_free_func)(void *);
+ typedef void *(* mem_track_memcpy_func)(void *, const void *, size_t);
+ typedef void *(* mem_track_memset_func)(void *, int, size_t);
+ typedef void *(* mem_track_memmove_func)(void *, const void *, size_t);
+
+ /*
+ vpx_memory_tracker_set_functions
+
+ Sets the function pointers for the standard library functions.
+
+ Return:
+ 0: on success
+ -1: if the use of global function pointers is not enabled.
+ */
+ int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l
+, mem_track_calloc_func g_calloc_l
+, mem_track_realloc_func g_realloc_l
+, mem_track_free_func g_free_l
+, mem_track_memcpy_func g_memcpy_l
+, mem_track_memset_func g_memset_l
+, mem_track_memmove_func g_memmove_l);
#if defined(__cplusplus)
}
#endif
-#endif //__VPX_MEM_TRACKER_H__
+#endif // __VPX_MEM_TRACKER_H__
diff --git a/libvpx/vpx_mem/memory_manager/hmm_alloc.c b/libvpx/vpx_mem/memory_manager/hmm_alloc.c
index 22c4a54..ab3562d 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_alloc.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_alloc.c
@@ -15,46 +15,44 @@
#include "hmm_intrnl.h"
-void *U(alloc)(U(descriptor) *desc, U(size_aau) n)
-{
+void *U(alloc)(U(descriptor) *desc, U(size_aau) n) {
#ifdef HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- if (desc->last_freed)
- {
+ if (desc->last_freed) {
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(desc->last_freed)
+ AUDIT_BLOCK(desc->last_freed)
#endif
- U(into_free_collection)(desc, (head_record *)(desc->last_freed));
+ U(into_free_collection)(desc, (head_record *)(desc->last_freed));
- desc->last_freed = 0;
- }
-
- /* Add space for block header. */
- n += HEAD_AAUS;
-
- /* Convert n from number of address alignment units to block alignment
- ** units. */
- n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
-
- if (n < MIN_BLOCK_BAUS)
- n = MIN_BLOCK_BAUS;
-
- {
- /* Search for the first node of the bin containing the smallest
- ** block big enough to satisfy request. */
- ptr_record *ptr_rec_ptr =
- U(avl_search)(
- (U(avl_avl) *) & (desc->avl_tree_root), (U(size_bau)) n,
- AVL_GREATER_EQUAL);
-
- /* If an approprate bin is found, satisfy the allocation request,
- ** otherwise return null pointer. */
- return(ptr_rec_ptr ?
- U(alloc_from_bin)(desc, ptr_rec_ptr, (U(size_bau)) n) : 0);
+ desc->last_freed = 0;
}
+
+ /* Add space for block header. */
+ n += HEAD_AAUS;
+
+ /* Convert n from number of address alignment units to block alignment
+ ** units. */
+ n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
+
+ if (n < MIN_BLOCK_BAUS)
+ n = MIN_BLOCK_BAUS;
+
+ {
+ /* Search for the first node of the bin containing the smallest
+ ** block big enough to satisfy request. */
+ ptr_record *ptr_rec_ptr =
+ U(avl_search)(
+ (U(avl_avl) *) & (desc->avl_tree_root), (U(size_bau)) n,
+ AVL_GREATER_EQUAL);
+
+ ** If an appropriate bin is found, satisfy the allocation request,
+ ** otherwise return null pointer. */
+ return(ptr_rec_ptr ?
+ U(alloc_from_bin)(desc, ptr_rec_ptr, (U(size_bau)) n) : 0);
+ }
}
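The size conversion in the function above is a ceiling division; assuming the conventional definition of DIV_ROUND_UP, the rounding step behaves as follows (alignment-unit sizes are illustrative):

    /* Conventional ceiling-division idiom (assumed definition): */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    /* e.g. 5 address-alignment units with a 2-AAU block-alignment unit:
     * DIV_ROUND_UP(5, 2) == 3 block-alignment units. */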
diff --git a/libvpx/vpx_mem/memory_manager/hmm_base.c b/libvpx/vpx_mem/memory_manager/hmm_base.c
index ad1da03..0eff59d 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_base.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_base.c
@@ -15,58 +15,53 @@
#include "hmm_intrnl.h"
-void U(init)(U(descriptor) *desc)
-{
- desc->avl_tree_root = 0;
- desc->last_freed = 0;
+void U(init)(U(descriptor) *desc) {
+ desc->avl_tree_root = 0;
+ desc->last_freed = 0;
}
/* Remove a free block from a bin's doubly-linked list when it is not
** the first block in the bin.
*/
void U(dll_remove)(
- /* Pointer to pointer record in the block to be removed. */
- ptr_record *to_remove)
-{
- to_remove->prev->next = to_remove->next;
+ /* Pointer to pointer record in the block to be removed. */
+ ptr_record *to_remove) {
+ to_remove->prev->next = to_remove->next;
- if (to_remove->next)
- to_remove->next->prev = to_remove->prev;
+ if (to_remove->next)
+ to_remove->next->prev = to_remove->prev;
}
/* Put a block into the free collection of a heap.
*/
void U(into_free_collection)(
- /* Pointer to heap descriptor. */
- U(descriptor) *desc,
- /* Pointer to head record of block. */
- head_record *head_ptr)
-{
- ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
-
- ptr_record *bin_front_ptr =
- U(avl_insert)((U(avl_avl) *) & (desc->avl_tree_root), ptr_rec_ptr);
-
- if (bin_front_ptr != ptr_rec_ptr)
- {
- /* The block was not inserted into the AVL tree because there is
- ** already a bin for the size of the block. */
-
- MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(head_ptr)
- ptr_rec_ptr->self = ptr_rec_ptr;
-
- /* Make the block the new second block in the bin's doubly-linked
- ** list. */
- ptr_rec_ptr->prev = bin_front_ptr;
- ptr_rec_ptr->next = bin_front_ptr->next;
- bin_front_ptr->next = ptr_rec_ptr;
-
- if (ptr_rec_ptr->next)
- ptr_rec_ptr->next->prev = ptr_rec_ptr;
- }
- else
- /* Block is first block in new bin. */
- ptr_rec_ptr->next = 0;
+ /* Pointer to heap descriptor. */
+ U(descriptor) *desc,
+ /* Pointer to head record of block. */
+ head_record *head_ptr) {
+ ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
+
+ ptr_record *bin_front_ptr =
+ U(avl_insert)((U(avl_avl) *) & (desc->avl_tree_root), ptr_rec_ptr);
+
+ if (bin_front_ptr != ptr_rec_ptr) {
+ /* The block was not inserted into the AVL tree because there is
+ ** already a bin for the size of the block. */
+
+ MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(head_ptr)
+ ptr_rec_ptr->self = ptr_rec_ptr;
+
+ /* Make the block the new second block in the bin's doubly-linked
+ ** list. */
+ ptr_rec_ptr->prev = bin_front_ptr;
+ ptr_rec_ptr->next = bin_front_ptr->next;
+ bin_front_ptr->next = ptr_rec_ptr;
+
+ if (ptr_rec_ptr->next)
+ ptr_rec_ptr->next->prev = ptr_rec_ptr;
+ } else
+ /* Block is first block in new bin. */
+ ptr_rec_ptr->next = 0;
}
/* Allocate a block from a given bin. Returns a pointer to the payload
@@ -74,268 +69,245 @@ void U(into_free_collection)(
** to calling this function.
*/
void *U(alloc_from_bin)(
- /* Pointer to heap descriptor. */
- U(descriptor) *desc,
- /* Pointer to pointer record of first block in bin. */
- ptr_record *bin_front_ptr,
- /* Number of BAUs needed in the allocated block. If the block taken
- ** from the bin is significantly larger than the number of BAUs needed,
- ** the "extra" BAUs are split off to form a new free block. */
- U(size_bau) n_baus)
-{
- head_record *head_ptr;
- U(size_bau) rem_baus;
-
- if (bin_front_ptr->next)
- {
- /* There are multiple blocks in this bin. Use the 2nd block in
- ** the bin to avoid needless change to the AVL tree.
- */
-
- ptr_record *ptr_rec_ptr = bin_front_ptr->next;
- head_ptr = PTR_REC_TO_HEAD(ptr_rec_ptr);
+ /* Pointer to heap descriptor. */
+ U(descriptor) *desc,
+ /* Pointer to pointer record of first block in bin. */
+ ptr_record *bin_front_ptr,
+ /* Number of BAUs needed in the allocated block. If the block taken
+ ** from the bin is significantly larger than the number of BAUs needed,
+ ** the "extra" BAUs are split off to form a new free block. */
+ U(size_bau) n_baus) {
+ head_record *head_ptr;
+ U(size_bau) rem_baus;
+
+ if (bin_front_ptr->next) {
+ /* There are multiple blocks in this bin. Use the 2nd block in
+ ** the bin to avoid needless change to the AVL tree.
+ */
+
+ ptr_record *ptr_rec_ptr = bin_front_ptr->next;
+ head_ptr = PTR_REC_TO_HEAD(ptr_rec_ptr);
#ifdef AUDIT_FAIL
- AUDIT_BLOCK(head_ptr)
+ AUDIT_BLOCK(head_ptr)
#endif
- U(dll_remove)(ptr_rec_ptr);
- }
- else
- {
- /* There is only one block in the bin, so it has to be removed
- ** from the AVL tree.
- */
+ U(dll_remove)(ptr_rec_ptr);
+ } else {
+ /* There is only one block in the bin, so it has to be removed
+ ** from the AVL tree.
+ */
- head_ptr = PTR_REC_TO_HEAD(bin_front_ptr);
+ head_ptr = PTR_REC_TO_HEAD(bin_front_ptr);
- U(avl_remove)(
- (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr));
- }
+ U(avl_remove)(
+ (U(avl_avl) *) & (desc->avl_tree_root), BLOCK_BAUS(head_ptr));
+ }
- MARK_BLOCK_ALLOCATED(head_ptr)
+ MARK_BLOCK_ALLOCATED(head_ptr)
- rem_baus = BLOCK_BAUS(head_ptr) - n_baus;
+ rem_baus = BLOCK_BAUS(head_ptr) - n_baus;
- if (rem_baus >= MIN_BLOCK_BAUS)
- {
- /* Since there are enough "extra" BAUs, split them off to form
- ** a new free block.
- */
+ if (rem_baus >= MIN_BLOCK_BAUS) {
+ /* Since there are enough "extra" BAUs, split them off to form
+ ** a new free block.
+ */
- head_record *rem_head_ptr =
- (head_record *) BAUS_FORWARD(head_ptr, n_baus);
+ head_record *rem_head_ptr =
+ (head_record *) BAUS_FORWARD(head_ptr, n_baus);
- /* Change the next block's header to reflect the fact that the
- ** block preceeding it is now smaller.
- */
- SET_PREV_BLOCK_BAUS(
- BAUS_FORWARD(head_ptr, head_ptr->block_size), rem_baus)
+ /* Change the next block's header to reflect the fact that the
+ ** block preceding it is now smaller.
+ */
+ SET_PREV_BLOCK_BAUS(
+ BAUS_FORWARD(head_ptr, head_ptr->block_size), rem_baus)
- head_ptr->block_size = n_baus;
+ head_ptr->block_size = n_baus;
- rem_head_ptr->previous_block_size = n_baus;
- rem_head_ptr->block_size = rem_baus;
+ rem_head_ptr->previous_block_size = n_baus;
+ rem_head_ptr->block_size = rem_baus;
- desc->last_freed = rem_head_ptr;
- }
+ desc->last_freed = rem_head_ptr;
+ }
- return(HEAD_TO_PTR_REC(head_ptr));
+ return(HEAD_TO_PTR_REC(head_ptr));
}
/* Take a block out of the free collection.
*/
void U(out_of_free_collection)(
- /* Descriptor of heap that block is in. */
- U(descriptor) *desc,
- /* Pointer to head of block to take out of free collection. */
- head_record *head_ptr)
-{
- ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
-
- if (ptr_rec_ptr->self == ptr_rec_ptr)
- /* Block is not the front block in its bin, so all we have to
- ** do is take it out of the bin's doubly-linked list. */
- U(dll_remove)(ptr_rec_ptr);
+ /* Descriptor of heap that block is in. */
+ U(descriptor) *desc,
+ /* Pointer to head of block to take out of free collection. */
+ head_record *head_ptr) {
+ ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
+
+ if (ptr_rec_ptr->self == ptr_rec_ptr)
+ /* Block is not the front block in its bin, so all we have to
+ ** do is take it out of the bin's doubly-linked list. */
+ U(dll_remove)(ptr_rec_ptr);
+ else {
+ ptr_record *next = ptr_rec_ptr->next;
+
+ if (next)
+ /* Block is the front block in its bin, and there is at least
+ ** one other block in the bin. Substitute the next block for
+ ** the front block. */
+ U(avl_subst)((U(avl_avl) *) & (desc->avl_tree_root), next);
else
- {
- ptr_record *next = ptr_rec_ptr->next;
-
- if (next)
- /* Block is the front block in its bin, and there is at least
- ** one other block in the bin. Substitute the next block for
- ** the front block. */
- U(avl_subst)((U(avl_avl) *) &(desc->avl_tree_root), next);
- else
- /* Block is the front block in its bin, but there is no other
- ** block in the bin. Eliminate the bin. */
- U(avl_remove)(
- (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr));
- }
+ /* Block is the front block in its bin, but there is no other
+ ** block in the bin. Eliminate the bin. */
+ U(avl_remove)(
+ (U(avl_avl) *) & (desc->avl_tree_root), BLOCK_BAUS(head_ptr));
+ }
}
-void U(free)(U(descriptor) *desc, void *payload_ptr)
-{
- /* Flags if coalesce with adjacent block. */
- int coalesce;
+void U(free)(U(descriptor) *desc, void *payload_ptr) {
+ /* Nonzero if we coalesce with an adjacent block. */
+ int coalesce;
- head_record *fwd_head_ptr;
- head_record *free_head_ptr = PTR_REC_TO_HEAD(payload_ptr);
+ head_record *fwd_head_ptr;
+ head_record *free_head_ptr = PTR_REC_TO_HEAD(payload_ptr);
- desc->num_baus_can_shrink = 0;
+ desc->num_baus_can_shrink = 0;
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(free_head_ptr)
+ AUDIT_BLOCK(free_head_ptr)
- /* Make sure not freeing an already free block. */
- if (!IS_BLOCK_ALLOCATED(free_head_ptr))
- HMM_AUDIT_FAIL
+ /* Make sure not freeing an already free block. */
+ if (!IS_BLOCK_ALLOCATED(free_head_ptr))
+ HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- /* Audit root block in AVL tree. */
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ /* Audit root block in AVL tree. */
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- fwd_head_ptr =
- (head_record *) BAUS_FORWARD(free_head_ptr, free_head_ptr->block_size);
+ fwd_head_ptr =
+ (head_record *) BAUS_FORWARD(free_head_ptr, free_head_ptr->block_size);
- if (free_head_ptr->previous_block_size)
- {
- /* Coalesce with backward block if possible. */
+ if (free_head_ptr->previous_block_size) {
+ /* Coalesce with backward block if possible. */
- head_record *bkwd_head_ptr =
- (head_record *) BAUS_BACKWARD(
- free_head_ptr, free_head_ptr->previous_block_size);
+ head_record *bkwd_head_ptr =
+ (head_record *) BAUS_BACKWARD(
+ free_head_ptr, free_head_ptr->previous_block_size);
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(bkwd_head_ptr)
+ AUDIT_BLOCK(bkwd_head_ptr)
#endif
- if (bkwd_head_ptr == (head_record *)(desc->last_freed))
- {
- desc->last_freed = 0;
- coalesce = 1;
- }
- else if (IS_BLOCK_ALLOCATED(bkwd_head_ptr))
- coalesce = 0;
- else
- {
- U(out_of_free_collection)(desc, bkwd_head_ptr);
- coalesce = 1;
- }
-
- if (coalesce)
- {
- bkwd_head_ptr->block_size += free_head_ptr->block_size;
- SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(bkwd_head_ptr))
- free_head_ptr = bkwd_head_ptr;
- }
+ if (bkwd_head_ptr == (head_record *)(desc->last_freed)) {
+ desc->last_freed = 0;
+ coalesce = 1;
+ } else if (IS_BLOCK_ALLOCATED(bkwd_head_ptr))
+ coalesce = 0;
+ else {
+ U(out_of_free_collection)(desc, bkwd_head_ptr);
+ coalesce = 1;
}
- if (fwd_head_ptr->block_size == 0)
- {
- /* Block to be freed is last block before dummy end-of-chunk block. */
- desc->end_of_shrinkable_chunk =
- BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
- desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
-
- if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
- /* Free block is the entire chunk, so shrinking can eliminate
- ** entire chunk including dummy end block. */
- desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
+ if (coalesce) {
+ bkwd_head_ptr->block_size += free_head_ptr->block_size;
+ SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(bkwd_head_ptr))
+ free_head_ptr = bkwd_head_ptr;
}
- else
- {
- /* Coalesce with forward block if possible. */
+ }
+
+ if (fwd_head_ptr->block_size == 0) {
+ /* Block to be freed is last block before dummy end-of-chunk block. */
+ desc->end_of_shrinkable_chunk =
+ BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
+ desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
+
+ if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
+ /* Free block is the entire chunk, so shrinking can eliminate
+ ** entire chunk including dummy end block. */
+ desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
+ } else {
+ /* Coalesce with forward block if possible. */
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(fwd_head_ptr)
+ AUDIT_BLOCK(fwd_head_ptr)
#endif
- if (fwd_head_ptr == (head_record *)(desc->last_freed))
- {
- desc->last_freed = 0;
- coalesce = 1;
- }
- else if (IS_BLOCK_ALLOCATED(fwd_head_ptr))
- coalesce = 0;
- else
- {
- U(out_of_free_collection)(desc, fwd_head_ptr);
- coalesce = 1;
- }
-
- if (coalesce)
- {
- free_head_ptr->block_size += fwd_head_ptr->block_size;
-
- fwd_head_ptr =
- (head_record *) BAUS_FORWARD(
- fwd_head_ptr, BLOCK_BAUS(fwd_head_ptr));
-
- SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(free_head_ptr))
-
- if (fwd_head_ptr->block_size == 0)
- {
- /* Coalesced block to be freed is last block before dummy
- ** end-of-chunk block. */
- desc->end_of_shrinkable_chunk =
- BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
- desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
-
- if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
- /* Free block is the entire chunk, so shrinking can
- ** eliminate entire chunk including dummy end block. */
- desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
- }
- }
+ if (fwd_head_ptr == (head_record *)(desc->last_freed)) {
+ desc->last_freed = 0;
+ coalesce = 1;
+ } else if (IS_BLOCK_ALLOCATED(fwd_head_ptr))
+ coalesce = 0;
+ else {
+ U(out_of_free_collection)(desc, fwd_head_ptr);
+ coalesce = 1;
+ }
+
+ if (coalesce) {
+ free_head_ptr->block_size += fwd_head_ptr->block_size;
+
+ fwd_head_ptr =
+ (head_record *) BAUS_FORWARD(
+ fwd_head_ptr, BLOCK_BAUS(fwd_head_ptr));
+
+ SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(free_head_ptr))
+
+ if (fwd_head_ptr->block_size == 0) {
+ /* Coalesced block to be freed is last block before dummy
+ ** end-of-chunk block. */
+ desc->end_of_shrinkable_chunk =
+ BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
+ desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
+
+ if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
+ /* Free block is the entire chunk, so shrinking can
+ ** eliminate entire chunk including dummy end block. */
+ desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
+ }
}
+ }
- if (desc->last_freed)
- {
- /* There is a last freed block, but it is not adjacent to the
- ** block being freed by this call to free, so put the last
- ** freed block into the free collection.
- */
+ if (desc->last_freed) {
+ /* There is a last freed block, but it is not adjacent to the
+ ** block being freed by this call to free, so put the last
+ ** freed block into the free collection.
+ */
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(desc->last_freed)
+ AUDIT_BLOCK(desc->last_freed)
#endif
- U(into_free_collection)(desc, (head_record *)(desc->last_freed));
- }
+ U(into_free_collection)(desc, (head_record *)(desc->last_freed));
+ }
- desc->last_freed = free_head_ptr;
+ desc->last_freed = free_head_ptr;
}
-void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus)
-{
+void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus) {
#ifdef HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- /* Audit root block in AVL tree. */
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ /* Audit root block in AVL tree. */
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
#undef HEAD_PTR
#define HEAD_PTR ((head_record *) start)
- /* Make the chunk one big free block followed by a dummy end block.
- */
+ /* Make the chunk one big free block followed by a dummy end block.
+ */
- n_baus -= DUMMY_END_BLOCK_BAUS;
+ n_baus -= DUMMY_END_BLOCK_BAUS;
- HEAD_PTR->previous_block_size = 0;
- HEAD_PTR->block_size = n_baus;
+ HEAD_PTR->previous_block_size = 0;
+ HEAD_PTR->block_size = n_baus;
- U(into_free_collection)(desc, HEAD_PTR);
+ U(into_free_collection)(desc, HEAD_PTR);
- /* Set up the dummy end block. */
- start = BAUS_FORWARD(start, n_baus);
- HEAD_PTR->previous_block_size = n_baus;
- HEAD_PTR->block_size = 0;
+ /* Set up the dummy end block. */
+ start = BAUS_FORWARD(start, n_baus);
+ HEAD_PTR->previous_block_size = n_baus;
+ HEAD_PTR->block_size = 0;
#undef HEAD_PTR
}
@@ -345,12 +317,11 @@ void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus)
/* Function that does audit fail actions defined by preprocessor symbol,
** and returns a dummy integer value.
*/
-int U(audit_block_fail_dummy_return)(void)
-{
- HMM_AUDIT_FAIL
+int U(audit_block_fail_dummy_return)(void) {
+ HMM_AUDIT_FAIL
- /* Dummy return. */
- return(0);
+ /* Dummy return. */
+ return(0);
}
#endif
@@ -372,9 +343,9 @@ int U(audit_block_fail_dummy_return)(void)
*/
#define AVL_GET_LESS(H, ACCESS) \
- (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->self)
+ (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->self)
#define AVL_GET_GREATER(H, ACCESS) \
- (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->prev)
+ (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->prev)
#else
@@ -396,39 +367,39 @@ int U(audit_block_fail_dummy_return)(void)
*/
#define AVL_GET_BALANCE_FACTOR(H) \
- ((((head_record *) (PTR_REC_TO_HEAD(H)))->block_size & \
- HIGH_BIT_BAU_SIZE) ? \
- (((head_record *) (PTR_REC_TO_HEAD(H)))->previous_block_size & \
- HIGH_BIT_BAU_SIZE ? 0 : -1) : 1)
+ ((((head_record *) (PTR_REC_TO_HEAD(H)))->block_size & \
+ HIGH_BIT_BAU_SIZE) ? \
+ (((head_record *) (PTR_REC_TO_HEAD(H)))->previous_block_size & \
+ HIGH_BIT_BAU_SIZE ? 0 : -1) : 1)
#define AVL_SET_BALANCE_FACTOR(H, BF) \
- { \
- register head_record *p = \
- (head_record *) PTR_REC_TO_HEAD(H); \
- register int bal_f = (BF); \
- \
- if (bal_f <= 0) \
- p->block_size |= HIGH_BIT_BAU_SIZE; \
- else \
- p->block_size &= ~HIGH_BIT_BAU_SIZE; \
- if (bal_f >= 0) \
- p->previous_block_size |= HIGH_BIT_BAU_SIZE; \
- else \
- p->previous_block_size &= ~HIGH_BIT_BAU_SIZE; \
- }
+ { \
+ register head_record *p = \
+ (head_record *) PTR_REC_TO_HEAD(H); \
+ register int bal_f = (BF); \
+ \
+ if (bal_f <= 0) \
+ p->block_size |= HIGH_BIT_BAU_SIZE; \
+ else \
+ p->block_size &= ~HIGH_BIT_BAU_SIZE; \
+ if (bal_f >= 0) \
+ p->previous_block_size |= HIGH_BIT_BAU_SIZE; \
+ else \
+ p->previous_block_size &= ~HIGH_BIT_BAU_SIZE; \
+ }
#define COMPARE_KEY_KEY(K1, K2) ((K1) == (K2) ? 0 : ((K1) > (K2) ? 1 : -1))
#define AVL_COMPARE_KEY_NODE(K, H) \
- COMPARE_KEY_KEY(K, BLOCK_BAUS(PTR_REC_TO_HEAD(H)))
+ COMPARE_KEY_KEY(K, BLOCK_BAUS(PTR_REC_TO_HEAD(H)))
#define AVL_COMPARE_NODE_NODE(H1, H2) \
- COMPARE_KEY_KEY(BLOCK_BAUS(PTR_REC_TO_HEAD(H1)), \
- BLOCK_BAUS(PTR_REC_TO_HEAD(H2)))
+ COMPARE_KEY_KEY(BLOCK_BAUS(PTR_REC_TO_HEAD(H1)), \
+ BLOCK_BAUS(PTR_REC_TO_HEAD(H2)))
#define AVL_NULL ((ptr_record *) 0)
#define AVL_IMPL_MASK \
- ( AVL_IMPL_INSERT | AVL_IMPL_SEARCH | AVL_IMPL_REMOVE | AVL_IMPL_SUBST )
+ ( AVL_IMPL_INSERT | AVL_IMPL_SEARCH | AVL_IMPL_REMOVE | AVL_IMPL_SUBST )
#include "cavl_impl.h"
diff --git a/libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c b/libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c
index d92435c..51c3cc2 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c
@@ -29,26 +29,25 @@ static int entered = 0;
/* Print abort message, file and line. Terminate execution.
*/
-void hmm_dflt_abort(const char *file, const char *line)
-{
- /* Avoid use of printf(), which is more likely to use heap. */
+void hmm_dflt_abort(const char *file, const char *line) {
+ /* Avoid use of printf(), which is more likely to use heap. */
- if (entered)
+ if (entered)
- /* The standard I/O functions called a heap function and caused
- ** an indirect recursive call to this function. So we'll have
- ** to just exit without printing a message. */
- while (1);
+ /* The standard I/O functions called a heap function and caused
+ ** an indirect recursive call to this function. So we'll have
+ ** to just exit without printing a message. */
+ while (1);
- entered = 1;
+ entered = 1;
- fputs("\n_abort - Heap corruption\n" "File: ", stderr);
- fputs(file, stderr);
- fputs(" Line: ", stderr);
- fputs(line, stderr);
- fputs("\n\n", stderr);
- fputs("hmm_dflt_abort: while(1)!!!\n", stderr);
- fflush(stderr);
+ fputs("\n_abort - Heap corruption\n" "File: ", stderr);
+ fputs(file, stderr);
+ fputs(" Line: ", stderr);
+ fputs(line, stderr);
+ fputs("\n\n", stderr);
+ fputs("hmm_dflt_abort: while(1)!!!\n", stderr);
+ fflush(stderr);
- while (1);
+ while (1);
}
diff --git a/libvpx/vpx_mem/memory_manager/hmm_grow.c b/libvpx/vpx_mem/memory_manager/hmm_grow.c
index 9a4b6e4..0e86373 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_grow.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_grow.c
@@ -15,36 +15,35 @@
#include "hmm_intrnl.h"
-void U(grow_chunk)(U(descriptor) *desc, void *end, U(size_bau) n_baus)
-{
+void U(grow_chunk)(U(descriptor) *desc, void *end, U(size_bau) n_baus) {
#undef HEAD_PTR
#define HEAD_PTR ((head_record *) end)
- end = BAUS_BACKWARD(end, DUMMY_END_BLOCK_BAUS);
+ end = BAUS_BACKWARD(end, DUMMY_END_BLOCK_BAUS);
#ifdef HMM_AUDIT_FAIL
- if (HEAD_PTR->block_size != 0)
- /* Chunk does not have valid dummy end block. */
- HMM_AUDIT_FAIL
+ if (HEAD_PTR->block_size != 0)
+ /* Chunk does not have valid dummy end block. */
+ HMM_AUDIT_FAIL
#endif
- /* Create a new block that absorbs the old dummy end block. */
- HEAD_PTR->block_size = n_baus;
-
- /* Set up the new dummy end block. */
- {
- head_record *dummy = (head_record *) BAUS_FORWARD(end, n_baus);
- dummy->previous_block_size = n_baus;
- dummy->block_size = 0;
- }
-
- /* Simply free the new block, allowing it to coalesce with any
- ** free block at that was the last block in the chunk prior to
- ** growth.
- */
- U(free)(desc, HEAD_TO_PTR_REC(end));
+ /* Create a new block that absorbs the old dummy end block. */
+ HEAD_PTR->block_size = n_baus;
+
+ /* Set up the new dummy end block. */
+ {
+ head_record *dummy = (head_record *) BAUS_FORWARD(end, n_baus);
+ dummy->previous_block_size = n_baus;
+ dummy->block_size = 0;
+ }
+
+ /* Simply free the new block, allowing it to coalesce with any
+ ** free block that was the last block in the chunk prior to
+ ** growth.
+ */
+ U(free)(desc, HEAD_TO_PTR_REC(end));
#undef HEAD_PTR
}
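
Editor's note: both U(new_chunk) and U(grow_chunk) maintain the same invariant: a chunk always ends in a dummy head_record whose block_size is zero and whose previous_block_size names the block in front of it. A simplified sketch of that layout (one array element stands in for one BAU here, which is not how BAUS_FORWARD really measures distance):

#include <assert.h>
#include <stdint.h>

struct head { uint16_t previous_block_size, block_size; };

int main(void) {
  struct head chunk[8] = { { 0, 0 } };  /* pretend this is a raw chunk */
  unsigned n_baus = 7;                  /* usable BAUs ahead of the dummy */

  /* As in U(new_chunk): one big free block, then the dummy end block. */
  chunk[0].previous_block_size = 0;
  chunk[0].block_size = (uint16_t) n_baus;
  chunk[n_baus].previous_block_size = (uint16_t) n_baus;
  chunk[n_baus].block_size = 0;         /* block_size == 0 marks the end */

  /* Walking forward by block_size from the first block lands on the dummy. */
  assert(chunk[chunk[0].block_size].block_size == 0);
  return 0;
}
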
diff --git a/libvpx/vpx_mem/memory_manager/hmm_largest.c b/libvpx/vpx_mem/memory_manager/hmm_largest.c
index c3c6f2c..192758d 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_largest.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_largest.c
@@ -15,46 +15,43 @@
#include "hmm_intrnl.h"
-U(size_aau) U(largest_available)(U(descriptor) *desc)
-{
- U(size_bau) largest;
-
- if (!(desc->avl_tree_root))
- largest = 0;
- else
- {
+U(size_aau) U(largest_available)(U(descriptor) *desc) {
+ U(size_bau) largest;
+
+ if (!(desc->avl_tree_root))
+ largest = 0;
+ else {
#ifdef HMM_AUDIT_FAIL
- /* Audit root block in AVL tree. */
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ /* Audit root block in AVL tree. */
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- largest =
- BLOCK_BAUS(
- PTR_REC_TO_HEAD(
- U(avl_search)(
- (U(avl_avl) *) & (desc->avl_tree_root),
- (U(size_bau)) ~(U(size_bau)) 0, AVL_LESS)));
- }
+ largest =
+ BLOCK_BAUS(
+ PTR_REC_TO_HEAD(
+ U(avl_search)(
+ (U(avl_avl) *) & (desc->avl_tree_root),
+ (U(size_bau)) ~(U(size_bau)) 0, AVL_LESS)));
+ }
- if (desc->last_freed)
- {
- /* Size of last freed block. */
- register U(size_bau) lf_size;
+ if (desc->last_freed) {
+ /* Size of last freed block. */
+ register U(size_bau) lf_size;
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(desc->last_freed)
+ AUDIT_BLOCK(desc->last_freed)
#endif
- lf_size = BLOCK_BAUS(desc->last_freed);
+ lf_size = BLOCK_BAUS(desc->last_freed);
- if (lf_size > largest)
- largest = lf_size;
- }
+ if (lf_size > largest)
+ largest = lf_size;
+ }
- /* Convert largest size to AAUs and subract head size leaving payload
- ** size.
- */
- return(largest ?
- ((largest * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) - HEAD_AAUS) :
- 0);
+ /* Convert largest size to AAUs and subtract head size leaving payload
+ ** size.
+ */
+ return(largest ?
+ ((largest * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) - HEAD_AAUS) :
+ 0);
}
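
Editor's note: the closing expression converts the largest free block from BAUs back to an AAU payload size, multiplying out the alignment unit and subtracting the header; it is the inverse of the conversion done in U(alloc). A sketch reusing the hypothetical unit sizes from the alloc sketch above:

#include <stdio.h>

#define HEAD_AAUS 1              /* hypothetical, as before */
#define HMM_BLOCK_ALIGN_UNIT 4   /* hypothetical, as before */

int main(void) {
  unsigned largest = 3;          /* largest free block, in BAUs */
  unsigned payload =
      largest ? largest * HMM_BLOCK_ALIGN_UNIT - HEAD_AAUS : 0;
  printf("largest payload: %u AAUs\n", payload);  /* 3*4 - 1 = 11 */
  return 0;
}
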
diff --git a/libvpx/vpx_mem/memory_manager/hmm_resize.c b/libvpx/vpx_mem/memory_manager/hmm_resize.c
index f90da96..baa5a8f 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_resize.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_resize.c
@@ -15,105 +15,100 @@
#include "hmm_intrnl.h"
-int U(resize)(U(descriptor) *desc, void *mem, U(size_aau) n)
-{
- U(size_aau) i;
- head_record *next_head_ptr;
- head_record *head_ptr = PTR_REC_TO_HEAD(mem);
+int U(resize)(U(descriptor) *desc, void *mem, U(size_aau) n) {
+ U(size_aau) i;
+ head_record *next_head_ptr;
+ head_record *head_ptr = PTR_REC_TO_HEAD(mem);
- /* Flag. */
- int next_block_free;
+ /* Flag. */
+ int next_block_free;
- /* Convert n from desired block size in AAUs to BAUs. */
- n += HEAD_AAUS;
- n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
+ /* Convert n from desired block size in AAUs to BAUs. */
+ n += HEAD_AAUS;
+ n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
- if (n < MIN_BLOCK_BAUS)
- n = MIN_BLOCK_BAUS;
+ if (n < MIN_BLOCK_BAUS)
+ n = MIN_BLOCK_BAUS;
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(head_ptr)
+ AUDIT_BLOCK(head_ptr)
- if (!IS_BLOCK_ALLOCATED(head_ptr))
- HMM_AUDIT_FAIL
+ if (!IS_BLOCK_ALLOCATED(head_ptr))
+ HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- i = head_ptr->block_size;
+ i = head_ptr->block_size;
- next_head_ptr =
- (head_record *) BAUS_FORWARD(head_ptr, head_ptr->block_size);
+ next_head_ptr =
+ (head_record *) BAUS_FORWARD(head_ptr, head_ptr->block_size);
- next_block_free =
- (next_head_ptr == desc->last_freed) ||
- !IS_BLOCK_ALLOCATED(next_head_ptr);
+ next_block_free =
+ (next_head_ptr == desc->last_freed) ||
+ !IS_BLOCK_ALLOCATED(next_head_ptr);
- if (next_block_free)
- /* Block can expand into next free block. */
- i += BLOCK_BAUS(next_head_ptr);
+ if (next_block_free)
+ /* Block can expand into next free block. */
+ i += BLOCK_BAUS(next_head_ptr);
- if (n > i)
- /* Not enough room for block to expand. */
- return(-1);
+ if (n > i)
+ /* Not enough room for block to expand. */
+ return(-1);
- if (next_block_free)
- {
+ if (next_block_free) {
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(next_head_ptr)
+ AUDIT_BLOCK(next_head_ptr)
#endif
- if (next_head_ptr == desc->last_freed)
- desc->last_freed = 0;
- else
- U(out_of_free_collection)(desc, next_head_ptr);
+ if (next_head_ptr == desc->last_freed)
+ desc->last_freed = 0;
+ else
+ U(out_of_free_collection)(desc, next_head_ptr);
- next_head_ptr =
- (head_record *) BAUS_FORWARD(head_ptr, (U(size_bau)) i);
- }
+ next_head_ptr =
+ (head_record *) BAUS_FORWARD(head_ptr, (U(size_bau)) i);
+ }
- /* Set i to number of "extra" BAUs. */
- i -= n;
+ /* Set i to number of "extra" BAUs. */
+ i -= n;
- if (i < MIN_BLOCK_BAUS)
- /* Not enough extra BAUs to be a block on their own, so just keep them
- ** in the block being resized.
- */
- {
- n += i;
- i = n;
- }
- else
- {
- /* There are enough "leftover" BAUs in the next block to
- ** form a remainder block. */
+ if (i < MIN_BLOCK_BAUS)
+ /* Not enough extra BAUs to be a block on their own, so just keep them
+ ** in the block being resized.
+ */
+ {
+ n += i;
+ i = n;
+ } else {
+ /* There are enough "leftover" BAUs in the next block to
+ ** form a remainder block. */
- head_record *rem_head_ptr;
+ head_record *rem_head_ptr;
- rem_head_ptr = (head_record *) BAUS_FORWARD(head_ptr, n);
+ rem_head_ptr = (head_record *) BAUS_FORWARD(head_ptr, n);
- rem_head_ptr->previous_block_size = (U(size_bau)) n;
- rem_head_ptr->block_size = (U(size_bau)) i;
+ rem_head_ptr->previous_block_size = (U(size_bau)) n;
+ rem_head_ptr->block_size = (U(size_bau)) i;
- if (desc->last_freed)
- {
+ if (desc->last_freed) {
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(desc->last_freed)
+ AUDIT_BLOCK(desc->last_freed)
#endif
- U(into_free_collection)(desc, (head_record *)(desc->last_freed));
+ U(into_free_collection)(desc, (head_record *)(desc->last_freed));
- desc->last_freed = 0;
- }
-
- desc->last_freed = rem_head_ptr;
+ desc->last_freed = 0;
}
- head_ptr->block_size = (U(size_bau)) n;
- next_head_ptr->previous_block_size = (U(size_bau)) i;
+ desc->last_freed = rem_head_ptr;
+ }
+
+ head_ptr->block_size = (U(size_bau)) n;
+ next_head_ptr->previous_block_size = (U(size_bau)) i;
- return(0);
+ return(0);
}
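
Editor's note: when U(resize) grows a block into the following free block, leftover BAUs are split off as a new free block only if they could form a minimal block on their own; otherwise the resized block silently keeps the slack. A sketch of that rule (MIN_BLOCK_BAUS is hypothetical, and the caller is assumed to have already rejected the case have < need, as the function does by returning -1):

#include <assert.h>

#define MIN_BLOCK_BAUS 2  /* hypothetical minimum block size */

/* Returns the size (in BAUs) the resized block keeps; stores the size of
** the split-off remainder block (0 if none) through rem. */
static unsigned split_baus(unsigned have, unsigned need, unsigned *rem) {
  unsigned extra = have - need;
  if (extra < MIN_BLOCK_BAUS) {
    *rem = 0;
    return have;  /* too little slack for a block of its own: keep it */
  }
  *rem = extra;   /* enough slack to form a standalone free block */
  return need;
}

int main(void) {
  unsigned rem;
  assert(split_baus(10, 9, &rem) == 10 && rem == 0);  /* 1 extra: keep it */
  assert(split_baus(10, 6, &rem) == 6 && rem == 4);   /* 4 extra: split */
  return 0;
}
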
diff --git a/libvpx/vpx_mem/memory_manager/hmm_shrink.c b/libvpx/vpx_mem/memory_manager/hmm_shrink.c
index 78fe268..f80aeea 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_shrink.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_shrink.c
@@ -15,97 +15,89 @@
#include "hmm_intrnl.h"
-void U(shrink_chunk)(U(descriptor) *desc, U(size_bau) n_baus_to_shrink)
-{
- head_record *dummy_end_block = (head_record *)
- BAUS_BACKWARD(desc->end_of_shrinkable_chunk, DUMMY_END_BLOCK_BAUS);
+void U(shrink_chunk)(U(descriptor) *desc, U(size_bau) n_baus_to_shrink) {
+ head_record *dummy_end_block = (head_record *)
+ BAUS_BACKWARD(desc->end_of_shrinkable_chunk, DUMMY_END_BLOCK_BAUS);
#ifdef HMM_AUDIT_FAIL
- if (dummy_end_block->block_size != 0)
- /* Chunk does not have valid dummy end block. */
- HMM_AUDIT_FAIL
+ if (dummy_end_block->block_size != 0)
+ /* Chunk does not have valid dummy end block. */
+ HMM_AUDIT_FAIL
#endif
- if (n_baus_to_shrink)
- {
- head_record *last_block = (head_record *)
- BAUS_BACKWARD(
- dummy_end_block, dummy_end_block->previous_block_size);
+ if (n_baus_to_shrink) {
+ head_record *last_block = (head_record *)
+ BAUS_BACKWARD(
+ dummy_end_block, dummy_end_block->previous_block_size);
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(last_block)
+ AUDIT_BLOCK(last_block)
#endif
- if (last_block == desc->last_freed)
- {
- U(size_bau) bs = BLOCK_BAUS(last_block);
-
- /* Chunk will not be shrunk out of existence if
- ** 1. There is at least one allocated block in the chunk
- ** and the amount to shrink is exactly the size of the
- ** last block, OR
- ** 2. After the last block is shrunk, there will be enough
- ** BAUs left in it to form a minimal size block. */
- int chunk_will_survive =
- (PREV_BLOCK_BAUS(last_block) && (n_baus_to_shrink == bs)) ||
- (n_baus_to_shrink <= (U(size_bau))(bs - MIN_BLOCK_BAUS));
-
- if (chunk_will_survive ||
- (!PREV_BLOCK_BAUS(last_block) &&
- (n_baus_to_shrink ==
- (U(size_bau))(bs + DUMMY_END_BLOCK_BAUS))))
- {
- desc->last_freed = 0;
-
- if (chunk_will_survive)
- {
- bs -= n_baus_to_shrink;
-
- if (bs)
- {
- /* The last (non-dummy) block was not completely
- ** eliminated by the shrink. */
-
- last_block->block_size = bs;
-
- /* Create new dummy end record.
- */
- dummy_end_block =
- (head_record *) BAUS_FORWARD(last_block, bs);
- dummy_end_block->previous_block_size = bs;
- dummy_end_block->block_size = 0;
+ if (last_block == desc->last_freed) {
+ U(size_bau) bs = BLOCK_BAUS(last_block);
+
+ /* Chunk will not be shrunk out of existence if
+ ** 1. There is at least one allocated block in the chunk
+ ** and the amount to shrink is exactly the size of the
+ ** last block, OR
+ ** 2. After the last block is shrunk, there will be enough
+ ** BAUs left in it to form a minimal size block. */
+ int chunk_will_survive =
+ (PREV_BLOCK_BAUS(last_block) && (n_baus_to_shrink == bs)) ||
+ (n_baus_to_shrink <= (U(size_bau))(bs - MIN_BLOCK_BAUS));
+
+ if (chunk_will_survive ||
+ (!PREV_BLOCK_BAUS(last_block) &&
+ (n_baus_to_shrink ==
+ (U(size_bau))(bs + DUMMY_END_BLOCK_BAUS)))) {
+ desc->last_freed = 0;
+
+ if (chunk_will_survive) {
+ bs -= n_baus_to_shrink;
+
+ if (bs) {
+ /* The last (non-dummy) block was not completely
+ ** eliminated by the shrink. */
+
+ last_block->block_size = bs;
+
+ /* Create new dummy end record.
+ */
+ dummy_end_block =
+ (head_record *) BAUS_FORWARD(last_block, bs);
+ dummy_end_block->previous_block_size = bs;
+ dummy_end_block->block_size = 0;
#ifdef HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- U(into_free_collection)(desc, last_block);
- }
- else
- {
- /* The last (non-dummy) block was completely
- ** eliminated by the shrink. Make its head
- ** the new dummy end block.
- */
- last_block->block_size = 0;
- last_block->previous_block_size &= ~HIGH_BIT_BAU_SIZE;
- }
- }
- }
+ U(into_free_collection)(desc, last_block);
+ } else {
+ /* The last (non-dummy) block was completely
+ ** eliminated by the shrink. Make its head
+ ** the new dummy end block.
+ */
+ last_block->block_size = 0;
+ last_block->previous_block_size &= ~HIGH_BIT_BAU_SIZE;
+ }
+ }
+ }
#ifdef HMM_AUDIT_FAIL
- else
- HMM_AUDIT_FAIL
+ else
+ HMM_AUDIT_FAIL
#endif
- }
+ }
#ifdef HMM_AUDIT_FAIL
- else
- HMM_AUDIT_FAIL
+ else
+ HMM_AUDIT_FAIL
#endif
- }
+ }
}
diff --git a/libvpx/vpx_mem/memory_manager/hmm_true.c b/libvpx/vpx_mem/memory_manager/hmm_true.c
index 3f7be8f..4428c3e 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_true.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_true.c
@@ -15,18 +15,17 @@
#include "hmm_intrnl.h"
-U(size_aau) U(true_size)(void *payload_ptr)
-{
- register head_record *head_ptr = PTR_REC_TO_HEAD(payload_ptr);
+U(size_aau) U(true_size)(void *payload_ptr) {
+ register head_record *head_ptr = PTR_REC_TO_HEAD(payload_ptr);
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(head_ptr)
+ AUDIT_BLOCK(head_ptr)
#endif
- /* Convert block size from BAUs to AAUs. Subtract head size, leaving
- ** payload size.
- */
- return(
- (BLOCK_BAUS(head_ptr) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) -
- HEAD_AAUS);
+ /* Convert block size from BAUs to AAUs. Subtract head size, leaving
+ ** payload size.
+ */
+ return(
+ (BLOCK_BAUS(head_ptr) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) -
+ HEAD_AAUS);
}
diff --git a/libvpx/vpx_mem/memory_manager/include/cavl_if.h b/libvpx/vpx_mem/memory_manager/include/cavl_if.h
index 1b2c9b7..ec6e525 100644
--- a/libvpx/vpx_mem/memory_manager/include/cavl_if.h
+++ b/libvpx/vpx_mem/memory_manager/include/cavl_if.h
@@ -32,13 +32,12 @@
#ifndef AVL_SEARCH_TYPE_DEFINED_
#define AVL_SEARCH_TYPE_DEFINED_
-typedef enum
-{
- AVL_EQUAL = 1,
- AVL_LESS = 2,
- AVL_GREATER = 4,
- AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS,
- AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER
+typedef enum {
+ AVL_EQUAL = 1,
+ AVL_LESS = 2,
+ AVL_GREATER = 4,
+ AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS,
+ AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER
}
avl_search_type;
@@ -75,15 +74,14 @@ avl_search_type;
#endif
-typedef struct
-{
+typedef struct {
#ifdef AVL_INSIDE_STRUCT
- AVL_INSIDE_STRUCT
+ AVL_INSIDE_STRUCT
#endif
- AVL_HANDLE root;
+ AVL_HANDLE root;
}
L_(avl);
@@ -108,7 +106,7 @@ L_SC AVL_HANDLE L_(subst)(L_(avl) *tree, AVL_HANDLE new_node);
#ifdef AVL_BUILD_ITER_TYPE
L_SC int L_(build)(
- L_(avl) *tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes);
+ L_(avl) *tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes);
#endif
@@ -153,7 +151,7 @@ L_SC int L_(build)(
/* Maximum depth may be more than number of bits in a long. */
#define L_BIT_ARR_DEFN(NAME) \
- unsigned long NAME[((AVL_MAX_DEPTH) + L_LONG_BIT - 1) / L_LONG_BIT];
+ unsigned long NAME[((AVL_MAX_DEPTH) + L_LONG_BIT - 1) / L_LONG_BIT];
#else
@@ -164,29 +162,28 @@ L_SC int L_(build)(
#endif
/* Iterator structure. */
-typedef struct
-{
- /* Tree being iterated over. */
- L_(avl) *tree_;
-
- /* Records a path into the tree. If bit n is true, indicates
- ** take greater branch from the nth node in the path, otherwise
- ** take the less branch. bit 0 gives branch from root, and
- ** so on. */
- L_BIT_ARR_DEFN(branch)
-
- /* Zero-based depth of path into tree. */
- unsigned depth;
-
- /* Handles of nodes in path from root to current node (returned by *). */
- AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1];
+typedef struct {
+ /* Tree being iterated over. */
+ L_(avl) *tree_;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L_BIT_ARR_DEFN(branch)
+
+ /* Zero-based depth of path into tree. */
+ unsigned depth;
+
+ /* Handles of nodes in path from root to current node (returned by *). */
+ AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1];
}
L_(iter);
/* Iterator function prototypes. */
L_SC void L_(start_iter)(
- L_(avl) *tree, L_(iter) *iter, AVL_KEY k, avl_search_type st);
+ L_(avl) *tree, L_(iter) *iter, AVL_KEY k, avl_search_type st);
L_SC void L_(start_iter_least)(L_(avl) *tree, L_(iter) *iter);
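
Editor's note: the branch bit array declared in the iterator above records the search path: bit n is set when the greater branch was taken at depth n. A standalone sketch of the encoding, mirroring the L_BIT_ARR_* macros defined in cavl_impl.h below (the 32-bit word width is hypothetical; the real code derives L_LONG_BIT from the platform):

#include <assert.h>

#define LONG_BIT_ 32  /* hypothetical word width */
#define BIT_ARR_VAL(a, n) ((a)[(n) / LONG_BIT_] & (1L << ((n) % LONG_BIT_)))
#define BIT_ARR_0(a, n) (a)[(n) / LONG_BIT_] &= ~(1L << ((n) % LONG_BIT_));
#define BIT_ARR_1(a, n) (a)[(n) / LONG_BIT_] |= 1L << ((n) % LONG_BIT_);

int main(void) {
  unsigned long branch[2] = { 0, 0 };  /* room for a depth-64 path */
  BIT_ARR_1(branch, 0)   /* root: took the greater branch */
  BIT_ARR_0(branch, 1)   /* depth 1: took the less branch */
  BIT_ARR_1(branch, 35)  /* deep paths spill into the second word */
  assert(BIT_ARR_VAL(branch, 0));
  assert(!BIT_ARR_VAL(branch, 1));
  assert(BIT_ARR_VAL(branch, 35));
  return 0;
}
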
diff --git a/libvpx/vpx_mem/memory_manager/include/cavl_impl.h b/libvpx/vpx_mem/memory_manager/include/cavl_impl.h
index 5e165dd..cf7deb7 100644
--- a/libvpx/vpx_mem/memory_manager/include/cavl_impl.h
+++ b/libvpx/vpx_mem/memory_manager/include/cavl_impl.h
@@ -110,16 +110,16 @@
#define L_BIT_ARR_DEFN(NAME) unsigned long NAME[L_BIT_ARR_LONGS];
#define L_BIT_ARR_VAL(BIT_ARR, BIT_NUM) \
- ((BIT_ARR)[(BIT_NUM) / L_LONG_BIT] & (1L << ((BIT_NUM) % L_LONG_BIT)))
+ ((BIT_ARR)[(BIT_NUM) / L_LONG_BIT] & (1L << ((BIT_NUM) % L_LONG_BIT)))
#define L_BIT_ARR_0(BIT_ARR, BIT_NUM) \
- (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] &= ~(1L << ((BIT_NUM) % L_LONG_BIT));
+ (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] &= ~(1L << ((BIT_NUM) % L_LONG_BIT));
#define L_BIT_ARR_1(BIT_ARR, BIT_NUM) \
- (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] |= 1L << ((BIT_NUM) % L_LONG_BIT);
+ (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] |= 1L << ((BIT_NUM) % L_LONG_BIT);
#define L_BIT_ARR_ALL(BIT_ARR, BIT_VAL) \
- { int i = L_BIT_ARR_LONGS; do (BIT_ARR)[--i] = 0L - (BIT_VAL); while(i); }
+ { int i = L_BIT_ARR_LONGS; do (BIT_ARR)[--i] = 0L - (BIT_VAL); while(i); }
#else /* The bit array can definitely fit in one long */
@@ -138,7 +138,7 @@
#ifdef AVL_READ_ERRORS_HAPPEN
#define L_CHECK_READ_ERROR(ERROR_RETURN) \
- { if (AVL_READ_ERROR) return(ERROR_RETURN); }
+ { if (AVL_READ_ERROR) return(ERROR_RETURN); }
#else
@@ -179,18 +179,16 @@
#if (L_IMPL_MASK & AVL_IMPL_INIT)
-L_SC void L_(init)(L_(avl) *l_tree)
-{
- l_tree->root = AVL_NULL;
+L_SC void L_(init)(L_(avl) *l_tree) {
+ l_tree->root = AVL_NULL;
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_IS_EMPTY)
-L_SC int L_(is_empty)(L_(avl) *l_tree)
-{
- return(l_tree->root == AVL_NULL);
+L_SC int L_(is_empty)(L_(avl) *l_tree) {
+ return(l_tree->root == AVL_NULL);
}
#endif
@@ -201,358 +199,305 @@ L_SC int L_(is_empty)(L_(avl) *l_tree)
/* Balances subtree, returns handle of root node of subtree after balancing.
*/
-L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h)
-{
- AVL_HANDLE deep_h;
+L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h) {
+ AVL_HANDLE deep_h;
- /* Either the "greater than" or the "less than" subtree of
- ** this node has to be 2 levels deeper (or else it wouldn't
- ** need balancing).
- */
- if (AVL_GET_BALANCE_FACTOR(bal_h) > 0)
- {
- /* "Greater than" subtree is deeper. */
+ /* Either the "greater than" or the "less than" subtree of
+ ** this node has to be 2 levels deeper (or else it wouldn't
+ ** need balancing).
+ */
+ if (AVL_GET_BALANCE_FACTOR(bal_h) > 0) {
+ /* "Greater than" subtree is deeper. */
- deep_h = AVL_GET_GREATER(bal_h, 1);
+ deep_h = AVL_GET_GREATER(bal_h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
+ L_CHECK_READ_ERROR(AVL_NULL)
- if (AVL_GET_BALANCE_FACTOR(deep_h) < 0)
- {
- int bf;
-
- AVL_HANDLE old_h = bal_h;
- bal_h = AVL_GET_LESS(deep_h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- AVL_SET_GREATER(old_h, AVL_GET_LESS(bal_h, 1))
- AVL_SET_LESS(deep_h, AVL_GET_GREATER(bal_h, 1))
- AVL_SET_LESS(bal_h, old_h)
- AVL_SET_GREATER(bal_h, deep_h)
-
- bf = AVL_GET_BALANCE_FACTOR(bal_h);
-
- if (bf != 0)
- {
- if (bf > 0)
- {
- AVL_SET_BALANCE_FACTOR(old_h, -1)
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(deep_h, 1)
- AVL_SET_BALANCE_FACTOR(old_h, 0)
- }
-
- AVL_SET_BALANCE_FACTOR(bal_h, 0)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(old_h, 0)
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- }
- }
- else
- {
- AVL_SET_GREATER(bal_h, AVL_GET_LESS(deep_h, 0))
- AVL_SET_LESS(deep_h, bal_h)
-
- if (AVL_GET_BALANCE_FACTOR(deep_h) == 0)
- {
- AVL_SET_BALANCE_FACTOR(deep_h, -1)
- AVL_SET_BALANCE_FACTOR(bal_h, 1)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- AVL_SET_BALANCE_FACTOR(bal_h, 0)
- }
-
- bal_h = deep_h;
+ if (AVL_GET_BALANCE_FACTOR(deep_h) < 0) {
+ int bf;
+
+ AVL_HANDLE old_h = bal_h;
+ bal_h = AVL_GET_LESS(deep_h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ AVL_SET_GREATER(old_h, AVL_GET_LESS(bal_h, 1))
+ AVL_SET_LESS(deep_h, AVL_GET_GREATER(bal_h, 1))
+ AVL_SET_LESS(bal_h, old_h)
+ AVL_SET_GREATER(bal_h, deep_h)
+
+ bf = AVL_GET_BALANCE_FACTOR(bal_h);
+
+ if (bf != 0) {
+ if (bf > 0) {
+ AVL_SET_BALANCE_FACTOR(old_h, -1)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ } else {
+ AVL_SET_BALANCE_FACTOR(deep_h, 1)
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
}
+
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ } else {
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ } else {
+ AVL_SET_GREATER(bal_h, AVL_GET_LESS(deep_h, 0))
+ AVL_SET_LESS(deep_h, bal_h)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) == 0) {
+ AVL_SET_BALANCE_FACTOR(deep_h, -1)
+ AVL_SET_BALANCE_FACTOR(bal_h, 1)
+ } else {
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+
+ bal_h = deep_h;
}
- else
- {
- /* "Less than" subtree is deeper. */
+ } else {
+ /* "Less than" subtree is deeper. */
- deep_h = AVL_GET_LESS(bal_h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
+ deep_h = AVL_GET_LESS(bal_h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
- if (AVL_GET_BALANCE_FACTOR(deep_h) > 0)
- {
- int bf;
- AVL_HANDLE old_h = bal_h;
- bal_h = AVL_GET_GREATER(deep_h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- AVL_SET_LESS(old_h, AVL_GET_GREATER(bal_h, 0))
- AVL_SET_GREATER(deep_h, AVL_GET_LESS(bal_h, 0))
- AVL_SET_GREATER(bal_h, old_h)
- AVL_SET_LESS(bal_h, deep_h)
-
- bf = AVL_GET_BALANCE_FACTOR(bal_h);
-
- if (bf != 0)
- {
- if (bf < 0)
- {
- AVL_SET_BALANCE_FACTOR(old_h, 1)
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(deep_h, -1)
- AVL_SET_BALANCE_FACTOR(old_h, 0)
- }
-
- AVL_SET_BALANCE_FACTOR(bal_h, 0)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(old_h, 0)
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- }
- }
- else
- {
- AVL_SET_LESS(bal_h, AVL_GET_GREATER(deep_h, 0))
- AVL_SET_GREATER(deep_h, bal_h)
-
- if (AVL_GET_BALANCE_FACTOR(deep_h) == 0)
- {
- AVL_SET_BALANCE_FACTOR(deep_h, 1)
- AVL_SET_BALANCE_FACTOR(bal_h, -1)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- AVL_SET_BALANCE_FACTOR(bal_h, 0)
- }
-
- bal_h = deep_h;
+ if (AVL_GET_BALANCE_FACTOR(deep_h) > 0) {
+ int bf;
+ AVL_HANDLE old_h = bal_h;
+ bal_h = AVL_GET_GREATER(deep_h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ AVL_SET_LESS(old_h, AVL_GET_GREATER(bal_h, 0))
+ AVL_SET_GREATER(deep_h, AVL_GET_LESS(bal_h, 0))
+ AVL_SET_GREATER(bal_h, old_h)
+ AVL_SET_LESS(bal_h, deep_h)
+
+ bf = AVL_GET_BALANCE_FACTOR(bal_h);
+
+ if (bf != 0) {
+ if (bf < 0) {
+ AVL_SET_BALANCE_FACTOR(old_h, 1)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ } else {
+ AVL_SET_BALANCE_FACTOR(deep_h, -1)
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
}
+
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ } else {
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ } else {
+ AVL_SET_LESS(bal_h, AVL_GET_GREATER(deep_h, 0))
+ AVL_SET_GREATER(deep_h, bal_h)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) == 0) {
+ AVL_SET_BALANCE_FACTOR(deep_h, 1)
+ AVL_SET_BALANCE_FACTOR(bal_h, -1)
+ } else {
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+
+ bal_h = deep_h;
}
+ }
- return(bal_h);
+ return(bal_h);
}
-L_SC AVL_HANDLE L_(insert)(L_(avl) *l_tree, AVL_HANDLE h)
-{
- AVL_SET_LESS(h, AVL_NULL)
- AVL_SET_GREATER(h, AVL_NULL)
- AVL_SET_BALANCE_FACTOR(h, 0)
+L_SC AVL_HANDLE L_(insert)(L_(avl) *l_tree, AVL_HANDLE h) {
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_GREATER(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 0)
- if (l_tree->root == AVL_NULL)
- l_tree->root = h;
- else
- {
- /* Last unbalanced node encountered in search for insertion point. */
- AVL_HANDLE unbal = AVL_NULL;
- /* Parent of last unbalanced node. */
- AVL_HANDLE parent_unbal = AVL_NULL;
- /* Balance factor of last unbalanced node. */
- int unbal_bf;
-
- /* Zero-based depth in tree. */
- unsigned depth = 0, unbal_depth = 0;
-
- /* Records a path into the tree. If bit n is true, indicates
- ** take greater branch from the nth node in the path, otherwise
- ** take the less branch. bit 0 gives branch from root, and
- ** so on. */
- L_BIT_ARR_DEFN(branch)
-
- AVL_HANDLE hh = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
- int cmp;
-
- do
- {
- if (AVL_GET_BALANCE_FACTOR(hh) != 0)
- {
- unbal = hh;
- parent_unbal = parent;
- unbal_depth = depth;
- }
-
- cmp = AVL_COMPARE_NODE_NODE(h, hh);
-
- if (cmp == 0)
- /* Duplicate key. */
- return(hh);
-
- parent = hh;
-
- if (cmp > 0)
- {
- hh = AVL_GET_GREATER(hh, 1);
- L_BIT_ARR_1(branch, depth)
- }
- else
- {
- hh = AVL_GET_LESS(hh, 1);
- L_BIT_ARR_0(branch, depth)
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
- }
- while (hh != AVL_NULL);
+ if (l_tree->root == AVL_NULL)
+ l_tree->root = h;
+ else {
+ /* Last unbalanced node encountered in search for insertion point. */
+ AVL_HANDLE unbal = AVL_NULL;
+ /* Parent of last unbalanced node. */
+ AVL_HANDLE parent_unbal = AVL_NULL;
+ /* Balance factor of last unbalanced node. */
+ int unbal_bf;
- /* Add node to insert as leaf of tree. */
- if (cmp < 0)
- AVL_SET_LESS(parent, h)
- else
- AVL_SET_GREATER(parent, h)
+ /* Zero-based depth in tree. */
+ unsigned depth = 0, unbal_depth = 0;
- depth = unbal_depth;
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L_BIT_ARR_DEFN(branch)
- if (unbal == AVL_NULL)
- hh = l_tree->root;
- else
- {
- cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
- depth++;
- unbal_bf = AVL_GET_BALANCE_FACTOR(unbal);
-
- if (cmp < 0)
- unbal_bf--;
- else /* cmp > 0 */
- unbal_bf++;
-
- hh = cmp < 0 ? AVL_GET_LESS(unbal, 1) : AVL_GET_GREATER(unbal, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
-
- if ((unbal_bf != -2) && (unbal_bf != 2))
- {
- /* No rebalancing of tree is necessary. */
- AVL_SET_BALANCE_FACTOR(unbal, unbal_bf)
- unbal = AVL_NULL;
- }
- }
+ AVL_HANDLE hh = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ int cmp;
- if (hh != AVL_NULL)
- while (h != hh)
- {
- cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
- depth++;
-
- if (cmp < 0)
- {
- AVL_SET_BALANCE_FACTOR(hh, -1)
- hh = AVL_GET_LESS(hh, 1);
- }
- else /* cmp > 0 */
- {
- AVL_SET_BALANCE_FACTOR(hh, 1)
- hh = AVL_GET_GREATER(hh, 1);
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- }
-
- if (unbal != AVL_NULL)
- {
- unbal = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX unbal);
- L_CHECK_READ_ERROR(AVL_NULL)
-
- if (parent_unbal == AVL_NULL)
- l_tree->root = unbal;
- else
- {
- depth = unbal_depth - 1;
- cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
-
- if (cmp < 0)
- AVL_SET_LESS(parent_unbal, unbal)
- else /* cmp > 0 */
- AVL_SET_GREATER(parent_unbal, unbal)
- }
- }
+ do {
+ if (AVL_GET_BALANCE_FACTOR(hh) != 0) {
+ unbal = hh;
+ parent_unbal = parent;
+ unbal_depth = depth;
+ }
- }
+ cmp = AVL_COMPARE_NODE_NODE(h, hh);
- return(h);
-}
+ if (cmp == 0)
+ /* Duplicate key. */
+ return(hh);
-#endif
+ parent = hh;
-#if (L_IMPL_MASK & AVL_IMPL_SEARCH)
+ if (cmp > 0) {
+ hh = AVL_GET_GREATER(hh, 1);
+ L_BIT_ARR_1(branch, depth)
+ } else {
+ hh = AVL_GET_LESS(hh, 1);
+ L_BIT_ARR_0(branch, depth)
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ } while (hh != AVL_NULL);
+
+ /* Add node to insert as leaf of tree. */
+ if (cmp < 0)
+ AVL_SET_LESS(parent, h)
+ else
+ AVL_SET_GREATER(parent, h)
+
+ depth = unbal_depth;
+
+ if (unbal == AVL_NULL)
+ hh = l_tree->root;
+ else {
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ depth++;
+ unbal_bf = AVL_GET_BALANCE_FACTOR(unbal);
+
+ if (cmp < 0)
+ unbal_bf--;
+ else /* cmp > 0 */
+ unbal_bf++;
+
+ hh = cmp < 0 ? AVL_GET_LESS(unbal, 1) : AVL_GET_GREATER(unbal, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+
+ if ((unbal_bf != -2) && (unbal_bf != 2)) {
+ /* No rebalancing of tree is necessary. */
+ AVL_SET_BALANCE_FACTOR(unbal, unbal_bf)
+ unbal = AVL_NULL;
+ }
+ }
-L_SC AVL_HANDLE L_(search)(L_(avl) *l_tree, AVL_KEY k, avl_search_type st)
-{
- int cmp, target_cmp;
- AVL_HANDLE match_h = AVL_NULL;
- AVL_HANDLE h = l_tree->root;
+ if (hh != AVL_NULL)
+ while (h != hh) {
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ depth++;
- if (st & AVL_LESS)
- target_cmp = 1;
- else if (st & AVL_GREATER)
- target_cmp = -1;
- else
- target_cmp = 0;
+ if (cmp < 0) {
+ AVL_SET_BALANCE_FACTOR(hh, -1)
+ hh = AVL_GET_LESS(hh, 1);
+ } else { /* cmp > 0 */
+ AVL_SET_BALANCE_FACTOR(hh, 1)
+ hh = AVL_GET_GREATER(hh, 1);
+ }
- while (h != AVL_NULL)
- {
- cmp = AVL_COMPARE_KEY_NODE(k, h);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
- if (cmp == 0)
- {
- if (st & AVL_EQUAL)
- {
- match_h = h;
- break;
- }
+ if (unbal != AVL_NULL) {
+ unbal = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX unbal);
+ L_CHECK_READ_ERROR(AVL_NULL)
- cmp = -target_cmp;
- }
- else if (target_cmp != 0)
- if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
- /* cmp and target_cmp are both positive or both negative. */
- match_h = h;
+ if (parent_unbal == AVL_NULL)
+ l_tree->root = unbal;
+ else {
+ depth = unbal_depth - 1;
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
- h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
+ if (cmp < 0)
+ AVL_SET_LESS(parent_unbal, unbal)
+ else /* cmp > 0 */
+ AVL_SET_GREATER(parent_unbal, unbal)
+ }
}
- return(match_h);
+ }
+
+ return(h);
+}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_SEARCH)
+
+L_SC AVL_HANDLE L_(search)(L_(avl) *l_tree, AVL_KEY k, avl_search_type st) {
+ int cmp, target_cmp;
+ AVL_HANDLE match_h = AVL_NULL;
+ AVL_HANDLE h = l_tree->root;
+
+ if (st & AVL_LESS)
+ target_cmp = 1;
+ else if (st & AVL_GREATER)
+ target_cmp = -1;
+ else
+ target_cmp = 0;
+
+ while (h != AVL_NULL) {
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
+
+ if (cmp == 0) {
+ if (st & AVL_EQUAL) {
+ match_h = h;
+ break;
+ }
+
+ cmp = -target_cmp;
+ } else if (target_cmp != 0)
+ if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
+ /* cmp and target_cmp are both positive or both negative. */
+ match_h = h;
+
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ return(match_h);
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_SEARCH_LEAST)
-L_SC AVL_HANDLE L_(search_least)(L_(avl) *l_tree)
-{
- AVL_HANDLE h = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
+L_SC AVL_HANDLE L_(search_least)(L_(avl) *l_tree) {
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
- while (h != AVL_NULL)
- {
- parent = h;
- h = AVL_GET_LESS(h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- }
+ while (h != AVL_NULL) {
+ parent = h;
+ h = AVL_GET_LESS(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
- return(parent);
+ return(parent);
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_SEARCH_GREATEST)
-L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree)
-{
- AVL_HANDLE h = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
+L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree) {
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
- while (h != AVL_NULL)
- {
- parent = h;
- h = AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- }
+ while (h != AVL_NULL) {
+ parent = h;
+ h = AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
- return(parent);
+ return(parent);
}
#endif
@@ -564,284 +509,253 @@ L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree)
*/
L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h);
-L_SC AVL_HANDLE L_(remove)(L_(avl) *l_tree, AVL_KEY k)
-{
- /* Zero-based depth in tree. */
- unsigned depth = 0, rm_depth;
-
- /* Records a path into the tree. If bit n is true, indicates
- ** take greater branch from the nth node in the path, otherwise
- ** take the less branch. bit 0 gives branch from root, and
- ** so on. */
- L_BIT_ARR_DEFN(branch)
-
- AVL_HANDLE h = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
- AVL_HANDLE child;
- AVL_HANDLE path;
- int cmp, cmp_shortened_sub_with_path;
- int reduced_depth;
- int bf;
- AVL_HANDLE rm;
- AVL_HANDLE parent_rm;
-
- for (; ;)
- {
- if (h == AVL_NULL)
- /* No node in tree with given key. */
- return(AVL_NULL);
-
- cmp = AVL_COMPARE_KEY_NODE(k, h);
+L_SC AVL_HANDLE L_(remove)(L_(avl) *l_tree, AVL_KEY k) {
+ /* Zero-based depth in tree. */
+ unsigned depth = 0, rm_depth;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L_BIT_ARR_DEFN(branch)
+
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ AVL_HANDLE child;
+ AVL_HANDLE path;
+ int cmp, cmp_shortened_sub_with_path;
+ int reduced_depth;
+ int bf;
+ AVL_HANDLE rm;
+ AVL_HANDLE parent_rm;
+
+ for (;;) {
+ if (h == AVL_NULL)
+ /* No node in tree with given key. */
+ return(AVL_NULL);
- if (cmp == 0)
- /* Found node to remove. */
- break;
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
- parent = h;
+ if (cmp == 0)
+ /* Found node to remove. */
+ break;
- if (cmp > 0)
- {
- h = AVL_GET_GREATER(h, 1);
- L_BIT_ARR_1(branch, depth)
- }
- else
- {
- h = AVL_GET_LESS(h, 1);
- L_BIT_ARR_0(branch, depth)
- }
+ parent = h;
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
- cmp_shortened_sub_with_path = cmp;
+ if (cmp > 0) {
+ h = AVL_GET_GREATER(h, 1);
+ L_BIT_ARR_1(branch, depth)
+ } else {
+ h = AVL_GET_LESS(h, 1);
+ L_BIT_ARR_0(branch, depth)
}
- rm = h;
- parent_rm = parent;
- rm_depth = depth;
-
- /* If the node to remove is not a leaf node, we need to get a
- ** leaf node, or a node with a single leaf as its child, to put
- ** in the place of the node to remove. We will get the greatest
- ** node in the less subtree (of the node to remove), or the least
- ** node in the greater subtree. We take the leaf node from the
- ** deeper subtree, if there is one. */
-
- if (AVL_GET_BALANCE_FACTOR(h) < 0)
- {
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ cmp_shortened_sub_with_path = cmp;
+ }
+
+ rm = h;
+ parent_rm = parent;
+ rm_depth = depth;
+
+ /* If the node to remove is not a leaf node, we need to get a
+ ** leaf node, or a node with a single leaf as its child, to put
+ ** in the place of the node to remove. We will get the greatest
+ ** node in the less subtree (of the node to remove), or the least
+ ** node in the greater subtree. We take the leaf node from the
+ ** deeper subtree, if there is one. */
+
+ if (AVL_GET_BALANCE_FACTOR(h) < 0) {
+ child = AVL_GET_LESS(h, 1);
+ L_BIT_ARR_0(branch, depth)
+ cmp = -1;
+ } else {
+ child = AVL_GET_GREATER(h, 1);
+ L_BIT_ARR_1(branch, depth)
+ cmp = 1;
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+
+ if (child != AVL_NULL) {
+ cmp = -cmp;
+
+ do {
+ parent = h;
+ h = child;
+
+ if (cmp < 0) {
child = AVL_GET_LESS(h, 1);
L_BIT_ARR_0(branch, depth)
- cmp = -1;
- }
- else
- {
+ } else {
child = AVL_GET_GREATER(h, 1);
L_BIT_ARR_1(branch, depth)
- cmp = 1;
- }
+ }
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ } while (child != AVL_NULL);
- if (child != AVL_NULL)
- {
- cmp = -cmp;
-
- do
- {
- parent = h;
- h = child;
-
- if (cmp < 0)
- {
- child = AVL_GET_LESS(h, 1);
- L_BIT_ARR_0(branch, depth)
- }
- else
- {
- child = AVL_GET_GREATER(h, 1);
- L_BIT_ARR_1(branch, depth)
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
- }
- while (child != AVL_NULL);
+ if (parent == rm)
+ /* Only went through do loop once. Deleted node will be replaced
+ ** in the tree structure by one of its immediate children. */
+ cmp_shortened_sub_with_path = -cmp;
+ else
+ cmp_shortened_sub_with_path = cmp;
+
+ /* Get the handle of the opposite child, which may not be null. */
+ child = cmp > 0 ? AVL_GET_LESS(h, 0) : AVL_GET_GREATER(h, 0);
+ }
- if (parent == rm)
- /* Only went through do loop once. Deleted node will be replaced
- ** in the tree structure by one of its immediate children. */
- cmp_shortened_sub_with_path = -cmp;
+ if (parent == AVL_NULL)
+ /* There were only 1 or 2 nodes in this tree. */
+ l_tree->root = child;
+ else if (cmp_shortened_sub_with_path < 0)
+ AVL_SET_LESS(parent, child)
+ else
+ AVL_SET_GREATER(parent, child)
+
+ /* "path" is the parent of the subtree being eliminated or reduced
+ ** from a depth of 2 to 1. If "path" is the node to be removed, we
+ ** set path to the node we're about to poke into the position of the
+ ** node to be removed. */
+ path = parent == rm ? h : parent;
+
+ if (h != rm) {
+ /* Poke in the replacement for the node to be removed. */
+ AVL_SET_LESS(h, AVL_GET_LESS(rm, 0))
+ AVL_SET_GREATER(h, AVL_GET_GREATER(rm, 0))
+ AVL_SET_BALANCE_FACTOR(h, AVL_GET_BALANCE_FACTOR(rm))
+
+ if (parent_rm == AVL_NULL)
+ l_tree->root = h;
+ else {
+ depth = rm_depth - 1;
+
+ if (L_BIT_ARR_VAL(branch, depth))
+ AVL_SET_GREATER(parent_rm, h)
else
- cmp_shortened_sub_with_path = cmp;
+ AVL_SET_LESS(parent_rm, h)
+ }
+ }
- /* Get the handle of the opposite child, which may not be null. */
- child = cmp > 0 ? AVL_GET_LESS(h, 0) : AVL_GET_GREATER(h, 0);
- }
+ if (path != AVL_NULL) {
+ /* Create a temporary linked list from the parent of the path node
+ ** to the root node. */
+ h = l_tree->root;
+ parent = AVL_NULL;
+ depth = 0;
- if (parent == AVL_NULL)
- /* There were only 1 or 2 nodes in this tree. */
- l_tree->root = child;
- else if (cmp_shortened_sub_with_path < 0)
- AVL_SET_LESS(parent, child)
- else
- AVL_SET_GREATER(parent, child)
-
- /* "path" is the parent of the subtree being eliminated or reduced
- ** from a depth of 2 to 1. If "path" is the node to be removed, we
- ** set path to the node we're about to poke into the position of the
- ** node to be removed. */
- path = parent == rm ? h : parent;
-
- if (h != rm)
- {
- /* Poke in the replacement for the node to be removed. */
- AVL_SET_LESS(h, AVL_GET_LESS(rm, 0))
- AVL_SET_GREATER(h, AVL_GET_GREATER(rm, 0))
- AVL_SET_BALANCE_FACTOR(h, AVL_GET_BALANCE_FACTOR(rm))
-
- if (parent_rm == AVL_NULL)
- l_tree->root = h;
- else
- {
- depth = rm_depth - 1;
-
- if (L_BIT_ARR_VAL(branch, depth))
- AVL_SET_GREATER(parent_rm, h)
- else
- AVL_SET_LESS(parent_rm, h)
- }
+ while (h != path) {
+ if (L_BIT_ARR_VAL(branch, depth)) {
+ child = AVL_GET_GREATER(h, 1);
+ AVL_SET_GREATER(h, parent)
+ } else {
+ child = AVL_GET_LESS(h, 1);
+ AVL_SET_LESS(h, parent)
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ parent = h;
+ h = child;
}
- if (path != AVL_NULL)
- {
- /* Create a temporary linked list from the parent of the path node
- ** to the root node. */
- h = l_tree->root;
- parent = AVL_NULL;
- depth = 0;
-
- while (h != path)
- {
- if (L_BIT_ARR_VAL(branch, depth))
- {
- child = AVL_GET_GREATER(h, 1);
- AVL_SET_GREATER(h, parent)
- }
- else
- {
- child = AVL_GET_LESS(h, 1);
- AVL_SET_LESS(h, parent)
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
- parent = h;
- h = child;
- }
+ /* Climb from the path node to the root node using the linked
+ ** list, restoring the tree structure and rebalancing as necessary.
+ */
+ reduced_depth = 1;
+ cmp = cmp_shortened_sub_with_path;
- /* Climb from the path node to the root node using the linked
- ** list, restoring the tree structure and rebalancing as necessary.
- */
- reduced_depth = 1;
- cmp = cmp_shortened_sub_with_path;
-
- for (; ;)
- {
- if (reduced_depth)
- {
- bf = AVL_GET_BALANCE_FACTOR(h);
-
- if (cmp < 0)
- bf++;
- else /* cmp > 0 */
- bf--;
-
- if ((bf == -2) || (bf == 2))
- {
- h = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX h);
- L_CHECK_READ_ERROR(AVL_NULL)
- bf = AVL_GET_BALANCE_FACTOR(h);
- }
- else
- AVL_SET_BALANCE_FACTOR(h, bf)
- reduced_depth = (bf == 0);
- }
-
- if (parent == AVL_NULL)
- break;
-
- child = h;
- h = parent;
- depth--;
- cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
-
- if (cmp < 0)
- {
- parent = AVL_GET_LESS(h, 1);
- AVL_SET_LESS(h, child)
- }
- else
- {
- parent = AVL_GET_GREATER(h, 1);
- AVL_SET_GREATER(h, child)
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- }
+ for (;;) {
+ if (reduced_depth) {
+ bf = AVL_GET_BALANCE_FACTOR(h);
+
+ if (cmp < 0)
+ bf++;
+ else /* cmp > 0 */
+ bf--;
+
+ if ((bf == -2) || (bf == 2)) {
+ h = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX h);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ bf = AVL_GET_BALANCE_FACTOR(h);
+ } else
+ AVL_SET_BALANCE_FACTOR(h, bf)
+ reduced_depth = (bf == 0);
+ }
+
+ if (parent == AVL_NULL)
+ break;
+
+ child = h;
+ h = parent;
+ depth--;
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+
+ if (cmp < 0) {
+ parent = AVL_GET_LESS(h, 1);
+ AVL_SET_LESS(h, child)
+ } else {
+ parent = AVL_GET_GREATER(h, 1);
+ AVL_SET_GREATER(h, child)
+ }
- l_tree->root = h;
+ L_CHECK_READ_ERROR(AVL_NULL)
}
- return(rm);
+ l_tree->root = h;
+ }
+
+ return(rm);
}
#endif
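
The climb back to the root in the removal code above depends on a pointer-reversal trick: nodes carry no parent handles, so the descent overwrites the child pointer it follows with the handle of the node's parent, and the branch bit array remembers which side was reversed so the climb can restore the tree. A minimal sketch of the same technique on a plain C node (the struct, the int key, and the 64-level depth cap are assumptions for illustration; the real code works through the AVL_GET_*/AVL_SET_* macros and the L_BIT_ARR_* bit array):

#include <stddef.h>

struct node { struct node *less, *greater; int key; };

/* Descend from *root toward 'key', reversing the child pointer taken
** at each step so the visited nodes form a temporary parent-linked
** list, then climb back, restoring the pointers.  branch[d] records
** the side taken at depth d; depth is assumed to stay below 64. */
static void reverse_climb_demo(struct node **root, int key) {
  unsigned char branch[64];
  unsigned depth = 0;
  struct node *parent = NULL, *h = *root, *child;

  while (h != NULL) {                  /* descend, reversing pointers */
    if (key < h->key) {
      branch[depth] = 0;
      child = h->less;
      h->less = parent;
    } else {
      branch[depth] = 1;
      child = h->greater;
      h->greater = parent;
    }
    parent = h;
    h = child;
    depth++;
  }

  while (parent != NULL) {             /* climb, restoring pointers */
    child = h;
    h = parent;
    depth--;
    if (branch[depth]) {
      parent = h->greater;
      h->greater = child;
    } else {
      parent = h->less;
      h->less = child;
    }
    /* the AVL code rebalances 'h' here before continuing upward */
  }

  if (h != NULL)
    *root = h;
}
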
#if (L_IMPL_MASK & AVL_IMPL_SUBST)
-L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node)
-{
- AVL_HANDLE h = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
- int cmp, last_cmp;
-
- /* Search for node already in tree with same key. */
- for (; ;)
- {
- if (h == AVL_NULL)
- /* No node in tree with same key as new node. */
- return(AVL_NULL);
+L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node) {
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ int cmp, last_cmp;
- cmp = AVL_COMPARE_NODE_NODE(new_node, h);
-
- if (cmp == 0)
- /* Found the node to substitute new one for. */
- break;
+ /* Search for node already in tree with same key. */
+ for (;;) {
+ if (h == AVL_NULL)
+ /* No node in tree with same key as new node. */
+ return(AVL_NULL);
- last_cmp = cmp;
- parent = h;
- h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- }
+ cmp = AVL_COMPARE_NODE_NODE(new_node, h);
- /* Copy tree housekeeping fields from node in tree to new node. */
- AVL_SET_LESS(new_node, AVL_GET_LESS(h, 0))
- AVL_SET_GREATER(new_node, AVL_GET_GREATER(h, 0))
- AVL_SET_BALANCE_FACTOR(new_node, AVL_GET_BALANCE_FACTOR(h))
+ if (cmp == 0)
+ /* Found the node to substitute new one for. */
+ break;
- if (parent == AVL_NULL)
- /* New node is also new root. */
- l_tree->root = new_node;
- else
- {
- /* Make parent point to new node. */
- if (last_cmp < 0)
- AVL_SET_LESS(parent, new_node)
- else
- AVL_SET_GREATER(parent, new_node)
- }
-
- return(h);
+ last_cmp = cmp;
+ parent = h;
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ /* Copy tree housekeeping fields from node in tree to new node. */
+ AVL_SET_LESS(new_node, AVL_GET_LESS(h, 0))
+ AVL_SET_GREATER(new_node, AVL_GET_GREATER(h, 0))
+ AVL_SET_BALANCE_FACTOR(new_node, AVL_GET_BALANCE_FACTOR(h))
+
+ if (parent == AVL_NULL)
+ /* New node is also new root. */
+ l_tree->root = new_node;
+ else {
+ /* Make parent point to new node. */
+ if (last_cmp < 0)
+ AVL_SET_LESS(parent, new_node)
+ else
+ AVL_SET_GREATER(parent, new_node)
+ }
+
+ return(h);
}
#endif
@@ -851,144 +765,136 @@ L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node)
#if (L_IMPL_MASK & AVL_IMPL_BUILD)
L_SC int L_(build)(
- L_(avl) *l_tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes)
-{
- /* Gives path to subtree being built. If bit n is false, branch
- ** less from the node at depth n, if true branch greater. */
- L_BIT_ARR_DEFN(branch)
-
- /* If bit n is true, then for the current subtree at depth n, its
- ** greater subtree has one more node than its less subtree. */
- L_BIT_ARR_DEFN(rem)
-
- /* Depth of root node of current subtree. */
- unsigned depth = 0;
+ L_(avl) *l_tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes) {
+ /* Gives path to subtree being built. If bit n is false, branch
+ ** less from the node at depth n; if true, branch greater. */
+ L_BIT_ARR_DEFN(branch)
+
+ /* If bit n is true, then for the current subtree at depth n, its
+ ** greater subtree has one more node than its less subtree. */
+ L_BIT_ARR_DEFN(rem)
+
+ /* Depth of root node of current subtree. */
+ unsigned depth = 0;
+
+ /* Number of nodes in current subtree. */
+ L_SIZE num_sub = num_nodes;
+
+ /* The algorithm relies on a stack of nodes whose less subtree has
+ ** been built, but whose greater subtree has not yet been built.
+ ** The stack is implemented as a linked list. The nodes are linked
+ ** together by having the "greater" handle of a node set to the
+ ** next node in the list. "less_parent" is the handle of the first
+ ** node in the list. */
+ AVL_HANDLE less_parent = AVL_NULL;
+
+ /* h is root of current subtree, child is one of its children. */
+ AVL_HANDLE h;
+ AVL_HANDLE child;
+
+ if (num_nodes == 0) {
+ l_tree->root = AVL_NULL;
+ return(1);
+ }
- /* Number of nodes in current subtree. */
- L_SIZE num_sub = num_nodes;
+ for (;;) {
+ while (num_sub > 2) {
+ /* Subtract one for root of subtree. */
+ num_sub--;
- /* The algorithm relies on a stack of nodes whose less subtree has
- ** been built, but whose greater subtree has not yet been built.
- ** The stack is implemented as linked list. The nodes are linked
- ** together by having the "greater" handle of a node set to the
- ** next node in the list. "less_parent" is the handle of the first
- ** node in the list. */
- AVL_HANDLE less_parent = AVL_NULL;
+ if (num_sub & 1)
+ L_BIT_ARR_1(rem, depth)
+ else
+ L_BIT_ARR_0(rem, depth)
+ L_BIT_ARR_0(branch, depth)
+ depth++;
- /* h is root of current subtree, child is one of its children. */
- AVL_HANDLE h;
- AVL_HANDLE child;
+ num_sub >>= 1;
+ }
- if (num_nodes == 0)
- {
- l_tree->root = AVL_NULL;
- return(1);
+ if (num_sub == 2) {
+ /* Build a subtree with two nodes, slanting to greater.
+ ** I arbitrarily chose to always have the extra node in the
+ ** greater subtree when there is an odd number of nodes to
+ ** split between the two subtrees. */
+
+ h = AVL_BUILD_ITER_VAL(p);
+ L_CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ child = AVL_BUILD_ITER_VAL(p);
+ L_CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ AVL_SET_LESS(child, AVL_NULL)
+ AVL_SET_GREATER(child, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(child, 0)
+ AVL_SET_GREATER(h, child)
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 1)
+ } else { /* num_sub == 1 */
+ /* Build a subtree with one node. */
+
+ h = AVL_BUILD_ITER_VAL(p);
+ L_CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_GREATER(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 0)
}
- for (; ;)
- {
- while (num_sub > 2)
- {
- /* Subtract one for root of subtree. */
- num_sub--;
-
- if (num_sub & 1)
- L_BIT_ARR_1(rem, depth)
- else
- L_BIT_ARR_0(rem, depth)
- L_BIT_ARR_0(branch, depth)
- depth++;
-
- num_sub >>= 1;
+ while (depth) {
+ depth--;
+
+ if (!L_BIT_ARR_VAL(branch, depth))
+ /* We've completed a less subtree. */
+ break;
+
+ /* We've completed a greater subtree, so attach it to
+ ** its parent (which is less than it). We pop the parent
+ ** off the stack of less parents. */
+ child = h;
+ h = less_parent;
+ less_parent = AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(0)
+ AVL_SET_GREATER(h, child)
+ /* num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1 */
+ num_sub <<= 1;
+ num_sub += L_BIT_ARR_VAL(rem, depth) ? 0 : 1;
+
+ if (num_sub & (num_sub - 1))
+ /* num_sub is not a power of 2. */
+ AVL_SET_BALANCE_FACTOR(h, 0)
+ else
+ /* num_sub is a power of 2. */
+ AVL_SET_BALANCE_FACTOR(h, 1)
}
- if (num_sub == 2)
- {
- /* Build a subtree with two nodes, slanting to greater.
- ** I arbitrarily chose to always have the extra node in the
- ** greater subtree when there is an odd number of nodes to
- ** split between the two subtrees. */
-
- h = AVL_BUILD_ITER_VAL(p);
- L_CHECK_READ_ERROR(0)
- AVL_BUILD_ITER_INCR(p)
- child = AVL_BUILD_ITER_VAL(p);
- L_CHECK_READ_ERROR(0)
- AVL_BUILD_ITER_INCR(p)
- AVL_SET_LESS(child, AVL_NULL)
- AVL_SET_GREATER(child, AVL_NULL)
- AVL_SET_BALANCE_FACTOR(child, 0)
- AVL_SET_GREATER(h, child)
- AVL_SET_LESS(h, AVL_NULL)
- AVL_SET_BALANCE_FACTOR(h, 1)
- }
- else /* num_sub == 1 */
- {
- /* Build a subtree with one node. */
-
- h = AVL_BUILD_ITER_VAL(p);
- L_CHECK_READ_ERROR(0)
- AVL_BUILD_ITER_INCR(p)
- AVL_SET_LESS(h, AVL_NULL)
- AVL_SET_GREATER(h, AVL_NULL)
- AVL_SET_BALANCE_FACTOR(h, 0)
- }
+ if (num_sub == num_nodes)
+ /* We've completed the full tree. */
+ break;
- while (depth)
- {
- depth--;
-
- if (!L_BIT_ARR_VAL(branch, depth))
- /* We've completed a less subtree. */
- break;
-
- /* We've completed a greater subtree, so attach it to
- ** its parent (that is less than it). We pop the parent
- ** off the stack of less parents. */
- child = h;
- h = less_parent;
- less_parent = AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR(0)
- AVL_SET_GREATER(h, child)
- /* num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1 */
- num_sub <<= 1;
- num_sub += L_BIT_ARR_VAL(rem, depth) ? 0 : 1;
-
- if (num_sub & (num_sub - 1))
- /* num_sub is not a power of 2. */
- AVL_SET_BALANCE_FACTOR(h, 0)
- else
- /* num_sub is a power of 2. */
- AVL_SET_BALANCE_FACTOR(h, 1)
- }
-
- if (num_sub == num_nodes)
- /* We've completed the full tree. */
- break;
-
- /* The subtree we've completed is the less subtree of the
- ** next node in the sequence. */
-
- child = h;
- h = AVL_BUILD_ITER_VAL(p);
- L_CHECK_READ_ERROR(0)
- AVL_BUILD_ITER_INCR(p)
- AVL_SET_LESS(h, child)
+ /* The subtree we've completed is the less subtree of the
+ ** next node in the sequence. */
- /* Put h into stack of less parents. */
- AVL_SET_GREATER(h, less_parent)
- less_parent = h;
+ child = h;
+ h = AVL_BUILD_ITER_VAL(p);
+ L_CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ AVL_SET_LESS(h, child)
- /* Proceed to creating greater than subtree of h. */
- L_BIT_ARR_1(branch, depth)
- num_sub += L_BIT_ARR_VAL(rem, depth) ? 1 : 0;
- depth++;
+ /* Put h into stack of less parents. */
+ AVL_SET_GREATER(h, less_parent)
+ less_parent = h;
- } /* end for ( ; ; ) */
+ /* Proceed to creating greater than subtree of h. */
+ L_BIT_ARR_1(branch, depth)
+ num_sub += L_BIT_ARR_VAL(rem, depth) ? 1 : 0;
+ depth++;
- l_tree->root = h;
+ } /* end for (;;) */
- return(1);
+ l_tree->root = h;
+
+ return(1);
}
#endif
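
The iterative build above uses the branch and rem bit arrays to simulate what a recursive splitter would do: spend one node on the root, give floor((n-1)/2) nodes to the less subtree and the remainder to the greater subtree (the comment's extra node slanting greater), and set the balance factor to +1 exactly when the subtree's node count is a power of two. A recursive sketch of that policy, assuming nodes are consumed from a sorted array (struct bnode and build_ref are hypothetical names, not libvpx symbols):

#include <stddef.h>

struct bnode { struct bnode *less, *greater; int balance; int key; };

/* Reference builder over sorted[0..n-1]: one node for the root,
** floor((n-1)/2) nodes to the less subtree, the remainder (the
** "extra node") to the greater subtree. */
static struct bnode *build_ref(struct bnode *sorted, long n) {
  struct bnode *h;
  long n_less;

  if (n == 0)
    return NULL;

  n_less = (n - 1) / 2;                /* smaller half goes less */

  h = &sorted[n_less];                 /* root sits past the less block */
  h->less = build_ref(sorted, n_less);
  h->greater = build_ref(sorted + n_less + 1, n - 1 - n_less);

  /* Same rule as the iterative code: balance factor +1 exactly when n
  ** is a power of two greater than one (greater subtree one deeper). */
  h->balance = (n > 1 && (n & (n - 1)) == 0) ? 1 : 0;
  return h;
}
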
@@ -1001,9 +907,8 @@ L_SC int L_(build)(
** invalid. (Depth is zero-based.) It's not necessary to initialize
** iterators prior to passing them to the "start" function.
*/
-L_SC void L_(init_iter)(L_(iter) *iter)
-{
- iter->depth = ~0;
+L_SC void L_(init_iter)(L_(iter) *iter) {
+ iter->depth = ~0;
}
#endif
@@ -1011,7 +916,7 @@ L_SC void L_(init_iter)(L_(iter) *iter)
#ifdef AVL_READ_ERRORS_HAPPEN
#define L_CHECK_READ_ERROR_INV_DEPTH \
- { if (AVL_READ_ERROR) { iter->depth = ~0; return; } }
+ { if (AVL_READ_ERROR) { iter->depth = ~0; return; } }
#else
@@ -1022,174 +927,157 @@ L_SC void L_(init_iter)(L_(iter) *iter)
#if (L_IMPL_MASK & AVL_IMPL_START_ITER)
L_SC void L_(start_iter)(
- L_(avl) *l_tree, L_(iter) *iter, AVL_KEY k, avl_search_type st)
-{
- AVL_HANDLE h = l_tree->root;
- unsigned d = 0;
- int cmp, target_cmp;
-
- /* Save the tree that we're going to iterate through in a
- ** member variable. */
- iter->tree_ = l_tree;
-
- iter->depth = ~0;
+ L_(avl) *l_tree, L_(iter) *iter, AVL_KEY k, avl_search_type st) {
+ AVL_HANDLE h = l_tree->root;
+ unsigned d = 0;
+ int cmp, target_cmp;
+
+ /* Save the tree that we're going to iterate through in a
+ ** member variable. */
+ iter->tree_ = l_tree;
+
+ iter->depth = ~0;
+
+ if (h == AVL_NULL)
+ /* Tree is empty. */
+ return;
+
+ if (st & AVL_LESS)
+ /* Key can be greater than key of starting node. */
+ target_cmp = 1;
+ else if (st & AVL_GREATER)
+ /* Key can be less than key of starting node. */
+ target_cmp = -1;
+ else
+ /* Key must be same as key of starting node. */
+ target_cmp = 0;
+
+ for (;;) {
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
+
+ if (cmp == 0) {
+ if (st & AVL_EQUAL) {
+ /* Equal node was sought and found as starting node. */
+ iter->depth = d;
+ break;
+ }
+
+ cmp = -target_cmp;
+ } else if (target_cmp != 0)
+ if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
+ /* cmp and target_cmp are both negative or both positive. */
+ iter->depth = d;
+
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
if (h == AVL_NULL)
- /* Tree is empty. */
- return;
-
- if (st & AVL_LESS)
- /* Key can be greater than key of starting node. */
- target_cmp = 1;
- else if (st & AVL_GREATER)
- /* Key can be less than key of starting node. */
- target_cmp = -1;
- else
- /* Key must be same as key of starting node. */
- target_cmp = 0;
-
- for (; ;)
- {
- cmp = AVL_COMPARE_KEY_NODE(k, h);
-
- if (cmp == 0)
- {
- if (st & AVL_EQUAL)
- {
- /* Equal node was sought and found as starting node. */
- iter->depth = d;
- break;
- }
-
- cmp = -target_cmp;
- }
- else if (target_cmp != 0)
- if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
- /* cmp and target_cmp are both negative or both positive. */
- iter->depth = d;
-
- h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
-
- if (h == AVL_NULL)
- break;
-
- if (cmp > 0)
- L_BIT_ARR_1(iter->branch, d)
- else
- L_BIT_ARR_0(iter->branch, d)
- iter->path_h[d++] = h;
- }
+ break;
+
+ if (cmp > 0)
+ L_BIT_ARR_1(iter->branch, d)
+ else
+ L_BIT_ARR_0(iter->branch, d)
+ iter->path_h[d++] = h;
+ }
}
#endif
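
start_iter records a candidate depth whenever cmp and target_cmp share a sign, tested branch-free as ((cmp ^ target_cmp) & L_MASK_HIGH_BIT): two ints have the same sign exactly when XOR-ing them clears the sign bit. A self-contained check of that identity (MASK_HIGH_BIT here is simply INT_MIN, standing in for the library's L_MASK_HIGH_BIT):

#include <assert.h>
#include <limits.h>

#define MASK_HIGH_BIT INT_MIN   /* int sign bit */

static int same_sign(int a, int b) {
  return ((a ^ b) & MASK_HIGH_BIT) == 0;
}

int main(void) {
  assert(same_sign(-3, -1));    /* both negative */
  assert(same_sign(5, 2));      /* both positive */
  assert(!same_sign(-3, 2));    /* mixed signs */
  assert(same_sign(0, 7));      /* zero counts as non-negative */
  return 0;
}
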
#if (L_IMPL_MASK & AVL_IMPL_START_ITER_LEAST)
-L_SC void L_(start_iter_least)(L_(avl) *l_tree, L_(iter) *iter)
-{
- AVL_HANDLE h = l_tree->root;
+L_SC void L_(start_iter_least)(L_(avl) *l_tree, L_(iter) *iter) {
+ AVL_HANDLE h = l_tree->root;
- iter->tree_ = l_tree;
+ iter->tree_ = l_tree;
- iter->depth = ~0;
+ iter->depth = ~0;
- L_BIT_ARR_ALL(iter->branch, 0)
+ L_BIT_ARR_ALL(iter->branch, 0)
- while (h != AVL_NULL)
- {
- if (iter->depth != ~0)
- iter->path_h[iter->depth] = h;
+ while (h != AVL_NULL) {
+ if (iter->depth != ~0)
+ iter->path_h[iter->depth] = h;
- iter->depth++;
- h = AVL_GET_LESS(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
- }
+ iter->depth++;
+ h = AVL_GET_LESS(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
+ }
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_START_ITER_GREATEST)
-L_SC void L_(start_iter_greatest)(L_(avl) *l_tree, L_(iter) *iter)
-{
- AVL_HANDLE h = l_tree->root;
+L_SC void L_(start_iter_greatest)(L_(avl) *l_tree, L_(iter) *iter) {
+ AVL_HANDLE h = l_tree->root;
- iter->tree_ = l_tree;
+ iter->tree_ = l_tree;
- iter->depth = ~0;
+ iter->depth = ~0;
- L_BIT_ARR_ALL(iter->branch, 1)
+ L_BIT_ARR_ALL(iter->branch, 1)
- while (h != AVL_NULL)
- {
- if (iter->depth != ~0)
- iter->path_h[iter->depth] = h;
+ while (h != AVL_NULL) {
+ if (iter->depth != ~0)
+ iter->path_h[iter->depth] = h;
- iter->depth++;
- h = AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
- }
+ iter->depth++;
+ h = AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
+ }
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_GET_ITER)
-L_SC AVL_HANDLE L_(get_iter)(L_(iter) *iter)
-{
- if (iter->depth == ~0)
- return(AVL_NULL);
+L_SC AVL_HANDLE L_(get_iter)(L_(iter) *iter) {
+ if (iter->depth == ~0)
+ return(AVL_NULL);
- return(iter->depth == 0 ?
- iter->tree_->root : iter->path_h[iter->depth - 1]);
+ return(iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]);
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_INCR_ITER)
-L_SC void L_(incr_iter)(L_(iter) *iter)
-{
+L_SC void L_(incr_iter)(L_(iter) *iter) {
#define l_tree (iter->tree_)
- if (iter->depth != ~0)
- {
- AVL_HANDLE h =
- AVL_GET_GREATER((iter->depth == 0 ?
- iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
- L_CHECK_READ_ERROR_INV_DEPTH
+ if (iter->depth != ~0) {
+ AVL_HANDLE h =
+ AVL_GET_GREATER((iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
- if (h == AVL_NULL)
- do
- {
- if (iter->depth == 0)
- {
- iter->depth = ~0;
- break;
- }
-
- iter->depth--;
- }
- while (L_BIT_ARR_VAL(iter->branch, iter->depth));
- else
- {
- L_BIT_ARR_1(iter->branch, iter->depth)
- iter->path_h[iter->depth++] = h;
+ if (h == AVL_NULL)
+ do {
+ if (iter->depth == 0) {
+ iter->depth = ~0;
+ break;
+ }
- for (; ;)
- {
- h = AVL_GET_LESS(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
+ iter->depth--;
+ } while (L_BIT_ARR_VAL(iter->branch, iter->depth));
+ else {
+ L_BIT_ARR_1(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
- if (h == AVL_NULL)
- break;
+ for (;;) {
+ h = AVL_GET_LESS(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
- L_BIT_ARR_0(iter->branch, iter->depth)
- iter->path_h[iter->depth++] = h;
- }
- }
+ if (h == AVL_NULL)
+ break;
+
+ L_BIT_ARR_0(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
+ }
}
+ }
#undef l_tree
}
@@ -1198,47 +1086,40 @@ L_SC void L_(incr_iter)(L_(iter) *iter)
#if (L_IMPL_MASK & AVL_IMPL_DECR_ITER)
-L_SC void L_(decr_iter)(L_(iter) *iter)
-{
+L_SC void L_(decr_iter)(L_(iter) *iter) {
#define l_tree (iter->tree_)
- if (iter->depth != ~0)
- {
- AVL_HANDLE h =
- AVL_GET_LESS((iter->depth == 0 ?
- iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
- L_CHECK_READ_ERROR_INV_DEPTH
+ if (iter->depth != ~0) {
+ AVL_HANDLE h =
+ AVL_GET_LESS((iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
- if (h == AVL_NULL)
- do
- {
- if (iter->depth == 0)
- {
- iter->depth = ~0;
- break;
- }
-
- iter->depth--;
- }
- while (!L_BIT_ARR_VAL(iter->branch, iter->depth));
- else
- {
- L_BIT_ARR_0(iter->branch, iter->depth)
- iter->path_h[iter->depth++] = h;
+ if (h == AVL_NULL)
+ do {
+ if (iter->depth == 0) {
+ iter->depth = ~0;
+ break;
+ }
- for (; ;)
- {
- h = AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
+ iter->depth--;
+ } while (!L_BIT_ARR_VAL(iter->branch, iter->depth));
+ else {
+ L_BIT_ARR_0(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
- if (h == AVL_NULL)
- break;
+ for (;;) {
+ h = AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
- L_BIT_ARR_1(iter->branch, iter->depth)
- iter->path_h[iter->depth++] = h;
- }
- }
+ if (h == AVL_NULL)
+ break;
+
+ L_BIT_ARR_1(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
+ }
}
+ }
#undef l_tree
}
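
incr_iter above is the textbook in-order successor walk, phrased with the branch bits and the path_h[] stack instead of parent pointers: step to the greater child and run less-ward to the bottom, or, with no greater child, climb while the branch bit says the path went greater and stop at the first ancestor entered from its less side (decr_iter mirrors this). For comparison, the same logic on a hypothetical parent-pointered node:

#include <stddef.h>

struct pnode { struct pnode *less, *greater, *parent; };

/* In-order successor with explicit parent pointers; the iterator in
** the diff reconstructs 'parent' from path_h[] and the branch bits. */
static struct pnode *successor(struct pnode *h) {
  if (h->greater != NULL) {            /* greater once, then hard less */
    h = h->greater;
    while (h->less != NULL)
      h = h->less;
    return h;
  }
  /* Climb while we were a greater child of our parent. */
  while (h->parent != NULL && h == h->parent->greater)
    h = h->parent;
  return h->parent;                    /* NULL when h was the maximum */
}
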
diff --git a/libvpx/vpx_mem/memory_manager/include/heapmm.h b/libvpx/vpx_mem/memory_manager/include/heapmm.h
index 33004ca..4934c2d 100644
--- a/libvpx/vpx_mem/memory_manager/include/heapmm.h
+++ b/libvpx/vpx_mem/memory_manager/include/heapmm.h
@@ -81,30 +81,29 @@
#include "hmm_cnfg.h"
/* Heap descriptor. */
-typedef struct HMM_UNIQUE(structure)
-{
- /* private: */
-
- /* Pointer to (payload of) root node in AVL tree. This field should
- ** really be the AVL tree descriptor (type avl_avl). But (in the
- ** instantiation of the AVL tree generic package used in package) the
- ** AVL tree descriptor simply contains a pointer to the root. So,
- ** whenever a pointer to the AVL tree descriptor is needed, I use the
- ** cast:
- **
- ** (avl_avl *) &(heap_desc->avl_tree_root)
- **
- ** (where heap_desc is a pointer to a heap descriptor). This trick
- ** allows me to avoid including cavl_if.h in this external header. */
- void *avl_tree_root;
-
- /* Pointer to first byte of last block freed, after any coalescing. */
- void *last_freed;
-
- /* public: */
-
- HMM_UNIQUE(size_bau) num_baus_can_shrink;
- void *end_of_shrinkable_chunk;
+typedef struct HMM_UNIQUE(structure) {
+ /* private: */
+
+ /* Pointer to (payload of) root node in AVL tree. This field should
+ ** really be the AVL tree descriptor (type avl_avl). But (in the
+ ** instantiation of the AVL tree generic package used in this package) the
+ ** AVL tree descriptor simply contains a pointer to the root. So,
+ ** whenever a pointer to the AVL tree descriptor is needed, I use the
+ ** cast:
+ **
+ ** (avl_avl *) &(heap_desc->avl_tree_root)
+ **
+ ** (where heap_desc is a pointer to a heap descriptor). This trick
+ ** allows me to avoid including cavl_if.h in this external header. */
+ void *avl_tree_root;
+
+ /* Pointer to first byte of last block freed, after any coalescing. */
+ void *last_freed;
+
+ /* public: */
+
+ HMM_UNIQUE(size_bau) num_baus_can_shrink;
+ void *end_of_shrinkable_chunk;
}
HMM_UNIQUE(descriptor);
@@ -113,41 +112,41 @@ HMM_UNIQUE(descriptor);
void HMM_UNIQUE(init)(HMM_UNIQUE(descriptor) *desc);
void *HMM_UNIQUE(alloc)(
- HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) num_addr_align_units);
+ HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) num_addr_align_units);
/* NOT YET IMPLEMENTED */
void *HMM_UNIQUE(greedy_alloc)(
- HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) needed_addr_align_units,
- HMM_UNIQUE(size_aau) coveted_addr_align_units);
+ HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) needed_addr_align_units,
+ HMM_UNIQUE(size_aau) coveted_addr_align_units);
int HMM_UNIQUE(resize)(
- HMM_UNIQUE(descriptor) *desc, void *mem,
- HMM_UNIQUE(size_aau) num_addr_align_units);
+ HMM_UNIQUE(descriptor) *desc, void *mem,
+ HMM_UNIQUE(size_aau) num_addr_align_units);
/* NOT YET IMPLEMENTED */
int HMM_UNIQUE(greedy_resize)(
- HMM_UNIQUE(descriptor) *desc, void *mem,
- HMM_UNIQUE(size_aau) needed_addr_align_units,
- HMM_UNIQUE(size_aau) coveted_addr_align_units);
+ HMM_UNIQUE(descriptor) *desc, void *mem,
+ HMM_UNIQUE(size_aau) needed_addr_align_units,
+ HMM_UNIQUE(size_aau) coveted_addr_align_units);
void HMM_UNIQUE(free)(HMM_UNIQUE(descriptor) *desc, void *mem);
HMM_UNIQUE(size_aau) HMM_UNIQUE(true_size)(void *mem);
HMM_UNIQUE(size_aau) HMM_UNIQUE(largest_available)(
- HMM_UNIQUE(descriptor) *desc);
+ HMM_UNIQUE(descriptor) *desc);
void HMM_UNIQUE(new_chunk)(
- HMM_UNIQUE(descriptor) *desc, void *start_of_chunk,
- HMM_UNIQUE(size_bau) num_block_align_units);
+ HMM_UNIQUE(descriptor) *desc, void *start_of_chunk,
+ HMM_UNIQUE(size_bau) num_block_align_units);
void HMM_UNIQUE(grow_chunk)(
- HMM_UNIQUE(descriptor) *desc, void *end_of_chunk,
- HMM_UNIQUE(size_bau) num_block_align_units);
+ HMM_UNIQUE(descriptor) *desc, void *end_of_chunk,
+ HMM_UNIQUE(size_bau) num_block_align_units);
/* NOT YET IMPLEMENTED */
void HMM_UNIQUE(shrink_chunk)(
- HMM_UNIQUE(descriptor) *desc,
- HMM_UNIQUE(size_bau) num_block_align_units);
+ HMM_UNIQUE(descriptor) *desc,
+ HMM_UNIQUE(size_bau) num_block_align_units);
#endif /* defined HMM_PROCESS */
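
The avl_tree_root comment relies on the C guarantee that a suitably converted pointer to a struct points at its first member, so a descriptor type that holds nothing but the root pointer can be conjured from &heap_desc->avl_tree_root. A minimal illustration under that assumption (avl_avl is modeled here as a one-pointer struct, and heap_demo is a stand-in for the real descriptor):

#include <assert.h>

typedef struct { void *root; } avl_avl;  /* descriptor holds root only */

struct heap_demo {
  void *avl_tree_root;    /* first member, same layout as avl_avl */
  void *last_freed;
};

int main(void) {
  int dummy;
  struct heap_demo d;
  avl_avl *tree;

  d.avl_tree_root = &dummy;
  d.last_freed = 0;

  /* Valid only because avl_avl contains exactly one pointer member,
  ** and a pointer to a struct points at its first member. */
  tree = (avl_avl *)&d.avl_tree_root;
  assert(tree->root == d.avl_tree_root);
  return 0;
}
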
diff --git a/libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h b/libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h
index 30b9f50..2c3391d 100644
--- a/libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h
+++ b/libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h
@@ -45,8 +45,8 @@
#define HMM_UNIQUE(BASE) hmm_ ## BASE
/* Number of bytes in an Address Alignment Unit (AAU). */
-//fwg
-//#define HMM_ADDR_ALIGN_UNIT sizeof(int)
+// fwg
+// #define HMM_ADDR_ALIGN_UNIT sizeof(int)
#define HMM_ADDR_ALIGN_UNIT 32
/* Number of AAUs in a Block Alignment Unit (BAU). */
@@ -65,7 +65,7 @@ void hmm_dflt_abort(const char *, const char *);
** statement. If you remove the definition of this macro, no self-auditing
** will be performed. */
#define HMM_AUDIT_FAIL \
- hmm_dflt_abort(__FILE__, HMM_SYM_TO_STRING(__LINE__));
+ hmm_dflt_abort(__FILE__, HMM_SYM_TO_STRING(__LINE__));
#elif HMM_CNFG_NUM == 0
@@ -90,8 +90,8 @@ extern const char *HMM_UNIQUE(fail_file);
extern unsigned HMM_UNIQUE(fail_line);
#define HMM_AUDIT_FAIL \
- { HMM_UNIQUE(fail_file) = __FILE__; HMM_UNIQUE(fail_line) = __LINE__; \
- longjmp(HMM_UNIQUE(jmp_buf), 1); }
+ { HMM_UNIQUE(fail_file) = __FILE__; HMM_UNIQUE(fail_line) = __LINE__; \
+ longjmp(HMM_UNIQUE(jmp_buf), 1); }
#elif HMM_CNFG_NUM == 1
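
In the HMM_CNFG_NUM == 0 configuration above, HMM_AUDIT_FAIL records __FILE__/__LINE__ and longjmps out of the failing audit, which only works if the application armed the jump buffer beforehand. A hedged sketch of the calling pattern this implies (hmm_jmp_buf, hmm_fail_file, and hmm_fail_line are shortened stand-ins for the HMM_UNIQUE(...) symbols):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf hmm_jmp_buf;
static const char *hmm_fail_file;
static unsigned hmm_fail_line;

#define AUDIT_FAIL \
  { hmm_fail_file = __FILE__; hmm_fail_line = __LINE__; \
    longjmp(hmm_jmp_buf, 1); }

static void audited_operation(int corrupt) {
  if (corrupt)
    AUDIT_FAIL        /* no trailing semicolon, as in the real macro */
}

int main(void) {
  if (setjmp(hmm_jmp_buf)) {           /* arm the recovery point */
    fprintf(stderr, "heap audit failed at %s:%u\n",
            hmm_fail_file, hmm_fail_line);
    return 1;
  }
  audited_operation(1);
  return 0;
}
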
diff --git a/libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h b/libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h
index 5d62abc..27cefe4 100644
--- a/libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h
+++ b/libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h
@@ -26,34 +26,32 @@
/* Mask of high bit of variable of size_bau type. */
#define HIGH_BIT_BAU_SIZE \
- ((U(size_bau)) ~ (((U(size_bau)) ~ (U(size_bau)) 0) >> 1))
+ ((U(size_bau)) ~ (((U(size_bau)) ~ (U(size_bau)) 0) >> 1))
/* Add a given number of AAUs to pointer. */
#define AAUS_FORWARD(PTR, AAU_OFFSET) \
- (((char *) (PTR)) + ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
+ (((char *) (PTR)) + ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
/* Subtract a given number of AAUs from pointer. */
#define AAUS_BACKWARD(PTR, AAU_OFFSET) \
- (((char *) (PTR)) - ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
+ (((char *) (PTR)) - ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
/* Add a given number of BAUs to a pointer. */
#define BAUS_FORWARD(PTR, BAU_OFFSET) \
- AAUS_FORWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
+ AAUS_FORWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
/* Subtract a given number of BAUs from a pointer. */
#define BAUS_BACKWARD(PTR, BAU_OFFSET) \
- AAUS_BACKWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
+ AAUS_BACKWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
-typedef struct head_struct
-{
- /* Sizes in Block Alignment Units. */
- HMM_UNIQUE(size_bau) previous_block_size, block_size;
+typedef struct head_struct {
+ /* Sizes in Block Alignment Units. */
+ HMM_UNIQUE(size_bau) previous_block_size, block_size;
}
head_record;
-typedef struct ptr_struct
-{
- struct ptr_struct *self, *prev, *next;
+typedef struct ptr_struct {
+ struct ptr_struct *self, *prev, *next;
}
ptr_record;
@@ -71,50 +69,50 @@ ptr_record;
/* Minimum number of BAUs in a block (allowing room for the pointer record). */
#define MIN_BLOCK_BAUS \
- DIV_ROUND_UP(HEAD_AAUS + PTR_RECORD_AAUS, HMM_BLOCK_ALIGN_UNIT)
+ DIV_ROUND_UP(HEAD_AAUS + PTR_RECORD_AAUS, HMM_BLOCK_ALIGN_UNIT)
/* Return number of BAUs in block (masking off high bit containing block
** status). */
#define BLOCK_BAUS(HEAD_PTR) \
- (((head_record *) (HEAD_PTR))->block_size & ~HIGH_BIT_BAU_SIZE)
+ (((head_record *) (HEAD_PTR))->block_size & ~HIGH_BIT_BAU_SIZE)
/* Return number of BAUs in previous block (masking off high bit containing
** block status). */
#define PREV_BLOCK_BAUS(HEAD_PTR) \
- (((head_record *) (HEAD_PTR))->previous_block_size & ~HIGH_BIT_BAU_SIZE)
+ (((head_record *) (HEAD_PTR))->previous_block_size & ~HIGH_BIT_BAU_SIZE)
/* Set number of BAUs in previous block, preserving high bit containing
** block status. */
#define SET_PREV_BLOCK_BAUS(HEAD_PTR, N_BAUS) \
- { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
- h_ptr->previous_block_size &= HIGH_BIT_BAU_SIZE; \
- h_ptr->previous_block_size |= (N_BAUS); }
+ { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
+ h_ptr->previous_block_size &= HIGH_BIT_BAU_SIZE; \
+ h_ptr->previous_block_size |= (N_BAUS); }
/* Convert pointer to pointer record of block to pointer to block's head
** record. */
#define PTR_REC_TO_HEAD(PTR_REC_PTR) \
- ((head_record *) AAUS_BACKWARD(PTR_REC_PTR, HEAD_AAUS))
+ ((head_record *) AAUS_BACKWARD(PTR_REC_PTR, HEAD_AAUS))
/* Convert pointer to block head to pointer to block's pointer record. */
#define HEAD_TO_PTR_REC(HEAD_PTR) \
- ((ptr_record *) AAUS_FORWARD(HEAD_PTR, HEAD_AAUS))
+ ((ptr_record *) AAUS_FORWARD(HEAD_PTR, HEAD_AAUS))
/* Returns non-zero if block is allocated. */
#define IS_BLOCK_ALLOCATED(HEAD_PTR) \
- (((((head_record *) (HEAD_PTR))->block_size | \
- ((head_record *) (HEAD_PTR))->previous_block_size) & \
- HIGH_BIT_BAU_SIZE) == 0)
+ (((((head_record *) (HEAD_PTR))->block_size | \
+ ((head_record *) (HEAD_PTR))->previous_block_size) & \
+ HIGH_BIT_BAU_SIZE) == 0)
#define MARK_BLOCK_ALLOCATED(HEAD_PTR) \
- { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
- h_ptr->block_size &= ~HIGH_BIT_BAU_SIZE; \
- h_ptr->previous_block_size &= ~HIGH_BIT_BAU_SIZE; }
+ { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
+ h_ptr->block_size &= ~HIGH_BIT_BAU_SIZE; \
+ h_ptr->previous_block_size &= ~HIGH_BIT_BAU_SIZE; }
/* Mark a block as free when it is not the first block in a bin (and
** therefore not a node in the AVL tree). */
#define MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(HEAD_PTR) \
- { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
- h_ptr->block_size |= HIGH_BIT_BAU_SIZE; }
+ { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
+ h_ptr->block_size |= HIGH_BIT_BAU_SIZE; }
/* Prototypes for internal functions implemented in one file and called in
** another.
@@ -125,7 +123,7 @@ void U(into_free_collection)(U(descriptor) *desc, head_record *head_ptr);
void U(out_of_free_collection)(U(descriptor) *desc, head_record *head_ptr);
void *U(alloc_from_bin)(
- U(descriptor) *desc, ptr_record *bin_front_ptr, U(size_bau) n_baus);
+ U(descriptor) *desc, ptr_record *bin_front_ptr, U(size_bau) n_baus);
#ifdef HMM_AUDIT_FAIL
@@ -137,12 +135,12 @@ int U(audit_block_fail_dummy_return)(void);
/* Auditing a block consists of checking that the size in its head
** matches the previous block size in the head of the next block. */
#define AUDIT_BLOCK_AS_EXPR(HEAD_PTR) \
- ((BLOCK_BAUS(HEAD_PTR) == \
- PREV_BLOCK_BAUS(BAUS_FORWARD(HEAD_PTR, BLOCK_BAUS(HEAD_PTR)))) ? \
- 0 : U(audit_block_fail_dummy_return)())
+ ((BLOCK_BAUS(HEAD_PTR) == \
+ PREV_BLOCK_BAUS(BAUS_FORWARD(HEAD_PTR, BLOCK_BAUS(HEAD_PTR)))) ? \
+ 0 : U(audit_block_fail_dummy_return)())
#define AUDIT_BLOCK(HEAD_PTR) \
- { void *h_ptr = (HEAD_PTR); AUDIT_BLOCK_AS_EXPR(h_ptr); }
+ { void *h_ptr = (HEAD_PTR); AUDIT_BLOCK_AS_EXPR(h_ptr); }
#endif
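
These macros pack a one-bit free/allocated flag into the top bit of each stored block size, which is why BLOCK_BAUS and PREV_BLOCK_BAUS mask with ~HIGH_BIT_BAU_SIZE and the MARK_* macros touch only that bit. A compact model of the encoding, assuming a 32-bit unsigned size field (the real code derives the mask from the size_bau typedef):

#include <assert.h>
#include <stdint.h>

#define HIGH_BIT ((uint32_t)1u << 31)

static uint32_t block_baus(uint32_t field)     { return field & ~HIGH_BIT; }
static uint32_t mark_free(uint32_t field)      { return field | HIGH_BIT; }
static uint32_t mark_allocated(uint32_t field) { return field & ~HIGH_BIT; }

int main(void) {
  uint32_t f = 640;              /* block of 640 BAUs, allocated */
  f = mark_free(f);
  assert(block_baus(f) == 640);  /* size survives the status bit */
  assert(f & HIGH_BIT);          /* ...and the block reads as free */
  f = mark_allocated(f);
  assert(!(f & HIGH_BIT));
  return 0;
}
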
diff --git a/libvpx/vpx_mem/vpx_mem.c b/libvpx/vpx_mem/vpx_mem.c
index eade432..059248b 100644
--- a/libvpx/vpx_mem/vpx_mem.c
+++ b/libvpx/vpx_mem/vpx_mem.c
@@ -51,15 +51,14 @@ static void *vpx_mm_realloc(void *memblk, size_t size);
#endif /*CONFIG_MEM_MANAGER*/
#if USE_GLOBAL_FUNCTION_POINTERS
-struct GLOBAL_FUNC_POINTERS
-{
- g_malloc_func g_malloc;
- g_calloc_func g_calloc;
- g_realloc_func g_realloc;
- g_free_func g_free;
- g_memcpy_func g_memcpy;
- g_memset_func g_memset;
- g_memmove_func g_memmove;
+struct GLOBAL_FUNC_POINTERS {
+ g_malloc_func g_malloc;
+ g_calloc_func g_calloc;
+ g_realloc_func g_realloc;
+ g_free_func g_free;
+ g_memcpy_func g_memcpy;
+ g_memset_func g_memset;
+ g_memmove_func g_memmove;
} *g_func = NULL;
# define VPX_MALLOC_L g_func->g_malloc
@@ -77,346 +76,314 @@ struct GLOBAL_FUNC_POINTERS
# define VPX_MEMMOVE_L memmove
#endif /* USE_GLOBAL_FUNCTION_POINTERS */
-unsigned int vpx_mem_get_version()
-{
- unsigned int ver = ((unsigned int)(unsigned char)VPX_MEM_VERSION_CHIEF << 24 |
- (unsigned int)(unsigned char)VPX_MEM_VERSION_MAJOR << 16 |
- (unsigned int)(unsigned char)VPX_MEM_VERSION_MINOR << 8 |
- (unsigned int)(unsigned char)VPX_MEM_VERSION_PATCH);
- return ver;
+unsigned int vpx_mem_get_version() {
+ unsigned int ver = ((unsigned int)(unsigned char)VPX_MEM_VERSION_CHIEF << 24 |
+ (unsigned int)(unsigned char)VPX_MEM_VERSION_MAJOR << 16 |
+ (unsigned int)(unsigned char)VPX_MEM_VERSION_MINOR << 8 |
+ (unsigned int)(unsigned char)VPX_MEM_VERSION_PATCH);
+ return ver;
}
-int vpx_mem_set_heap_size(size_t size)
-{
- int ret = -1;
+int vpx_mem_set_heap_size(size_t size) {
+ int ret = -1;
#if CONFIG_MEM_MANAGER
#if MM_DYNAMIC_MEMORY
- if (!g_mng_memory_allocated && size)
- {
- g_mm_memory_size = size;
- ret = 0;
- }
- else
- ret = -3;
+ if (!g_mng_memory_allocated && size) {
+ g_mm_memory_size = size;
+ ret = 0;
+ } else
+ ret = -3;
#else
- ret = -2;
+ ret = -2;
#endif
#else
- (void)size;
+ (void)size;
#endif
- return ret;
+ return ret;
}
-void *vpx_memalign(size_t align, size_t size)
-{
- void *addr,
- * x = NULL;
+void *vpx_memalign(size_t align, size_t size) {
+ void *addr,
+ * x = NULL;
#if CONFIG_MEM_MANAGER
- int number_aau;
+ int number_aau;
- if (vpx_mm_create_heap_memory() < 0)
- {
- _P(printf("[vpx][mm] ERROR vpx_memalign() Couldn't create memory for Heap.\n");)
- }
+ if (vpx_mm_create_heap_memory() < 0) {
+ _P(printf("[vpx][mm] ERROR vpx_memalign() Couldn't create memory for Heap.\n");)
+ }
- number_aau = ((size + align - 1 + ADDRESS_STORAGE_SIZE) >>
- SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
+ number_aau = ((size + align - 1 + ADDRESS_STORAGE_SIZE) >>
+ SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
- addr = hmm_alloc(&hmm_d, number_aau);
+ addr = hmm_alloc(&hmm_d, number_aau);
#else
- addr = VPX_MALLOC_L(size + align - 1 + ADDRESS_STORAGE_SIZE);
+ addr = VPX_MALLOC_L(size + align - 1 + ADDRESS_STORAGE_SIZE);
#endif /*CONFIG_MEM_MANAGER*/
- if (addr)
- {
- x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
- /* save the actual malloc address */
- ((size_t *)x)[-1] = (size_t)addr;
- }
+ if (addr) {
+ x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
+ /* save the actual malloc address */
+ ((size_t *)x)[-1] = (size_t)addr;
+ }
- return x;
+ return x;
}
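
vpx_memalign over-allocates by align - 1 plus one pointer-sized slot, aligns the user pointer past that slot, and stashes the raw allocation address in the word just below the returned pointer, which is what vpx_free later reads back as ((size_t *)memblk)[-1]. A standalone sketch of the scheme over plain malloc (my_memalign/my_free are illustrative names; the mask expression stands in for the library's align_addr helper and assumes align is a power of two no smaller than sizeof(size_t)):

#include <stdlib.h>

static void *my_memalign(size_t align, size_t size) {
  void *raw = malloc(size + align - 1 + sizeof(size_t));
  unsigned char *user;

  if (raw == NULL)
    return NULL;

  /* Align the address just past the stash slot (align power of two). */
  user = (unsigned char *)(((size_t)raw + sizeof(size_t) + align - 1)
                           & ~(align - 1));
  ((size_t *)user)[-1] = (size_t)raw;   /* stash raw pointer below */
  return user;
}

static void my_free(void *mem) {
  if (mem)
    free((void *)((size_t *)mem)[-1]);  /* recover the raw pointer */
}
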
-void *vpx_malloc(size_t size)
-{
- return vpx_memalign(DEFAULT_ALIGNMENT, size);
+void *vpx_malloc(size_t size) {
+ return vpx_memalign(DEFAULT_ALIGNMENT, size);
}
-void *vpx_calloc(size_t num, size_t size)
-{
- void *x;
+void *vpx_calloc(size_t num, size_t size) {
+ void *x;
- x = vpx_memalign(DEFAULT_ALIGNMENT, num * size);
+ x = vpx_memalign(DEFAULT_ALIGNMENT, num * size);
- if (x)
- VPX_MEMSET_L(x, 0, num * size);
+ if (x)
+ VPX_MEMSET_L(x, 0, num * size);
- return x;
+ return x;
}
-void *vpx_realloc(void *memblk, size_t size)
-{
- void *addr,
- * new_addr = NULL;
- int align = DEFAULT_ALIGNMENT;
-
- /*
- The realloc() function changes the size of the object pointed to by
- ptr to the size specified by size, and returns a pointer to the
- possibly moved block. The contents are unchanged up to the lesser
- of the new and old sizes. If ptr is null, realloc() behaves like
- malloc() for the specified size. If size is zero (0) and ptr is
- not a null pointer, the object pointed to is freed.
- */
- if (!memblk)
- new_addr = vpx_malloc(size);
- else if (!size)
- vpx_free(memblk);
- else
- {
- addr = (void *)(((size_t *)memblk)[-1]);
- memblk = NULL;
+void *vpx_realloc(void *memblk, size_t size) {
+ void *addr,
+ * new_addr = NULL;
+ int align = DEFAULT_ALIGNMENT;
+
+ /*
+ The realloc() function changes the size of the object pointed to by
+ ptr to the size specified by size, and returns a pointer to the
+ possibly moved block. The contents are unchanged up to the lesser
+ of the new and old sizes. If ptr is null, realloc() behaves like
+ malloc() for the specified size. If size is zero (0) and ptr is
+ not a null pointer, the object pointed to is freed.
+ */
+ if (!memblk)
+ new_addr = vpx_malloc(size);
+ else if (!size)
+ vpx_free(memblk);
+ else {
+ addr = (void *)(((size_t *)memblk)[-1]);
+ memblk = NULL;
#if CONFIG_MEM_MANAGER
- new_addr = vpx_mm_realloc(addr, size + align + ADDRESS_STORAGE_SIZE);
+ new_addr = vpx_mm_realloc(addr, size + align + ADDRESS_STORAGE_SIZE);
#else
- new_addr = VPX_REALLOC_L(addr, size + align + ADDRESS_STORAGE_SIZE);
+ new_addr = VPX_REALLOC_L(addr, size + align + ADDRESS_STORAGE_SIZE);
#endif
- if (new_addr)
- {
- addr = new_addr;
- new_addr = (void *)(((size_t)
- ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) &
- (size_t) - align);
- /* save the actual malloc address */
- ((size_t *)new_addr)[-1] = (size_t)addr;
- }
+ if (new_addr) {
+ addr = new_addr;
+ new_addr = (void *)(((size_t)
+ ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) &
+ (size_t) - align);
+ /* save the actual malloc address */
+ ((size_t *)new_addr)[-1] = (size_t)addr;
}
+ }
- return new_addr;
+ return new_addr;
}
-void vpx_free(void *memblk)
-{
- if (memblk)
- {
- void *addr = (void *)(((size_t *)memblk)[-1]);
+void vpx_free(void *memblk) {
+ if (memblk) {
+ void *addr = (void *)(((size_t *)memblk)[-1]);
#if CONFIG_MEM_MANAGER
- hmm_free(&hmm_d, addr);
+ hmm_free(&hmm_d, addr);
#else
- VPX_FREE_L(addr);
+ VPX_FREE_L(addr);
#endif
- }
+ }
}
#if CONFIG_MEM_TRACKER
-void *xvpx_memalign(size_t align, size_t size, char *file, int line)
-{
+void *xvpx_memalign(size_t align, size_t size, char *file, int line) {
#if TRY_BOUNDS_CHECK
- unsigned char *x_bounds;
+ unsigned char *x_bounds;
#endif
- void *x;
+ void *x;
- if (g_alloc_count == 0)
- {
+ if (g_alloc_count == 0) {
#if TRY_BOUNDS_CHECK
- int i_rv = vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE);
+ int i_rv = vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE);
#else
- int i_rv = vpx_memory_tracker_init(0, 0);
+ int i_rv = vpx_memory_tracker_init(0, 0);
#endif
- if (i_rv < 0)
- {
- _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
- }
+ if (i_rv < 0) {
+ _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
}
+ }
#if TRY_BOUNDS_CHECK
- {
- int i;
- unsigned int tempme = BOUNDS_CHECK_VALUE;
-
- x_bounds = vpx_memalign(align, size + (BOUNDS_CHECK_PAD_SIZE * 2));
-
- if (x_bounds)
- {
- /*we're aligning the address twice here but to keep things
- consistent we want to have the padding come before the stored
- address so no matter what free function gets called we will
- attempt to free the correct address*/
- x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
- x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
- (int)align);
- /* save the actual malloc address */
- ((size_t *)x)[-1] = (size_t)x_bounds;
-
- for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int))
- {
- VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
- VPX_MEMCPY_L((unsigned char *)x + size + i,
- &tempme, sizeof(unsigned int));
- }
- }
- else
- x = NULL;
- }
+ {
+ int i;
+ unsigned int tempme = BOUNDS_CHECK_VALUE;
+
+ x_bounds = vpx_memalign(align, size + (BOUNDS_CHECK_PAD_SIZE * 2));
+
+ if (x_bounds) {
+ /* We're aligning the address twice here, but to keep things
+ consistent we want the padding to come before the stored
+ address, so no matter which free function gets called we will
+ attempt to free the correct address. */
+ x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
+ x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
+ (int)align);
+ /* save the actual malloc address */
+ ((size_t *)x)[-1] = (size_t)x_bounds;
+
+ for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int)) {
+ VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
+ VPX_MEMCPY_L((unsigned char *)x + size + i,
+ &tempme, sizeof(unsigned int));
+ }
+ } else
+ x = NULL;
+ }
#else
- x = vpx_memalign(align, size);
+ x = vpx_memalign(align, size);
#endif /*TRY_BOUNDS_CHECK*/
- g_alloc_count++;
+ g_alloc_count++;
- vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
+ vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
- return x;
+ return x;
}
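
The TRY_BOUNDS_CHECK path in xvpx_memalign brackets each tracked allocation with BOUNDS_CHECK_PAD_SIZE bytes of a known pattern on both sides, and vpx_memory_tracker_check_integrity later verifies the pattern to catch under- and overruns. A reduced model of that guard-band idea (fixed 16-byte pads and a size word in place of the tracker's block list; all names here are hypothetical):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define PAD 16
#define GUARD 0xB0

/* Layout: [size_t user size][PAD guard][user bytes][PAD guard] */
static unsigned char *guarded_alloc(size_t size) {
  unsigned char *raw = malloc(sizeof(size_t) + 2 * PAD + size);
  if (raw == NULL)
    return NULL;
  ((size_t *)raw)[0] = size;
  memset(raw + sizeof(size_t), GUARD, PAD);               /* front guard */
  memset(raw + sizeof(size_t) + PAD + size, GUARD, PAD);  /* back guard */
  return raw + sizeof(size_t) + PAD;
}

static int guards_intact(unsigned char *user) {
  size_t size = ((size_t *)(user - PAD))[-1];
  int i;
  for (i = 0; i < PAD; i++)
    if (user[i - PAD] != GUARD || user[size + i] != GUARD)
      return 0;
  return 1;
}

int main(void) {
  unsigned char *p = guarded_alloc(8);
  assert(p != NULL && guards_intact(p));
  p[8] = 0;                        /* one-byte overrun... */
  assert(!guards_intact(p));       /* ...trips the back guard */
  free(p - PAD - sizeof(size_t));
  return 0;
}
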
-void *xvpx_malloc(size_t size, char *file, int line)
-{
- return xvpx_memalign(DEFAULT_ALIGNMENT, size, file, line);
+void *xvpx_malloc(size_t size, char *file, int line) {
+ return xvpx_memalign(DEFAULT_ALIGNMENT, size, file, line);
}
-void *xvpx_calloc(size_t num, size_t size, char *file, int line)
-{
- void *x = xvpx_memalign(DEFAULT_ALIGNMENT, num * size, file, line);
+void *xvpx_calloc(size_t num, size_t size, char *file, int line) {
+ void *x = xvpx_memalign(DEFAULT_ALIGNMENT, num * size, file, line);
- if (x)
- VPX_MEMSET_L(x, 0, num * size);
+ if (x)
+ VPX_MEMSET_L(x, 0, num * size);
- return x;
+ return x;
}
-void *xvpx_realloc(void *memblk, size_t size, char *file, int line)
-{
- struct mem_block *p = NULL;
- int orig_size = 0,
- orig_line = 0;
- char *orig_file = NULL;
+void *xvpx_realloc(void *memblk, size_t size, char *file, int line) {
+ struct mem_block *p = NULL;
+ int orig_size = 0,
+ orig_line = 0;
+ char *orig_file = NULL;
#if TRY_BOUNDS_CHECK
- unsigned char *x_bounds = memblk ?
- (unsigned char *)(((size_t *)memblk)[-1]) :
- NULL;
+ unsigned char *x_bounds = memblk ?
+ (unsigned char *)(((size_t *)memblk)[-1]) :
+ NULL;
#endif
- void *x;
+ void *x;
- if (g_alloc_count == 0)
- {
+ if (g_alloc_count == 0) {
#if TRY_BOUNDS_CHECK
- if (!vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE))
+ if (!vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE))
#else
- if (!vpx_memory_tracker_init(0, 0))
+ if (!vpx_memory_tracker_init(0, 0))
#endif
- {
- _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
- }
- }
-
- if ((p = vpx_memory_tracker_find((size_t)memblk)))
{
- orig_size = p->size;
- orig_file = p->file;
- orig_line = p->line;
+ _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
}
+ }
+
+ if ((p = vpx_memory_tracker_find((size_t)memblk))) {
+ orig_size = p->size;
+ orig_file = p->file;
+ orig_line = p->line;
+ }
#if TRY_BOUNDS_CHECK_ON_FREE
- vpx_memory_tracker_check_integrity(file, line);
+ vpx_memory_tracker_check_integrity(file, line);
#endif
- /* have to do this regardless of success, because
- * the memory that does get realloc'd may change
- * the bounds values of this block
- */
- vpx_memory_tracker_remove((size_t)memblk);
+ /* have to do this regardless of success, because
+ * the memory that does get realloc'd may change
+ * the bounds values of this block
+ */
+ vpx_memory_tracker_remove((size_t)memblk);
#if TRY_BOUNDS_CHECK
- {
- int i;
- unsigned int tempme = BOUNDS_CHECK_VALUE;
-
- x_bounds = vpx_realloc(memblk, size + (BOUNDS_CHECK_PAD_SIZE * 2));
-
- if (x_bounds)
- {
- x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
- x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
- (int)DEFAULT_ALIGNMENT);
- /* save the actual malloc address */
- ((size_t *)x)[-1] = (size_t)x_bounds;
-
- for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int))
- {
- VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
- VPX_MEMCPY_L((unsigned char *)x + size + i,
- &tempme, sizeof(unsigned int));
- }
- }
- else
- x = NULL;
- }
+ {
+ int i;
+ unsigned int tempme = BOUNDS_CHECK_VALUE;
+
+ x_bounds = vpx_realloc(memblk, size + (BOUNDS_CHECK_PAD_SIZE * 2));
+
+ if (x_bounds) {
+ x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
+ x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
+ (int)DEFAULT_ALIGNMENT);
+ /* save the actual malloc address */
+ ((size_t *)x)[-1] = (size_t)x_bounds;
+
+ for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int)) {
+ VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
+ VPX_MEMCPY_L((unsigned char *)x + size + i,
+ &tempme, sizeof(unsigned int));
+ }
+ } else
+ x = NULL;
+ }
#else
- x = vpx_realloc(memblk, size);
+ x = vpx_realloc(memblk, size);
#endif /*TRY_BOUNDS_CHECK*/
- if (!memblk) ++g_alloc_count;
+ if (!memblk) ++g_alloc_count;
- if (x)
- vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
- else
- vpx_memory_tracker_add((size_t)memblk, orig_size, orig_file, orig_line, 1);
+ if (x)
+ vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
+ else
+ vpx_memory_tracker_add((size_t)memblk, orig_size, orig_file, orig_line, 1);
- return x;
+ return x;
}
-void xvpx_free(void *p_address, char *file, int line)
-{
+void xvpx_free(void *p_address, char *file, int line) {
#if TRY_BOUNDS_CHECK
- unsigned char *p_bounds_address = (unsigned char *)p_address;
- /*p_bounds_address -= BOUNDS_CHECK_PAD_SIZE;*/
+ unsigned char *p_bounds_address = (unsigned char *)p_address;
+ /*p_bounds_address -= BOUNDS_CHECK_PAD_SIZE;*/
#endif
#if !TRY_BOUNDS_CHECK_ON_FREE
- (void)file;
- (void)line;
+ (void)file;
+ (void)line;
#endif
- if (p_address)
- {
+ if (p_address) {
#if TRY_BOUNDS_CHECK_ON_FREE
- vpx_memory_tracker_check_integrity(file, line);
+ vpx_memory_tracker_check_integrity(file, line);
#endif
- /* if the addr isn't found in the list, assume it was allocated via
- * vpx_ calls not xvpx_, therefore it does not contain any padding
- */
- if (vpx_memory_tracker_remove((size_t)p_address) == -2)
- {
- p_bounds_address = p_address;
- _P(fprintf(stderr, "[vpx_mem][xvpx_free] addr: %p not found in"
- " list; freed from file:%s"
- " line:%d\n", p_address, file, line));
- }
- else
- --g_alloc_count;
+ /* if the addr isn't found in the list, assume it was allocated via
+ * vpx_ calls not xvpx_, therefore it does not contain any padding
+ */
+ if (vpx_memory_tracker_remove((size_t)p_address) == -2) {
+ p_bounds_address = p_address;
+ _P(fprintf(stderr, "[vpx_mem][xvpx_free] addr: %p not found in"
+ " list; freed from file:%s"
+ " line:%d\n", p_address, file, line));
+ } else
+ --g_alloc_count;
#if TRY_BOUNDS_CHECK
- vpx_free(p_bounds_address);
+ vpx_free(p_bounds_address);
#else
- vpx_free(p_address);
+ vpx_free(p_address);
#endif
- if (!g_alloc_count)
- vpx_memory_tracker_destroy();
- }
+ if (!g_alloc_count)
+ vpx_memory_tracker_destroy();
+ }
}
#endif /*CONFIG_MEM_TRACKER*/
@@ -426,297 +393,265 @@ void xvpx_free(void *p_address, char *file, int line)
#include <task_lib.h> /*for task_delay()*/
/* This function is only used to get a stack trace of the player
object so we can see where we are having a problem. */
-static int get_my_tt(int task)
-{
- tt(task);
+static int get_my_tt(int task) {
+ tt(task);
- return 0;
+ return 0;
}
-static void vx_sleep(int msec)
-{
- int ticks_to_sleep = 0;
+static void vx_sleep(int msec) {
+ int ticks_to_sleep = 0;
- if (msec)
- {
- int msec_per_tick = 1000 / sys_clk_rate_get();
+ if (msec) {
+ int msec_per_tick = 1000 / sys_clk_rate_get();
- if (msec < msec_per_tick)
- ticks_to_sleep++;
- else
- ticks_to_sleep = msec / msec_per_tick;
- }
+ if (msec < msec_per_tick)
+ ticks_to_sleep++;
+ else
+ ticks_to_sleep = msec / msec_per_tick;
+ }
- task_delay(ticks_to_sleep);
+ task_delay(ticks_to_sleep);
}
#endif
#endif
-void *vpx_memcpy(void *dest, const void *source, size_t length)
-{
+void *vpx_memcpy(void *dest, const void *source, size_t length) {
#if CONFIG_MEM_CHECKS
- if (((int)dest < 0x4000) || ((int)source < 0x4000))
- {
- _P(printf("WARNING: vpx_memcpy dest:0x%x source:0x%x len:%d\n", (int)dest, (int)source, length);)
+ if (((int)dest < 0x4000) || ((int)source < 0x4000)) {
+ _P(printf("WARNING: vpx_memcpy dest:0x%x source:0x%x len:%d\n", (int)dest, (int)source, length);)
#if defined(VXWORKS)
- sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+ sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
- vx_sleep(10000);
+ vx_sleep(10000);
#endif
- }
+ }
#endif
- return VPX_MEMCPY_L(dest, source, length);
+ return VPX_MEMCPY_L(dest, source, length);
}
-void *vpx_memset(void *dest, int val, size_t length)
-{
+void *vpx_memset(void *dest, int val, size_t length) {
#if CONFIG_MEM_CHECKS
- if ((int)dest < 0x4000)
- {
- _P(printf("WARNING: vpx_memset dest:0x%x val:%d len:%d\n", (int)dest, val, length);)
+ if ((int)dest < 0x4000) {
+ _P(printf("WARNING: vpx_memset dest:0x%x val:%d len:%d\n", (int)dest, val, length);)
#if defined(VXWORKS)
- sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+ sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
- vx_sleep(10000);
+ vx_sleep(10000);
#endif
- }
+ }
#endif
- return VPX_MEMSET_L(dest, val, length);
+ return VPX_MEMSET_L(dest, val, length);
}
-void *vpx_memmove(void *dest, const void *src, size_t count)
-{
+void *vpx_memmove(void *dest, const void *src, size_t count) {
#if CONFIG_MEM_CHECKS
- if (((int)dest < 0x4000) || ((int)src < 0x4000))
- {
- _P(printf("WARNING: vpx_memmove dest:0x%x src:0x%x count:%d\n", (int)dest, (int)src, count);)
+ if (((int)dest < 0x4000) || ((int)src < 0x4000)) {
+ _P(printf("WARNING: vpx_memmove dest:0x%x src:0x%x count:%d\n", (int)dest, (int)src, count);)
#if defined(VXWORKS)
- sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+ sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
- vx_sleep(10000);
+ vx_sleep(10000);
#endif
- }
+ }
#endif
- return VPX_MEMMOVE_L(dest, src, count);
+ return VPX_MEMMOVE_L(dest, src, count);
}
#if CONFIG_MEM_MANAGER
-static int vpx_mm_create_heap_memory()
-{
- int i_rv = 0;
+static int vpx_mm_create_heap_memory() {
+ int i_rv = 0;
- if (!g_mng_memory_allocated)
- {
+ if (!g_mng_memory_allocated) {
#if MM_DYNAMIC_MEMORY
- g_p_mng_memory_raw =
- (unsigned char *)malloc(g_mm_memory_size + HMM_ADDR_ALIGN_UNIT);
-
- if (g_p_mng_memory_raw)
- {
- g_p_mng_memory = (unsigned char *)((((unsigned int)g_p_mng_memory_raw) +
- HMM_ADDR_ALIGN_UNIT - 1) &
- -(int)HMM_ADDR_ALIGN_UNIT);
-
- _P(printf("[vpx][mm] total memory size:%d g_p_mng_memory_raw:0x%x g_p_mng_memory:0x%x\n"
- , g_mm_memory_size + HMM_ADDR_ALIGN_UNIT
- , (unsigned int)g_p_mng_memory_raw
- , (unsigned int)g_p_mng_memory);)
- }
- else
- {
- _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
- , g_mm_memory_size);)
-
- i_rv = -1;
- }
+ g_p_mng_memory_raw =
+ (unsigned char *)malloc(g_mm_memory_size + HMM_ADDR_ALIGN_UNIT);
+
+ if (g_p_mng_memory_raw) {
+ g_p_mng_memory = (unsigned char *)((((unsigned int)g_p_mng_memory_raw) +
+ HMM_ADDR_ALIGN_UNIT - 1) &
+ -(int)HMM_ADDR_ALIGN_UNIT);
+
+ _P(printf("[vpx][mm] total memory size:%d g_p_mng_memory_raw:0x%x g_p_mng_memory:0x%x\n"
+, g_mm_memory_size + HMM_ADDR_ALIGN_UNIT
+, (unsigned int)g_p_mng_memory_raw
+, (unsigned int)g_p_mng_memory);)
+ } else {
+ _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
+, g_mm_memory_size);)
+
+ i_rv = -1;
+ }
- if (g_p_mng_memory)
+ if (g_p_mng_memory)
#endif
- {
- int chunk_size = 0;
+ {
+ int chunk_size = 0;
- g_mng_memory_allocated = 1;
+ g_mng_memory_allocated = 1;
- hmm_init(&hmm_d);
+ hmm_init(&hmm_d);
- chunk_size = g_mm_memory_size >> SHIFT_HMM_ADDR_ALIGN_UNIT;
+ chunk_size = g_mm_memory_size >> SHIFT_HMM_ADDR_ALIGN_UNIT;
- chunk_size -= DUMMY_END_BLOCK_BAUS;
+ chunk_size -= DUMMY_END_BLOCK_BAUS;
- _P(printf("[vpx][mm] memory size:%d for vpx memory manager. g_p_mng_memory:0x%x chunk_size:%d\n"
- , g_mm_memory_size
- , (unsigned int)g_p_mng_memory
- , chunk_size);)
+ _P(printf("[vpx][mm] memory size:%d for vpx memory manager. g_p_mng_memory:0x%x chunk_size:%d\n"
+, g_mm_memory_size
+, (unsigned int)g_p_mng_memory
+, chunk_size);)
- hmm_new_chunk(&hmm_d, (void *)g_p_mng_memory, chunk_size);
- }
+ hmm_new_chunk(&hmm_d, (void *)g_p_mng_memory, chunk_size);
+ }
#if MM_DYNAMIC_MEMORY
- else
- {
- _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
- , g_mm_memory_size);)
+ else {
+ _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
+, g_mm_memory_size);)
- i_rv = -1;
- }
+ i_rv = -1;
+ }
#endif
- }
+ }
- return i_rv;
+ return i_rv;
}
-static void *vpx_mm_realloc(void *memblk, size_t size)
-{
- void *p_ret = NULL;
+static void *vpx_mm_realloc(void *memblk, size_t size) {
+ void *p_ret = NULL;
- if (vpx_mm_create_heap_memory() < 0)
- {
- _P(printf("[vpx][mm] ERROR vpx_mm_realloc() Couldn't create memory for Heap.\n");)
- }
- else
- {
- int i_rv = 0;
- int old_num_aaus;
- int new_num_aaus;
+ if (vpx_mm_create_heap_memory() < 0) {
+ _P(printf("[vpx][mm] ERROR vpx_mm_realloc() Couldn't create memory for Heap.\n");)
+ } else {
+ int i_rv = 0;
+ int old_num_aaus;
+ int new_num_aaus;
+
+ old_num_aaus = hmm_true_size(memblk);
+ new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
+
+ if (old_num_aaus == new_num_aaus) {
+ p_ret = memblk;
+ } else {
+ i_rv = hmm_resize(&hmm_d, memblk, new_num_aaus);
+
+ if (i_rv == 0) {
+ p_ret = memblk;
+ } else {
+ /* Error. Try to malloc and then copy data. */
+ void *p_from_malloc;
- old_num_aaus = hmm_true_size(memblk);
new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
+ p_from_malloc = hmm_alloc(&hmm_d, new_num_aaus);
- if (old_num_aaus == new_num_aaus)
- {
- p_ret = memblk;
- }
- else
- {
- i_rv = hmm_resize(&hmm_d, memblk, new_num_aaus);
-
- if (i_rv == 0)
- {
- p_ret = memblk;
- }
- else
- {
- /* Error. Try to malloc and then copy data. */
- void *p_from_malloc;
-
- new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
- p_from_malloc = hmm_alloc(&hmm_d, new_num_aaus);
-
- if (p_from_malloc)
- {
- vpx_memcpy(p_from_malloc, memblk, size);
- hmm_free(&hmm_d, memblk);
-
- p_ret = p_from_malloc;
- }
- }
+ if (p_from_malloc) {
+ vpx_memcpy(p_from_malloc, memblk, size);
+ hmm_free(&hmm_d, memblk);
+
+ p_ret = p_from_malloc;
}
+ }
}
+ }
- return p_ret;
+ return p_ret;
}
#endif /*CONFIG_MEM_MANAGER*/
#if USE_GLOBAL_FUNCTION_POINTERS
# if CONFIG_MEM_TRACKER
extern int vpx_memory_tracker_set_functions(g_malloc_func g_malloc_l
- , g_calloc_func g_calloc_l
- , g_realloc_func g_realloc_l
- , g_free_func g_free_l
- , g_memcpy_func g_memcpy_l
- , g_memset_func g_memset_l
- , g_memmove_func g_memmove_l);
+, g_calloc_func g_calloc_l
+, g_realloc_func g_realloc_l
+, g_free_func g_free_l
+, g_memcpy_func g_memcpy_l
+, g_memset_func g_memset_l
+, g_memmove_func g_memmove_l);
# endif
#endif /*USE_GLOBAL_FUNCTION_POINTERS*/
int vpx_mem_set_functions(g_malloc_func g_malloc_l
- , g_calloc_func g_calloc_l
- , g_realloc_func g_realloc_l
- , g_free_func g_free_l
- , g_memcpy_func g_memcpy_l
- , g_memset_func g_memset_l
- , g_memmove_func g_memmove_l)
-{
+, g_calloc_func g_calloc_l
+, g_realloc_func g_realloc_l
+, g_free_func g_free_l
+, g_memcpy_func g_memcpy_l
+, g_memset_func g_memset_l
+, g_memmove_func g_memmove_l) {
#if USE_GLOBAL_FUNCTION_POINTERS
- /* If use global functions is turned on then the
- application must set the global functions before
- it does anything else or vpx_mem will have
- unpredictable results. */
- if (!g_func)
- {
- g_func = (struct GLOBAL_FUNC_POINTERS *)
- g_malloc_l(sizeof(struct GLOBAL_FUNC_POINTERS));
+ /* If USE_GLOBAL_FUNCTION_POINTERS is turned on, the
+ application must set the global functions before
+ it does anything else, or vpx_mem will have
+ unpredictable results. */
+ if (!g_func) {
+ g_func = (struct GLOBAL_FUNC_POINTERS *)
+ g_malloc_l(sizeof(struct GLOBAL_FUNC_POINTERS));
- if (!g_func)
- {
- return -1;
- }
+ if (!g_func) {
+ return -1;
}
+ }
#if CONFIG_MEM_TRACKER
- {
- int rv = 0;
- rv = vpx_memory_tracker_set_functions(g_malloc_l
- , g_calloc_l
- , g_realloc_l
- , g_free_l
- , g_memcpy_l
- , g_memset_l
- , g_memmove_l);
-
- if (rv < 0)
- {
- return rv;
- }
+ {
+ int rv = 0;
+ rv = vpx_memory_tracker_set_functions(g_malloc_l
+, g_calloc_l
+, g_realloc_l
+, g_free_l
+, g_memcpy_l
+, g_memset_l
+, g_memmove_l);
+
+ if (rv < 0) {
+ return rv;
}
+ }
#endif
- g_func->g_malloc = g_malloc_l;
- g_func->g_calloc = g_calloc_l;
- g_func->g_realloc = g_realloc_l;
- g_func->g_free = g_free_l;
- g_func->g_memcpy = g_memcpy_l;
- g_func->g_memset = g_memset_l;
- g_func->g_memmove = g_memmove_l;
+ g_func->g_malloc = g_malloc_l;
+ g_func->g_calloc = g_calloc_l;
+ g_func->g_realloc = g_realloc_l;
+ g_func->g_free = g_free_l;
+ g_func->g_memcpy = g_memcpy_l;
+ g_func->g_memset = g_memset_l;
+ g_func->g_memmove = g_memmove_l;
- return 0;
+ return 0;
#else
- (void)g_malloc_l;
- (void)g_calloc_l;
- (void)g_realloc_l;
- (void)g_free_l;
- (void)g_memcpy_l;
- (void)g_memset_l;
- (void)g_memmove_l;
- return -1;
+ (void)g_malloc_l;
+ (void)g_calloc_l;
+ (void)g_realloc_l;
+ (void)g_free_l;
+ (void)g_memcpy_l;
+ (void)g_memset_l;
+ (void)g_memmove_l;
+ return -1;
#endif
}
-int vpx_mem_unset_functions()
-{
+int vpx_mem_unset_functions() {
#if USE_GLOBAL_FUNCTION_POINTERS
- if (g_func)
- {
- g_free_func temp_free = g_func->g_free;
- temp_free(g_func);
- g_func = NULL;
- }
+ if (g_func) {
+ g_free_func temp_free = g_func->g_free;
+ temp_free(g_func);
+ g_func = NULL;
+ }
#endif
- return 0;
+ return 0;
}
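
When USE_GLOBAL_FUNCTION_POINTERS is enabled, vpx_mem_set_functions has to be the first vpx_mem call in the process, since the g_func table is itself allocated with the malloc being installed. A usage sketch wiring the libc routines in (the include path is an assumption; the signatures match the g_*_func typedefs in vpx_mem.h):

#include <stdlib.h>
#include <string.h>
#include "vpx_mem/vpx_mem.h"

int wire_up_default_allocators(void) {
  /* Install the libc routines as the global vpx_mem backends.  Must
  ** precede every other vpx_mem call when global pointers are on. */
  if (vpx_mem_set_functions(malloc, calloc, realloc, free,
                            memcpy, memset, memmove) < 0)
    return -1;

  /* ... use vpx_malloc()/vpx_free() as usual ... */

  return vpx_mem_unset_functions();   /* frees the g_func table */
}
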
diff --git a/libvpx/vpx_mem/vpx_mem.h b/libvpx/vpx_mem/vpx_mem.h
index 749eaa4..c7321a9 100644
--- a/libvpx/vpx_mem/vpx_mem.h
+++ b/libvpx/vpx_mem/vpx_mem.h
@@ -12,6 +12,7 @@
#ifndef __VPX_MEM_H__
#define __VPX_MEM_H__
+#include "vpx_config.h"
#if defined(__uClinux__)
# include <lddk.h>
#endif
@@ -30,11 +31,11 @@
#endif
#ifndef VPX_CHECK_MEM_FUNCTIONS
# define VPX_CHECK_MEM_FUNCTIONS 0 /* enable basic safety checks in _memcpy,
- _memset, and _memmove */
+_memset, and _memmove */
#endif
#ifndef REPLACE_BUILTIN_FUNCTIONS
# define REPLACE_BUILTIN_FUNCTIONS 0 /* replace builtin functions with their
- vpx_ equivalents */
+vpx_ equivalents */
#endif
#include <stdlib.h>
@@ -44,70 +45,63 @@
extern "C" {
#endif
- /*
- vpx_mem_get_version()
- provided for runtime version checking. Returns an unsigned int of the form
- CHIEF | MAJOR | MINOR | PATCH, where the chief version number is the high
- order byte.
- */
- unsigned int vpx_mem_get_version(void);
-
- /*
- vpx_mem_set_heap_size(size_t size)
- size - size in bytes for the memory manager to allocate for its heap
- Sets the memory manager's initial heap size
- Return:
- 0: on success
- -1: if memory manager calls have not been included in the vpx_mem lib
- -2: if the memory manager has been compiled to use static memory
- -3: if the memory manager has already allocated its heap
- */
- int vpx_mem_set_heap_size(size_t size);
-
- void *vpx_memalign(size_t align, size_t size);
- void *vpx_malloc(size_t size);
- void *vpx_calloc(size_t num, size_t size);
- void *vpx_realloc(void *memblk, size_t size);
- void vpx_free(void *memblk);
-
- void *vpx_memcpy(void *dest, const void *src, size_t length);
- void *vpx_memset(void *dest, int val, size_t length);
- void *vpx_memmove(void *dest, const void *src, size_t count);
-
- /* special memory functions */
- void *vpx_mem_alloc(int id, size_t size, size_t align);
- void vpx_mem_free(int id, void *mem, size_t size);
-
- /* Wrappers to standard library functions. */
- typedef void*(* g_malloc_func)(size_t);
- typedef void*(* g_calloc_func)(size_t, size_t);
- typedef void*(* g_realloc_func)(void *, size_t);
- typedef void (* g_free_func)(void *);
- typedef void*(* g_memcpy_func)(void *, const void *, size_t);
- typedef void*(* g_memset_func)(void *, int, size_t);
- typedef void*(* g_memmove_func)(void *, const void *, size_t);
-
- int vpx_mem_set_functions(g_malloc_func g_malloc_l
- , g_calloc_func g_calloc_l
- , g_realloc_func g_realloc_l
- , g_free_func g_free_l
- , g_memcpy_func g_memcpy_l
- , g_memset_func g_memset_l
- , g_memmove_func g_memmove_l);
- int vpx_mem_unset_functions(void);
-
-
- /* some defines for backward compatibility */
+ /*
+ vpx_mem_get_version()
+ provided for runtime version checking. Returns an unsigned int of the form
+ CHIEF | MAJOR | MINOR | PATCH, where the chief version number is the high
+ order byte.
+ */
+ unsigned int vpx_mem_get_version(void);
+
+ /*
+ vpx_mem_set_heap_size(size_t size)
+ size - size in bytes for the memory manager to allocate for its heap
+ Sets the memory manager's initial heap size
+ Return:
+ 0: on success
+ -1: if memory manager calls have not been included in the vpx_mem lib
+ -2: if the memory manager has been compiled to use static memory
+ -3: if the memory manager has already allocated its heap
+ */
+ int vpx_mem_set_heap_size(size_t size);
+
+ void *vpx_memalign(size_t align, size_t size);
+ void *vpx_malloc(size_t size);
+ void *vpx_calloc(size_t num, size_t size);
+ void *vpx_realloc(void *memblk, size_t size);
+ void vpx_free(void *memblk);
+
+ void *vpx_memcpy(void *dest, const void *src, size_t length);
+ void *vpx_memset(void *dest, int val, size_t length);
+ void *vpx_memmove(void *dest, const void *src, size_t count);
+
+ /* special memory functions */
+ void *vpx_mem_alloc(int id, size_t size, size_t align);
+ void vpx_mem_free(int id, void *mem, size_t size);
+
+ /* Wrappers to standard library functions. */
+ typedef void *(* g_malloc_func)(size_t);
+ typedef void *(* g_calloc_func)(size_t, size_t);
+ typedef void *(* g_realloc_func)(void *, size_t);
+ typedef void (* g_free_func)(void *);
+ typedef void *(* g_memcpy_func)(void *, const void *, size_t);
+ typedef void *(* g_memset_func)(void *, int, size_t);
+ typedef void *(* g_memmove_func)(void *, const void *, size_t);
+
+ int vpx_mem_set_functions(g_malloc_func g_malloc_l
+, g_calloc_func g_calloc_l
+, g_realloc_func g_realloc_l
+, g_free_func g_free_l
+, g_memcpy_func g_memcpy_l
+, g_memset_func g_memset_l
+, g_memmove_func g_memmove_l);
+ int vpx_mem_unset_functions(void);
+
+
+ /* some defines for backward compatibility */
#define DMEM_GENERAL 0
-#define duck_memalign(X,Y,Z) vpx_memalign(X,Y)
-#define duck_malloc(X,Y) vpx_malloc(X)
-#define duck_calloc(X,Y,Z) vpx_calloc(X,Y)
-#define duck_realloc vpx_realloc
-#define duck_free vpx_free
-#define duck_memcpy vpx_memcpy
-#define duck_memmove vpx_memmove
-#define duck_memset vpx_memset
+// (*)<
#if REPLACE_BUILTIN_FUNCTIONS
# ifndef __VPX_MEM_C__
@@ -124,13 +118,13 @@ extern "C" {
#if CONFIG_MEM_TRACKER
#include <stdarg.h>
- /*from vpx_mem/vpx_mem_tracker.c*/
- extern void vpx_memory_tracker_dump();
- extern void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
- extern int vpx_memory_tracker_set_log_type(int type, char *option);
- extern int vpx_memory_tracker_set_log_func(void *userdata,
- void(*logfunc)(void *userdata,
- const char *fmt, va_list args));
+ /*from vpx_mem/vpx_mem_tracker.c*/
+ extern void vpx_memory_tracker_dump();
+ extern void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
+ extern int vpx_memory_tracker_set_log_type(int type, char *option);
+ extern int vpx_memory_tracker_set_log_func(void *userdata,
+ void(*logfunc)(void *userdata,
+ const char *fmt, va_list args));
# ifndef __VPX_MEM_C__
# define vpx_memalign(align, size) xvpx_memalign((align), (size), __FILE__, __LINE__)
# define vpx_malloc(size) xvpx_malloc((size), __FILE__, __LINE__)
@@ -142,13 +136,13 @@ extern "C" {
# define vpx_mem_free(id,mem,size) xvpx_mem_free(id, mem, size, __FILE__, __LINE__)
# endif
- void *xvpx_memalign(size_t align, size_t size, char *file, int line);
- void *xvpx_malloc(size_t size, char *file, int line);
- void *xvpx_calloc(size_t num, size_t size, char *file, int line);
- void *xvpx_realloc(void *memblk, size_t size, char *file, int line);
- void xvpx_free(void *memblk, char *file, int line);
- void *xvpx_mem_alloc(int id, size_t size, size_t align, char *file, int line);
- void xvpx_mem_free(int id, void *mem, size_t size, char *file, int line);
+ void *xvpx_memalign(size_t align, size_t size, char *file, int line);
+ void *xvpx_malloc(size_t size, char *file, int line);
+ void *xvpx_calloc(size_t num, size_t size, char *file, int line);
+ void *xvpx_realloc(void *memblk, size_t size, char *file, int line);
+ void xvpx_free(void *memblk, char *file, int line);
+ void *xvpx_mem_alloc(int id, size_t size, size_t align, char *file, int line);
+ void xvpx_mem_free(int id, void *mem, size_t size, char *file, int line);
#else
# ifndef __VPX_MEM_C__
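
Note on the header above: with CONFIG_MEM_TRACKER set, vpx_malloc() and friends are redirected through the xvpx_* wrappers so every allocation records its call site. A simplified, self-contained model of that __FILE__/__LINE__ capture (not the library's actual implementation, just the macro mechanism it relies on):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified model of the vpx_malloc -> xvpx_malloc redirection
     * performed by the header when CONFIG_MEM_TRACKER is on. */
    static void *xmalloc_traced(size_t size, const char *file, int line) {
      void *p = malloc(size);
      fprintf(stderr, "alloc %zu bytes at %p (%s:%d)\n", size, p, file, line);
      return p;
    }
    #define my_malloc(size) xmalloc_traced((size), __FILE__, __LINE__)

    int main(void) {
      void *p = my_malloc(32);  /* logs this file and line */
      free(p);
      return 0;
    }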
diff --git a/libvpx/vpx_mem/vpx_mem_tracker.c b/libvpx/vpx_mem/vpx_mem_tracker.c
index b37076e..613e8a1 100644
--- a/libvpx/vpx_mem/vpx_mem_tracker.c
+++ b/libvpx/vpx_mem/vpx_mem_tracker.c
@@ -22,7 +22,7 @@
in the memory_tracker struct as well as calls to create/destroy/lock/unlock
the mutex in vpx_memory_tracker_init/Destroy and memory_tracker_lock_mutex/unlock_mutex
*/
-#include "vpx_config.h"
+#include "./vpx_config.h"
#if defined(__uClinux__)
# include <lddk.h>
@@ -40,20 +40,20 @@
#include <stdio.h>
#include <stdlib.h>
-#include <string.h> //VXWORKS doesn't have a malloc/memory.h file,
-//this should pull in malloc,free,etc.
+#include <string.h> // VXWORKS doesn't have a malloc/memory.h file,
+// this should pull in malloc,free,etc.
#include <stdarg.h>
#include "include/vpx_mem_tracker.h"
-#undef vpx_malloc //undefine any vpx_mem macros that may affect calls to
-#undef vpx_free //memory functions in this file
+#undef vpx_malloc // undefine any vpx_mem macros that may affect calls to
+#undef vpx_free // memory functions in this file
#undef vpx_memcpy
#undef vpx_memset
#ifndef USE_GLOBAL_FUNCTION_POINTERS
-# define USE_GLOBAL_FUNCTION_POINTERS 0 //use function pointers instead of compiled functions.
+# define USE_GLOBAL_FUNCTION_POINTERS 0 // use function pointers instead of compiled functions.
#endif
#if USE_GLOBAL_FUNCTION_POINTERS
@@ -94,39 +94,37 @@ static int memory_tracker_unlock_mutex();
#endif
#ifndef VPX_NO_GLOBALS
-struct memory_tracker
-{
- struct mem_block *head,
- * tail;
- int len,
- totalsize;
- unsigned int current_allocated,
- max_allocated;
+struct memory_tracker {
+ struct mem_block *head,
+ * tail;
+ int len,
+ totalsize;
+ unsigned int current_allocated,
+ max_allocated;
#if HAVE_PTHREAD_H
- pthread_mutex_t mutex;
+ pthread_mutex_t mutex;
#elif defined(WIN32) || defined(_WIN32_WCE)
- HANDLE mutex;
+ HANDLE mutex;
#elif defined(VXWORKS)
- SEM_ID mutex;
+ SEM_ID mutex;
#elif defined(NO_MUTEX)
#else
#error "No mutex type defined for this platform!"
#endif
- int padding_size,
- pad_value;
+ int padding_size,
+ pad_value;
};
-static struct memory_tracker memtrack; //our global memory allocation list
-static int g_b_mem_tracker_inited = 0; //indicates whether the global list has
-//been initialized (1:yes/0:no)
-static struct
-{
- FILE *file;
- int type;
- void (*func)(void *userdata, const char *fmt, va_list args);
- void *userdata;
+static struct memory_tracker memtrack; // our global memory allocation list
+static int g_b_mem_tracker_inited = 0; // indicates whether the global list has
+// been initialized (1:yes/0:no)
+static struct {
+ FILE *file;
+ int type;
+ void (*func)(void *userdata, const char *fmt, va_list args);
+ void *userdata;
} g_logging = {NULL, 0, NULL, NULL};
#else
# include "vpx_global_handling.h"
@@ -157,60 +155,54 @@ extern void *vpx_memset(void *dest, int val, size_t length);
Initializes global memory tracker structure
Allocates the head of the list
*/
-int vpx_memory_tracker_init(int padding_size, int pad_value)
-{
- if (!g_b_mem_tracker_inited)
- {
- if ((memtrack.head = (struct mem_block *)
- MEM_TRACK_MALLOC(sizeof(struct mem_block))))
- {
- int ret;
+int vpx_memory_tracker_init(int padding_size, int pad_value) {
+ if (!g_b_mem_tracker_inited) {
+ if ((memtrack.head = (struct mem_block *)
+ MEM_TRACK_MALLOC(sizeof(struct mem_block)))) {
+ int ret;
- MEM_TRACK_MEMSET(memtrack.head, 0, sizeof(struct mem_block));
+ MEM_TRACK_MEMSET(memtrack.head, 0, sizeof(struct mem_block));
- memtrack.tail = memtrack.head;
+ memtrack.tail = memtrack.head;
- memtrack.current_allocated = 0;
- memtrack.max_allocated = 0;
+ memtrack.current_allocated = 0;
+ memtrack.max_allocated = 0;
- memtrack.padding_size = padding_size;
- memtrack.pad_value = pad_value;
+ memtrack.padding_size = padding_size;
+ memtrack.pad_value = pad_value;
#if HAVE_PTHREAD_H
- ret = pthread_mutex_init(&memtrack.mutex,
- NULL); /*mutex attributes (NULL=default)*/
+ ret = pthread_mutex_init(&memtrack.mutex,
+ NULL); /*mutex attributes (NULL=default)*/
#elif defined(WIN32) || defined(_WIN32_WCE)
- memtrack.mutex = CreateMutex(NULL, /*security attributes*/
- FALSE, /*we don't want initial ownership*/
- NULL); /*mutex name*/
- ret = !memtrack.mutex;
+ memtrack.mutex = CreateMutex(NULL, /*security attributes*/
+ FALSE, /*we don't want initial ownership*/
+ NULL); /*mutex name*/
+ ret = !memtrack.mutex;
#elif defined(VXWORKS)
- memtrack.mutex = sem_bcreate(SEM_Q_FIFO, /*SEM_Q_FIFO non-priority based mutex*/
- SEM_FULL); /*SEM_FULL initial state is unlocked*/
- ret = !memtrack.mutex;
+ memtrack.mutex = sem_bcreate(SEM_Q_FIFO, /*SEM_Q_FIFO non-priority based mutex*/
+ SEM_FULL); /*SEM_FULL initial state is unlocked*/
+ ret = !memtrack.mutex;
#elif defined(NO_MUTEX)
- ret = 0;
+ ret = 0;
#endif
- if (ret)
- {
- memtrack_log("vpx_memory_tracker_init: Error creating mutex!\n");
-
- MEM_TRACK_FREE(memtrack.head);
- memtrack.head = NULL;
- }
- else
- {
- memtrack_log("Memory Tracker init'd, v."vpx_mem_tracker_version" pad_size:%d pad_val:0x%x %d\n"
- , padding_size
- , pad_value
- , pad_value);
- g_b_mem_tracker_inited = 1;
- }
- }
+ if (ret) {
+ memtrack_log("vpx_memory_tracker_init: Error creating mutex!\n");
+
+ MEM_TRACK_FREE(memtrack.head);
+ memtrack.head = NULL;
+ } else {
+ memtrack_log("Memory Tracker init'd, v."vpx_mem_tracker_version" pad_size:%d pad_val:0x%x %d\n"
+, padding_size
+, pad_value
+, pad_value);
+ g_b_mem_tracker_inited = 1;
+ }
}
+ }
- return g_b_mem_tracker_inited;
+ return g_b_mem_tracker_inited;
}
/*
@@ -218,39 +210,35 @@ int vpx_memory_tracker_init(int padding_size, int pad_value)
  If our global struct was initialized, zeros out all its members,
  frees memory and destroys its mutex
*/
-void vpx_memory_tracker_destroy()
-{
- if (!memory_tracker_lock_mutex())
- {
- struct mem_block *p = memtrack.head,
- * p2 = memtrack.head;
+void vpx_memory_tracker_destroy() {
+ if (!memory_tracker_lock_mutex()) {
+ struct mem_block *p = memtrack.head,
+ * p2 = memtrack.head;
- memory_tracker_dump();
+ memory_tracker_dump();
- while (p)
- {
- p2 = p;
- p = p->next;
+ while (p) {
+ p2 = p;
+ p = p->next;
- MEM_TRACK_FREE(p2);
- }
+ MEM_TRACK_FREE(p2);
+ }
- memtrack.head = NULL;
- memtrack.tail = NULL;
- memtrack.len = 0;
- memtrack.current_allocated = 0;
- memtrack.max_allocated = 0;
+ memtrack.head = NULL;
+ memtrack.tail = NULL;
+ memtrack.len = 0;
+ memtrack.current_allocated = 0;
+ memtrack.max_allocated = 0;
- if (!g_logging.type && g_logging.file && g_logging.file != stderr)
- {
- fclose(g_logging.file);
- g_logging.file = NULL;
- }
+ if (!g_logging.type && g_logging.file && g_logging.file != stderr) {
+ fclose(g_logging.file);
+ g_logging.file = NULL;
+ }
- memory_tracker_unlock_mutex();
+ memory_tracker_unlock_mutex();
- g_b_mem_tracker_inited = 0;
- }
+ g_b_mem_tracker_inited = 0;
+ }
}
/*
@@ -265,9 +253,8 @@ void vpx_memory_tracker_destroy()
*/
void vpx_memory_tracker_add(size_t addr, unsigned int size,
char *file, unsigned int line,
- int padded)
-{
- memory_tracker_add(addr, size, file, line, padded);
+ int padded) {
+ memory_tracker_add(addr, size, file, line, padded);
}
/*
@@ -278,9 +265,8 @@ void vpx_memory_tracker_add(size_t addr, unsigned int size,
Return:
Same as described for memory_tracker_remove
*/
-int vpx_memory_tracker_remove(size_t addr)
-{
- return memory_tracker_remove(addr);
+int vpx_memory_tracker_remove(size_t addr) {
+ return memory_tracker_remove(addr);
}
/*
@@ -290,17 +276,15 @@ int vpx_memory_tracker_remove(size_t addr)
If found, pointer to the memory block that matches addr
NULL otherwise
*/
-struct mem_block *vpx_memory_tracker_find(size_t addr)
-{
- struct mem_block *p = NULL;
-
- if (!memory_tracker_lock_mutex())
- {
- p = memory_tracker_find(addr);
- memory_tracker_unlock_mutex();
- }
+struct mem_block *vpx_memory_tracker_find(size_t addr) {
+ struct mem_block *p = NULL;
- return p;
+ if (!memory_tracker_lock_mutex()) {
+ p = memory_tracker_find(addr);
+ memory_tracker_unlock_mutex();
+ }
+
+ return p;
}
/*
@@ -309,13 +293,11 @@ struct mem_block *vpx_memory_tracker_find(size_t addr)
library function to dump the current contents of the
global memory allocation list
*/
-void vpx_memory_tracker_dump()
-{
- if (!memory_tracker_lock_mutex())
- {
- memory_tracker_dump();
- memory_tracker_unlock_mutex();
- }
+void vpx_memory_tracker_dump() {
+ if (!memory_tracker_lock_mutex()) {
+ memory_tracker_dump();
+ memory_tracker_unlock_mutex();
+ }
}
/*
@@ -326,13 +308,11 @@ void vpx_memory_tracker_dump()
integrity check function to inspect every address in the global
memory allocation list
*/
-void vpx_memory_tracker_check_integrity(char *file, unsigned int line)
-{
- if (!memory_tracker_lock_mutex())
- {
- memory_tracker_check_integrity(file, line);
- memory_tracker_unlock_mutex();
- }
+void vpx_memory_tracker_check_integrity(char *file, unsigned int line) {
+ if (!memory_tracker_lock_mutex()) {
+ memory_tracker_check_integrity(file, line);
+ memory_tracker_unlock_mutex();
+ }
}
/*
@@ -344,43 +324,38 @@ void vpx_memory_tracker_check_integrity(char *file, unsigned int line)
-1: if the logging type could not be set, because the value was invalid
or because a file could not be opened
*/
-int vpx_memory_tracker_set_log_type(int type, char *option)
-{
- int ret = -1;
+int vpx_memory_tracker_set_log_type(int type, char *option) {
+ int ret = -1;
- switch (type)
- {
+ switch (type) {
case 0:
- g_logging.type = 0;
+ g_logging.type = 0;
- if (!option)
- {
- g_logging.file = stderr;
- ret = 0;
- }
- else
- {
- if ((g_logging.file = fopen((char *)option, "w")))
- ret = 0;
- }
+ if (!option) {
+ g_logging.file = stderr;
+ ret = 0;
+ } else {
+ if ((g_logging.file = fopen((char *)option, "w")))
+ ret = 0;
+ }
- break;
+ break;
#if defined(WIN32) && !defined(_WIN32_WCE)
case 1:
- g_logging.type = type;
- ret = 0;
- break;
+ g_logging.type = type;
+ ret = 0;
+ break;
#endif
default:
- break;
- }
+ break;
+ }
- //output the version to the new logging destination
- if (!ret)
- memtrack_log("Memory Tracker logging initialized, "
- "Memory Tracker v."vpx_mem_tracker_version"\n");
+ // output the version to the new logging destination
+ if (!ret)
+ memtrack_log("Memory Tracker logging initialized, "
+ "Memory Tracker v."vpx_mem_tracker_version"\n");
- return ret;
+ return ret;
}
/*
@@ -392,24 +367,22 @@ int vpx_memory_tracker_set_log_type(int type, char *option)
*/
int vpx_memory_tracker_set_log_func(void *userdata,
void(*logfunc)(void *userdata,
- const char *fmt, va_list args))
-{
- int ret = -1;
-
- if (logfunc)
- {
- g_logging.type = -1;
- g_logging.userdata = userdata;
- g_logging.func = logfunc;
- ret = 0;
- }
-
- //output the version to the new logging destination
- if (!ret)
- memtrack_log("Memory Tracker logging initialized, "
- "Memory Tracker v."vpx_mem_tracker_version"\n");
-
- return ret;
+ const char *fmt, va_list args)) {
+ int ret = -1;
+
+ if (logfunc) {
+ g_logging.type = -1;
+ g_logging.userdata = userdata;
+ g_logging.func = logfunc;
+ ret = 0;
+ }
+
+ // output the version to the new logging destination
+ if (!ret)
+ memtrack_log("Memory Tracker logging initialized, "
+ "Memory Tracker v."vpx_mem_tracker_version"\n");
+
+ return ret;
}
/*
@@ -425,79 +398,73 @@ int vpx_memory_tracker_set_log_func(void *userdata,
*
*/
-static void memtrack_log(const char *fmt, ...)
-{
- va_list list;
+static void memtrack_log(const char *fmt, ...) {
+ va_list list;
- va_start(list, fmt);
+ va_start(list, fmt);
- switch (g_logging.type)
- {
+ switch (g_logging.type) {
case -1:
- if (g_logging.func)
- g_logging.func(g_logging.userdata, fmt, list);
+ if (g_logging.func)
+ g_logging.func(g_logging.userdata, fmt, list);
- break;
+ break;
case 0:
- if (g_logging.file)
- {
- vfprintf(g_logging.file, fmt, list);
- fflush(g_logging.file);
- }
+ if (g_logging.file) {
+ vfprintf(g_logging.file, fmt, list);
+ fflush(g_logging.file);
+ }
- break;
+ break;
#if defined(WIN32) && !defined(_WIN32_WCE)
- case 1:
- {
- char temp[1024];
- _vsnprintf(temp, sizeof(temp) / sizeof(char) - 1, fmt, list);
- OutputDebugString(temp);
+ case 1: {
+ char temp[1024];
+ _vsnprintf(temp, sizeof(temp) / sizeof(char) - 1, fmt, list);
+ OutputDebugString(temp);
}
break;
#endif
default:
- break;
- }
+ break;
+ }
- va_end(list);
+ va_end(list);
}
/*
memory_tracker_dump()
Dumps the current contents of the global memory allocation list
*/
-static void memory_tracker_dump()
-{
- int i = 0;
- struct mem_block *p = (memtrack.head ? memtrack.head->next : NULL);
+static void memory_tracker_dump() {
+ int i = 0;
+ struct mem_block *p = (memtrack.head ? memtrack.head->next : NULL);
- memtrack_log("\n_currently Allocated= %d; Max allocated= %d\n",
- memtrack.current_allocated, memtrack.max_allocated);
+ memtrack_log("\n_currently Allocated= %d; Max allocated= %d\n",
+ memtrack.current_allocated, memtrack.max_allocated);
- while (p)
- {
+ while (p) {
#if defined(WIN32) && !defined(_WIN32_WCE)
- /*when using outputdebugstring, output filenames so they
- can be clicked to be opened in visual studio*/
- if (g_logging.type == 1)
- memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file:\n"
- " %s(%d):\n", i,
- p->addr, i, p->size,
- p->file, p->line);
- else
+ /*when using outputdebugstring, output filenames so they
+ can be clicked to be opened in visual studio*/
+ if (g_logging.type == 1)
+ memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file:\n"
+ " %s(%d):\n", i,
+ p->addr, i, p->size,
+ p->file, p->line);
+ else
#endif
- memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file: %s, line: %d\n", i,
- p->addr, i, p->size,
- p->file, p->line);
+ memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file: %s, line: %d\n", i,
+ p->addr, i, p->size,
+ p->file, p->line);
- p = p->next;
- ++i;
- }
+ p = p->next;
+ ++i;
+ }
- memtrack_log("\n");
+ memtrack_log("\n");
}
/*
@@ -508,55 +475,49 @@ static void memory_tracker_dump()
  this function will check each addr in the list, verifying that
  addr-padding_size and addr+padding_size are filled with pad_value
*/
-static void memory_tracker_check_integrity(char *file, unsigned int line)
-{
- if (memtrack.padding_size)
- {
- int i,
- index = 0;
- unsigned char *p_show_me,
- * p_show_me2;
- unsigned int tempme = memtrack.pad_value,
- dead1,
- dead2;
- unsigned char *x_bounds;
- struct mem_block *p = memtrack.head->next;
-
- while (p)
- {
- //x_bounds = (unsigned char*)p->addr;
- //back up VPX_BYTE_ALIGNMENT
- //x_bounds -= memtrack.padding_size;
-
- if (p->padded) // can the bounds be checked?
- {
- /*yes, move to the address that was actually allocated
- by the vpx_* calls*/
- x_bounds = (unsigned char *)(((size_t *)p->addr)[-1]);
-
- for (i = 0; i < memtrack.padding_size; i += sizeof(unsigned int))
- {
- p_show_me = (x_bounds + i);
- p_show_me2 = (unsigned char *)(p->addr + p->size + i);
-
- MEM_TRACK_MEMCPY(&dead1, p_show_me, sizeof(unsigned int));
- MEM_TRACK_MEMCPY(&dead2, p_show_me2, sizeof(unsigned int));
-
- if ((dead1 != tempme) || (dead2 != tempme))
- {
- memtrack_log("\n[vpx_mem integrity check failed]:\n"
- " index[%d,%d] {%s:%d} addr=0x%x, size=%d,"
- " file: %s, line: %d c0:0x%x c1:0x%x\n",
- index, i, file, line, p->addr, p->size, p->file,
- p->line, dead1, dead2);
- }
- }
- }
-
- ++index;
- p = p->next;
+static void memory_tracker_check_integrity(char *file, unsigned int line) {
+ if (memtrack.padding_size) {
+ int i,
+ index = 0;
+ unsigned char *p_show_me,
+ * p_show_me2;
+ unsigned int tempme = memtrack.pad_value,
+ dead1,
+ dead2;
+ unsigned char *x_bounds;
+ struct mem_block *p = memtrack.head->next;
+
+ while (p) {
+ // x_bounds = (unsigned char*)p->addr;
+ // back up VPX_BYTE_ALIGNMENT
+ // x_bounds -= memtrack.padding_size;
+
+ if (p->padded) { // can the bounds be checked?
+ /*yes, move to the address that was actually allocated
+ by the vpx_* calls*/
+ x_bounds = (unsigned char *)(((size_t *)p->addr)[-1]);
+
+ for (i = 0; i < memtrack.padding_size; i += sizeof(unsigned int)) {
+ p_show_me = (x_bounds + i);
+ p_show_me2 = (unsigned char *)(p->addr + p->size + i);
+
+ MEM_TRACK_MEMCPY(&dead1, p_show_me, sizeof(unsigned int));
+ MEM_TRACK_MEMCPY(&dead2, p_show_me2, sizeof(unsigned int));
+
+ if ((dead1 != tempme) || (dead2 != tempme)) {
+ memtrack_log("\n[vpx_mem integrity check failed]:\n"
+ " index[%d,%d] {%s:%d} addr=0x%x, size=%d,"
+ " file: %s, line: %d c0:0x%x c1:0x%x\n",
+ index, i, file, line, p->addr, p->size, p->file,
+ p->line, dead1, dead2);
+ }
}
+ }
+
+ ++index;
+ p = p->next;
}
+ }
}
/*
@@ -568,43 +529,38 @@ static void memory_tracker_check_integrity(char *file, unsigned int line)
*/
void memory_tracker_add(size_t addr, unsigned int size,
char *file, unsigned int line,
- int padded)
-{
- if (!memory_tracker_lock_mutex())
- {
- struct mem_block *p;
+ int padded) {
+ if (!memory_tracker_lock_mutex()) {
+ struct mem_block *p;
- p = MEM_TRACK_MALLOC(sizeof(struct mem_block));
+ p = MEM_TRACK_MALLOC(sizeof(struct mem_block));
- if (p)
- {
- p->prev = memtrack.tail;
- p->prev->next = p;
- p->addr = addr;
- p->size = size;
- p->line = line;
- p->file = file;
- p->padded = padded;
- p->next = NULL;
+ if (p) {
+ p->prev = memtrack.tail;
+ p->prev->next = p;
+ p->addr = addr;
+ p->size = size;
+ p->line = line;
+ p->file = file;
+ p->padded = padded;
+ p->next = NULL;
- memtrack.tail = p;
+ memtrack.tail = p;
- memtrack.current_allocated += size;
+ memtrack.current_allocated += size;
- if (memtrack.current_allocated > memtrack.max_allocated)
- memtrack.max_allocated = memtrack.current_allocated;
+ if (memtrack.current_allocated > memtrack.max_allocated)
+ memtrack.max_allocated = memtrack.current_allocated;
- //memtrack_log("memory_tracker_add: added addr=0x%.8x\n", addr);
+ // memtrack_log("memory_tracker_add: added addr=0x%.8x\n", addr);
- memory_tracker_unlock_mutex();
- }
- else
- {
- memtrack_log("memory_tracker_add: error allocating memory!\n");
- memory_tracker_unlock_mutex();
- vpx_memory_tracker_destroy();
- }
+ memory_tracker_unlock_mutex();
+ } else {
+ memtrack_log("memory_tracker_add: error allocating memory!\n");
+ memory_tracker_unlock_mutex();
+ vpx_memory_tracker_destroy();
}
+ }
}
/*
@@ -617,41 +573,36 @@ void memory_tracker_add(size_t addr, unsigned int size,
-1: if the mutex could not be locked
-2: if the addr was not found in the list
*/
-int memory_tracker_remove(size_t addr)
-{
- int ret = -1;
-
- if (!memory_tracker_lock_mutex())
- {
- struct mem_block *p;
+int memory_tracker_remove(size_t addr) {
+ int ret = -1;
- if ((p = memory_tracker_find(addr)))
- {
- memtrack.current_allocated -= p->size;
+ if (!memory_tracker_lock_mutex()) {
+ struct mem_block *p;
- p->prev->next = p->next;
+ if ((p = memory_tracker_find(addr))) {
+ memtrack.current_allocated -= p->size;
- if (p->next)
- p->next->prev = p->prev;
- else
- memtrack.tail = p->prev;
+ p->prev->next = p->next;
- ret = 0;
- MEM_TRACK_FREE(p);
- }
- else
- {
- if (addr)
- memtrack_log("memory_tracker_remove(): addr not found in list,"
- " 0x%.8x\n", addr);
+ if (p->next)
+ p->next->prev = p->prev;
+ else
+ memtrack.tail = p->prev;
- ret = -2;
- }
+ ret = 0;
+ MEM_TRACK_FREE(p);
+ } else {
+ if (addr)
+ memtrack_log("memory_tracker_remove(): addr not found in list,"
+ " 0x%.8x\n", addr);
- memory_tracker_unlock_mutex();
+ ret = -2;
}
- return ret;
+ memory_tracker_unlock_mutex();
+ }
+
+ return ret;
}
/*
@@ -662,19 +613,17 @@ int memory_tracker_remove(size_t addr)
the need for repeated locking and unlocking as in Remove
Returns: pointer to the mem block if found, NULL otherwise
*/
-static struct mem_block *memory_tracker_find(size_t addr)
-{
- struct mem_block *p = NULL;
+static struct mem_block *memory_tracker_find(size_t addr) {
+ struct mem_block *p = NULL;
- if (memtrack.head)
- {
- p = memtrack.head->next;
+ if (memtrack.head) {
+ p = memtrack.head->next;
- while (p && (p->addr != addr))
- p = p->next;
- }
+ while (p && (p->addr != addr))
+ p = p->next;
+ }
- return p;
+ return p;
}
@@ -687,28 +636,25 @@ static struct mem_block *memory_tracker_find(size_t addr)
<0: Failure, either the mutex was not initialized
or the call to lock the mutex failed
*/
-static int memory_tracker_lock_mutex()
-{
- int ret = -1;
+static int memory_tracker_lock_mutex() {
+ int ret = -1;
- if (g_b_mem_tracker_inited)
- {
+ if (g_b_mem_tracker_inited) {
#if HAVE_PTHREAD_H
- ret = pthread_mutex_lock(&memtrack.mutex);
+ ret = pthread_mutex_lock(&memtrack.mutex);
#elif defined(WIN32) || defined(_WIN32_WCE)
- ret = WaitForSingleObject(memtrack.mutex, INFINITE);
+ ret = WaitForSingleObject(memtrack.mutex, INFINITE);
#elif defined(VXWORKS)
- ret = sem_take(memtrack.mutex, WAIT_FOREVER);
+ ret = sem_take(memtrack.mutex, WAIT_FOREVER);
#endif
- if (ret)
- {
- memtrack_log("memory_tracker_lock_mutex: mutex lock failed\n");
- }
+ if (ret) {
+ memtrack_log("memory_tracker_lock_mutex: mutex lock failed\n");
}
+ }
- return ret;
+ return ret;
}
/*
@@ -719,28 +665,25 @@ static int memory_tracker_lock_mutex()
<0: Failure, either the mutex was not initialized
or the call to unlock the mutex failed
*/
-static int memory_tracker_unlock_mutex()
-{
- int ret = -1;
+static int memory_tracker_unlock_mutex() {
+ int ret = -1;
- if (g_b_mem_tracker_inited)
- {
+ if (g_b_mem_tracker_inited) {
#if HAVE_PTHREAD_H
- ret = pthread_mutex_unlock(&memtrack.mutex);
+ ret = pthread_mutex_unlock(&memtrack.mutex);
#elif defined(WIN32) || defined(_WIN32_WCE)
- ret = !ReleaseMutex(memtrack.mutex);
+ ret = !ReleaseMutex(memtrack.mutex);
#elif defined(VXWORKS)
- ret = sem_give(memtrack.mutex);
+ ret = sem_give(memtrack.mutex);
#endif
- if (ret)
- {
- memtrack_log("memory_tracker_unlock_mutex: mutex unlock failed\n");
- }
+ if (ret) {
+ memtrack_log("memory_tracker_unlock_mutex: mutex unlock failed\n");
}
+ }
- return ret;
+ return ret;
}
#endif
@@ -754,45 +697,44 @@ static int memory_tracker_unlock_mutex()
  -1: if USE_GLOBAL_FUNCTION_POINTERS is not set.
*/
int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l
- , mem_track_calloc_func g_calloc_l
- , mem_track_realloc_func g_realloc_l
- , mem_track_free_func g_free_l
- , mem_track_memcpy_func g_memcpy_l
- , mem_track_memset_func g_memset_l
- , mem_track_memmove_func g_memmove_l)
-{
+, mem_track_calloc_func g_calloc_l
+, mem_track_realloc_func g_realloc_l
+, mem_track_free_func g_free_l
+, mem_track_memcpy_func g_memcpy_l
+, mem_track_memset_func g_memset_l
+, mem_track_memmove_func g_memmove_l) {
#if USE_GLOBAL_FUNCTION_POINTERS
- if (g_malloc_l)
- g_malloc = g_malloc_l;
+ if (g_malloc_l)
+ g_malloc = g_malloc_l;
- if (g_calloc_l)
- g_calloc = g_calloc_l;
+ if (g_calloc_l)
+ g_calloc = g_calloc_l;
- if (g_realloc_l)
- g_realloc = g_realloc_l;
+ if (g_realloc_l)
+ g_realloc = g_realloc_l;
- if (g_free_l)
- g_free = g_free_l;
+ if (g_free_l)
+ g_free = g_free_l;
- if (g_memcpy_l)
- g_memcpy = g_memcpy_l;
+ if (g_memcpy_l)
+ g_memcpy = g_memcpy_l;
- if (g_memset_l)
- g_memset = g_memset_l;
+ if (g_memset_l)
+ g_memset = g_memset_l;
- if (g_memmove_l)
- g_memmove = g_memmove_l;
+ if (g_memmove_l)
+ g_memmove = g_memmove_l;
- return 0;
+ return 0;
#else
- (void)g_malloc_l;
- (void)g_calloc_l;
- (void)g_realloc_l;
- (void)g_free_l;
- (void)g_memcpy_l;
- (void)g_memset_l;
- (void)g_memmove_l;
- return -1;
+ (void)g_malloc_l;
+ (void)g_calloc_l;
+ (void)g_realloc_l;
+ (void)g_free_l;
+ (void)g_memcpy_l;
+ (void)g_memset_l;
+ (void)g_memmove_l;
+ return -1;
#endif
}
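
Note on the tracker above: the reformat preserves its lifecycle — init allocates a dummy head node and a platform mutex, add/remove maintain the doubly linked list under that mutex, dump reports it, destroy dumps and tears it down. A hedged usage sketch, assuming a CONFIG_MEM_TRACKER build (the include path may vary by build layout); a padding_size of 0 disables the integrity check:

    #include "vpx_mem/include/vpx_mem_tracker.h"

    int main(void) {
      /* 0 padding bytes, pad value 0xDE: integrity checking is a no-op. */
      if (!vpx_memory_tracker_init(0, 0xDE))  /* returns 1 on success */
        return 1;

      /* Allocations made through the vpx_mem wrappers are recorded
       * automatically; vpx_memory_tracker_add() registers blocks by hand. */
      vpx_memory_tracker_dump();                  /* list current blocks */
      vpx_memory_tracker_check_integrity((char *)__FILE__, __LINE__);
      vpx_memory_tracker_destroy();               /* dumps, then frees the list */
      return 0;
    }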
diff --git a/libvpx/vpx_ports/arm_cpudetect.c b/libvpx/vpx_ports/arm_cpudetect.c
index 8ff95a1..542ff67 100644
--- a/libvpx/vpx_ports/arm_cpudetect.c
+++ b/libvpx/vpx_ports/arm_cpudetect.c
@@ -12,50 +12,45 @@
#include <string.h>
#include "arm.h"
-static int arm_cpu_env_flags(int *flags)
-{
- char *env;
- env = getenv("VPX_SIMD_CAPS");
- if (env && *env)
- {
- *flags = (int)strtol(env, NULL, 0);
- return 0;
- }
- *flags = 0;
- return -1;
+static int arm_cpu_env_flags(int *flags) {
+ char *env;
+ env = getenv("VPX_SIMD_CAPS");
+ if (env && *env) {
+ *flags = (int)strtol(env, NULL, 0);
+ return 0;
+ }
+ *flags = 0;
+ return -1;
}
-static int arm_cpu_env_mask(void)
-{
- char *env;
- env = getenv("VPX_SIMD_CAPS_MASK");
- return env && *env ? (int)strtol(env, NULL, 0) : ~0;
+static int arm_cpu_env_mask(void) {
+ char *env;
+ env = getenv("VPX_SIMD_CAPS_MASK");
+ return env && *env ? (int)strtol(env, NULL, 0) : ~0;
}
#if !CONFIG_RUNTIME_CPU_DETECT
-int arm_cpu_caps(void)
-{
+int arm_cpu_caps(void) {
/* This function should actually be a no-op. There is no way to adjust any of
* these because the RTCD tables do not exist: the functions are called
* statically */
- int flags;
- int mask;
- if (!arm_cpu_env_flags(&flags))
- {
- return flags;
- }
- mask = arm_cpu_env_mask();
+ int flags;
+ int mask;
+ if (!arm_cpu_env_flags(&flags)) {
+ return flags;
+ }
+ mask = arm_cpu_env_mask();
#if HAVE_EDSP
- flags |= HAS_EDSP;
+ flags |= HAS_EDSP;
#endif /* HAVE_EDSP */
#if HAVE_MEDIA
- flags |= HAS_MEDIA;
+ flags |= HAS_MEDIA;
#endif /* HAVE_MEDIA */
#if HAVE_NEON
- flags |= HAS_NEON;
+ flags |= HAS_NEON;
#endif /* HAVE_NEON */
- return flags & mask;
+ return flags & mask;
}
#elif defined(_MSC_VER) /* end !CONFIG_RUNTIME_CPU_DETECT */
@@ -64,156 +59,134 @@ int arm_cpu_caps(void)
#define WIN32_EXTRA_LEAN
#include <windows.h>
-int arm_cpu_caps(void)
-{
- int flags;
- int mask;
- if (!arm_cpu_env_flags(&flags))
- {
- return flags;
- }
- mask = arm_cpu_env_mask();
- /* MSVC has no inline __asm support for ARM, but it does let you __emit
- * instructions via their assembled hex code.
- * All of these instructions should be essentially nops.
- */
+int arm_cpu_caps(void) {
+ int flags;
+ int mask;
+ if (!arm_cpu_env_flags(&flags)) {
+ return flags;
+ }
+ mask = arm_cpu_env_mask();
+ /* MSVC has no inline __asm support for ARM, but it does let you __emit
+ * instructions via their assembled hex code.
+ * All of these instructions should be essentially nops.
+ */
#if HAVE_EDSP
- if (mask & HAS_EDSP)
- {
- __try
- {
- /*PLD [r13]*/
- __emit(0xF5DDF000);
- flags |= HAS_EDSP;
- }
- __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION)
- {
- /*Ignore exception.*/
- }
+ if (mask & HAS_EDSP) {
+ __try {
+ /*PLD [r13]*/
+ __emit(0xF5DDF000);
+ flags |= HAS_EDSP;
+ } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
+ /*Ignore exception.*/
}
+ }
#if HAVE_MEDIA
- if (mask & HAS_MEDIA)
- __try
- {
- /*SHADD8 r3,r3,r3*/
- __emit(0xE6333F93);
- flags |= HAS_MEDIA;
- }
- __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION)
- {
- /*Ignore exception.*/
- }
- }
+ if (mask & HAS_MEDIA)
+ __try {
+ /*SHADD8 r3,r3,r3*/
+ __emit(0xE6333F93);
+ flags |= HAS_MEDIA;
+ } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
+ /*Ignore exception.*/
+ }
+}
#if HAVE_NEON
- if (mask & HAS_NEON)
- {
- __try
- {
- /*VORR q0,q0,q0*/
- __emit(0xF2200150);
- flags |= HAS_NEON;
- }
- __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION)
- {
- /*Ignore exception.*/
- }
- }
+if (mask &HAS_NEON) {
+ __try {
+ /*VORR q0,q0,q0*/
+ __emit(0xF2200150);
+ flags |= HAS_NEON;
+ } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
+ /*Ignore exception.*/
+ }
+}
#endif /* HAVE_NEON */
#endif /* HAVE_MEDIA */
#endif /* HAVE_EDSP */
- return flags & mask;
+return flags & mask;
}
#elif defined(__ANDROID__) /* end _MSC_VER */
#include <cpu-features.h>
-int arm_cpu_caps(void)
-{
- int flags;
- int mask;
- uint64_t features;
- if (!arm_cpu_env_flags(&flags))
- {
- return flags;
- }
- mask = arm_cpu_env_mask();
- features = android_getCpuFeatures();
+int arm_cpu_caps(void) {
+ int flags;
+ int mask;
+ uint64_t features;
+ if (!arm_cpu_env_flags(&flags)) {
+ return flags;
+ }
+ mask = arm_cpu_env_mask();
+ features = android_getCpuFeatures();
#if HAVE_EDSP
- flags |= HAS_EDSP;
+ flags |= HAS_EDSP;
#endif /* HAVE_EDSP */
#if HAVE_MEDIA
- flags |= HAS_MEDIA;
+ flags |= HAS_MEDIA;
#endif /* HAVE_MEDIA */
#if HAVE_NEON
- if (features & ANDROID_CPU_ARM_FEATURE_NEON)
- flags |= HAS_NEON;
+ if (features & ANDROID_CPU_ARM_FEATURE_NEON)
+ flags |= HAS_NEON;
#endif /* HAVE_NEON */
- return flags & mask;
+ return flags & mask;
}
#elif defined(__linux__) /* end __ANDROID__ */
+
#include <stdio.h>
-int arm_cpu_caps(void)
-{
- FILE *fin;
- int flags;
- int mask;
- if (!arm_cpu_env_flags(&flags))
- {
- return flags;
- }
- mask = arm_cpu_env_mask();
- /* Reading /proc/self/auxv would be easier, but that doesn't work reliably
- * on Android.
- * This also means that detection will fail in Scratchbox.
+int arm_cpu_caps(void) {
+ FILE *fin;
+ int flags;
+ int mask;
+ if (!arm_cpu_env_flags(&flags)) {
+ return flags;
+ }
+ mask = arm_cpu_env_mask();
+ /* Reading /proc/self/auxv would be easier, but that doesn't work reliably
+ * on Android.
+ * This also means that detection will fail in Scratchbox.
+ */
+ fin = fopen("/proc/cpuinfo", "r");
+ if (fin != NULL) {
+ /* 512 should be enough for anybody (it's even enough for all the flags
+ * that x86 has accumulated... so far).
*/
- fin = fopen("/proc/cpuinfo","r");
- if(fin != NULL)
- {
- /* 512 should be enough for anybody (it's even enough for all the flags
- * that x86 has accumulated... so far).
- */
- char buf[512];
- while (fgets(buf, 511, fin) != NULL)
- {
+ char buf[512];
+ while (fgets(buf, 511, fin) != NULL) {
#if HAVE_EDSP || HAVE_NEON
- if (memcmp(buf, "Features", 8) == 0)
- {
- char *p;
+ if (memcmp(buf, "Features", 8) == 0) {
+ char *p;
#if HAVE_EDSP
- p=strstr(buf, " edsp");
- if (p != NULL && (p[5] == ' ' || p[5] == '\n'))
- {
- flags |= HAS_EDSP;
- }
+ p = strstr(buf, " edsp");
+ if (p != NULL && (p[5] == ' ' || p[5] == '\n')) {
+ flags |= HAS_EDSP;
+ }
#if HAVE_NEON
- p = strstr(buf, " neon");
- if (p != NULL && (p[5] == ' ' || p[5] == '\n'))
- {
- flags |= HAS_NEON;
- }
+ p = strstr(buf, " neon");
+ if (p != NULL && (p[5] == ' ' || p[5] == '\n')) {
+ flags |= HAS_NEON;
+ }
#endif /* HAVE_NEON */
#endif /* HAVE_EDSP */
- }
+ }
#endif /* HAVE_EDSP || HAVE_NEON */
#if HAVE_MEDIA
- if (memcmp(buf, "CPU architecture:",17) == 0){
- int version;
- version = atoi(buf+17);
- if (version >= 6)
- {
- flags |= HAS_MEDIA;
- }
- }
-#endif /* HAVE_MEDIA */
+ if (memcmp(buf, "CPU architecture:", 17) == 0) {
+ int version;
+ version = atoi(buf + 17);
+ if (version >= 6) {
+ flags |= HAS_MEDIA;
}
- fclose(fin);
+ }
+#endif /* HAVE_MEDIA */
}
- return flags & mask;
+ fclose(fin);
+ }
+ return flags & mask;
}
#else /* end __linux__ */
#error "--enable-runtime-cpu-detect selected, but no CPU detection method " \
- "available for your platform. Reconfigure with --disable-runtime-cpu-detect."
+"available for your platform. Reconfigure with --disable-runtime-cpu-detect."
#endif
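
Note on arm_cpudetect.c above: all four arm_cpu_caps() variants honor the same two environment overrides before probing hardware — VPX_SIMD_CAPS forces the flag set outright, and VPX_SIMD_CAPS_MASK filters whatever detection finds. A standalone sketch of that pattern; the HAS_NEON value here is an assumption, the real constant lives in vpx_ports/arm.h, and the detection step is a stand-in:

    #include <stdio.h>
    #include <stdlib.h>

    #define HAS_NEON 0x08  /* assumed value; see vpx_ports/arm.h */

    static int detect_caps(void) {
      const char *env = getenv("VPX_SIMD_CAPS");
      int flags, mask;

      if (env && *env)                      /* hard override, skip detection */
        return (int)strtol(env, NULL, 0);

      env = getenv("VPX_SIMD_CAPS_MASK");   /* filter applied after detection */
      mask = (env && *env) ? (int)strtol(env, NULL, 0) : ~0;

      flags = HAS_NEON;                     /* stand-in for the real probing */
      return flags & mask;
    }

    int main(void) {
      printf("caps: 0x%x\n", detect_caps());
      return 0;
    }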
diff --git a/libvpx/vpx_ports/asm_offsets.h b/libvpx/vpx_ports/asm_offsets.h
index 7b6ae4a..d3a3e5a 100644
--- a/libvpx/vpx_ports/asm_offsets.h
+++ b/libvpx/vpx_ports/asm_offsets.h
@@ -15,8 +15,8 @@
#include <stddef.h>
#define ct_assert(name,cond) \
- static void assert_##name(void) UNUSED;\
- static void assert_##name(void) {switch(0){case 0:case !!(cond):;}}
+ static void assert_##name(void) UNUSED;\
+ static void assert_##name(void) {switch(0){case 0:case !!(cond):;}}
#if INLINE_ASM
#define DEFINE(sym, val) asm("\n" #sym " EQU %0" : : "i" (val))
diff --git a/libvpx/vpx_ports/config.h b/libvpx/vpx_ports/config.h
new file mode 100644
index 0000000..1abe70d
--- /dev/null
+++ b/libvpx/vpx_ports/config.h
@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vpx_config.h"
diff --git a/libvpx/vpx_ports/emmintrin_compat.h b/libvpx/vpx_ports/emmintrin_compat.h
new file mode 100644
index 0000000..782d603
--- /dev/null
+++ b/libvpx/vpx_ports/emmintrin_compat.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_PORTS_EMMINTRIN_COMPAT_H
+#define VPX_PORTS_EMMINTRIN_COMPAT_H
+
+#if defined(__GNUC__) && __GNUC__ < 4
+/* From emmintrin.h (gcc 4.5.3) */
+/* Casts between various SP, DP, INT vector types. Note that these do no
+ conversion of values, they just change the type. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_ps(__m128d __A)
+{
+ return (__m128) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_si128(__m128d __A)
+{
+ return (__m128i) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_pd(__m128 __A)
+{
+ return (__m128d) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_si128(__m128 __A)
+{
+ return (__m128i) __A;
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_ps(__m128i __A)
+{
+ return (__m128) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_pd(__m128i __A)
+{
+ return (__m128d) __A;
+}
+#endif
+
+#endif
diff --git a/libvpx/vpx_ports/emms.asm b/libvpx/vpx_ports/emms.asm
index efad1a5..db8da28 100644
--- a/libvpx/vpx_ports/emms.asm
+++ b/libvpx/vpx_ports/emms.asm
@@ -18,7 +18,7 @@ sym(vpx_reset_mmx_state):
ret
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
global sym(vpx_winx64_fldcw) PRIVATE
sym(vpx_winx64_fldcw):
sub rsp, 8
diff --git a/libvpx/vpx_ports/mem.h b/libvpx/vpx_ports/mem.h
index 29e507f..62b86bb 100644
--- a/libvpx/vpx_ports/mem.h
+++ b/libvpx/vpx_ports/mem.h
@@ -11,6 +11,7 @@
#ifndef VPX_PORTS_MEM_H
#define VPX_PORTS_MEM_H
+
#include "vpx_config.h"
#include "vpx/vpx_integer.h"
@@ -31,8 +32,8 @@
* within the array.
*/
#define DECLARE_ALIGNED_ARRAY(a,typ,val,n)\
-typ val##_[(n)+(a)/sizeof(typ)+1];\
-typ *val = (typ*)((((intptr_t)val##_)+(a)-1)&((intptr_t)-(a)))
+ typ val##_[(n)+(a)/sizeof(typ)+1];\
+ typ *val = (typ*)((((intptr_t)val##_)+(a)-1)&((intptr_t)-(a)))
/* Indicates that the usage of the specified variable has been audited to assure
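
Note on the hunk above: DECLARE_ALIGNED_ARRAY over-allocates by one alignment unit and rounds the pointer up, so val is aligned even though the backing array is not. A standalone check of the same arithmetic, using the macro exactly as shown in the diff; the alignment must be a power of two for the mask trick to hold:

    #include <stdint.h>
    #include <stdio.h>

    #define DECLARE_ALIGNED_ARRAY(a, typ, val, n) \
      typ val##_[(n) + (a) / sizeof(typ) + 1];    \
      typ *val = (typ *)((((intptr_t)val##_) + (a) - 1) & ((intptr_t)-(a)))

    int main(void) {
      DECLARE_ALIGNED_ARRAY(16, uint8_t, buf, 32);  /* 16-byte-aligned view */
      printf("aligned: %d\n", ((uintptr_t)buf % 16) == 0);
      return 0;
    }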
diff --git a/libvpx/vpx_ports/mem_ops.h b/libvpx/vpx_ports/mem_ops.h
index dec28d5..2d44a3a 100644
--- a/libvpx/vpx_ports/mem_ops.h
+++ b/libvpx/vpx_ports/mem_ops.h
@@ -60,88 +60,82 @@
#undef mem_get_be16
#define mem_get_be16 mem_ops_wrap_symbol(mem_get_be16)
-static unsigned MEM_VALUE_T mem_get_be16(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[0] << 8;
- val |= mem[1];
- return val;
+static unsigned MEM_VALUE_T mem_get_be16(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[0] << 8;
+ val |= mem[1];
+ return val;
}
#undef mem_get_be24
#define mem_get_be24 mem_ops_wrap_symbol(mem_get_be24)
-static unsigned MEM_VALUE_T mem_get_be24(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[0] << 16;
- val |= mem[1] << 8;
- val |= mem[2];
- return val;
+static unsigned MEM_VALUE_T mem_get_be24(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[0] << 16;
+ val |= mem[1] << 8;
+ val |= mem[2];
+ return val;
}
#undef mem_get_be32
#define mem_get_be32 mem_ops_wrap_symbol(mem_get_be32)
-static unsigned MEM_VALUE_T mem_get_be32(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[0] << 24;
- val |= mem[1] << 16;
- val |= mem[2] << 8;
- val |= mem[3];
- return val;
+static unsigned MEM_VALUE_T mem_get_be32(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[0] << 24;
+ val |= mem[1] << 16;
+ val |= mem[2] << 8;
+ val |= mem[3];
+ return val;
}
#undef mem_get_le16
#define mem_get_le16 mem_ops_wrap_symbol(mem_get_le16)
-static unsigned MEM_VALUE_T mem_get_le16(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[1] << 8;
- val |= mem[0];
- return val;
+static unsigned MEM_VALUE_T mem_get_le16(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[1] << 8;
+ val |= mem[0];
+ return val;
}
#undef mem_get_le24
#define mem_get_le24 mem_ops_wrap_symbol(mem_get_le24)
-static unsigned MEM_VALUE_T mem_get_le24(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[2] << 16;
- val |= mem[1] << 8;
- val |= mem[0];
- return val;
+static unsigned MEM_VALUE_T mem_get_le24(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[2] << 16;
+ val |= mem[1] << 8;
+ val |= mem[0];
+ return val;
}
#undef mem_get_le32
#define mem_get_le32 mem_ops_wrap_symbol(mem_get_le32)
-static unsigned MEM_VALUE_T mem_get_le32(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[3] << 24;
- val |= mem[2] << 16;
- val |= mem[1] << 8;
- val |= mem[0];
- return val;
+static unsigned MEM_VALUE_T mem_get_le32(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[3] << 24;
+ val |= mem[2] << 16;
+ val |= mem[1] << 8;
+ val |= mem[0];
+ return val;
}
#define mem_get_s_generic(end,sz) \
- static signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) {\
- const MAU_T *mem = (const MAU_T*)vmem;\
- signed MEM_VALUE_T val = mem_get_##end##sz(mem);\
- return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz);\
- }
+ static signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) {\
+ const MAU_T *mem = (const MAU_T*)vmem;\
+ signed MEM_VALUE_T val = mem_get_##end##sz(mem);\
+ return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz);\
+ }
#undef mem_get_sbe16
#define mem_get_sbe16 mem_ops_wrap_symbol(mem_get_sbe16)
@@ -169,66 +163,60 @@ mem_get_s_generic(le, 32)
#undef mem_put_be16
#define mem_put_be16 mem_ops_wrap_symbol(mem_put_be16)
-static void mem_put_be16(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
+static void mem_put_be16(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
- mem[0] = (val >> 8) & 0xff;
- mem[1] = (val >> 0) & 0xff;
+ mem[0] = (val >> 8) & 0xff;
+ mem[1] = (val >> 0) & 0xff;
}
#undef mem_put_be24
#define mem_put_be24 mem_ops_wrap_symbol(mem_put_be24)
-static void mem_put_be24(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
+static void mem_put_be24(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
- mem[0] = (val >> 16) & 0xff;
- mem[1] = (val >> 8) & 0xff;
- mem[2] = (val >> 0) & 0xff;
+ mem[0] = (val >> 16) & 0xff;
+ mem[1] = (val >> 8) & 0xff;
+ mem[2] = (val >> 0) & 0xff;
}
#undef mem_put_be32
#define mem_put_be32 mem_ops_wrap_symbol(mem_put_be32)
-static void mem_put_be32(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
-
- mem[0] = (val >> 24) & 0xff;
- mem[1] = (val >> 16) & 0xff;
- mem[2] = (val >> 8) & 0xff;
- mem[3] = (val >> 0) & 0xff;
+static void mem_put_be32(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
+
+ mem[0] = (val >> 24) & 0xff;
+ mem[1] = (val >> 16) & 0xff;
+ mem[2] = (val >> 8) & 0xff;
+ mem[3] = (val >> 0) & 0xff;
}
#undef mem_put_le16
#define mem_put_le16 mem_ops_wrap_symbol(mem_put_le16)
-static void mem_put_le16(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
+static void mem_put_le16(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
- mem[0] = (val >> 0) & 0xff;
- mem[1] = (val >> 8) & 0xff;
+ mem[0] = (val >> 0) & 0xff;
+ mem[1] = (val >> 8) & 0xff;
}
#undef mem_put_le24
#define mem_put_le24 mem_ops_wrap_symbol(mem_put_le24)
-static void mem_put_le24(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
+static void mem_put_le24(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
- mem[0] = (val >> 0) & 0xff;
- mem[1] = (val >> 8) & 0xff;
- mem[2] = (val >> 16) & 0xff;
+ mem[0] = (val >> 0) & 0xff;
+ mem[1] = (val >> 8) & 0xff;
+ mem[2] = (val >> 16) & 0xff;
}
#undef mem_put_le32
#define mem_put_le32 mem_ops_wrap_symbol(mem_put_le32)
-static void mem_put_le32(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
-
- mem[0] = (val >> 0) & 0xff;
- mem[1] = (val >> 8) & 0xff;
- mem[2] = (val >> 16) & 0xff;
- mem[3] = (val >> 24) & 0xff;
+static void mem_put_le32(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
+
+ mem[0] = (val >> 0) & 0xff;
+ mem[1] = (val >> 8) & 0xff;
+ mem[2] = (val >> 16) & 0xff;
+ mem[3] = (val >> 24) & 0xff;
}
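
Note on mem_ops.h above: the get/put pairs read and write multi-byte values one MAU at a time, so they are endian-explicit and alignment-safe regardless of host byte order. A round-trip sketch built from free-standing copies of the two little-endian 32-bit helpers shown in the hunk (MEM_VALUE_T defaults to int elsewhere in the header; plain unsigned types are used here):

    #include <stdio.h>

    static void put_le32(void *vmem, unsigned int val) {
      unsigned char *mem = (unsigned char *)vmem;
      mem[0] = (val >>  0) & 0xff;
      mem[1] = (val >>  8) & 0xff;
      mem[2] = (val >> 16) & 0xff;
      mem[3] = (val >> 24) & 0xff;
    }

    static unsigned int get_le32(const void *vmem) {
      const unsigned char *mem = (const unsigned char *)vmem;
      return ((unsigned int)mem[3] << 24) | (mem[2] << 16)
           | (mem[1] << 8) | mem[0];
    }

    int main(void) {
      unsigned char buf[4];
      put_le32(buf, 0x12345678u);
      printf("%s\n", get_le32(buf) == 0x12345678u ? "round trip ok" : "mismatch");
      return 0;
    }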
diff --git a/libvpx/vpx_ports/mem_ops_aligned.h b/libvpx/vpx_ports/mem_ops_aligned.h
index fca653a..0100300 100644
--- a/libvpx/vpx_ports/mem_ops_aligned.h
+++ b/libvpx/vpx_ports/mem_ops_aligned.h
@@ -24,61 +24,61 @@
* could redefine these macros.
*/
#define swap_endian_16(val,raw) do {\
- val = ((raw>>8) & 0x00ff) \
- | ((raw<<8) & 0xff00);\
- } while(0)
+ val = ((raw>>8) & 0x00ff) \
+ | ((raw<<8) & 0xff00);\
+ } while(0)
#define swap_endian_32(val,raw) do {\
- val = ((raw>>24) & 0x000000ff) \
- | ((raw>>8) & 0x0000ff00) \
- | ((raw<<8) & 0x00ff0000) \
- | ((raw<<24) & 0xff000000); \
- } while(0)
+ val = ((raw>>24) & 0x000000ff) \
+ | ((raw>>8) & 0x0000ff00) \
+ | ((raw<<8) & 0x00ff0000) \
+ | ((raw<<24) & 0xff000000); \
+ } while(0)
#define swap_endian_16_se(val,raw) do {\
- swap_endian_16(val,raw);\
- val = ((val << 16) >> 16);\
- } while(0)
+ swap_endian_16(val,raw);\
+ val = ((val << 16) >> 16);\
+ } while(0)
#define swap_endian_32_se(val,raw) swap_endian_32(val,raw)
#define mem_get_ne_aligned_generic(end,sz) \
- static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
- const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
- return *mem;\
- }
+ static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
+ const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
+ return *mem;\
+ }
#define mem_get_sne_aligned_generic(end,sz) \
- static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
- const int##sz##_t *mem = (const int##sz##_t *)vmem;\
- return *mem;\
- }
+ static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
+ const int##sz##_t *mem = (const int##sz##_t *)vmem;\
+ return *mem;\
+ }
#define mem_get_se_aligned_generic(end,sz) \
- static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
- const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
- unsigned MEM_VALUE_T val, raw = *mem;\
- swap_endian_##sz(val,raw);\
- return val;\
- }
+ static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
+ const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
+ unsigned MEM_VALUE_T val, raw = *mem;\
+ swap_endian_##sz(val,raw);\
+ return val;\
+ }
#define mem_get_sse_aligned_generic(end,sz) \
- static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
- const int##sz##_t *mem = (const int##sz##_t *)vmem;\
- unsigned MEM_VALUE_T val, raw = *mem;\
- swap_endian_##sz##_se(val,raw);\
- return val;\
- }
+ static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
+ const int##sz##_t *mem = (const int##sz##_t *)vmem;\
+ unsigned MEM_VALUE_T val, raw = *mem;\
+ swap_endian_##sz##_se(val,raw);\
+ return val;\
+ }
#define mem_put_ne_aligned_generic(end,sz) \
- static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
- uint##sz##_t *mem = (uint##sz##_t *)vmem;\
- *mem = (uint##sz##_t)val;\
- }
+ static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
+ uint##sz##_t *mem = (uint##sz##_t *)vmem;\
+ *mem = (uint##sz##_t)val;\
+ }
#define mem_put_se_aligned_generic(end,sz) \
- static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
- uint##sz##_t *mem = (uint##sz##_t *)vmem, raw;\
- swap_endian_##sz(raw,val);\
- *mem = (uint##sz##_t)raw;\
- }
+ static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
+ uint##sz##_t *mem = (uint##sz##_t *)vmem, raw;\
+ swap_endian_##sz(raw,val);\
+ *mem = (uint##sz##_t)raw;\
+ }
#include "vpx_config.h"
#if CONFIG_BIG_ENDIAN
diff --git a/libvpx/vpx_ports/vpx_once.h b/libvpx/vpx_ports/vpx_once.h
new file mode 100644
index 0000000..16a735c
--- /dev/null
+++ b/libvpx/vpx_ports/vpx_once.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vpx_config.h"
+
+#if CONFIG_MULTITHREAD && defined(_WIN32)
+#include <windows.h>
+#include <stdlib.h>
+static void once(void (*func)(void))
+{
+ static CRITICAL_SECTION *lock;
+ static LONG waiters;
+ static int done;
+ void *lock_ptr = &lock;
+
+ /* If the initialization is complete, return early. This isn't just an
+ * optimization, it prevents races on the destruction of the global
+ * lock.
+ */
+ if(done)
+ return;
+
+ InterlockedIncrement(&waiters);
+
+ /* Get a lock. We create one and try to make it the one-true-lock,
+ * throwing it away if we lost the race.
+ */
+
+ {
+ /* Scope to protect access to new_lock */
+ CRITICAL_SECTION *new_lock = malloc(sizeof(CRITICAL_SECTION));
+ InitializeCriticalSection(new_lock);
+ if (InterlockedCompareExchangePointer(lock_ptr, new_lock, NULL) != NULL)
+ {
+ DeleteCriticalSection(new_lock);
+ free(new_lock);
+ }
+ }
+
+ /* At this point, we have a lock that can be synchronized on. We don't
+ * care which thread actually performed the allocation.
+ */
+
+ EnterCriticalSection(lock);
+
+ if (!done)
+ {
+ func();
+ done = 1;
+ }
+
+ LeaveCriticalSection(lock);
+
+ /* Last one out should free resources. The destructed objects are
+ * protected by checking if(done) above.
+ */
+ if(!InterlockedDecrement(&waiters))
+ {
+ DeleteCriticalSection(lock);
+ free(lock);
+ lock = NULL;
+ }
+}
+
+
+#elif CONFIG_MULTITHREAD && HAVE_PTHREAD_H
+#include <pthread.h>
+static void once(void (*func)(void))
+{
+ static pthread_once_t lock = PTHREAD_ONCE_INIT;
+ pthread_once(&lock, func);
+}
+
+
+#else
+/* No-op version that performs no synchronization. vp8_rtcd() is idempotent,
+ * so as long as your platform provides atomic loads/stores of pointers
+ * no synchronization is strictly necessary.
+ */
+
+static void once(void (*func)(void))
+{
+ static int done;
+
+ if(!done)
+ {
+ func();
+ done = 1;
+ }
+}
+#endif
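
Note on the new vpx_once.h above: it provides three once() implementations — a Win32 path that elects a one-true critical section via InterlockedCompareExchangePointer, a pthread_once() wrapper, and an unsynchronized fallback for platforms with atomic pointer loads. Usage is uniform across all three; a sketch, where init_tables and codec_entry_point are hypothetical names:

    #include "vpx_ports/vpx_once.h"

    static int table[256];

    static void init_tables(void) {       /* runs at most once */
      int i;
      for (i = 0; i < 256; i++)
        table[i] = i * i;
    }

    void codec_entry_point(void) {
      once(init_tables);                  /* safe to call from any thread */
      /* ... table[] is now fully initialized ... */
    }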
diff --git a/libvpx/vpx_ports/vpx_timer.h b/libvpx/vpx_ports/vpx_timer.h
index d07e086..cdad9ef 100644
--- a/libvpx/vpx_ports/vpx_timer.h
+++ b/libvpx/vpx_ports/vpx_timer.h
@@ -32,65 +32,61 @@
/* timersub is not provided by msys at this time. */
#ifndef timersub
#define timersub(a, b, result) \
- do { \
- (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
- (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
- if ((result)->tv_usec < 0) { \
- --(result)->tv_sec; \
- (result)->tv_usec += 1000000; \
- } \
- } while (0)
+ do { \
+ (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
+ (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
+ if ((result)->tv_usec < 0) { \
+ --(result)->tv_sec; \
+ (result)->tv_usec += 1000000; \
+ } \
+ } while (0)
#endif
#endif
-struct vpx_usec_timer
-{
+struct vpx_usec_timer {
#if defined(_WIN32)
- LARGE_INTEGER begin, end;
+ LARGE_INTEGER begin, end;
#else
- struct timeval begin, end;
+ struct timeval begin, end;
#endif
};
static void
-vpx_usec_timer_start(struct vpx_usec_timer *t)
-{
+vpx_usec_timer_start(struct vpx_usec_timer *t) {
#if defined(_WIN32)
- QueryPerformanceCounter(&t->begin);
+ QueryPerformanceCounter(&t->begin);
#else
- gettimeofday(&t->begin, NULL);
+ gettimeofday(&t->begin, NULL);
#endif
}
static void
-vpx_usec_timer_mark(struct vpx_usec_timer *t)
-{
+vpx_usec_timer_mark(struct vpx_usec_timer *t) {
#if defined(_WIN32)
- QueryPerformanceCounter(&t->end);
+ QueryPerformanceCounter(&t->end);
#else
- gettimeofday(&t->end, NULL);
+ gettimeofday(&t->end, NULL);
#endif
}
static int64_t
-vpx_usec_timer_elapsed(struct vpx_usec_timer *t)
-{
+vpx_usec_timer_elapsed(struct vpx_usec_timer *t) {
#if defined(_WIN32)
- LARGE_INTEGER freq, diff;
+ LARGE_INTEGER freq, diff;
- diff.QuadPart = t->end.QuadPart - t->begin.QuadPart;
+ diff.QuadPart = t->end.QuadPart - t->begin.QuadPart;
- QueryPerformanceFrequency(&freq);
- return diff.QuadPart * 1000000 / freq.QuadPart;
+ QueryPerformanceFrequency(&freq);
+ return diff.QuadPart * 1000000 / freq.QuadPart;
#else
- struct timeval diff;
+ struct timeval diff;
- timersub(&t->end, &t->begin, &diff);
- return diff.tv_sec * 1000000 + diff.tv_usec;
+ timersub(&t->end, &t->begin, &diff);
+ return diff.tv_sec * 1000000 + diff.tv_usec;
#endif
}
@@ -101,9 +97,8 @@ vpx_usec_timer_elapsed(struct vpx_usec_timer *t)
#define timersub(a, b, result)
#endif
-struct vpx_usec_timer
-{
- void *dummy;
+struct vpx_usec_timer {
+ void *dummy;
};
static void
@@ -113,7 +108,9 @@ static void
vpx_usec_timer_mark(struct vpx_usec_timer *t) { }
static long
-vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { return 0; }
+vpx_usec_timer_elapsed(struct vpx_usec_timer *t) {
+ return 0;
+}
#endif /* CONFIG_OS_SUPPORT */
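
Note on vpx_timer.h above: vpx_usec_timer wraps QueryPerformanceCounter on Windows and gettimeofday elsewhere behind one start/mark/elapsed interface, degrading to a zero-returning stub without CONFIG_OS_SUPPORT. A typical timing sketch, assuming an OS-support build; work() is a hypothetical function under measurement:

    #include <stdio.h>
    #include "vpx_ports/vpx_timer.h"

    static void work(void) { /* code under measurement */ }

    int main(void) {
      struct vpx_usec_timer t;

      vpx_usec_timer_start(&t);   /* snapshot begin */
      work();
      vpx_usec_timer_mark(&t);    /* snapshot end */

      printf("elapsed: %ld us\n", (long)vpx_usec_timer_elapsed(&t));
      return 0;
    }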
diff --git a/libvpx/vpx_ports/vpxtypes.h b/libvpx/vpx_ports/vpxtypes.h
deleted file mode 100644
index f2fb089..0000000
--- a/libvpx/vpx_ports/vpxtypes.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef __VPXTYPES_H__
-#define __VPXTYPES_H__
-
-#include "vpx_config.h"
-
-//#include <sys/types.h>
-#ifdef _MSC_VER
-# include <basetsd.h>
-typedef SSIZE_T ssize_t;
-#endif
-
-#if defined(HAVE_STDINT_H) && HAVE_STDINT_H
-/* C99 types are preferred to vpx integer types */
-# include <stdint.h>
-#endif
-
-/*!\defgroup basetypes Base Types
- @{*/
-#if !defined(HAVE_STDINT_H) && !defined(INT_T_DEFINED)
-# ifdef STRICTTYPES
-typedef signed char int8_t;
-typedef signed short int16_t;
-typedef signed int int32_t;
-# else
-typedef char int8_t;
-typedef short int16_t;
-typedef int int32_t;
-# endif
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-#endif
-
-typedef int8_t vpxs8;
-typedef uint8_t vpxu8;
-typedef int16_t vpxs16;
-typedef uint16_t vpxu16;
-typedef int32_t vpxs32;
-typedef uint32_t vpxu32;
-typedef int32_t vpxbool;
-
-enum {vpxfalse, vpxtrue};
-
-/*!\def OTC
- \brief a macro suitable for declaring a constant #vpxtc*/
-/*!\def VPXTC
- \brief printf format string suitable for printing an #vpxtc*/
-#ifdef UNICODE
-# ifdef NO_WCHAR
-# error "no non-wchar support added yet"
-# else
-# include <wchar.h>
-typedef wchar_t vpxtc;
-# define OTC(str) L ## str
-# define VPXTC "ls"
-# endif /*NO_WCHAR*/
-#else
-typedef char vpxtc;
-# define OTC(str) (vpxtc*)str
-# define VPXTC "s"
-#endif /*UNICODE*/
-/*@} end - base types*/
-
-/*!\addtogroup basetypes
- @{*/
-/*!\def VPX64
- \brief printf format string suitable for printing an #vpxs64*/
-#if defined(HAVE_STDINT_H)
-# define VPX64 PRId64
-typedef int64_t vpxs64;
-#elif defined(HASLONGLONG)
-# undef PRId64
-# define PRId64 "lld"
-# define VPX64 PRId64
-typedef long long vpxs64;
-#elif defined(WIN32) || defined(_WIN32_WCE)
-# undef PRId64
-# define PRId64 "I64d"
-# define VPX64 PRId64
-typedef __int64 vpxs64;
-typedef unsigned __int64 vpxu64;
-#elif defined(__uClinux__) && defined(CHIP_DM642)
-# include <lddk.h>
-# undef PRId64
-# define PRId64 "lld"
-# define VPX64 PRId64
-typedef long vpxs64;
-#else
-# error "64 bit integer type undefined for this platform!"
-#endif
-#if !defined(HAVE_STDINT_H) && !defined(INT_T_DEFINED)
-typedef vpxs64 int64_t;
-typedef vpxu64 uint64_t;
-#endif
-/*!@} end - base types*/
-
-/*!\ingroup basetypes
- \brief Common return type*/
-typedef enum
-{
- VPX_NOT_FOUND = -404,
- VPX_BUFFER_EMPTY = -202,
- VPX_BUFFER_FULL = -201,
-
- VPX_CONNREFUSED = -102,
- VPX_TIMEDOUT = -101,
- VPX_WOULDBLOCK = -100,
-
- VPX_NET_ERROR = -9,
- VPX_INVALID_VERSION = -8,
- VPX_INPROGRESS = -7,
- VPX_NOT_SUPP = -6,
- VPX_NO_MEM = -3,
- VPX_INVALID_PARAMS = -2,
- VPX_ERROR = -1,
- VPX_OK = 0,
- VPX_DONE = 1
-} vpxsc;
-
-#if defined(WIN32) || defined(_WIN32_WCE)
-# define DLLIMPORT __declspec(dllimport)
-# define DLLEXPORT __declspec(dllexport)
-# define DLLLOCAL
-#elif defined(LINUX)
-# define DLLIMPORT
-/*visibility attribute support is available in 3.4 and later.
- see: http://gcc.gnu.org/wiki/Visibility for more info*/
-# if defined(__GNUC__) && ((__GNUC__<<16|(__GNUC_MINOR__&0xff)) >= (3<<16|4))
-# define GCC_HASCLASSVISIBILITY
-# endif /*defined(__GNUC__) && __GNUC_PREREQ(3,4)*/
-# ifdef GCC_HASCLASSVISIBILITY
-# define DLLEXPORT __attribute__ ((visibility("default")))
-# define DLLLOCAL __attribute__ ((visibility("hidden")))
-# else
-# define DLLEXPORT
-# define DLLLOCAL
-# endif /*GCC_HASCLASSVISIBILITY*/
-#endif /*platform ifdefs*/
-
-#endif /*__VPXTYPES_H__*/
-
-#undef VPXAPI
-/*!\def VPXAPI
- \brief library calling convention/storage class attributes.
-
- Specifies whether the function is imported through a dll
- or is from a static library.*/
-#ifdef VPXDLL
-# ifdef VPXDLLEXPORT
-# define VPXAPI DLLEXPORT
-# else
-# define VPXAPI DLLIMPORT
-# endif /*VPXDLLEXPORT*/
-#else
-# define VPXAPI
-#endif /*VPXDLL*/
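
The deleted header predates reliable C99 support; a sketch of the standard
equivalents that presumably replace it (int64_t for vpxs64, PRId64 for the
hand-rolled VPX64 format string):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int64_t ticks = INT64_C(1) << 40;  /* was: vpxs64 */
      printf("%" PRId64 "\n", ticks);    /* was: printf("%" VPX64 ...) */
      return 0;
    }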
diff --git a/libvpx/vpx_ports/x86.h b/libvpx/vpx_ports/x86.h
index 9dd8c4b..b009c35 100644
--- a/libvpx/vpx_ports/x86.h
+++ b/libvpx/vpx_ports/x86.h
@@ -14,80 +14,79 @@
#include <stdlib.h>
#include "vpx_config.h"
-typedef enum
-{
- VPX_CPU_UNKNOWN = -1,
- VPX_CPU_AMD,
- VPX_CPU_AMD_OLD,
- VPX_CPU_CENTAUR,
- VPX_CPU_CYRIX,
- VPX_CPU_INTEL,
- VPX_CPU_NEXGEN,
- VPX_CPU_NSC,
- VPX_CPU_RISE,
- VPX_CPU_SIS,
- VPX_CPU_TRANSMETA,
- VPX_CPU_TRANSMETA_OLD,
- VPX_CPU_UMC,
- VPX_CPU_VIA,
-
- VPX_CPU_LAST
+typedef enum {
+ VPX_CPU_UNKNOWN = -1,
+ VPX_CPU_AMD,
+ VPX_CPU_AMD_OLD,
+ VPX_CPU_CENTAUR,
+ VPX_CPU_CYRIX,
+ VPX_CPU_INTEL,
+ VPX_CPU_NEXGEN,
+ VPX_CPU_NSC,
+ VPX_CPU_RISE,
+ VPX_CPU_SIS,
+ VPX_CPU_TRANSMETA,
+ VPX_CPU_TRANSMETA_OLD,
+ VPX_CPU_UMC,
+ VPX_CPU_VIA,
+
+ VPX_CPU_LAST
} vpx_cpu_t;
-#if defined(__GNUC__) && __GNUC__
+#if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__)
#if ARCH_X86_64
#define cpuid(func,ax,bx,cx,dx)\
- __asm__ __volatile__ (\
- "cpuid \n\t" \
- : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
- : "a" (func));
+ __asm__ __volatile__ (\
+ "cpuid \n\t" \
+ : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
+ : "a" (func));
#else
#define cpuid(func,ax,bx,cx,dx)\
- __asm__ __volatile__ (\
- "mov %%ebx, %%edi \n\t" \
- "cpuid \n\t" \
- "xchg %%edi, %%ebx \n\t" \
- : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
- : "a" (func));
+ __asm__ __volatile__ (\
+ "mov %%ebx, %%edi \n\t" \
+ "cpuid \n\t" \
+ "xchg %%edi, %%ebx \n\t" \
+ : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+ : "a" (func));
#endif
-#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__ */
#if ARCH_X86_64
#define cpuid(func,ax,bx,cx,dx)\
- asm volatile (\
- "xchg %rsi, %rbx \n\t" \
- "cpuid \n\t" \
- "movl %ebx, %edi \n\t" \
- "xchg %rsi, %rbx \n\t" \
- : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
- : "a" (func));
+ asm volatile (\
+ "xchg %rsi, %rbx \n\t" \
+ "cpuid \n\t" \
+ "movl %ebx, %edi \n\t" \
+ "xchg %rsi, %rbx \n\t" \
+ : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+ : "a" (func));
#else
#define cpuid(func,ax,bx,cx,dx)\
- asm volatile (\
- "pushl %ebx \n\t" \
- "cpuid \n\t" \
- "movl %ebx, %edi \n\t" \
- "popl %ebx \n\t" \
- : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
- : "a" (func));
+ asm volatile (\
+ "pushl %ebx \n\t" \
+ "cpuid \n\t" \
+ "movl %ebx, %edi \n\t" \
+ "popl %ebx \n\t" \
+ : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+ : "a" (func));
#endif
-#else
+#else /* end __SUNPRO__ */
#if ARCH_X86_64
void __cpuid(int CPUInfo[4], int info_type);
#pragma intrinsic(__cpuid)
#define cpuid(func,a,b,c,d) do{\
- int regs[4];\
- __cpuid(regs,func); a=regs[0]; b=regs[1]; c=regs[2]; d=regs[3];\
- } while(0)
+ int regs[4];\
+ __cpuid(regs,func); a=regs[0]; b=regs[1]; c=regs[2]; d=regs[3];\
+ } while(0)
#else
#define cpuid(func,a,b,c,d)\
- __asm mov eax, func\
- __asm cpuid\
- __asm mov a, eax\
- __asm mov b, ebx\
- __asm mov c, ecx\
- __asm mov d, edx
-#endif
+ __asm mov eax, func\
+ __asm cpuid\
+ __asm mov a, eax\
+ __asm mov b, ebx\
+ __asm mov c, ecx\
+ __asm mov d, edx
#endif
+#endif /* end others */
#define HAS_MMX 0x01
#define HAS_SSE 0x02
@@ -100,47 +99,46 @@ void __cpuid(int CPUInfo[4], int info_type);
#endif
static int
-x86_simd_caps(void)
-{
- unsigned int flags = 0;
- unsigned int mask = ~0;
- unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
- char *env;
- (void)reg_ebx;
+x86_simd_caps(void) {
+ unsigned int flags = 0;
+ unsigned int mask = ~0;
+ unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
+ char *env;
+ (void)reg_ebx;
- /* See if the CPU capabilities are being overridden by the environment */
- env = getenv("VPX_SIMD_CAPS");
+ /* See if the CPU capabilities are being overridden by the environment */
+ env = getenv("VPX_SIMD_CAPS");
- if (env && *env)
- return (int)strtol(env, NULL, 0);
+ if (env && *env)
+ return (int)strtol(env, NULL, 0);
- env = getenv("VPX_SIMD_CAPS_MASK");
+ env = getenv("VPX_SIMD_CAPS_MASK");
- if (env && *env)
- mask = strtol(env, NULL, 0);
+ if (env && *env)
+ mask = strtol(env, NULL, 0);
- /* Ensure that the CPUID instruction supports extended features */
- cpuid(0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+ /* Ensure that the CPUID instruction supports extended features */
+ cpuid(0, reg_eax, reg_ebx, reg_ecx, reg_edx);
- if (reg_eax < 1)
- return 0;
+ if (reg_eax < 1)
+ return 0;
- /* Get the standard feature flags */
- cpuid(1, reg_eax, reg_ebx, reg_ecx, reg_edx);
+ /* Get the standard feature flags */
+ cpuid(1, reg_eax, reg_ebx, reg_ecx, reg_edx);
- if (reg_edx & BIT(23)) flags |= HAS_MMX;
+ if (reg_edx & BIT(23)) flags |= HAS_MMX;
- if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */
+ if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */
- if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
+ if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
- if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
+ if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
- if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
+ if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
- if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
+ if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
- return flags & mask;
+ return flags & mask;
}
vpx_cpu_t vpx_x86_vendor(void);
@@ -150,21 +148,20 @@ unsigned __int64 __rdtsc(void);
#pragma intrinsic(__rdtsc)
#endif
static unsigned int
-x86_readtsc(void)
-{
+x86_readtsc(void) {
#if defined(__GNUC__) && __GNUC__
- unsigned int tsc;
- __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):);
- return tsc;
+ unsigned int tsc;
+ __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):);
+ return tsc;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- unsigned int tsc;
- asm volatile("rdtsc\n\t":"=a"(tsc):);
- return tsc;
+ unsigned int tsc;
+ asm volatile("rdtsc\n\t":"=a"(tsc):);
+ return tsc;
#else
#if ARCH_X86_64
- return (unsigned int)__rdtsc();
+ return (unsigned int)__rdtsc();
#else
- __asm rdtsc;
+ __asm rdtsc;
#endif
#endif
}
@@ -172,45 +169,41 @@ x86_readtsc(void)
#if defined(__GNUC__) && __GNUC__
#define x86_pause_hint()\
- __asm__ __volatile__ ("pause \n\t")
+ __asm__ __volatile__ ("pause \n\t")
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define x86_pause_hint()\
- asm volatile ("pause \n\t")
+ asm volatile ("pause \n\t")
#else
#if ARCH_X86_64
#define x86_pause_hint()\
- _mm_pause();
+ _mm_pause();
#else
#define x86_pause_hint()\
- __asm pause
+ __asm pause
#endif
#endif
#if defined(__GNUC__) && __GNUC__
static void
-x87_set_control_word(unsigned short mode)
-{
- __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
+x87_set_control_word(unsigned short mode) {
+ __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
}
static unsigned short
-x87_get_control_word(void)
-{
- unsigned short mode;
- __asm__ __volatile__("fstcw %0\n\t":"=m"(*&mode):);
+x87_get_control_word(void) {
+ unsigned short mode;
+ __asm__ __volatile__("fstcw %0\n\t":"=m"(*&mode):);
return mode;
}
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
static void
-x87_set_control_word(unsigned short mode)
-{
- asm volatile("fldcw %0" : : "m"(*&mode));
+x87_set_control_word(unsigned short mode) {
+ asm volatile("fldcw %0" : : "m"(*&mode));
}
static unsigned short
-x87_get_control_word(void)
-{
- unsigned short mode;
- asm volatile("fstcw %0\n\t":"=m"(*&mode):);
- return mode;
+x87_get_control_word(void) {
+ unsigned short mode;
+ asm volatile("fstcw %0\n\t":"=m"(*&mode):);
+ return mode;
}
#elif ARCH_X86_64
/* No fldcw intrinsics on Windows x64, punt to external asm */
@@ -220,25 +213,22 @@ extern unsigned short vpx_winx64_fstcw(void);
#define x87_get_control_word vpx_winx64_fstcw
#else
static void
-x87_set_control_word(unsigned short mode)
-{
- __asm { fldcw mode }
+x87_set_control_word(unsigned short mode) {
+ __asm { fldcw mode }
}
static unsigned short
-x87_get_control_word(void)
-{
- unsigned short mode;
- __asm { fstcw mode }
- return mode;
+x87_get_control_word(void) {
+ unsigned short mode;
+ __asm { fstcw mode }
+ return mode;
}
#endif
static unsigned short
-x87_set_double_precision(void)
-{
- unsigned short mode = x87_get_control_word();
- x87_set_control_word((mode&~0x300) | 0x200);
- return mode;
+x87_set_double_precision(void) {
+ unsigned short mode = x87_get_control_word();
+ x87_set_control_word((mode&~0x300) | 0x200);
+ return mode;
}
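
Per the getenv() calls in x86_simd_caps() above, detection can be forced with
VPX_SIMD_CAPS or narrowed with VPX_SIMD_CAPS_MASK; e.g. running a binary with
VPX_SIMD_CAPS=0 in the environment disables every SIMD path. A sketch decoding
the returned bitfield, assuming the HAS_SSE2..HAS_SSE4_1 values come from the
part of this header not shown in the hunk:

    #include <stdio.h>
    #include "vpx_ports/x86.h"

    int main(void) {
      const int caps = x86_simd_caps();  /* env override already applied */
      if (caps & HAS_MMX)    puts("mmx");
      if (caps & HAS_SSE)    puts("sse");
      if (caps & HAS_SSE2)   puts("sse2");
      if (caps & HAS_SSE3)   puts("sse3");
      if (caps & HAS_SSSE3)  puts("ssse3");
      if (caps & HAS_SSE4_1) puts("sse4.1");
      return 0;
    }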
diff --git a/libvpx/vpx_ports/x86_abi_support.asm b/libvpx/vpx_ports/x86_abi_support.asm
index 0c9fe37..eccbfa3 100644
--- a/libvpx/vpx_ports/x86_abi_support.asm
+++ b/libvpx/vpx_ports/x86_abi_support.asm
@@ -78,6 +78,17 @@
%endif
+; LIBVPX_YASM_WIN64
+; Set LIBVPX_YASM_WIN64 if output is Windows 64bit so the code will work if x64
+; or win64 is defined on the Yasm command line.
+%ifidn __OUTPUT_FORMAT__,win64
+%define LIBVPX_YASM_WIN64 1
+%elifidn __OUTPUT_FORMAT__,x64
+%define LIBVPX_YASM_WIN64 1
+%else
+%define LIBVPX_YASM_WIN64 0
+%endif
+
; sym()
; Return the proper symbol name for the target ABI.
;
@@ -90,7 +101,7 @@
%define sym(x) x
%elifidn __OUTPUT_FORMAT__,elfx32
%define sym(x) x
-%elifidn __OUTPUT_FORMAT__,x64
+%elif LIBVPX_YASM_WIN64
%define sym(x) x
%else
%define sym(x) _ %+ x
@@ -114,7 +125,7 @@
%define PRIVATE :hidden
%elifidn __OUTPUT_FORMAT__,elfx32
%define PRIVATE :hidden
- %elifidn __OUTPUT_FORMAT__,x64
+ %elif LIBVPX_YASM_WIN64
%define PRIVATE
%else
%define PRIVATE :private_extern
@@ -131,7 +142,7 @@
%else
; 64 bit ABI passes arguments in registers. This is a workaround to get up
; and running quickly. Relies on SHADOW_ARGS_TO_STACK
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
%define arg(x) [rbp+16+8*x]
%else
%define arg(x) [rbp-8-8*x]
@@ -230,6 +241,12 @@
%elifidn __OUTPUT_FORMAT__,elfx32
%define WRT_PLT wrt ..plt
%define HIDDEN_DATA(x) x:data hidden
+ %elifidn __OUTPUT_FORMAT__,macho64
+ %ifdef CHROMIUM
+ %define HIDDEN_DATA(x) x:private_extern
+ %else
+ %define HIDDEN_DATA(x) x
+ %endif
%else
%define HIDDEN_DATA(x) x
%endif
@@ -251,7 +268,7 @@
%endm
%define UNSHADOW_ARGS
%else
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%macro SHADOW_ARGS_TO_STACK 1 ; argc
%if %1 > 0
mov arg(0),rcx
@@ -307,7 +324,7 @@
; Win64 ABI requires 16 byte stack alignment, but then pushes an 8 byte return
; value. Typically we follow this up with 'push rbp' - re-aligning the stack -
; but in some cases this is not done and unaligned movs must be used.
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%macro SAVE_XMM 1-2 a
%if %1 < 6
%error Only xmm registers 6-15 must be preserved
diff --git a/libvpx/vpx_ports/x86_cpuid.c b/libvpx/vpx_ports/x86_cpuid.c
index ce64033..fe86cfc 100644
--- a/libvpx/vpx_ports/x86_cpuid.c
+++ b/libvpx/vpx_ports/x86_cpuid.c
@@ -11,43 +11,39 @@
#include <string.h>
#include "x86.h"
-struct cpuid_vendors
-{
- char vendor_string[12];
- vpx_cpu_t vendor_id;
+struct cpuid_vendors {
+ char vendor_string[12];
+ vpx_cpu_t vendor_id;
};
-static struct cpuid_vendors cpuid_vendor_list[VPX_CPU_LAST] =
-{
- { "AuthenticAMD", VPX_CPU_AMD },
- { "AMDisbetter!", VPX_CPU_AMD_OLD },
- { "CentaurHauls", VPX_CPU_CENTAUR },
- { "CyrixInstead", VPX_CPU_CYRIX },
- { "GenuineIntel", VPX_CPU_INTEL },
- { "NexGenDriven", VPX_CPU_NEXGEN },
- { "Geode by NSC", VPX_CPU_NSC },
- { "RiseRiseRise", VPX_CPU_RISE },
- { "SiS SiS SiS ", VPX_CPU_SIS },
- { "GenuineTMx86", VPX_CPU_TRANSMETA },
- { "TransmetaCPU", VPX_CPU_TRANSMETA_OLD },
- { "UMC UMC UMC ", VPX_CPU_UMC },
- { "VIA VIA VIA ", VPX_CPU_VIA },
+static struct cpuid_vendors cpuid_vendor_list[VPX_CPU_LAST] = {
+ { "AuthenticAMD", VPX_CPU_AMD },
+ { "AMDisbetter!", VPX_CPU_AMD_OLD },
+ { "CentaurHauls", VPX_CPU_CENTAUR },
+ { "CyrixInstead", VPX_CPU_CYRIX },
+ { "GenuineIntel", VPX_CPU_INTEL },
+ { "NexGenDriven", VPX_CPU_NEXGEN },
+ { "Geode by NSC", VPX_CPU_NSC },
+ { "RiseRiseRise", VPX_CPU_RISE },
+ { "SiS SiS SiS ", VPX_CPU_SIS },
+ { "GenuineTMx86", VPX_CPU_TRANSMETA },
+ { "TransmetaCPU", VPX_CPU_TRANSMETA_OLD },
+ { "UMC UMC UMC ", VPX_CPU_UMC },
+ { "VIA VIA VIA ", VPX_CPU_VIA },
};
-vpx_cpu_t vpx_x86_vendor(void)
-{
- unsigned int reg_eax;
- unsigned int vs[3];
- int i;
+vpx_cpu_t vpx_x86_vendor(void) {
+ unsigned int reg_eax;
+ unsigned int vs[3];
+ int i;
- /* Get the Vendor String from the CPU */
- cpuid(0, reg_eax, vs[0], vs[2], vs[1]);
+ /* Get the Vendor String from the CPU */
+ cpuid(0, reg_eax, vs[0], vs[2], vs[1]);
- for (i = 0; i < VPX_CPU_LAST; i++)
- {
- if (strncmp ((const char *)vs, cpuid_vendor_list[i].vendor_string, 12) == 0)
- return (cpuid_vendor_list[i].vendor_id);
- }
+ for (i = 0; i < VPX_CPU_LAST; i++) {
+ if (strncmp((const char *)vs, cpuid_vendor_list[i].vendor_string, 12) == 0)
+ return (cpuid_vendor_list[i].vendor_id);
+ }
- return VPX_CPU_UNKNOWN;
+ return VPX_CPU_UNKNOWN;
}
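
Why vpx_x86_vendor() stores the cx output into vs[2] and dx into vs[1]: CPUID
leaf 0 returns the vendor string in EBX:EDX:ECX order. A standalone sketch with
hard-coded little-endian register values showing the reassembly:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      /* ebx, edx, ecx as returned by CPUID leaf 0 on an Intel part */
      unsigned int vs[3] = { 0x756e6547, 0x49656e69, 0x6c65746e };
      char name[13];
      memcpy(name, vs, 12);
      name[12] = '\0';
      printf("%s\n", name);  /* prints "GenuineIntel" */
      return 0;
    }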
diff --git a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm
index 9189641..d070a47 100644
--- a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm
+++ b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm
@@ -9,18 +9,19 @@
;
- EXPORT |vp8_yv12_copy_y_neon|
+ EXPORT |vpx_yv12_copy_y_neon|
ARM
REQUIRE8
PRESERVE8
- INCLUDE asm_com_offsets.asm
+ INCLUDE vpx_scale_asm_offsets.asm
AREA ||.text||, CODE, READONLY, ALIGN=2
-;void vpxyv12_copy_y_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc)
-|vp8_yv12_copy_y_neon| PROC
+;void vpx_yv12_copy_y_neon(const YV12_BUFFER_CONFIG *src_ybc,
+; YV12_BUFFER_CONFIG *dst_ybc)
+|vpx_yv12_copy_y_neon| PROC
push {r4 - r11, lr}
vpush {d8-d15}
diff --git a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm
index e55d076..696f47a 100644
--- a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm
+++ b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm
@@ -14,11 +14,11 @@
REQUIRE8
PRESERVE8
- INCLUDE asm_com_offsets.asm
+ INCLUDE vpx_scale_asm_offsets.asm
AREA ||.text||, CODE, READONLY, ALIGN=2
-;void vp8_yv12_copy_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc,
+;void vp8_yv12_copy_frame_func_neon(const YV12_BUFFER_CONFIG *src_ybc,
; YV12_BUFFER_CONFIG *dst_ybc);
|vp8_yv12_copy_frame_func_neon| PROC
diff --git a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm
index ec64dbc..d3306b6 100644
--- a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm
+++ b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm
@@ -14,14 +14,15 @@
REQUIRE8
PRESERVE8
- INCLUDE asm_com_offsets.asm
+ INCLUDE vpx_scale_asm_offsets.asm
AREA ||.text||, CODE, READONLY, ALIGN=2
-;Note: This function is used to copy source data in src_buffer[i] at beginning of
-;the encoding. The buffer has a width and height of cpi->oxcf.Width and cpi->oxcf.Height,
-;which can be ANY numbers(NOT always multiples of 16 or 4).
+;Note: This function is used to copy source data in src_buffer[i] at beginning
+;of the encoding. The buffer has a width and height of cpi->oxcf.Width and
+;cpi->oxcf.Height, which can be ANY numbers (NOT always multiples of 16 or 4).
-;void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
+;void vp8_yv12_copy_src_frame_func_neon(const YV12_BUFFER_CONFIG *src_ybc,
+; YV12_BUFFER_CONFIG *dst_ybc);
|vp8_yv12_copy_src_frame_func_neon| PROC
push {r4 - r11, lr}
diff --git a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm
index ebc4242..b2eb9eb 100644
--- a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm
+++ b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm
@@ -14,7 +14,7 @@
REQUIRE8
PRESERVE8
- INCLUDE asm_com_offsets.asm
+ INCLUDE vpx_scale_asm_offsets.asm
AREA ||.text||, CODE, READONLY, ALIGN=2
;void vp8_yv12_extend_frame_borders_neon (YV12_BUFFER_CONFIG *ybf);
diff --git a/libvpx/vpx_scale/arm/neon/yv12extend_arm.c b/libvpx/vpx_scale/arm/neon/yv12extend_arm.c
index eabd495..fac7bbc 100644
--- a/libvpx/vpx_scale/arm/neon/yv12extend_arm.c
+++ b/libvpx/vpx_scale/arm/neon/yv12extend_arm.c
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_rtcd.h"
+#include "./vpx_scale_rtcd.h"
-extern void vp8_yv12_copy_frame_func_neon(struct yv12_buffer_config *src_ybc,
- struct yv12_buffer_config *dst_ybc);
+extern void vp8_yv12_copy_frame_func_neon(
+ const struct yv12_buffer_config *src_ybc,
+ struct yv12_buffer_config *dst_ybc);
-void vp8_yv12_copy_frame_neon(struct yv12_buffer_config *src_ybc,
+void vp8_yv12_copy_frame_neon(const struct yv12_buffer_config *src_ybc,
struct yv12_buffer_config *dst_ybc) {
vp8_yv12_copy_frame_func_neon(src_ybc, dst_ybc);
-
vp8_yv12_extend_frame_borders_neon(dst_ybc);
}
diff --git a/libvpx/vpx_scale/generic/bicubic_scaler.c b/libvpx/vpx_scale/generic/bicubic_scaler.c
deleted file mode 100644
index c116740..0000000
--- a/libvpx/vpx_scale/generic/bicubic_scaler.c
+++ /dev/null
@@ -1,569 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#include <float.h>
-#include <math.h>
-#include <stdio.h>
-#include "vpx_mem/vpx_mem.h"
-#include "vpxscale_arbitrary.h"
-
-#define FIXED_POINT
-
-#define MAX_IN_WIDTH 800
-#define MAX_IN_HEIGHT 600
-#define MAX_OUT_WIDTH 800
-#define MAX_OUT_HEIGHT 600
-#define MAX_OUT_DIMENSION ((MAX_OUT_WIDTH > MAX_OUT_HEIGHT) ? \
- MAX_OUT_WIDTH : MAX_OUT_HEIGHT)
-
-BICUBIC_SCALER_STRUCT g_b_scaler;
-static int g_first_time = 1;
-
-#pragma DATA_SECTION(g_hbuf, "VP6_HEAP")
-#pragma DATA_ALIGN (g_hbuf, 32);
-unsigned char g_hbuf[MAX_OUT_DIMENSION];
-
-#pragma DATA_SECTION(g_hbuf_uv, "VP6_HEAP")
-#pragma DATA_ALIGN (g_hbuf_uv, 32);
-unsigned char g_hbuf_uv[MAX_OUT_DIMENSION];
-
-
-#ifdef FIXED_POINT
-static int a_i = 0.6 * 65536;
-#else
-static float a = -0.6;
-#endif
-
-#ifdef FIXED_POINT
-// 3 2
-// C0 = a*t - a*t
-//
-static short c0_fixed(unsigned int t) {
- // put t in Q16 notation
- unsigned short v1, v2;
-
- // Q16
- v1 = (a_i * t) >> 16;
- v1 = (v1 * t) >> 16;
-
- // Q16
- v2 = (a_i * t) >> 16;
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
-
- // Q12
- return -((v1 - v2) >> 4);
-}
-
-// 2 3
-// C1 = a*t + (3-2*a)*t - (2-a)*t
-//
-static short c1_fixed(unsigned int t) {
- unsigned short v1, v2, v3;
- unsigned short two, three;
-
- // Q16
- v1 = (a_i * t) >> 16;
-
- // Q13
- two = 2 << 13;
- v2 = two - (a_i >> 3);
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
-
- // Q13
- three = 3 << 13;
- v3 = three - (2 * (a_i >> 3));
- v3 = (v3 * t) >> 16;
- v3 = (v3 * t) >> 16;
-
- // Q12
- return (((v1 >> 3) - v2 + v3) >> 1);
-
-}
-
-// 2 3
-// C2 = 1 - (3-a)*t + (2-a)*t
-//
-static short c2_fixed(unsigned int t) {
- unsigned short v1, v2, v3;
- unsigned short two, three;
-
- // Q13
- v1 = 1 << 13;
-
- // Q13
- three = 3 << 13;
- v2 = three - (a_i >> 3);
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
-
- // Q13
- two = 2 << 13;
- v3 = two - (a_i >> 3);
- v3 = (v3 * t) >> 16;
- v3 = (v3 * t) >> 16;
- v3 = (v3 * t) >> 16;
-
- // Q12
- return (v1 - v2 + v3) >> 1;
-}
-
-// 2 3
-// C3 = a*t - 2*a*t + a*t
-//
-static short c3_fixed(unsigned int t) {
- int v1, v2, v3;
-
- // Q16
- v1 = (a_i * t) >> 16;
-
- // Q15
- v2 = 2 * (a_i >> 1);
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
-
- // Q16
- v3 = (a_i * t) >> 16;
- v3 = (v3 * t) >> 16;
- v3 = (v3 * t) >> 16;
-
- // Q12
- return ((v2 - (v1 >> 1) - (v3 >> 1)) >> 3);
-}
-#else
-// 3 2
-// C0 = -a*t + a*t
-//
-float C0(float t) {
- return -a * t * t * t + a * t * t;
-}
-
-// 2 3
-// C1 = -a*t + (2*a+3)*t - (a+2)*t
-//
-float C1(float t) {
- return -(a + 2.0f) * t * t * t + (2.0f * a + 3.0f) * t * t - a * t;
-}
-
-// 2 3
-// C2 = 1 - (a+3)*t + (a+2)*t
-//
-float C2(float t) {
- return (a + 2.0f) * t * t * t - (a + 3.0f) * t * t + 1.0f;
-}
-
-// 2 3
-// C3 = a*t - 2*a*t + a*t
-//
-float C3(float t) {
- return a * t * t * t - 2.0f * a * t * t + a * t;
-}
-#endif
-
-#if 0
-int compare_real_fixed() {
- int i, errors = 0;
- float mult = 1.0 / 10000.0;
- unsigned int fixed_mult = mult * 4294967296;// 65536;
- unsigned int phase_offset_int;
- float phase_offset_real;
-
- for (i = 0; i < 10000; i++) {
- int fixed0, fixed1, fixed2, fixed3, fixed_total;
- int real0, real1, real2, real3, real_total;
-
- phase_offset_real = (float)i * mult;
- phase_offset_int = (fixed_mult * i) >> 16;
-// phase_offset_int = phase_offset_real * 65536;
-
- fixed0 = c0_fixed(phase_offset_int);
- real0 = C0(phase_offset_real) * 4096.0;
-
- if ((abs(fixed0) > (abs(real0) + 1)) || (abs(fixed0) < (abs(real0) - 1)))
- errors++;
-
- fixed1 = c1_fixed(phase_offset_int);
- real1 = C1(phase_offset_real) * 4096.0;
-
- if ((abs(fixed1) > (abs(real1) + 1)) || (abs(fixed1) < (abs(real1) - 1)))
- errors++;
-
- fixed2 = c2_fixed(phase_offset_int);
- real2 = C2(phase_offset_real) * 4096.0;
-
- if ((abs(fixed2) > (abs(real2) + 1)) || (abs(fixed2) < (abs(real2) - 1)))
- errors++;
-
- fixed3 = c3_fixed(phase_offset_int);
- real3 = C3(phase_offset_real) * 4096.0;
-
- if ((abs(fixed3) > (abs(real3) + 1)) || (abs(fixed3) < (abs(real3) - 1)))
- errors++;
-
- fixed_total = fixed0 + fixed1 + fixed2 + fixed3;
- real_total = real0 + real1 + real2 + real3;
-
- if ((fixed_total > 4097) || (fixed_total < 4094))
- errors++;
-
- if ((real_total > 4097) || (real_total < 4095))
- errors++;
- }
-
- return errors;
-}
-#endif
-
-// Find greatest common denominator between two integers. Method used here is
-// slow compared to Euclid's algorithm, but does not require any division.
-int gcd(int a, int b) {
- // Problem with this algorithm is that if a or b = 0 this function
- // will never exit. Don't want to return 0 because any computation
- // that was based on a common denoninator and tried to reduce by
- // dividing by 0 would fail. Best solution that could be thought of
- // would to be fail by returing a 1;
- if (a <= 0 || b <= 0)
- return 1;
-
- while (a != b) {
- if (b > a)
- b = b - a;
- else {
- int tmp = a;// swap large and
- a = b; // small
- b = tmp;
- }
- }
-
- return b;
-}
-
-void bicubic_coefficient_init() {
- vpx_memset(&g_b_scaler, 0, sizeof(BICUBIC_SCALER_STRUCT));
- g_first_time = 0;
-}
-
-void bicubic_coefficient_destroy() {
- if (!g_first_time) {
- vpx_free(g_b_scaler.l_w);
-
- vpx_free(g_b_scaler.l_h);
-
- vpx_free(g_b_scaler.l_h_uv);
-
- vpx_free(g_b_scaler.c_w);
-
- vpx_free(g_b_scaler.c_h);
-
- vpx_free(g_b_scaler.c_h_uv);
-
- vpx_memset(&g_b_scaler, 0, sizeof(BICUBIC_SCALER_STRUCT));
- }
-}
-
-// Create the coeffients that will be used for the cubic interpolation.
-// Because scaling does not have to be equal in the vertical and horizontal
-// regimes the phase offsets will be different. There are 4 coefficents
-// for each point, two on each side. The layout is that there are the
-// 4 coefficents for each phase in the array and then the next phase.
-int bicubic_coefficient_setup(int in_width, int in_height, int out_width, int out_height) {
- int i;
-#ifdef FIXED_POINT
- int phase_offset_int;
- unsigned int fixed_mult;
- int product_val = 0;
-#else
- float phase_offset;
-#endif
- int gcd_w, gcd_h, gcd_h_uv, d_w, d_h, d_h_uv;
-
- if (g_first_time)
- bicubic_coefficient_init();
-
-
- // check to see if the coefficents have already been set up correctly
- if ((in_width == g_b_scaler.in_width) && (in_height == g_b_scaler.in_height)
- && (out_width == g_b_scaler.out_width) && (out_height == g_b_scaler.out_height))
- return 0;
-
- g_b_scaler.in_width = in_width;
- g_b_scaler.in_height = in_height;
- g_b_scaler.out_width = out_width;
- g_b_scaler.out_height = out_height;
-
- // Don't want to allow crazy scaling, just try and prevent a catastrophic
- // failure here. Want to fail after setting the member functions so if
- // if the scaler is called the member functions will not scale.
- if (out_width <= 0 || out_height <= 0)
- return -1;
-
- // reduce in/out width and height ratios using the gcd
- gcd_w = gcd(out_width, in_width);
- gcd_h = gcd(out_height, in_height);
- gcd_h_uv = gcd(out_height, in_height / 2);
-
- // the numerator width and height are to be saved in
- // globals so they can be used during the scaling process
- // without having to be recalculated.
- g_b_scaler.nw = out_width / gcd_w;
- d_w = in_width / gcd_w;
-
- g_b_scaler.nh = out_height / gcd_h;
- d_h = in_height / gcd_h;
-
- g_b_scaler.nh_uv = out_height / gcd_h_uv;
- d_h_uv = (in_height / 2) / gcd_h_uv;
-
- // allocate memory for the coefficents
- vpx_free(g_b_scaler.l_w);
-
- vpx_free(g_b_scaler.l_h);
-
- vpx_free(g_b_scaler.l_h_uv);
-
- g_b_scaler.l_w = (short *)vpx_memalign(32, out_width * 2);
- g_b_scaler.l_h = (short *)vpx_memalign(32, out_height * 2);
- g_b_scaler.l_h_uv = (short *)vpx_memalign(32, out_height * 2);
-
- vpx_free(g_b_scaler.c_w);
-
- vpx_free(g_b_scaler.c_h);
-
- vpx_free(g_b_scaler.c_h_uv);
-
- g_b_scaler.c_w = (short *)vpx_memalign(32, g_b_scaler.nw * 4 * 2);
- g_b_scaler.c_h = (short *)vpx_memalign(32, g_b_scaler.nh * 4 * 2);
- g_b_scaler.c_h_uv = (short *)vpx_memalign(32, g_b_scaler.nh_uv * 4 * 2);
-
- g_b_scaler.hbuf = g_hbuf;
- g_b_scaler.hbuf_uv = g_hbuf_uv;
-
- // Set up polyphase filter taps. This needs to be done before
- // the scaling because of the floating point math required. The
- // coefficients are multiplied by 2^12 so that fixed point math
- // can be used in the main scaling loop.
-#ifdef FIXED_POINT
- fixed_mult = (1.0 / (float)g_b_scaler.nw) * 4294967296;
-
- product_val = 0;
-
- for (i = 0; i < g_b_scaler.nw; i++) {
- if (product_val > g_b_scaler.nw)
- product_val -= g_b_scaler.nw;
-
- phase_offset_int = (fixed_mult * product_val) >> 16;
-
- g_b_scaler.c_w[i * 4] = c3_fixed(phase_offset_int);
- g_b_scaler.c_w[i * 4 + 1] = c2_fixed(phase_offset_int);
- g_b_scaler.c_w[i * 4 + 2] = c1_fixed(phase_offset_int);
- g_b_scaler.c_w[i * 4 + 3] = c0_fixed(phase_offset_int);
-
- product_val += d_w;
- }
-
-
- fixed_mult = (1.0 / (float)g_b_scaler.nh) * 4294967296;
-
- product_val = 0;
-
- for (i = 0; i < g_b_scaler.nh; i++) {
- if (product_val > g_b_scaler.nh)
- product_val -= g_b_scaler.nh;
-
- phase_offset_int = (fixed_mult * product_val) >> 16;
-
- g_b_scaler.c_h[i * 4] = c0_fixed(phase_offset_int);
- g_b_scaler.c_h[i * 4 + 1] = c1_fixed(phase_offset_int);
- g_b_scaler.c_h[i * 4 + 2] = c2_fixed(phase_offset_int);
- g_b_scaler.c_h[i * 4 + 3] = c3_fixed(phase_offset_int);
-
- product_val += d_h;
- }
-
- fixed_mult = (1.0 / (float)g_b_scaler.nh_uv) * 4294967296;
-
- product_val = 0;
-
- for (i = 0; i < g_b_scaler.nh_uv; i++) {
- if (product_val > g_b_scaler.nh_uv)
- product_val -= g_b_scaler.nh_uv;
-
- phase_offset_int = (fixed_mult * product_val) >> 16;
-
- g_b_scaler.c_h_uv[i * 4] = c0_fixed(phase_offset_int);
- g_b_scaler.c_h_uv[i * 4 + 1] = c1_fixed(phase_offset_int);
- g_b_scaler.c_h_uv[i * 4 + 2] = c2_fixed(phase_offset_int);
- g_b_scaler.c_h_uv[i * 4 + 3] = c3_fixed(phase_offset_int);
-
- product_val += d_h_uv;
- }
-
-#else
-
- for (i = 0; i < g_nw; i++) {
- phase_offset = (float)((i * d_w) % g_nw) / (float)g_nw;
- g_c_w[i * 4] = (C3(phase_offset) * 4096.0);
- g_c_w[i * 4 + 1] = (C2(phase_offset) * 4096.0);
- g_c_w[i * 4 + 2] = (C1(phase_offset) * 4096.0);
- g_c_w[i * 4 + 3] = (C0(phase_offset) * 4096.0);
- }
-
- for (i = 0; i < g_nh; i++) {
- phase_offset = (float)((i * d_h) % g_nh) / (float)g_nh;
- g_c_h[i * 4] = (C0(phase_offset) * 4096.0);
- g_c_h[i * 4 + 1] = (C1(phase_offset) * 4096.0);
- g_c_h[i * 4 + 2] = (C2(phase_offset) * 4096.0);
- g_c_h[i * 4 + 3] = (C3(phase_offset) * 4096.0);
- }
-
- for (i = 0; i < g_nh_uv; i++) {
- phase_offset = (float)((i * d_h_uv) % g_nh_uv) / (float)g_nh_uv;
- g_c_h_uv[i * 4] = (C0(phase_offset) * 4096.0);
- g_c_h_uv[i * 4 + 1] = (C1(phase_offset) * 4096.0);
- g_c_h_uv[i * 4 + 2] = (C2(phase_offset) * 4096.0);
- g_c_h_uv[i * 4 + 3] = (C3(phase_offset) * 4096.0);
- }
-
-#endif
-
- // Create an array that corresponds input lines to output lines.
- // This doesn't require floating point math, but it does require
- // a division and because hardware division is not present that
- // is a call.
- for (i = 0; i < out_width; i++) {
- g_b_scaler.l_w[i] = (i * d_w) / g_b_scaler.nw;
-
- if ((g_b_scaler.l_w[i] + 2) <= in_width)
- g_b_scaler.max_usable_out_width = i;
-
- }
-
- for (i = 0; i < out_height + 1; i++) {
- g_b_scaler.l_h[i] = (i * d_h) / g_b_scaler.nh;
- g_b_scaler.l_h_uv[i] = (i * d_h_uv) / g_b_scaler.nh_uv;
- }
-
- return 0;
-}
-
-int bicubic_scale(int in_width, int in_height, int in_stride,
- int out_width, int out_height, int out_stride,
- unsigned char *input_image, unsigned char *output_image) {
- short *RESTRICT l_w, * RESTRICT l_h;
- short *RESTRICT c_w, * RESTRICT c_h;
- unsigned char *RESTRICT ip, * RESTRICT op;
- unsigned char *RESTRICT hbuf;
- int h, w, lw, lh;
- int temp_sum;
- int phase_offset_w, phase_offset_h;
-
- c_w = g_b_scaler.c_w;
- c_h = g_b_scaler.c_h;
-
- op = output_image;
-
- l_w = g_b_scaler.l_w;
- l_h = g_b_scaler.l_h;
-
- phase_offset_h = 0;
-
- for (h = 0; h < out_height; h++) {
- // select the row to work on
- lh = l_h[h];
- ip = input_image + (in_stride * lh);
-
- // vp8_filter the row vertically into an temporary buffer.
- // If the phase offset == 0 then all the multiplication
- // is going to result in the output equalling the input.
- // So instead point the temporary buffer to the input.
- // Also handle the boundry condition of not being able to
- // filter that last lines.
- if (phase_offset_h && (lh < in_height - 2)) {
- hbuf = g_b_scaler.hbuf;
-
- for (w = 0; w < in_width; w++) {
- temp_sum = c_h[phase_offset_h * 4 + 3] * ip[w - in_stride];
- temp_sum += c_h[phase_offset_h * 4 + 2] * ip[w];
- temp_sum += c_h[phase_offset_h * 4 + 1] * ip[w + in_stride];
- temp_sum += c_h[phase_offset_h * 4] * ip[w + 2 * in_stride];
-
- hbuf[w] = temp_sum >> 12;
- }
- } else
- hbuf = ip;
-
- // increase the phase offset for the next time around.
- if (++phase_offset_h >= g_b_scaler.nh)
- phase_offset_h = 0;
-
- // now filter and expand it horizontally into the final
- // output buffer
- phase_offset_w = 0;
-
- for (w = 0; w < out_width; w++) {
- // get the index to use to expand the image
- lw = l_w[w];
-
- temp_sum = c_w[phase_offset_w * 4] * hbuf[lw - 1];
- temp_sum += c_w[phase_offset_w * 4 + 1] * hbuf[lw];
- temp_sum += c_w[phase_offset_w * 4 + 2] * hbuf[lw + 1];
- temp_sum += c_w[phase_offset_w * 4 + 3] * hbuf[lw + 2];
- temp_sum = temp_sum >> 12;
-
- if (++phase_offset_w >= g_b_scaler.nw)
- phase_offset_w = 0;
-
- // boundry conditions
- if ((lw + 2) >= in_width)
- temp_sum = hbuf[lw];
-
- if (lw == 0)
- temp_sum = hbuf[0];
-
- op[w] = temp_sum;
- }
-
- op += out_stride;
- }
-
- return 0;
-}
-
-void bicubic_scale_frame_reset() {
- g_b_scaler.out_width = 0;
- g_b_scaler.out_height = 0;
-}
-
-void bicubic_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
- int new_width, int new_height) {
-
- dst->y_width = new_width;
- dst->y_height = new_height;
- dst->uv_width = new_width / 2;
- dst->uv_height = new_height / 2;
-
- dst->y_stride = dst->y_width;
- dst->uv_stride = dst->uv_width;
-
- bicubic_scale(src->y_width, src->y_height, src->y_stride,
- new_width, new_height, dst->y_stride,
- src->y_buffer, dst->y_buffer);
-
- bicubic_scale(src->uv_width, src->uv_height, src->uv_stride,
- new_width / 2, new_height / 2, dst->uv_stride,
- src->u_buffer, dst->u_buffer);
-
- bicubic_scale(src->uv_width, src->uv_height, src->uv_stride,
- new_width / 2, new_height / 2, dst->uv_stride,
- src->v_buffer, dst->v_buffer);
-}
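
For reference, the floating-point kernel the deleted scaler implemented (the
C0..C3 functions above, with sharpness parameter a = -0.6). For any phase t the
four weights sum to 1, which is why the fixed-point tables could be scaled by
2^12 and the filtered sum brought back with a single >> 12:

    #include <stdio.h>

    static const float a = -0.6f;

    static float C0(float t) { return -a * t * t * t + a * t * t; }
    static float C1(float t) { return -(a + 2) * t * t * t + (2 * a + 3) * t * t - a * t; }
    static float C2(float t) { return (a + 2) * t * t * t - (a + 3) * t * t + 1; }
    static float C3(float t) { return a * t * t * t - 2 * a * t * t + a * t; }

    int main(void) {
      float t;
      for (t = 0.0f; t < 1.0f; t += 0.25f)
        printf("t=%.2f sum=%f\n", t, C0(t) + C1(t) + C2(t) + C3(t));  /* ~1.0 */
      return 0;
    }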
diff --git a/libvpx/vpx_scale/generic/gen_scalers.c b/libvpx/vpx_scale/generic/gen_scalers.c
index 60c21fb..5f355c5 100644
--- a/libvpx/vpx_scale/generic/gen_scalers.c
+++ b/libvpx/vpx_scale/generic/gen_scalers.c
@@ -9,7 +9,7 @@
*/
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vpx_mem/vpx_mem.h"
/****************************************************************************
* Imports
@@ -17,688 +17,6 @@
/****************************************************************************
*
- * ROUTINE : vp8_horizontal_line_4_5_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 4 to 5.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned i;
- unsigned int a, b, c;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 4; i += 4) {
- a = src[0];
- b = src[1];
- des [0] = (unsigned char) a;
- des [1] = (unsigned char)((a * 51 + 205 * b + 128) >> 8);
- c = src[2] * 154;
- a = src[3];
- des [2] = (unsigned char)((b * 102 + c + 128) >> 8);
- des [3] = (unsigned char)((c + 102 * a + 128) >> 8);
- b = src[4];
- des [4] = (unsigned char)((a * 205 + 51 * b + 128) >> 8);
-
- src += 4;
- des += 5;
- }
-
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 51 + 205 * b + 128) >> 8);
- c = src[2] * 154;
- a = src[3];
- des [2] = (unsigned char)((b * 102 + c + 128) >> 8);
- des [3] = (unsigned char)((c + 102 * a + 128) >> 8);
- des [4] = (unsigned char)(a);
-
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_4_5_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 4 to 5. The
- * height of the band scaled is 4-pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c, d;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch];
-
- des[dest_pitch] = (unsigned char)((a * 51 + 205 * b + 128) >> 8);
-
- c = des[dest_pitch * 2] * 154;
- d = des[dest_pitch * 3];
-
- des [dest_pitch * 2] = (unsigned char)((b * 102 + c + 128) >> 8);
- des [dest_pitch * 3] = (unsigned char)((c + 102 * d + 128) >> 8);
-
- /* First line in next band */
- a = des [dest_pitch * 5];
- des [dest_pitch * 4] = (unsigned char)((d * 205 + 51 * a + 128) >> 8);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_4_5_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 4 to 5. The
- * height of the band scaled is 4-pixels.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c, d;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- a = des[0];
- b = des[dest_pitch];
-
- des[dest_pitch] = (unsigned char)((a * 51 + 205 * b + 128) >> 8);
-
- c = des[dest_pitch * 2] * 154;
- d = des[dest_pitch * 3];
-
- des [dest_pitch * 2] = (unsigned char)((b * 102 + c + 128) >> 8);
- des [dest_pitch * 3] = (unsigned char)((c + 102 * d + 128) >> 8);
-
- /* No other line for interplation of this line, so .. */
- des[dest_pitch * 4] = (unsigned char) d;
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_2_3_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 2 to 3.
- *
- * SPECIAL NOTES : None.
- *
- *
- ****************************************************************************/
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 2; i += 2) {
- a = src[0];
- b = src[1];
- c = src[2];
-
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 85 + 171 * b + 128) >> 8);
- des [2] = (unsigned char)((b * 171 + 85 * c + 128) >> 8);
-
- src += 2;
- des += 3;
- }
-
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 85 + 171 * b + 128) >> 8);
- des [2] = (unsigned char)(b);
-}
-
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_2_3_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 2 to 3. The
- * height of the band scaled is 2-pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch];
- c = des[dest_pitch * 3];
- des [dest_pitch ] = (unsigned char)((a * 85 + 171 * b + 128) >> 8);
- des [dest_pitch * 2] = (unsigned char)((b * 171 + 85 * c + 128) >> 8);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_2_3_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 2 to 3. The
- * height of the band scaled is 2-pixels.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- a = des [0];
- b = des [dest_pitch];
-
- des [dest_pitch ] = (unsigned char)((a * 85 + 171 * b + 128) >> 8);
- des [dest_pitch * 2] = (unsigned char)(b);
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_3_5_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 3 to 5.
- *
- * SPECIAL NOTES : None.
- *
- *
- ****************************************************************************/
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 3; i += 3) {
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 102 + 154 * b + 128) >> 8);
-
- c = src[2];
- des [2] = (unsigned char)((b * 205 + c * 51 + 128) >> 8);
- des [3] = (unsigned char)((b * 51 + c * 205 + 128) >> 8);
-
- a = src[3];
- des [4] = (unsigned char)((c * 154 + a * 102 + 128) >> 8);
-
- src += 3;
- des += 5;
- }
-
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
-
- des [1] = (unsigned char)((a * 102 + 154 * b + 128) >> 8);
- c = src[2];
- des [2] = (unsigned char)((b * 205 + c * 51 + 128) >> 8);
- des [3] = (unsigned char)((b * 51 + c * 205 + 128) >> 8);
-
- des [4] = (unsigned char)(c);
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_3_5_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 3 to 5. The
- * height of the band scaled is 3-pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch];
- des [dest_pitch] = (unsigned char)((a * 102 + 154 * b + 128) >> 8);
-
- c = des[dest_pitch * 2];
- des [dest_pitch * 2] = (unsigned char)((b * 205 + c * 51 + 128) >> 8);
- des [dest_pitch * 3] = (unsigned char)((b * 51 + c * 205 + 128) >> 8);
-
- /* First line in next band... */
- a = des [dest_pitch * 5];
- des [dest_pitch * 4] = (unsigned char)((c * 154 + a * 102 + 128) >> 8);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_3_5_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 3 to 5. The
- * height of the band scaled is 3-pixels.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- a = des [0];
- b = des [dest_pitch];
-
- des [ dest_pitch ] = (unsigned char)((a * 102 + 154 * b + 128) >> 8);
-
- c = des[dest_pitch * 2];
- des [dest_pitch * 2] = (unsigned char)((b * 205 + c * 51 + 128) >> 8);
- des [dest_pitch * 3] = (unsigned char)((b * 51 + c * 205 + 128) >> 8);
-
- /* No other line for interplation of this line, so .. */
- des [ dest_pitch * 4 ] = (unsigned char)(c);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_3_4_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 3 to 4.
- *
- * SPECIAL NOTES : None.
- *
- *
- ****************************************************************************/
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 3; i += 3) {
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 64 + b * 192 + 128) >> 8);
-
- c = src[2];
- des [2] = (unsigned char)((b + c + 1) >> 1);
-
- a = src[3];
- des [3] = (unsigned char)((c * 192 + a * 64 + 128) >> 8);
-
- src += 3;
- des += 4;
- }
-
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 64 + b * 192 + 128) >> 8);
-
- c = src[2];
- des [2] = (unsigned char)((b + c + 1) >> 1);
- des [3] = (unsigned char)(c);
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_3_4_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 3 to 4. The
- * height of the band scaled is 3-pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch];
- des [dest_pitch] = (unsigned char)((a * 64 + b * 192 + 128) >> 8);
-
- c = des[dest_pitch * 2];
- des [dest_pitch * 2] = (unsigned char)((b + c + 1) >> 1);
-
- /* First line in next band... */
- a = des [dest_pitch * 4];
- des [dest_pitch * 3] = (unsigned char)((c * 192 + a * 64 + 128) >> 8);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_3_4_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 3 to 4. The
- * height of the band scaled is 3-pixels.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- a = des [0];
- b = des [dest_pitch];
-
- des [dest_pitch] = (unsigned char)((a * 64 + b * 192 + 128) >> 8);
-
- c = des[dest_pitch * 2];
- des [dest_pitch * 2] = (unsigned char)((b + c + 1) >> 1);
-
- /* No other line for interplation of this line, so .. */
- des [dest_pitch * 3] = (unsigned char)(c);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_1_2_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 1 to 2.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 1; i += 1) {
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a + b + 1) >> 1);
- src += 1;
- des += 2;
- }
-
- a = src[0];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)(a);
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_1_2_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 1 to 2. The
- * height of the band scaled is 1-pixel.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch * 2];
-
- des[dest_pitch] = (unsigned char)((a + b + 1) >> 1);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_1_2_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 1 to 2. The
- * height of the band scaled is 1-pixel.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- des[dest_pitch] = des[0];
- des++;
- }
-}
-
-
-
-
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_4_5_scale_c
*
* INPUTS : const unsigned char *source : Pointer to source data.
* unsigned int source_width : Stride of source.
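
Where the fixed blend weights in the deleted scalers come from: destination
pixel j in the 4-to-5 case samples source position j*4/5, and each output is a
two-tap blend whose 8-bit weights are round(frac * 256) and its complement
(the +128 before >> 8 rounds the sum). A sketch reproducing the 51/205 and
102/154 pairs seen above:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      int j;
      for (j = 0; j < 5; j++) {
        double pos = j * 4 / 5.0;               /* source coordinate of dest j */
        int n = (int)pos;
        int w1 = (int)lround((pos - n) * 256);  /* weight on source pixel n+1 */
        printf("dest %d: src %d/%d, weights %d/%d\n", j, n, n + 1, 256 - w1, w1);
      }
      return 0;
    }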
diff --git a/libvpx/vpx_scale/generic/vpxscale.c b/libvpx/vpx_scale/generic/vpx_scale.c
index 7de85ca..8044d2a 100644
--- a/libvpx/vpx_scale/generic/vpxscale.c
+++ b/libvpx/vpx_scale/generic/vpx_scale.c
@@ -20,10 +20,9 @@
/****************************************************************************
* Header Files
****************************************************************************/
-#include "./vpx_rtcd.h"
+#include "./vpx_scale_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/yv12config.h"
-#include "vpx_scale/scale_mode.h"
typedef struct {
int expanded_frame_width;
@@ -41,66 +40,6 @@ typedef struct {
/****************************************************************************
*
- * ROUTINE : horizontal_line_copy
- *
- * INPUTS : None
- *
- *
- * OUTPUTS : None.
- *
- * RETURNS : None
- *
- * FUNCTION : 1 to 1 scaling up for a horizontal line of pixles
- *
- * SPECIAL NOTES : None.
- *
- * ERRORS : None.
- *
- ****************************************************************************/
-static
-void horizontal_line_copy(
- const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width
-) {
- (void) dest_width;
-
- duck_memcpy(dest, source, source_width);
-}
-/****************************************************************************
- *
- * ROUTINE : null_scale
- *
- * INPUTS : unsigned char *dest : Pointer to destination data (NOT USED).
- * unsigned int dest_pitch : Stride of destination data (NOT USED).
- * unsigned int dest_width : Width of destination data (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : None
- *
- * FUNCTION : 1 to 1 (no-op) scaling of a vertical band
- *
- * SPECIAL NOTES : None.
- *
- * ERRORS : None.
- *
- ****************************************************************************/
-static
-void null_scale(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- (void) dest;
- (void) dest_pitch;
- (void) dest_width;
-
- return;
-}
-
-/****************************************************************************
- *
* ROUTINE : scale1d_2t1_i
*
* INPUTS : const unsigned char *source : Pointer to data to be scaled.
@@ -493,7 +432,7 @@ void Scale2D
temp_area + i * dest_pitch, 1, hratio, dest_width);
} else { /* Duplicate the last row */
/* copy temp_area row 0 over from last row in the past */
- duck_memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch);
+ vpx_memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch);
}
}
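
These hunks retire the legacy duck_* memory helpers in favor of the vpx_mem wrappers. In the default build those wrappers simply forward to the C library; a sketch of the assumed shape (the real definitions live in vpx_mem and can be overridden by a custom memory manager):

    #include <stddef.h>
    #include <string.h>

    /* Assumed default shape of the vpx_mem wrappers (sketch, not verbatim). */
    void *vpx_memcpy(void *dest, const void *src, size_t length) {
      return memcpy(dest, src, length);
    }

    void *vpx_memset(void *dest, int val, size_t length) {
      return memset(dest, val, length);
    }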
@@ -504,7 +443,7 @@ void Scale2D
}
/* copy temp_area row 0 over from last row in the past */
- duck_memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_pitch);
+ vpx_memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_pitch);
/* move to the next band */
source += source_band_height * source_pitch;
@@ -514,7 +453,7 @@ void Scale2D
/****************************************************************************
*
- * ROUTINE :
+ * ROUTINE : vpx_scale_frame
*
* INPUTS : YV12_BUFFER_CONFIG *src : Pointer to frame to be scaled.
* YV12_BUFFER_CONFIG *dst : Pointer to buffer to hold scaled frame.
@@ -536,7 +475,7 @@ void Scale2D
* caching.
*
****************************************************************************/
-void vp8_scale_frame
+void vpx_scale_frame
(
YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
@@ -559,11 +498,11 @@ void vp8_scale_frame
if (dw < (int)dst->y_width)
for (i = 0; i < dh; i++)
- duck_memset(dst->y_buffer + i * dst->y_stride + dw - 1, dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1);
+ vpx_memset(dst->y_buffer + i * dst->y_stride + dw - 1, dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1);
if (dh < (int)dst->y_height)
for (i = dh - 1; i < (int)dst->y_height; i++)
- duck_memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);
+ vpx_memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);
Scale2D((unsigned char *) src->u_buffer, src->uv_stride, src->uv_width, src->uv_height,
(unsigned char *) dst->u_buffer, dst->uv_stride, dw / 2, dh / 2,
@@ -571,11 +510,11 @@ void vp8_scale_frame
if (dw / 2 < (int)dst->uv_width)
for (i = 0; i < dst->uv_height; i++)
- duck_memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1, dst->u_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
+ vpx_memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1, dst->u_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
if (dh / 2 < (int)dst->uv_height)
for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
- duck_memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
+ vpx_memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
Scale2D((unsigned char *) src->v_buffer, src->uv_stride, src->uv_width, src->uv_height,
(unsigned char *) dst->v_buffer, dst->uv_stride, dw / 2, dh / 2,
@@ -583,428 +522,9 @@ void vp8_scale_frame
if (dw / 2 < (int)dst->uv_width)
for (i = 0; i < dst->uv_height; i++)
- duck_memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1, dst->v_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
+ vpx_memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1, dst->v_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
if (dh / 2 < (int) dst->uv_height)
for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
- duck_memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
-}
-/****************************************************************************
- *
- * ROUTINE : any_ratio_2d_scale
- *
- * INPUTS : SCALE_INSTANCE *si : Pointer to post-processor instance (NOT USED).
- * const unsigned char *source : Pointer to source image.
- * unsigned int source_pitch : Stride of source image.
- * unsigned int source_width : Width of source image.
- * unsigned int source_height : Height of source image (NOT USED).
- * unsigned char *dest : Pointer to destination image.
- * unsigned int dest_pitch : Stride of destination image.
- * unsigned int dest_width : Width of destination image.
- * unsigned int dest_height : Height of destination image.
- *
- * OUTPUTS : None.
- *
- * RETURNS : int: 1 if image scaled, 0 if image could not be scaled.
- *
- * FUNCTION : Scale the image with a changing aspect ratio.
- *
- * SPECIAL NOTES : This is a bi-linear scaling; the whole function
- * needs re-working for a new scaling algorithm.
- *
- ****************************************************************************/
-static
-int any_ratio_2d_scale
-(
- SCALE_VARS *si,
- const unsigned char *source,
- int source_pitch,
- unsigned int source_width,
- unsigned int source_height,
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width,
- unsigned int dest_height
-) {
- unsigned int i, k;
- unsigned int src_band_height = 0;
- unsigned int dest_band_height = 0;
-
- /* suggested scale factors */
- int hs = si->HScale;
- int hr = si->HRatio;
- int vs = si->VScale;
- int vr = si->VRatio;
-
- /* assume the ratios are scalable rather than requiring centering */
- int ratio_scalable = 1;
-
- const unsigned char *source_base = ((source_pitch >= 0) ? source : (source + ((source_height - 1) * source_pitch)));
- const unsigned char *line_src;
-
- void (*horiz_line_scale)(const unsigned char *, unsigned int, unsigned char *, unsigned int) = NULL;
- void (*vert_band_scale)(unsigned char *, unsigned int, unsigned int) = NULL;
- void (*last_vert_band_scale)(unsigned char *, unsigned int, unsigned int) = NULL;
-
- (void) si;
-
- /* find out the ratio for each direction */
- switch (hr * 30 / hs) {
- case 24:
- /* 4-5 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_4_5_scale;
- break;
- case 22:
- /* 3-4 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_3_4_scale;
- break;
-
- case 20:
- /* 2-3 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_2_3_scale;
- break;
- case 18:
- /* 3-5 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_3_5_scale;
- break;
- case 15:
- /* 1-2 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_1_2_scale;
- break;
- case 30:
- /* no scale in Width direction */
- horiz_line_scale = horizontal_line_copy;
- break;
- default:
- /* The ratio is not acceptable now */
- /* throw("The ratio is not acceptable for now!"); */
- ratio_scalable = 0;
- break;
- }
-
- switch (vr * 30 / vs) {
- case 24:
- /* 4-5 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_4_5_scale;
- last_vert_band_scale = vp8_last_vertical_band_4_5_scale;
- src_band_height = 4;
- dest_band_height = 5;
- break;
- case 22:
- /* 3-4 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_3_4_scale;
- last_vert_band_scale = vp8_last_vertical_band_3_4_scale;
- src_band_height = 3;
- dest_band_height = 4;
- break;
- case 20:
- /* 2-3 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_2_3_scale;
- last_vert_band_scale = vp8_last_vertical_band_2_3_scale;
- src_band_height = 2;
- dest_band_height = 3;
- break;
- case 18:
- /* 3-5 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_3_5_scale;
- last_vert_band_scale = vp8_last_vertical_band_3_5_scale;
- src_band_height = 3;
- dest_band_height = 5;
- break;
- case 15:
- /* 1-2 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_1_2_scale;
- last_vert_band_scale = vp8_last_vertical_band_1_2_scale;
- src_band_height = 1;
- dest_band_height = 2;
- break;
- case 30:
- /* no scale in vertical direction */
- vert_band_scale = null_scale;
- last_vert_band_scale = null_scale;
- src_band_height = 4;
- dest_band_height = 4;
- break;
- default:
- /* The ratio is not acceptable now */
- /* throw("The ratio is not acceptable for now!"); */
- ratio_scalable = 0;
- break;
- }
-
- if (ratio_scalable == 0)
- return ratio_scalable;
-
- horiz_line_scale(source, source_width, dest, dest_width);
-
- /* except last band */
- for (k = 0; k < (dest_height + dest_band_height - 1) / dest_band_height - 1; k++) {
- /* scale one band horizontally */
- for (i = 1; i < src_band_height; i++) {
- /* Trap case where we could read off the base of the source buffer */
- line_src = source + i * source_pitch;
-
- if (line_src < source_base)
- line_src = source_base;
-
- horiz_line_scale(line_src, source_width,
- dest + i * dest_pitch, dest_width);
- }
-
- /* first line of next band */
- /* Trap case where we could read off the base of the source buffer */
- line_src = source + src_band_height * source_pitch;
-
- if (line_src < source_base)
- line_src = source_base;
-
- horiz_line_scale(line_src, source_width,
- dest + dest_band_height * dest_pitch,
- dest_width);
-
- /* Vertical scaling is in place */
- vert_band_scale(dest, dest_pitch, dest_width);
-
- /* Next band... */
- source += src_band_height * source_pitch;
- dest += dest_band_height * dest_pitch;
- }
-
- /* scale one band horizontally */
- for (i = 1; i < src_band_height; i++) {
- /* Trap case where we could read off the base of the source buffer */
- line_src = source + i * source_pitch;
-
- if (line_src < source_base)
- line_src = source_base;
-
- horiz_line_scale(line_src, source_width,
- dest + i * dest_pitch,
- dest_width);
- }
-
- /* Vertical scaling is in place */
- last_vert_band_scale(dest, dest_pitch, dest_width);
-
- return ratio_scalable;
-}
-
-/****************************************************************************
- *
- * ROUTINE : any_ratio_frame_scale
- *
- * INPUTS : SCALE_INSTANCE *si : Pointer to post-processor instance (NOT USED).
- * unsigned char *frame_buffer : Pointer to source image.
- * int YOffset : Offset from start of buffer to Y samples.
- * int UVOffset : Offset from start of buffer to UV samples.
- *
- * OUTPUTS : None.
- *
- * RETURNS : int: 1 if image scaled, 0 if image could not be scaled.
- *
- * FUNCTION : Scale the image with a changing aspect ratio.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static
-int any_ratio_frame_scale(SCALE_VARS *scale_vars, int YOffset, int UVOffset) {
- int i;
- int ew;
- int eh;
-
- /* suggested scale factors */
- int hs = scale_vars->HScale;
- int hr = scale_vars->HRatio;
- int vs = scale_vars->VScale;
- int vr = scale_vars->VRatio;
-
- int ratio_scalable = 1;
-
- int sw = (scale_vars->expanded_frame_width * hr + hs - 1) / hs;
- int sh = (scale_vars->expanded_frame_height * vr + vs - 1) / vs;
- int dw = scale_vars->expanded_frame_width;
- int dh = scale_vars->expanded_frame_height;
- YV12_BUFFER_CONFIG *src_yuv_config = scale_vars->src_yuv_config;
- YV12_BUFFER_CONFIG *dst_yuv_config = scale_vars->dst_yuv_config;
-
- if (hr == 3)
- ew = (sw + 2) / 3 * 3 * hs / hr;
- else
- ew = (sw + 7) / 8 * 8 * hs / hr;
-
- if (vr == 3)
- eh = (sh + 2) / 3 * 3 * vs / vr;
- else
- eh = (sh + 7) / 8 * 8 * vs / vr;
-
- ratio_scalable = any_ratio_2d_scale(scale_vars,
- (const unsigned char *)src_yuv_config->y_buffer,
- src_yuv_config->y_stride, sw, sh,
- (unsigned char *) dst_yuv_config->y_buffer + YOffset,
- dst_yuv_config->y_stride, dw, dh);
-
- for (i = 0; i < eh; i++)
- duck_memset(dst_yuv_config->y_buffer + YOffset + i * dst_yuv_config->y_stride + dw, 0, ew - dw);
-
- for (i = dh; i < eh; i++)
- duck_memset(dst_yuv_config->y_buffer + YOffset + i * dst_yuv_config->y_stride, 0, ew);
-
- if (ratio_scalable == 0)
- return ratio_scalable;
-
- sw = (sw + 1) >> 1;
- sh = (sh + 1) >> 1;
- dw = (dw + 1) >> 1;
- dh = (dh + 1) >> 1;
-
- any_ratio_2d_scale(scale_vars,
- (const unsigned char *)src_yuv_config->u_buffer,
- src_yuv_config->y_stride / 2, sw, sh,
- (unsigned char *)dst_yuv_config->u_buffer + UVOffset,
- dst_yuv_config->uv_stride, dw, dh);
-
- any_ratio_2d_scale(scale_vars,
- (const unsigned char *)src_yuv_config->v_buffer,
- src_yuv_config->y_stride / 2, sw, sh,
- (unsigned char *)dst_yuv_config->v_buffer + UVOffset,
- dst_yuv_config->uv_stride, dw, dh);
-
- return ratio_scalable;
-}
-
-/****************************************************************************
- *
- * ROUTINE : center_image
- *
- * INPUTS : SCALE_INSTANCE *si : Pointer to post-processor instance.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Centers the image without scaling in the output buffer.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static void
-center_image(YV12_BUFFER_CONFIG *src_yuv_config, YV12_BUFFER_CONFIG *dst_yuv_config) {
- int i;
- int row_offset, col_offset;
- unsigned char *src_data_pointer;
- unsigned char *dst_data_pointer;
-
- /* center values */
- row_offset = (dst_yuv_config->y_height - src_yuv_config->y_height) / 2;
- col_offset = (dst_yuv_config->y_width - src_yuv_config->y_width) / 2;
-
- /* Y's */
- src_data_pointer = src_yuv_config->y_buffer;
- dst_data_pointer = (unsigned char *)dst_yuv_config->y_buffer + (row_offset * dst_yuv_config->y_stride) + col_offset;
-
- for (i = 0; i < src_yuv_config->y_height; i++) {
- duck_memcpy(dst_data_pointer, src_data_pointer, src_yuv_config->y_width);
- dst_data_pointer += dst_yuv_config->y_stride;
- src_data_pointer += src_yuv_config->y_stride;
- }
-
- row_offset /= 2;
- col_offset /= 2;
-
- /* U's */
- src_data_pointer = src_yuv_config->u_buffer;
- dst_data_pointer = (unsigned char *)dst_yuv_config->u_buffer + (row_offset * dst_yuv_config->uv_stride) + col_offset;
-
- for (i = 0; i < src_yuv_config->uv_height; i++) {
- duck_memcpy(dst_data_pointer, src_data_pointer, src_yuv_config->uv_width);
- dst_data_pointer += dst_yuv_config->uv_stride;
- src_data_pointer += src_yuv_config->uv_stride;
- }
-
- /* V's */
- src_data_pointer = src_yuv_config->v_buffer;
- dst_data_pointer = (unsigned char *)dst_yuv_config->v_buffer + (row_offset * dst_yuv_config->uv_stride) + col_offset;
-
- for (i = 0; i < src_yuv_config->uv_height; i++) {
- duck_memcpy(dst_data_pointer, src_data_pointer, src_yuv_config->uv_width);
- dst_data_pointer += dst_yuv_config->uv_stride;
- src_data_pointer += src_yuv_config->uv_stride;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : scale_or_center
- *
- * INPUTS : SCALE_INSTANCE *si : Pointer to post-processor instance.
- *
- *
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Decides whether to scale or center the image in the scale buffer for the blit
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-void
-vp8_yv12_scale_or_center
-(
- YV12_BUFFER_CONFIG *src_yuv_config,
- YV12_BUFFER_CONFIG *dst_yuv_config,
- int expanded_frame_width,
- int expanded_frame_height,
- int scaling_mode,
- int HScale,
- int HRatio,
- int VScale,
- int VRatio
-) {
- /*if ( ppi->post_processing_level )
- update_umvborder ( ppi, frame_buffer );*/
-
-
- switch (scaling_mode) {
- case SCALE_TO_FIT:
- case MAINTAIN_ASPECT_RATIO: {
- SCALE_VARS scale_vars;
- /* center values */
-#if 1
- int row = (dst_yuv_config->y_height - expanded_frame_height) / 2;
- int col = (dst_yuv_config->y_width - expanded_frame_width) / 2;
- /*int YOffset = row * dst_yuv_config->y_width + col;
- int UVOffset = (row>>1) * dst_yuv_config->uv_width + (col>>1);*/
- int YOffset = row * dst_yuv_config->y_stride + col;
- int UVOffset = (row >> 1) * dst_yuv_config->uv_stride + (col >> 1);
-#else
- int row = (src_yuv_config->y_height - expanded_frame_height) / 2;
- int col = (src_yuv_config->y_width - expanded_frame_width) / 2;
- int YOffset = row * src_yuv_config->y_width + col;
- int UVOffset = (row >> 1) * src_yuv_config->uv_width + (col >> 1);
-#endif
-
- scale_vars.dst_yuv_config = dst_yuv_config;
- scale_vars.src_yuv_config = src_yuv_config;
- scale_vars.HScale = HScale;
- scale_vars.HRatio = HRatio;
- scale_vars.VScale = VScale;
- scale_vars.VRatio = VRatio;
- scale_vars.expanded_frame_width = expanded_frame_width;
- scale_vars.expanded_frame_height = expanded_frame_height;
-
- /* perform center and scale */
- any_ratio_frame_scale(&scale_vars, YOffset, UVOffset);
-
- break;
- }
- case CENTER:
- center_image(src_yuv_config, dst_yuv_config);
- break;
-
- default:
- break;
- }
+ vpx_memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
}
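
Note how the deleted any_ratio_2d_scale selected its scalers: both switches key on the ratio quantized to thirtieths with truncating integer division, so 4:5 maps to 4 * 30 / 5 = 24, 3:4 to 90 / 4 = 22, 2:3 to 20, 3:5 to 18, 1:2 to 15, and 1:1 to 30, exactly the case labels above. In sketch form:

    /* Ratio code used by the removed dispatch; integer division truncates,
     * so ratio_code(3, 4) == 22, not 23. */
    static int ratio_code(int ratio, int scale) {
      return ratio * 30 / scale;  /* e.g. ratio_code(4, 5) == 24 */
    }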
diff --git a/libvpx/vpx_scale/generic/yv12config.c b/libvpx/vpx_scale/generic/yv12config.c
index 4cb2a41..a89e29d 100644
--- a/libvpx/vpx_scale/generic/yv12config.c
+++ b/libvpx/vpx_scale/generic/yv12config.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
+#include "./vpx_config.h"
#include "vpx_scale/yv12config.h"
#include "vpx_mem/vpx_mem.h"
@@ -35,58 +35,176 @@ vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
return 0;
}
-/****************************************************************************
- *
- ****************************************************************************/
-int
-vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, int border) {
- /*NOTE:*/
-
+int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int border) {
if (ybf) {
- int y_stride = ((width + 2 * border) + 31) & ~31;
- int yplane_size = (height + 2 * border) * y_stride;
- int uv_width = width >> 1;
- int uv_height = height >> 1;
+ int aligned_width = (width + 15) & ~15;
+ int aligned_height = (height + 15) & ~15;
+ int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
+ int yplane_size = (aligned_height + 2 * border) * y_stride;
+ int uv_width = aligned_width >> 1;
+ int uv_height = aligned_height >> 1;
/** There is currently a bunch of code which assumes
* uv_stride == y_stride/2, so enforce this here. */
int uv_stride = y_stride >> 1;
int uvplane_size = (uv_height + border) * uv_stride;
+ const int frame_size = yplane_size + 2 * uvplane_size;
- vp8_yv12_de_alloc_frame_buffer(ybf);
+ if (!ybf->buffer_alloc) {
+ ybf->buffer_alloc = vpx_memalign(32, frame_size);
+ ybf->buffer_alloc_sz = frame_size;
+ }
- /** Only support allocating buffers that have a height and width that
- * are multiples of 16, and a border that's a multiple of 32.
- * The border restriction is required to get 16-byte alignment of the
- * start of the chroma rows without introducing an arbitrary gap
- * between planes, which would break the semantics of things like
- * vpx_img_set_rect(). */
- if ((width & 0xf) | (height & 0xf) | (border & 0x1f))
+ if (!ybf->buffer_alloc || ybf->buffer_alloc_sz < frame_size)
+ return -1;
+
+ /* Only support allocating buffers that have a border that's a multiple
+ * of 32. The border restriction is required to get 16-byte alignment of
+ * the start of the chroma rows without introducing an arbitrary gap
+ * between planes, which would break the semantics of things like
+ * vpx_img_set_rect(). */
+ if (border & 0x1f)
return -3;
- ybf->y_width = width;
- ybf->y_height = height;
+ ybf->y_crop_width = width;
+ ybf->y_crop_height = height;
+ ybf->y_width = aligned_width;
+ ybf->y_height = aligned_height;
ybf->y_stride = y_stride;
ybf->uv_width = uv_width;
ybf->uv_height = uv_height;
ybf->uv_stride = uv_stride;
- ybf->border = border;
- ybf->frame_size = yplane_size + 2 * uvplane_size;
-
- ybf->buffer_alloc = (unsigned char *) vpx_memalign(32, ybf->frame_size);
+ ybf->alpha_width = 0;
+ ybf->alpha_height = 0;
+ ybf->alpha_stride = 0;
- if (ybf->buffer_alloc == NULL)
- return -1;
+ ybf->border = border;
+ ybf->frame_size = frame_size;
ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border;
ybf->u_buffer = ybf->buffer_alloc + yplane_size + (border / 2 * uv_stride) + border / 2;
ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size + (border / 2 * uv_stride) + border / 2;
+ ybf->alpha_buffer = NULL;
ybf->corrupted = 0; /* assume not corrupted by errors */
+ return 0;
+ }
+ return -2;
+}
+
+int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int border) {
+ if (ybf) {
+ vp8_yv12_de_alloc_frame_buffer(ybf);
+ return vp8_yv12_realloc_frame_buffer(ybf, width, height, border);
+ }
+ return -2;
+}
+
+#if CONFIG_VP9
+// TODO(jkoleszar): Maybe replace this with struct vpx_image
+
+int vp9_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
+ if (ybf) {
+ vpx_free(ybf->buffer_alloc);
+
+ /* buffer_alloc isn't accessed by most functions. Rather y_buffer,
+ u_buffer and v_buffer point to buffer_alloc and are used. Clear out
+ all of this so that a freed pointer isn't inadvertently used */
+ vpx_memset(ybf, 0, sizeof(YV12_BUFFER_CONFIG));
} else {
- return -2;
+ return -1;
}
return 0;
}
+
+int vp9_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height,
+ int ss_x, int ss_y, int border) {
+ if (ybf) {
+ const int aligned_width = (width + 7) & ~7;
+ const int aligned_height = (height + 7) & ~7;
+ const int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
+ const int yplane_size = (aligned_height + 2 * border) * y_stride;
+ const int uv_width = aligned_width >> ss_x;
+ const int uv_height = aligned_height >> ss_y;
+ const int uv_stride = y_stride >> ss_x;
+ const int uv_border_w = border >> ss_x;
+ const int uv_border_h = border >> ss_y;
+ const int uvplane_size = (uv_height + 2 * uv_border_h) * uv_stride;
+#if CONFIG_ALPHA
+ const int alpha_width = aligned_width;
+ const int alpha_height = aligned_height;
+ const int alpha_stride = y_stride;
+ const int alpha_border_w = border;
+ const int alpha_border_h = border;
+ const int alpha_plane_size = (alpha_height + 2 * alpha_border_h) *
+ alpha_stride;
+ const int frame_size = yplane_size + 2 * uvplane_size +
+ alpha_plane_size;
+#else
+ const int frame_size = yplane_size + 2 * uvplane_size;
+#endif
+ if (!ybf->buffer_alloc) {
+ ybf->buffer_alloc = vpx_memalign(32, frame_size);
+ ybf->buffer_alloc_sz = frame_size;
+ }
+
+ if (!ybf->buffer_alloc || ybf->buffer_alloc_sz < frame_size)
+ return -1;
+
+ /* Only support allocating buffers that have a border that's a multiple
+ * of 32. The border restriction is required to get 16-byte alignment of
+ * the start of the chroma rows without introducing an arbitrary gap
+ * between planes, which would break the semantics of things like
+ * vpx_img_set_rect(). */
+ if (border & 0x1f)
+ return -3;
+
+ ybf->y_crop_width = width;
+ ybf->y_crop_height = height;
+ ybf->y_width = aligned_width;
+ ybf->y_height = aligned_height;
+ ybf->y_stride = y_stride;
+
+ ybf->uv_crop_width = (width + ss_x) >> ss_x;
+ ybf->uv_crop_height = (height + ss_y) >> ss_y;
+ ybf->uv_width = uv_width;
+ ybf->uv_height = uv_height;
+ ybf->uv_stride = uv_stride;
+
+ ybf->border = border;
+ ybf->frame_size = frame_size;
+
+ ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border;
+ ybf->u_buffer = ybf->buffer_alloc + yplane_size +
+ (uv_border_h * uv_stride) + uv_border_w;
+ ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size +
+ (uv_border_h * uv_stride) + uv_border_w;
+
+#if CONFIG_ALPHA
+ ybf->alpha_width = alpha_width;
+ ybf->alpha_height = alpha_height;
+ ybf->alpha_stride = alpha_stride;
+ ybf->alpha_buffer = ybf->buffer_alloc + yplane_size + 2 * uvplane_size +
+ (alpha_border_h * alpha_stride) + alpha_border_w;
+#endif
+ ybf->corrupted = 0; /* assume not corrupted by errors */
+ return 0;
+ }
+ return -2;
+}
+
+int vp9_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height,
+ int ss_x, int ss_y, int border) {
+ if (ybf) {
+ vp9_free_frame_buffer(ybf);
+ return vp9_realloc_frame_buffer(ybf, width, height, ss_x, ss_y, border);
+ }
+ return -2;
+}
+#endif
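
The new allocators round the luma dimensions up (to a multiple of 16 in the VP8 path, 8 in the VP9 path) and the stride up to a multiple of 32, which keeps the start of the chroma rows 16-byte aligned. For example, width 1918 with a 32-pixel border under the VP8 rules gives aligned_width 1920 and y_stride (1920 + 64 + 31) & ~31 = 1984. The same arithmetic as a sketch:

    /* Alignment arithmetic from the VP8 realloc path (sketch). */
    static int align_up(int v, int n) {
      return (v + n - 1) & ~(n - 1);
    }

    static int y_stride_for(int width, int border) {
      const int aligned_width = align_up(width, 16);
      return align_up(aligned_width + 2 * border, 32);
      /* y_stride_for(1918, 32) == 1984 */
    }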
diff --git a/libvpx/vpx_scale/generic/yv12extend.c b/libvpx/vpx_scale/generic/yv12extend.c
index 247078c..f2aec2b 100644
--- a/libvpx/vpx_scale/generic/yv12extend.c
+++ b/libvpx/vpx_scale/generic/yv12extend.c
@@ -8,256 +8,175 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
-#include "vpx_scale/yv12config.h"
+#include <assert.h>
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_scale/vpxscale.h"
-
-/****************************************************************************
-* Exports
-****************************************************************************/
+#include "vpx_scale/yv12config.h"
-/****************************************************************************
- *
- ****************************************************************************/
-void
-vp8_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
+static void extend_plane(uint8_t *const src, int src_stride,
+ int width, int height,
+ int extend_top, int extend_left,
+ int extend_bottom, int extend_right) {
int i;
- unsigned char *src_ptr1, *src_ptr2;
- unsigned char *dest_ptr1, *dest_ptr2;
-
- unsigned int Border;
- int plane_stride;
- int plane_height;
- int plane_width;
-
- /***********/
- /* Y Plane */
- /***********/
- Border = ybf->border;
- plane_stride = ybf->y_stride;
- plane_height = ybf->y_height;
- plane_width = ybf->y_width;
+ const int linesize = extend_left + extend_right + width;
/* copy the left and right most columns out */
- src_ptr1 = ybf->y_buffer;
- src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
- dest_ptr2 = src_ptr2 + 1;
-
- for (i = 0; i < plane_height; i++) {
- vpx_memset(dest_ptr1, src_ptr1[0], Border);
- vpx_memset(dest_ptr2, src_ptr2[0], Border);
- src_ptr1 += plane_stride;
- src_ptr2 += plane_stride;
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
+ uint8_t *src_ptr1 = src;
+ uint8_t *src_ptr2 = src + width - 1;
+ uint8_t *dst_ptr1 = src - extend_left;
+ uint8_t *dst_ptr2 = src + width;
+
+ for (i = 0; i < height; ++i) {
+ vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
+ vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
+ src_ptr1 += src_stride;
+ src_ptr2 += src_stride;
+ dst_ptr1 += src_stride;
+ dst_ptr2 += src_stride;
}
- /* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->y_buffer - Border;
- src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
- dest_ptr2 = src_ptr2 + plane_stride;
-
- for (i = 0; i < (int)Border; i++) {
- vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
- vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
+ /* Now copy the top and bottom lines into each line of the respective
+ * borders
+ */
+ src_ptr1 = src - extend_left;
+ src_ptr2 = src + src_stride * (height - 1) - extend_left;
+ dst_ptr1 = src + src_stride * -extend_top - extend_left;
+ dst_ptr2 = src + src_stride * height - extend_left;
+
+ for (i = 0; i < extend_top; ++i) {
+ vpx_memcpy(dst_ptr1, src_ptr1, linesize);
+ dst_ptr1 += src_stride;
}
-
- /***********/
- /* U Plane */
- /***********/
- plane_stride = ybf->uv_stride;
- plane_height = ybf->uv_height;
- plane_width = ybf->uv_width;
- Border /= 2;
-
- /* copy the left and right most columns out */
- src_ptr1 = ybf->u_buffer;
- src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
- dest_ptr2 = src_ptr2 + 1;
-
- for (i = 0; i < plane_height; i++) {
- vpx_memset(dest_ptr1, src_ptr1[0], Border);
- vpx_memset(dest_ptr2, src_ptr2[0], Border);
- src_ptr1 += plane_stride;
- src_ptr2 += plane_stride;
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
- /* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->u_buffer - Border;
- src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
- dest_ptr2 = src_ptr2 + plane_stride;
-
- for (i = 0; i < (int)(Border); i++) {
- vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
- vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
- /***********/
- /* V Plane */
- /***********/
-
- /* copy the left and right most columns out */
- src_ptr1 = ybf->v_buffer;
- src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
- dest_ptr2 = src_ptr2 + 1;
-
- for (i = 0; i < plane_height; i++) {
- vpx_memset(dest_ptr1, src_ptr1[0], Border);
- vpx_memset(dest_ptr2, src_ptr2[0], Border);
- src_ptr1 += plane_stride;
- src_ptr2 += plane_stride;
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
- /* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->v_buffer - Border;
- src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
- dest_ptr2 = src_ptr2 + plane_stride;
-
- for (i = 0; i < (int)(Border); i++) {
- vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
- vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
+ for (i = 0; i < extend_bottom; ++i) {
+ vpx_memcpy(dst_ptr2, src_ptr2, linesize);
+ dst_ptr2 += src_stride;
}
}
+void vp8_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
+ assert(ybf->y_height - ybf->y_crop_height < 16);
+ assert(ybf->y_width - ybf->y_crop_width < 16);
+ assert(ybf->y_height - ybf->y_crop_height >= 0);
+ assert(ybf->y_width - ybf->y_crop_width >= 0);
+
+ extend_plane(ybf->y_buffer, ybf->y_stride,
+ ybf->y_crop_width, ybf->y_crop_height,
+ ybf->border, ybf->border,
+ ybf->border + ybf->y_height - ybf->y_crop_height,
+ ybf->border + ybf->y_width - ybf->y_crop_width);
+
+ extend_plane(ybf->u_buffer, ybf->uv_stride,
+ (ybf->y_crop_width + 1) / 2, (ybf->y_crop_height + 1) / 2,
+ ybf->border / 2, ybf->border / 2,
+ (ybf->border + ybf->y_height - ybf->y_crop_height + 1) / 2,
+ (ybf->border + ybf->y_width - ybf->y_crop_width + 1) / 2);
+
+ extend_plane(ybf->v_buffer, ybf->uv_stride,
+ (ybf->y_crop_width + 1) / 2, (ybf->y_crop_height + 1) / 2,
+ ybf->border / 2, ybf->border / 2,
+ (ybf->border + ybf->y_height - ybf->y_crop_height + 1) / 2,
+ (ybf->border + ybf->y_width - ybf->y_crop_width + 1) / 2);
+}
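
The rewritten border extension is now a single parameterized helper: extend_plane replicates each row's outermost pixels into the left and right borders, then copies the full widened top and bottom rows into every row of the vertical borders. The asserts above record that the bottom/right extents absorb the alignment padding, which by construction is less than one 16-pixel macroblock. A hypothetical call for a 64x48 luma plane with a 32-pixel border (the buffer must really own the border pixels around y_buffer):

    extend_plane(buf->y_buffer, buf->y_stride, 64, 48,
                 32 /* top */, 32 /* left */, 32 /* bottom */, 32 /* right */);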
-static void
-extend_frame_borders_yonly_c(YV12_BUFFER_CONFIG *ybf) {
- int i;
- unsigned char *src_ptr1, *src_ptr2;
- unsigned char *dest_ptr1, *dest_ptr2;
-
- unsigned int Border;
- int plane_stride;
- int plane_height;
- int plane_width;
-
- /***********/
- /* Y Plane */
- /***********/
- Border = ybf->border;
- plane_stride = ybf->y_stride;
- plane_height = ybf->y_height;
- plane_width = ybf->y_width;
-
- /* copy the left and right most columns out */
- src_ptr1 = ybf->y_buffer;
- src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
- dest_ptr2 = src_ptr2 + 1;
-
- for (i = 0; i < plane_height; i++) {
- vpx_memset(dest_ptr1, src_ptr1[0], Border);
- vpx_memset(dest_ptr2, src_ptr2[0], Border);
- src_ptr1 += plane_stride;
- src_ptr2 += plane_stride;
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
- /* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->y_buffer - Border;
- src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
- dest_ptr2 = src_ptr2 + plane_stride;
-
- for (i = 0; i < (int)Border; i++) {
- vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
- vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
- plane_stride /= 2;
- plane_height /= 2;
- plane_width /= 2;
- Border /= 2;
-
+#if CONFIG_VP9
+static void extend_frame(YV12_BUFFER_CONFIG *const ybf,
+ int subsampling_x, int subsampling_y,
+ int ext_size) {
+ const int c_w = (ybf->y_crop_width + subsampling_x) >> subsampling_x;
+ const int c_h = (ybf->y_crop_height + subsampling_y) >> subsampling_y;
+ const int c_et = ext_size >> subsampling_y;
+ const int c_el = ext_size >> subsampling_x;
+ const int c_eb = (ext_size + ybf->y_height - ybf->y_crop_height +
+ subsampling_y) >> subsampling_y;
+ const int c_er = (ext_size + ybf->y_width - ybf->y_crop_width +
+ subsampling_x) >> subsampling_x;
+
+ assert(ybf->y_height - ybf->y_crop_height < 16);
+ assert(ybf->y_width - ybf->y_crop_width < 16);
+ assert(ybf->y_height - ybf->y_crop_height >= 0);
+ assert(ybf->y_width - ybf->y_crop_width >= 0);
+
+ extend_plane(ybf->y_buffer, ybf->y_stride,
+ ybf->y_crop_width, ybf->y_crop_height,
+ ext_size, ext_size,
+ ext_size + ybf->y_height - ybf->y_crop_height,
+ ext_size + ybf->y_width - ybf->y_crop_width);
+
+ extend_plane(ybf->u_buffer, ybf->uv_stride,
+ c_w, c_h, c_et, c_el, c_eb, c_er);
+
+ extend_plane(ybf->v_buffer, ybf->uv_stride,
+ c_w, c_h, c_et, c_el, c_eb, c_er);
}
+void vp9_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf,
+ int subsampling_x, int subsampling_y) {
+ extend_frame(ybf, subsampling_x, subsampling_y, ybf->border);
+}
+void vp9_extend_frame_inner_borders_c(YV12_BUFFER_CONFIG *ybf,
+ int subsampling_x, int subsampling_y) {
+ const int inner_bw = (ybf->border > VP9INNERBORDERINPIXELS) ?
+ VP9INNERBORDERINPIXELS : ybf->border;
+ extend_frame(ybf, subsampling_x, subsampling_y, inner_bw);
+}
+#endif // CONFIG_VP9
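
For the chroma extents the VP9 path uses (x + subsampling) >> subsampling, a ceiling divide for the 0/1 subsampling values in use, so odd crop sizes still cover the last chroma sample: a crop width of 33 with subsampling_x = 1 yields (33 + 1) >> 1 = 17 chroma columns. A generalized sketch of the same rounding:

    /* Ceiling divide by 2^ss; matches (x + ss) >> ss for ss in {0, 1}.
     * Example: chroma_size(33, 1) == 17. */
    static int chroma_size(int luma_size, int ss) {
      return (luma_size + (1 << ss) - 1) >> ss;
    }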
-/****************************************************************************
- *
- * ROUTINE : vp8_yv12_copy_frame
- *
- * INPUTS :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies the source image into the destination image and
- * updates the destination's UMV borders.
- *
- * SPECIAL NOTES : The frames are assumed to be identical in size.
- *
- ****************************************************************************/
-void
-vp8_yv12_copy_frame_c(YV12_BUFFER_CONFIG *src_ybc,
- YV12_BUFFER_CONFIG *dst_ybc) {
+// Copies the source image into the destination image and updates the
+// destination's UMV borders.
+// Note: The frames are assumed to be identical in size.
+void vp8_yv12_copy_frame_c(const YV12_BUFFER_CONFIG *src_ybc,
+ YV12_BUFFER_CONFIG *dst_ybc) {
int row;
- unsigned char *source, *dest;
-
- source = src_ybc->y_buffer;
- dest = dst_ybc->y_buffer;
-
- for (row = 0; row < src_ybc->y_height; row++) {
- vpx_memcpy(dest, source, src_ybc->y_width);
- source += src_ybc->y_stride;
- dest += dst_ybc->y_stride;
+ const uint8_t *src = src_ybc->y_buffer;
+ uint8_t *dst = dst_ybc->y_buffer;
+
+#if 0
+ /* These assertions are valid in the codec, but the libvpx-tester uses
+ * this code slightly differently.
+ */
+ assert(src_ybc->y_width == dst_ybc->y_width);
+ assert(src_ybc->y_height == dst_ybc->y_height);
+#endif
+
+ for (row = 0; row < src_ybc->y_height; ++row) {
+ vpx_memcpy(dst, src, src_ybc->y_width);
+ src += src_ybc->y_stride;
+ dst += dst_ybc->y_stride;
}
- source = src_ybc->u_buffer;
- dest = dst_ybc->u_buffer;
+ src = src_ybc->u_buffer;
+ dst = dst_ybc->u_buffer;
- for (row = 0; row < src_ybc->uv_height; row++) {
- vpx_memcpy(dest, source, src_ybc->uv_width);
- source += src_ybc->uv_stride;
- dest += dst_ybc->uv_stride;
+ for (row = 0; row < src_ybc->uv_height; ++row) {
+ vpx_memcpy(dst, src, src_ybc->uv_width);
+ src += src_ybc->uv_stride;
+ dst += dst_ybc->uv_stride;
}
- source = src_ybc->v_buffer;
- dest = dst_ybc->v_buffer;
+ src = src_ybc->v_buffer;
+ dst = dst_ybc->v_buffer;
- for (row = 0; row < src_ybc->uv_height; row++) {
- vpx_memcpy(dest, source, src_ybc->uv_width);
- source += src_ybc->uv_stride;
- dest += dst_ybc->uv_stride;
+ for (row = 0; row < src_ybc->uv_height; ++row) {
+ vpx_memcpy(dst, src, src_ybc->uv_width);
+ src += src_ybc->uv_stride;
+ dst += dst_ybc->uv_stride;
}
vp8_yv12_extend_frame_borders_c(dst_ybc);
}
-void vp8_yv12_copy_y_c(YV12_BUFFER_CONFIG *src_ybc,
+void vpx_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
YV12_BUFFER_CONFIG *dst_ybc) {
int row;
- unsigned char *source, *dest;
-
-
- source = src_ybc->y_buffer;
- dest = dst_ybc->y_buffer;
+ const uint8_t *src = src_ybc->y_buffer;
+ uint8_t *dst = dst_ybc->y_buffer;
- for (row = 0; row < src_ybc->y_height; row++) {
- vpx_memcpy(dest, source, src_ybc->y_width);
- source += src_ybc->y_stride;
- dest += dst_ybc->y_stride;
+ for (row = 0; row < src_ybc->y_height; ++row) {
+ vpx_memcpy(dst, src, src_ybc->y_width);
+ src += src_ybc->y_stride;
+ dst += dst_ybc->y_stride;
}
}
diff --git a/libvpx/vpx_scale/generic/yv12extend_generic.h b/libvpx/vpx_scale/generic/yv12extend_generic.h
deleted file mode 100644
index cc2a554..0000000
--- a/libvpx/vpx_scale/generic/yv12extend_generic.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef YV12_EXTEND_GENERIC_H
-#define YV12_EXTEND_GENERIC_H
-
-#include "vpx_scale/yv12config.h"
-
- void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf);
-
- /* Copy Y,U,V buffer data from src to dst, filling border of dst as well. */
- void vp8_yv12_copy_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
-
- /* Copy Y buffer data from src_ybc to dst_ybc without filling border data */
- void vp8_yv12_copy_y_c(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
-
-#endif /* YV12_EXTEND_GENERIC_H */
diff --git a/libvpx/vpx_scale/include/generic/vpxscale_arbitrary.h b/libvpx/vpx_scale/include/generic/vpxscale_arbitrary.h
deleted file mode 100644
index c535252..0000000
--- a/libvpx/vpx_scale/include/generic/vpxscale_arbitrary.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef __VPX_SCALE_ARBITRARY_H__
-#define __VPX_SCALE_ARBITRARY_H__
-
-#include "vpx_scale/yv12config.h"
-
-typedef struct {
- int in_width;
- int in_height;
-
- int out_width;
- int out_height;
- int max_usable_out_width;
-
- // numerator for the width and height
- int nw;
- int nh;
- int nh_uv;
-
- // output to input correspondence array
- short *l_w;
- short *l_h;
- short *l_h_uv;
-
- // polyphase coefficients
- short *c_w;
- short *c_h;
- short *c_h_uv;
-
- // buffer for horizontal filtering.
- unsigned char *hbuf;
- unsigned char *hbuf_uv;
-} BICUBIC_SCALER_STRUCT;
-
-int bicubic_coefficient_setup(int in_width, int in_height, int out_width, int out_height);
-int bicubic_scale(int in_width, int in_height, int in_stride,
- int out_width, int out_height, int out_stride,
- unsigned char *input_image, unsigned char *output_image);
-void bicubic_scale_frame_reset();
-void bicubic_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
- int new_width, int new_height);
-void bicubic_coefficient_init();
-void bicubic_coefficient_destroy();
-
-#endif /* __VPX_SCALE_ARBITRARY_H__ */
diff --git a/libvpx/vpx_scale/include/generic/vpxscale_depricated.h b/libvpx/vpx_scale/include/generic/vpxscale_depricated.h
deleted file mode 100644
index 3f7fe0f..0000000
--- a/libvpx/vpx_scale/include/generic/vpxscale_depricated.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/****************************************************************************
-*
-* Module Title : postp.h
-*
-* Description : Post processor interface
-*
-****************************************************************************/
-#ifndef VPXSCALE_H
-#define VPXSCALE_H
-
-extern void (*vp8_vertical_band_4_5_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_last_vertical_band_4_5_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_vertical_band_3_5_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_last_vertical_band_3_5_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_horizontal_line_1_2_scale)(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-extern void (*vp8_horizontal_line_3_5_scale)(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-extern void (*vp8_horizontal_line_4_5_scale)(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-extern void (*vp8_vertical_band_1_2_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_last_vertical_band_1_2_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-
-extern void dmachine_specific_config(int mmx_enabled, int xmm_enabled, int wmt_enabled);
-
-#endif
diff --git a/libvpx/vpx_scale/vpxscale.h b/libvpx/vpx_scale/vpx_scale.h
index 3c2194d..9ddf62e 100644
--- a/libvpx/vpx_scale/vpxscale.h
+++ b/libvpx/vpx_scale/vpx_scale.h
@@ -14,17 +14,7 @@
#include "vpx_scale/yv12config.h"
-extern void vp8_yv12_scale_or_center(YV12_BUFFER_CONFIG *src_yuv_config,
- YV12_BUFFER_CONFIG *dst_yuv_config,
- int expanded_frame_width,
- int expanded_frame_height,
- int scaling_mode,
- int HScale,
- int HRatio,
- int VScale,
- int VRatio);
-
-extern void vp8_scale_frame(YV12_BUFFER_CONFIG *src,
+extern void vpx_scale_frame(YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
unsigned char *temp_area,
unsigned char temp_height,
diff --git a/libvpx/vpx_scale/vpx_scale.mk b/libvpx/vpx_scale/vpx_scale.mk
index dc89478..76c11e7 100644
--- a/libvpx/vpx_scale/vpx_scale.mk
+++ b/libvpx/vpx_scale/vpx_scale.mk
@@ -1,12 +1,13 @@
SCALE_SRCS-yes += vpx_scale.mk
-SCALE_SRCS-yes += scale_mode.h
SCALE_SRCS-yes += yv12config.h
-SCALE_SRCS-yes += vpxscale.h
-SCALE_SRCS-yes += generic/vpxscale.c
+SCALE_SRCS-yes += vpx_scale.h
+SCALE_SRCS-yes += generic/vpx_scale.c
SCALE_SRCS-yes += generic/yv12config.c
SCALE_SRCS-yes += generic/yv12extend.c
-SCALE_SRCS-yes += generic/yv12extend_generic.h
SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += generic/gen_scalers.c
+SCALE_SRCS-yes += vpx_scale_asm_offsets.c
+SCALE_SRCS-yes += vpx_scale_rtcd.c
+SCALE_SRCS-yes += vpx_scale_rtcd.sh
#neon
SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_copyframe_func_neon$(ASM)
@@ -16,3 +17,8 @@ SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_extendframeborders_neon$(ASM)
SCALE_SRCS-$(HAVE_NEON) += arm/neon/yv12extend_arm.c
SCALE_SRCS-no += $(SCALE_SRCS_REMOVE-yes)
+
+$(eval $(call asm_offsets_template,\
+ vpx_scale_asm_offsets.asm, vpx_scale/vpx_scale_asm_offsets.c))
+
+$(eval $(call rtcd_h_template,vpx_scale_rtcd,vpx_scale/vpx_scale_rtcd.sh))
diff --git a/libvpx/vp8/common/asm_com_offsets.c b/libvpx/vpx_scale/vpx_scale_asm_offsets.c
index ae22b5f..caa9e80 100644
--- a/libvpx/vp8/common/asm_com_offsets.c
+++ b/libvpx/vpx_scale/vpx_scale_asm_offsets.c
@@ -9,15 +9,10 @@
*/
-#include "vpx_config.h"
+#include "./vpx_config.h"
#include "vpx/vpx_codec.h"
#include "vpx_ports/asm_offsets.h"
#include "vpx_scale/yv12config.h"
-#include "vp8/common/blockd.h"
-
-#if CONFIG_POSTPROC
-#include "postproc.h"
-#endif /* CONFIG_POSTPROC */
BEGIN
@@ -34,38 +29,12 @@ DEFINE(yv12_buffer_config_v_buffer, offsetof(YV12_BUFFER_CONFIG, v_b
DEFINE(yv12_buffer_config_border, offsetof(YV12_BUFFER_CONFIG, border));
DEFINE(VP8BORDERINPIXELS_VAL, VP8BORDERINPIXELS);
-#if CONFIG_POSTPROC
-/* mfqe.c / filter_by_weight */
-DEFINE(MFQE_PRECISION_VAL, MFQE_PRECISION);
-#endif /* CONFIG_POSTPROC */
-
END
/* add asserts for any offset that is not supported by assembly code */
/* add asserts for any size that is not supported by assembly code */
-#if HAVE_MEDIA
-/* switch case in vp8_intra4x4_predict_armv6 is based on these enumerated values */
-ct_assert(B_DC_PRED, B_DC_PRED == 0);
-ct_assert(B_TM_PRED, B_TM_PRED == 1);
-ct_assert(B_VE_PRED, B_VE_PRED == 2);
-ct_assert(B_HE_PRED, B_HE_PRED == 3);
-ct_assert(B_LD_PRED, B_LD_PRED == 4);
-ct_assert(B_RD_PRED, B_RD_PRED == 5);
-ct_assert(B_VR_PRED, B_VR_PRED == 6);
-ct_assert(B_VL_PRED, B_VL_PRED == 7);
-ct_assert(B_HD_PRED, B_HD_PRED == 8);
-ct_assert(B_HU_PRED, B_HU_PRED == 9);
-#endif
-
#if HAVE_NEON
/* vp8_yv12_extend_frame_borders_neon makes several assumptions based on this */
ct_assert(VP8BORDERINPIXELS_VAL, VP8BORDERINPIXELS == 32)
#endif
-
-#if HAVE_SSE2
-#if CONFIG_POSTPROC
-/* vp8_filter_by_weight16x16 and 8x8 */
-ct_assert(MFQE_PRECISION_VAL, MFQE_PRECISION == 4)
-#endif /* CONFIG_POSTPROC */
-#endif /* HAVE_SSE2 */
diff --git a/libvpx/vpx_scale/vpx_scale_rtcd.c b/libvpx/vpx_scale/vpx_scale_rtcd.c
new file mode 100644
index 0000000..656a22f
--- /dev/null
+++ b/libvpx/vpx_scale/vpx_scale_rtcd.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vpx_config.h"
+#define RTCD_C
+#include "vpx_scale_rtcd.h"
+#include "vpx_ports/vpx_once.h"
+
+void vpx_scale_rtcd()
+{
+ once(setup_rtcd_internal);
+}
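
vpx_scale_rtcd() funnels the one-time setup of the dispatch table through once(), so concurrent callers race safely and setup_rtcd_internal runs exactly once. vpx_once.h supplies portable variants; a minimal sketch of the same idiom with POSIX threads (names hypothetical):

    #include <pthread.h>

    static pthread_once_t rtcd_once = PTHREAD_ONCE_INIT;

    static void setup(void) {
      /* point each dispatch entry at the best available implementation */
    }

    void my_rtcd(void) {
      pthread_once(&rtcd_once, setup);
    }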
diff --git a/libvpx/vpx_scale/vpx_scale_rtcd.sh b/libvpx/vpx_scale/vpx_scale_rtcd.sh
new file mode 100644
index 0000000..ea7b0e2
--- /dev/null
+++ b/libvpx/vpx_scale/vpx_scale_rtcd.sh
@@ -0,0 +1,34 @@
+vpx_scale_forward_decls() {
+cat <<EOF
+struct yv12_buffer_config;
+EOF
+}
+forward_decls vpx_scale_forward_decls
+
+# Scaler functions
+if [ "CONFIG_SPATIAL_RESAMPLING" != "yes" ]; then
+ prototype void vp8_horizontal_line_5_4_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
+ prototype void vp8_vertical_band_5_4_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
+ prototype void vp8_horizontal_line_5_3_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
+ prototype void vp8_vertical_band_5_3_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
+ prototype void vp8_horizontal_line_2_1_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
+ prototype void vp8_vertical_band_2_1_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
+ prototype void vp8_vertical_band_2_1_scale_i "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
+fi
+
+prototype void vp8_yv12_extend_frame_borders "struct yv12_buffer_config *ybf"
+specialize vp8_yv12_extend_frame_borders neon
+
+prototype void vp8_yv12_copy_frame "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"
+specialize vp8_yv12_copy_frame neon
+
+prototype void vpx_yv12_copy_y "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"
+specialize vpx_yv12_copy_y neon
+
+if [ "$CONFIG_VP9" = "yes" ]; then
+ prototype void vp9_extend_frame_borders "struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y"
+ specialize vp9_extend_frame_borders
+
+ prototype void vp9_extend_frame_inner_borders "struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y"
+ specialize vp9_extend_frame_inner_borders
+fi
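
Each prototype/specialize pair above is expanded by rtcd.sh into the generated vpx_scale_rtcd.h: a declaration per implementation, plus either a plain #define (when only the C version is built) or a function pointer that setup_rtcd_internal retargets after probing the CPU. A hedged sketch of the generated shape for one entry:

    /* Assumed shape of the generated dispatch for vp8_yv12_copy_frame
     * (sketch; the real header is emitted by rtcd.sh at build time). */
    void vp8_yv12_copy_frame_c(const struct yv12_buffer_config *src_ybc,
                               struct yv12_buffer_config *dst_ybc);
    void vp8_yv12_copy_frame_neon(const struct yv12_buffer_config *src_ybc,
                                  struct yv12_buffer_config *dst_ybc);
    RTCD_EXTERN void (*vp8_yv12_copy_frame)(const struct yv12_buffer_config *src_ybc,
                                            struct yv12_buffer_config *dst_ybc);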
diff --git a/libvpx/vpx_scale/win32/scaleopt.c b/libvpx/vpx_scale/win32/scaleopt.c
index 2d96cc7..4336ece 100644
--- a/libvpx/vpx_scale/win32/scaleopt.c
+++ b/libvpx/vpx_scale/win32/scaleopt.c
@@ -18,1184 +18,14 @@
****************************************************************************/
#include "pragmas.h"
-
-
/****************************************************************************
* Module Statics
****************************************************************************/
-__declspec(align(16)) const static unsigned short one_fifth[] = { 51, 51, 51, 51 };
-__declspec(align(16)) const static unsigned short two_fifths[] = { 102, 102, 102, 102 };
-__declspec(align(16)) const static unsigned short three_fifths[] = { 154, 154, 154, 154 };
-__declspec(align(16)) const static unsigned short four_fifths[] = { 205, 205, 205, 205 };
__declspec(align(16)) const static unsigned short round_values[] = { 128, 128, 128, 128 };
-__declspec(align(16)) const static unsigned short four_ones[] = { 1, 1, 1, 1};
-__declspec(align(16)) const static unsigned short const45_2[] = {205, 154, 102, 51 };
-__declspec(align(16)) const static unsigned short const45_1[] = { 51, 102, 154, 205 };
-__declspec(align(16)) const static unsigned char mask45[] = { 0, 0, 0, 0, 0, 0, 255, 0};
-__declspec(align(16)) const static unsigned short const35_2[] = { 154, 51, 205, 102 };
-__declspec(align(16)) const static unsigned short const35_1[] = { 102, 205, 51, 154 };
-
-
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vpx_mem/vpx_mem.h"
-/****************************************************************************
- *
- * ROUTINE : horizontal_line_3_5_scale_mmx
- *
- * INPUTS : const unsigned char *source :
- * unsigned int source_width :
- * unsigned char *dest :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 3 to 5 up-scaling of a horizontal line of pixels.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static
-void horizontal_line_3_5_scale_mmx
-(
- const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width
-) {
- (void) dest_width;
-
- __asm {
-
- push ebx
-
- mov esi, source
- mov edi, dest
-
- mov ecx, source_width
- lea edx, [esi+ecx-3];
-
- movq mm5, const35_1 // mm5 = 66 xx cd xx 33 xx 9a xx
- movq mm6, const35_2 // mm6 = 9a xx 33 xx cd xx 66 xx
-
- movq mm4, round_values // mm4 = 80 xx 80 xx 80 xx 80 xx
- pxor mm7, mm7 // clear mm7
-
- horiz_line_3_5_loop:
-
- mov eax, DWORD PTR [esi] // eax = 00 01 02 03
- mov ebx, eax
-
- and ebx, 0xffff00 // ebx = xx 01 02 xx
- mov ecx, eax // ecx = 00 01 02 03
-
- and eax, 0xffff0000 // eax = xx xx 02 03
- xor ecx, eax // ecx = 00 01 xx xx
-
- shr ebx, 8 // ebx = 01 02 xx xx
- or eax, ebx // eax = 01 02 02 03
-
- shl ebx, 16 // ebx = xx xx 01 02
- movd mm1, eax // mm1 = 01 02 02 03 xx xx xx xx
-
- or ebx, ecx // ebx = 00 01 01 02
- punpcklbw mm1, mm7 // mm1 = 01 xx 02 xx 02 xx 03 xx
-
- movd mm0, ebx // mm0 = 00 01 01 02
- pmullw mm1, mm6 //
-
- punpcklbw mm0, mm7 // mm0 = 00 xx 01 xx 01 xx 02 xx
- pmullw mm0, mm5 //
-
- mov [edi], ebx // write output 00 xx xx xx
- add esi, 3
-
- add edi, 5
- paddw mm0, mm1
-
- paddw mm0, mm4
- psrlw mm0, 8
-
- cmp esi, edx
- packuswb mm0, mm7
-
- movd DWORD Ptr [edi-4], mm0
- jl horiz_line_3_5_loop
-
-// Exit:
- mov eax, DWORD PTR [esi] // eax = 00 01 02 03
- mov ebx, eax
-
- and ebx, 0xffff00 // ebx = xx 01 02 xx
- mov ecx, eax // ecx = 00 01 02 03
-
- and eax, 0xffff0000 // eax = xx xx 02 03
- xor ecx, eax // ecx = 00 01 xx xx
-
- shr ebx, 8 // ebx = 01 02 xx xx
- or eax, ebx // eax = 01 02 02 03
-
- shl eax, 8 // eax = xx 01 02 02
- and eax, 0xffff0000 // eax = xx xx 02 02
-
- or eax, ebx // eax = 01 02 02 02
-
- shl ebx, 16 // ebx = xx xx 01 02
- movd mm1, eax // mm1 = 01 02 02 02 xx xx xx xx
-
- or ebx, ecx // ebx = 00 01 01 02
- punpcklbw mm1, mm7 // mm1 = 01 xx 02 xx 02 xx 02 xx
-
- movd mm0, ebx // mm0 = 00 01 01 02
- pmullw mm1, mm6 //
-
- punpcklbw mm0, mm7 // mm0 = 00 xx 01 xx 01 xx 02 xx
- pmullw mm0, mm5 //
-
- mov [edi], ebx // write output 00 xx xx xx
- paddw mm0, mm1
-
- paddw mm0, mm4
- psrlw mm0, 8
-
- packuswb mm0, mm7
- movd DWORD Ptr [edi+1], mm0
-
- pop ebx
-
- }
-
-}
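
The deleted MMX scalers encode their polyphase weights as 8-bit fixed point: 51 ≈ 256/5, 102 ≈ 2·256/5, 154 ≈ 3·256/5 and 205 ≈ 4·256/5, with +128 added for rounding before the final shift by 8. Each interpolated output pixel therefore reduces to this scalar form (sketch):

    /* Scalar equivalent of one weighted tap pair from the removed MMX code.
     * Example: blend(100, 200, 205, 51) == (20500 + 10200 + 128) >> 8 == 120,
     * i.e. roughly 4/5 * 100 + 1/5 * 200. */
    static unsigned char blend(unsigned char a, unsigned char b,
                               unsigned short wa, unsigned short wb) {
      return (unsigned char)((a * wa + b * wb + 128) >> 8);
    }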
-
-
-/****************************************************************************
- *
- * ROUTINE : horizontal_line_4_5_scale_mmx
- *
- * INPUTS : const unsigned char *source :
- * unsigned int source_width :
- * unsigned char *dest :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 4 to 5 up-scaling of a horizontal line of pixels.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static
-void horizontal_line_4_5_scale_mmx
-(
- const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width
-) {
- (void)dest_width;
-
- __asm {
-
- mov esi, source
- mov edi, dest
-
- mov ecx, source_width
- lea edx, [esi+ecx-8];
-
- movq mm5, const45_1 // mm5 = 33 xx 66 xx 9a xx cd xx
- movq mm6, const45_2 // mm6 = cd xx 9a xx 66 xx 33 xx
-
- movq mm4, round_values // mm4 = 80 xx 80 xx 80 xx 80 xx
- pxor mm7, mm7 // clear mm7
-
- horiz_line_4_5_loop:
-
- movq mm0, QWORD PTR [esi] // mm0 = 00 01 02 03 04 05 06 07
- movq mm1, QWORD PTR [esi+1]; // mm1 = 01 02 03 04 05 06 07 08
-
- movq mm2, mm0 // mm2 = 00 01 02 03 04 05 06 07
- movq mm3, mm1 // mm3 = 01 02 03 04 05 06 07 08
-
- movd DWORD PTR [edi], mm0 // write output 00 xx xx xx
- punpcklbw mm0, mm7 // mm0 = 00 xx 01 xx 02 xx 03 xx
-
- punpcklbw mm1, mm7 // mm1 = 01 xx 02 xx 03 xx 04 xx
- pmullw mm0, mm5 // 00* 51 01*102 02*154 03*205
-
- pmullw mm1, mm6 // 01*205 02*154 03*102 04* 51
- punpckhbw mm2, mm7 // mm2 = 04 xx 05 xx 06 xx 07 xx
-
- movd DWORD PTR [edi+5], mm2 // write output 05 xx xx xx
- pmullw mm2, mm5 // 04* 51 05*102 06*154 07*205
-
- punpckhbw mm3, mm7 // mm3 = 05 xx 06 xx 07 xx 08 xx
- pmullw mm3, mm6 // 05*205 06*154 07*102 08* 51
-
- paddw mm0, mm1 // added round values
- paddw mm0, mm4
-
- psrlw mm0, 8 // output: 01 xx 02 xx 03 xx 04 xx
- packuswb mm0, mm7
-
- movd DWORD PTR [edi+1], mm0 // write output 01 02 03 04
- add edi, 10
-
- add esi, 8
- paddw mm2, mm3 //
-
- paddw mm2, mm4 // added round values
- cmp esi, edx
-
- psrlw mm2, 8
- packuswb mm2, mm7
-
- movd DWORD PTR [edi-4], mm2 // write output 06 07 08 09
- jl horiz_line_4_5_loop
-
-// Exit:
- movq mm0, [esi] // mm0 = 00 01 02 03 04 05 06 07
- movq mm1, mm0 // mm1 = 00 01 02 03 04 05 06 07
-
- movq mm2, mm0 // mm2 = 00 01 02 03 04 05 06 07
- psrlq mm1, 8 // mm1 = 01 02 03 04 05 06 07 00
-
- movq mm3, mask45 // mm3 = 00 00 00 00 00 00 ff 00
- pand mm3, mm1 // mm3 = 00 00 00 00 00 00 07 00
-
- psllq mm3, 8 // mm3 = 00 00 00 00 00 00 00 07
- por mm1, mm3 // mm1 = 01 02 03 04 05 06 07 07
-
- movq mm3, mm1
-
- movd DWORD PTR [edi], mm0 // write output 00 xx xx xx
- punpcklbw mm0, mm7 // mm0 = 00 xx 01 xx 02 xx 03 xx
-
- punpcklbw mm1, mm7 // mm1 = 01 xx 02 xx 03 xx 04 xx
- pmullw mm0, mm5 // 00* 51 01*102 02*154 03*205
-
- pmullw mm1, mm6 // 01*205 02*154 03*102 04* 51
- punpckhbw mm2, mm7 // mm2 = 04 xx 05 xx 06 xx 07 xx
-
- movd DWORD PTR [edi+5], mm2 // write output 05 xx xx xx
- pmullw mm2, mm5 // 04* 51 05*102 06*154 07*205
-
- punpckhbw mm3, mm7 // mm3 = 05 xx 06 xx 07 xx 08 xx
- pmullw mm3, mm6 // 05*205 06*154 07*102 07* 51
-
- paddw mm0, mm1 // added round values
- paddw mm0, mm4
-
- psrlw mm0, 8 // output: 01 xx 02 xx 03 xx 04 xx
- packuswb mm0, mm7 // 01 02 03 04 xx xx xx xx
-
- movd DWORD PTR [edi+1], mm0 // write output 01 02 03 04
- paddw mm2, mm3 //
-
- paddw mm2, mm4 // added round values
- psrlw mm2, 8
-
- packuswb mm2, mm7
- movd DWORD PTR [edi+6], mm2 // write output 06 07 08 09
-
-
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vertical_band_4_5_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 4 to 5 up-scaling of a 4 pixel high band of pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band. The function also has a "C" only
- * version.
- *
- ****************************************************************************/
-static
-void vertical_band_4_5_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
-
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- lea edi, [esi+ecx*2] // two lines below
- add edi, ecx // three lines below
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
- vs_4_5_loop:
-
- movq mm0, QWORD ptr [esi] // src[0];
- movq mm1, QWORD ptr [esi+ecx] // src[1];
-
- movq mm2, mm0 // Make a copy
- punpcklbw mm0, mm7 // unpack low to word
-
- movq mm5, one_fifth
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm0, mm5 // a * 1/5
-
- movq mm3, mm1 // make a copy
- punpcklbw mm1, mm7 // unpack low to word
-
- pmullw mm2, mm5 // a * 1/5
- movq mm6, four_fifths // constant
-
- movq mm4, mm1 // copy of low b
- pmullw mm4, mm6 // b * 4/5
-
- punpckhbw mm3, mm7 // unpack high to word
- movq mm5, mm3 // copy of high b
-
- pmullw mm5, mm6 // b * 4/5
- paddw mm0, mm4 // a * 1/5 + b * 4/5
-
- paddw mm2, mm5 // a * 1/5 + b * 4/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des [1]
-
- movq QWORD ptr [esi+ecx], mm0 // write des[1]
- movq mm0, [esi+ecx*2] // mm0 = src[2]
-
- // mm1, mm3 --- Src[1]
- // mm0 --- Src[2]
- // mm7 for unpacking
-
- movq mm5, two_fifths
- movq mm2, mm0 // make a copy
-
- pmullw mm1, mm5 // b * 2/5
- movq mm6, three_fifths
-
-
- punpcklbw mm0, mm7 // unpack low to word
- pmullw mm3, mm5 // b * 2/5
-
- movq mm4, mm0 // make copy of c
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm4, mm6 // c * 3/5
- movq mm5, mm2
-
- pmullw mm5, mm6 // c * 3/5
- paddw mm1, mm4 // b * 2/5 + c * 3/5
-
- paddw mm3, mm5 // b * 2/5 + c * 3/5
- paddw mm1, round_values // + 128
-
- paddw mm3, round_values // + 128
- psrlw mm1, 8
-
- psrlw mm3, 8
- packuswb mm1, mm3 // des[2]
-
- movq QWORD ptr [esi+ecx*2], mm1 // write des[2]
- movq mm1, [edi] // mm1=Src[3];
-
- // mm0, mm2 --- Src[2]
- // mm1 --- Src[3]
- // mm6 --- 3/5
- // mm7 for unpacking
-
- pmullw mm0, mm6 // c * 3/5
- movq mm5, two_fifths // mm5 = 2/5
-
- movq mm3, mm1 // make a copy
- pmullw mm2, mm6 // c * 3/5
-
- punpcklbw mm1, mm7 // unpack low
- movq mm4, mm1 // make a copy
-
- punpckhbw mm3, mm7 // unpack high
- pmullw mm4, mm5 // d * 2/5
-
- movq mm6, mm3 // make a copy
- pmullw mm6, mm5 // d * 2/5
-
- paddw mm0, mm4 // c * 3/5 + d * 2/5
- paddw mm2, mm6 // c * 3/5 + d * 2/5
-
- paddw mm0, round_values // + 128
- paddw mm2, round_values // + 128
-
- psrlw mm0, 8
- psrlw mm2, 8
-
- packuswb mm0, mm2 // des[3]
- movq QWORD ptr [edi], mm0 // write des[3]
-
- // mm1, mm3 --- Src[3]
- // mm7 -- cleared for unpacking
-
- movq mm0, [edi+ecx*2] // mm0, Src[0] of the next group
-
- movq mm5, four_fifths // mm5 = 4/5
- pmullw mm1, mm5 // d * 4/5
-
- movq mm6, one_fifth // mm6 = 1/5
- movq mm2, mm0 // make a copy
-
- pmullw mm3, mm5 // d * 4/5
- punpcklbw mm0, mm7 // unpack low
-
- pmullw mm0, mm6 // an * 1/5
- punpckhbw mm2, mm7 // unpack high
-
- paddw mm1, mm0 // d * 4/5 + an * 1/5
- pmullw mm2, mm6 // an * 1/5
-
- paddw mm3, mm2 // d * 4/5 + an * 1/5
- paddw mm1, round_values // + 128
-
- paddw mm3, round_values // + 128
- psrlw mm1, 8
-
- psrlw mm3, 8
- packuswb mm1, mm3 // des[4]
-
- movq QWORD ptr [edi+ecx], mm1 // write des[4]
-
- add edi, 8
- add esi, 8
-
- sub edx, 8
- jg vs_4_5_loop
- }
-}
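
Per pixel column, the loop blends the band's four source rows (a..d, on rows 0-3) with the first row of the band below (on row 5) at n/5 weights; row 0 stays as-is. A scalar sketch for one column, assuming the one_fifth..four_fifths constants defined earlier in this file are the usual /256 fixed-point values 51/102/154/205 (hypothetical helper, not part of the tree):

    /* One column of the 4-to-5 vertical scale; p points at the column's
     * top row, pitch is the destination stride. */
    static void vert_4_5_column_sketch(unsigned char *p, unsigned int pitch) {
      unsigned char a = p[0], b = p[pitch], c = p[2 * pitch], d = p[3 * pitch];
      unsigned char an = p[5 * pitch];          /* src[0] of the next band */
      p[1 * pitch] = (unsigned char)((a *  51 + b  * 205 + 128) >> 8);
      p[2 * pitch] = (unsigned char)((b * 102 + c  * 154 + 128) >> 8);
      p[3 * pitch] = (unsigned char)((c * 154 + d  * 102 + 128) >> 8);
      p[4 * pitch] = (unsigned char)((d * 205 + an *  51 + 128) >> 8);
    }
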
-
-/****************************************************************************
- *
- * ROUTINE : last_vertical_band_4_5_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 4 to 5 up-scaling of the last 4-pixel high band in an image.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band. The function also has a "C" only
- * version.
- *
- ****************************************************************************/
-static
-void last_vertical_band_4_5_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- lea edi, [esi+ecx*2] // two lines below
- add edi, ecx // three lines below
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
- last_vs_4_5_loop:
-
- movq mm0, QWORD ptr [esi] // src[0];
- movq mm1, QWORD ptr [esi+ecx] // src[1];
-
- movq mm2, mm0 // Make a copy
- punpcklbw mm0, mm7 // unpack low to word
-
- movq mm5, one_fifth
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm0, mm5 // a * 1/5
-
- movq mm3, mm1 // make a copy
- punpcklbw mm1, mm7 // unpack low to word
-
- pmullw mm2, mm5 // a * 1/5
- movq mm6, four_fifths // constant
-
- movq mm4, mm1 // copy of low b
- pmullw mm4, mm6 // b * 4/5
-
- punpckhbw mm3, mm7 // unpack high to word
- movq mm5, mm3 // copy of high b
-
- pmullw mm5, mm6 // b * 4/5
- paddw mm0, mm4 // a * 1/5 + b * 4/5
-
- paddw mm2, mm5 // a * 1/5 + b * 4/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des [1]
-
- movq QWORD ptr [esi+ecx], mm0 // write des[1]
- movq mm0, [esi+ecx*2] // mm0 = src[2]
-
- // mm1, mm3 --- Src[1]
- // mm0 --- Src[2]
- // mm7 for unpacking
-
- movq mm5, two_fifths
- movq mm2, mm0 // make a copy
-
- pmullw mm1, mm5 // b * 2/5
- movq mm6, three_fifths
-
-
- punpcklbw mm0, mm7 // unpack low to word
- pmullw mm3, mm5 // b * 2/5
-
- movq mm4, mm0 // make copy of c
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm4, mm6 // c * 3/5
- movq mm5, mm2
-
- pmullw mm5, mm6 // c * 3/5
- paddw mm1, mm4 // b * 2/5 + c * 3/5
-
- paddw mm3, mm5 // b * 2/5 + c * 3/5
- paddw mm1, round_values // + 128
-
- paddw mm3, round_values // + 128
- psrlw mm1, 8
-
- psrlw mm3, 8
- packuswb mm1, mm3 // des[2]
-
- movq QWORD ptr [esi+ecx*2], mm1 // write des[2]
- movq mm1, [edi] // mm1=Src[3];
-
- movq QWORD ptr [edi+ecx], mm1 // write des[4] (copy of src[3]; no band below)
-
- // mm0, mm2 --- Src[2]
- // mm1 --- Src[3]
- // mm6 --- 3/5
- // mm7 for unpacking
-
- pmullw mm0, mm6 // c * 3/5
- movq mm5, two_fifths // mm5 = 2/5
-
- movq mm3, mm1 // make a copy
- pmullw mm2, mm6 // c * 3/5
-
- punpcklbw mm1, mm7 // unpack low
- movq mm4, mm1 // make a copy
-
- punpckhbw mm3, mm7 // unpack high
- pmullw mm4, mm5 // d * 2/5
-
- movq mm6, mm3 // make a copy
- pmullw mm6, mm5 // d * 2/5
-
- paddw mm0, mm4 // c * 3/5 + d * 2/5
- paddw mm2, mm6 // c * 3/5 + d * 2/5
-
- paddw mm0, round_values // + 128
- paddw mm2, round_values // + 128
-
- psrlw mm0, 8
- psrlw mm2, 8
-
- packuswb mm0, mm2 // des[3]
- movq QWORD ptr [edi], mm0 // write des[3]
-
- // mm1, mm3 --- Src[3]
- // mm7 -- cleared for unpacking
- add edi, 8
- add esi, 8
-
- sub edx, 8
- jg last_vs_4_5_loop
- }
-}
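
The only difference from vertical_band_4_5_scale_mmx is the bottom edge: with no band below to blend against, des[4] is written as a straight copy of src[3] (the early movq to [edi+ecx] above). In the scalar sketch's terms, its last line becomes:

    p[4 * pitch] = d;   /* replicate the band's last source row */
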
-
-/****************************************************************************
- *
- * ROUTINE : vertical_band_3_5_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 3 to 5 up-scaling of a 3-pixel high band of pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band. The function also has a "C" only
- * version.
- *
- ****************************************************************************/
-static
-void vertical_band_3_5_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- lea edi, [esi+ecx*2] // two lines below
- add edi, ecx // three lines below
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
- vs_3_5_loop:
-
- movq mm0, QWORD ptr [esi] // src[0];
- movq mm1, QWORD ptr [esi+ecx] // src[1];
-
- movq mm2, mm0 // Make a copy
- punpcklbw mm0, mm7 // unpack low to word
-
- movq mm5, two_fifths // mm5 = 2/5
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm0, mm5 // a * 2/5
-
- movq mm3, mm1 // make a copy
- punpcklbw mm1, mm7 // unpack low to word
-
- pmullw mm2, mm5 // a * 2/5
- movq mm6, three_fifths // mm6 = 3/5
-
- movq mm4, mm1 // copy of low b
- pmullw mm4, mm6 // b * 3/5
-
- punpckhbw mm3, mm7 // unpack high to word
- movq mm5, mm3 // copy of high b
-
- pmullw mm5, mm6 // b * 3/5
- paddw mm0, mm4 // a * 2/5 + b * 3/5
-
- paddw mm2, mm5 // a * 2/5 + b * 3/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des [1]
-
- movq QWORD ptr [esi+ecx], mm0 // write des[1]
- movq mm0, [esi+ecx*2] // mm0 = src[2]
-
- // mm1, mm3 --- Src[1]
- // mm0 --- Src[2]
- // mm7 for unpacking
-
- movq mm4, mm1 // b low
- pmullw mm1, four_fifths // b * 4/5 low
-
- movq mm5, mm3 // b high
- pmullw mm3, four_fifths // b * 4/5 high
-
- movq mm2, mm0 // c
- pmullw mm4, one_fifth // b * 1/5
-
- punpcklbw mm0, mm7 // c low
- pmullw mm5, one_fifth // b * 1/5
-
- movq mm6, mm0 // make copy of c low
- punpckhbw mm2, mm7 // c high
-
- pmullw mm6, one_fifth // c * 1/5 low
- movq mm7, mm2 // make copy of c high
-
- pmullw mm7, one_fifth // c * 1/5 high
- paddw mm1, mm6 // b * 4/5 + c * 1/5 low
-
- paddw mm3, mm7 // b * 4/5 + c * 1/5 high
- movq mm6, mm0 // make copy of c low
-
- pmullw mm6, four_fifths // c * 4/5 low
- movq mm7, mm2 // make copy of c high
-
- pmullw mm7, four_fifths // c * 4/5 high
-
- paddw mm4, mm6 // b * 1/5 + c * 4/5 low
- paddw mm5, mm7 // b * 1/5 + c * 4/5 high
-
- paddw mm1, round_values // + 128
- paddw mm3, round_values // + 128
-
- psrlw mm1, 8
- psrlw mm3, 8
-
- packuswb mm1, mm3 // des[2]
- movq QWORD ptr [esi+ecx*2], mm1 // write des[2]
-
- paddw mm4, round_values // + 128
- paddw mm5, round_values // + 128
-
- psrlw mm4, 8
- psrlw mm5, 8
-
- packuswb mm4, mm5 // des[3]
- movq QWORD ptr [edi], mm4 // write des[3]
-
- // mm0, mm2 --- Src[2]
-
- pxor mm7, mm7 // clear mm7 for unpacking
- movq mm1, [edi+ecx*2] // mm1 = Src[0] of the next group
-
- movq mm5, three_fifths // mm5 = 3/5
- pmullw mm0, mm5 // d * 3/5
-
- movq mm6, two_fifths // mm6 = 2/5
- movq mm3, mm1 // make a copy
-
- pmullw mm2, mm5 // d * 3/5
- punpcklbw mm1, mm7 // unpack low
-
- pmullw mm1, mm6 // an * 2/5
- punpckhbw mm3, mm7 // unpack high
-
- paddw mm0, mm1 // d * 3/5 + an * 2/5
- pmullw mm3, mm6 // an * 2/5
-
- paddw mm2, mm3 // d * 3/5 + an * 2/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des[4]
-
- movq QWORD ptr [edi+ecx], mm0 // write des[4]
-
- add edi, 8
- add esi, 8
-
- sub edx, 8
- jg vs_3_5_loop
- }
-}
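
Here three source rows (a, b, c, on rows 0-2) expand to five, per the fraction comments: des[1] = 2/5 a + 3/5 b, des[2] = 4/5 b + 1/5 c, des[3] = 1/5 b + 4/5 c, and des[4] blends c with the next band's first row (row 5). The same scalar form, under the 51/102/154/205 fixed-point assumption (hypothetical helper, not part of the tree):

    /* One column of the 3-to-5 vertical scale. */
    static void vert_3_5_column_sketch(unsigned char *p, unsigned int pitch) {
      unsigned char a = p[0], b = p[pitch], c = p[2 * pitch];
      unsigned char an = p[5 * pitch];          /* src[0] of the next band */
      p[1 * pitch] = (unsigned char)((a * 102 + b  * 154 + 128) >> 8);
      p[2 * pitch] = (unsigned char)((b * 205 + c  *  51 + 128) >> 8);
      p[3 * pitch] = (unsigned char)((b *  51 + c  * 205 + 128) >> 8);
      p[4 * pitch] = (unsigned char)((c * 154 + an * 102 + 128) >> 8);
    }
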
-
-/****************************************************************************
- *
- * ROUTINE : last_vertical_band_3_5_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 3 to 5 up-scaling of the last 3-pixel high band in an image.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band. The function also has a "C" only
- * version.
- *
- ****************************************************************************/
-static
-void last_vertical_band_3_5_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- lea edi, [esi+ecx*2] // two lines below
- add edi, ecx // three lines below
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
-
- last_vs_3_5_loop:
-
- movq mm0, QWORD ptr [esi] // src[0];
- movq mm1, QWORD ptr [esi+ecx] // src[1];
-
- movq mm2, mm0 // Make a copy
- punpcklbw mm0, mm7 // unpack low to word
-
- movq mm5, two_fifths // mm5 = 2/5
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm0, mm5 // a * 2/5
-
- movq mm3, mm1 // make a copy
- punpcklbw mm1, mm7 // unpack low to word
-
- pmullw mm2, mm5 // a * 2/5
- movq mm6, three_fifths // mm6 = 3/5
-
- movq mm4, mm1 // copy of low b
- pmullw mm4, mm6 // b * 3/5
-
- punpckhbw mm3, mm7 // unpack high to word
- movq mm5, mm3 // copy of high b
-
- pmullw mm5, mm6 // b * 3/5
- paddw mm0, mm4 // a * 2/5 + b * 3/5
-
- paddw mm2, mm5 // a * 2/5 + b * 3/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des [1]
-
- movq QWORD ptr [esi+ecx], mm0 // write des[1]
- movq mm0, [esi+ecx*2] // mm0 = src[2]
-
-
-
- // mm1, mm3 --- Src[1]
- // mm0 --- Src[2]
- // mm7 for unpacking
-
- movq mm4, mm1 // b low
- pmullw mm1, four_fifths // b * 4/5 low
-
- movq QWORD ptr [edi+ecx], mm0 // write des[4] (copy of src[2]; no band below)
-
- movq mm5, mm3 // b high
- pmullw mm3, four_fifths // b * 4/5 high
-
- movq mm2, mm0 // c
- pmullw mm4, one_fifth // b * 1/5
-
- punpcklbw mm0, mm7 // c low
- pmullw mm5, one_fifth // b * 1/5
-
- movq mm6, mm0 // make copy of c low
- punpckhbw mm2, mm7 // c high
-
- pmullw mm6, one_fifth // c * 1/5 low
- movq mm7, mm2 // make copy of c high
-
- pmullw mm7, one_fifth // c * 1/5 high
- paddw mm1, mm6 // b * 4/5 + c * 1/5 low
-
- paddw mm3, mm7 // b * 4/5 + c * 1/5 high
- movq mm6, mm0 // make copy of c low
-
- pmullw mm6, four_fifths // c * 4/5 low
- movq mm7, mm2 // make copy of c high
-
- pmullw mm7, four_fifths // c * 4/5 high
-
- paddw mm4, mm6 // b * 1/5 + c * 4/5 low
- paddw mm5, mm7 // b * 1/5 + c * 4/5 high
-
- paddw mm1, round_values // + 128
- paddw mm3, round_values // + 128
-
- psrlw mm1, 8
- psrlw mm3, 8
-
- packuswb mm1, mm3 // des[2]
- movq QWORD ptr [esi+ecx*2], mm1 // write des[2]
-
- paddw mm4, round_values // + 128
- paddw mm5, round_values // + 128
-
- psrlw mm4, 8
- psrlw mm5, 8
-
- packuswb mm4, mm5 // des[3]
- movq QWORD ptr [edi], mm4 // write des[3]
-
- // mm0, mm2 --- Src[2]
-
- add edi, 8
- add esi, 8
-
- sub edx, 8
- jg last_vs_3_5_loop
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vertical_band_1_2_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 1 to 2 up-scaling of a band of pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band. The function also has a "C" only
- * version.
- *
- ****************************************************************************/
-static
-void vertical_band_1_2_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
-
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
- vs_1_2_loop:
-
- movq mm0, [esi] // get Src[0]
- movq mm1, [esi + ecx * 2] // get Src[1]
-
- movq mm2, mm0 // make copy before unpack
- movq mm3, mm1 // make copy before unpack
-
- punpcklbw mm0, mm7 // low Src[0]
- movq mm6, four_ones // mm6= 1, 1, 1, 1
-
- punpcklbw mm1, mm7 // low Src[1]
- paddw mm0, mm1 // low (a + b)
-
- punpckhbw mm2, mm7 // high Src[0]
- paddw mm0, mm6 // low (a + b + 1)
-
- punpckhbw mm3, mm7
- paddw mm2, mm3 // high (a + b )
-
- psraw mm0, 1 // low (a + b +1 )/2
- paddw mm2, mm6 // high (a + b + 1)
-
- psraw mm2, 1 // high (a + b + 1)/2
- packuswb mm0, mm2 // pack results
-
- movq [esi+ecx], mm0 // write out eight bytes
- add esi, 8
-
- sub edx, 8
- jg vs_1_2_loop
- }
-
-}
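
The 1-to-2 vertical case is a plain rounded average: source rows already sit on even destination rows, and each odd row in between becomes (above + below + 1) >> 1, with four_ones supplying the +1. Scalar sketch for one column (hypothetical helper, not part of the tree):

    static void vert_1_2_column_sketch(unsigned char *p, unsigned int pitch) {
      p[pitch] = (unsigned char)((p[0] + p[2 * pitch] + 1) >> 1);
    }
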
-
-/****************************************************************************
- *
- * ROUTINE : last_vertical_band_1_2_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 1 to 2 up-scaling of the last band of pixels in an image.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band. The function also has a "C" only
- * version.
- *
- ****************************************************************************/
-static
-void last_vertical_band_1_2_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- mov edx, dest_width // Loop counter
-
- last_vs_1_2_loop:
-
- movq mm0, [esi] // get Src[0]
- movq [esi+ecx], mm0 // write out eight bytes
-
- add esi, 8
- sub edx, 8
-
- jg last_vs_1_2_loop
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : horizontal_line_1_2_scale_mmx
- *
- * INPUTS : const unsigned char *source :
- * unsigned int source_width :
- * unsigned char *dest :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 1 to 2 up-scaling of a horizontal line of pixels.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static
-void horizontal_line_1_2_scale_mmx
-(
- const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width
-) {
- (void) dest_width;
-
- __asm {
- mov esi, source
- mov edi, dest
-
- pxor mm7, mm7
- movq mm6, four_ones
-
- mov ecx, source_width
-
- hs_1_2_loop:
-
- movq mm0, [esi]
- movq mm1, [esi+1]
-
- movq mm2, mm0
- movq mm3, mm1
-
- movq mm4, mm0
- punpcklbw mm0, mm7
-
- punpcklbw mm1, mm7
- paddw mm0, mm1
-
- paddw mm0, mm6
- punpckhbw mm2, mm7
-
- punpckhbw mm3, mm7
- paddw mm2, mm3
-
- paddw mm2, mm6
- psraw mm0, 1
-
- psraw mm2, 1
- packuswb mm0, mm2
-
- movq mm2, mm4
- punpcklbw mm2, mm0
-
- movq [edi], mm2
- punpckhbw mm4, mm0
-
- movq [edi+8], mm4
- add esi, 8
-
- add edi, 16
- sub ecx, 8
-
- cmp ecx, 8
- jg hs_1_2_loop
-
-// last eight pixels
-
- movq mm0, [esi]
- movq mm1, mm0
-
- movq mm2, mm0
- movq mm3, mm1
-
- psrlq mm1, 8
- psrlq mm3, 56
-
- psllq mm3, 56
- por mm1, mm3
-
- movq mm3, mm1
- movq mm4, mm0
-
- punpcklbw mm0, mm7
- punpcklbw mm1, mm7
-
- paddw mm0, mm1
- paddw mm0, mm6
-
- punpckhbw mm2, mm7
- punpckhbw mm3, mm7
-
- paddw mm2, mm3
- paddw mm2, mm6
-
- psraw mm0, 1
- psraw mm2, 1
-
- packuswb mm0, mm2
- movq mm2, mm4
-
- punpcklbw mm2, mm0
- movq [edi], mm2
-
- punpckhbw mm4, mm0
- movq [edi+8], mm4
- }
-}
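
The horizontal 1-to-2 scale uses the same rounded average: every source pixel lands on an even output position, followed by the average of itself and its right neighbour; the trailing-eight block replicates the last pixel so the final average degenerates to a copy. Scalar sketch (hypothetical helper, not part of the tree):

    static void horizontal_1_2_sketch(const unsigned char *src,
                                      unsigned int src_width,
                                      unsigned char *dst) {
      unsigned int i;
      for (i = 0; i < src_width; i++) {
        unsigned char next = (i + 1 < src_width) ? src[i + 1] : src[i];
        dst[2 * i]     = src[i];
        dst[2 * i + 1] = (unsigned char)((src[i] + next + 1) >> 1);
      }
    }
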
-
-
-
-
-
__declspec(align(16)) const static unsigned short const54_2[] = { 0, 64, 128, 192 };
__declspec(align(16)) const static unsigned short const54_1[] = {256, 192, 128, 64 };
@@ -1685,25 +515,6 @@ void vertical_band_2_1_scale_i_mmx(unsigned char *source, unsigned int src_pitch
void
register_mmxscalers(void) {
- vp8_horizontal_line_1_2_scale = horizontal_line_1_2_scale_mmx;
- vp8_vertical_band_1_2_scale = vertical_band_1_2_scale_mmx;
- vp8_last_vertical_band_1_2_scale = last_vertical_band_1_2_scale_mmx;
- vp8_horizontal_line_3_5_scale = horizontal_line_3_5_scale_mmx;
- vp8_vertical_band_3_5_scale = vertical_band_3_5_scale_mmx;
- vp8_last_vertical_band_3_5_scale = last_vertical_band_3_5_scale_mmx;
- vp8_horizontal_line_4_5_scale = horizontal_line_4_5_scale_mmx;
- vp8_vertical_band_4_5_scale = vertical_band_4_5_scale_mmx;
- vp8_last_vertical_band_4_5_scale = last_vertical_band_4_5_scale_mmx;
-
- vp8_horizontal_line_3_4_scale = vp8cx_horizontal_line_3_4_scale_c;
- vp8_vertical_band_3_4_scale = vp8cx_vertical_band_3_4_scale_c;
- vp8_last_vertical_band_3_4_scale = vp8cx_last_vertical_band_3_4_scale_c;
- vp8_horizontal_line_2_3_scale = vp8cx_horizontal_line_2_3_scale_c;
- vp8_vertical_band_2_3_scale = vp8cx_vertical_band_2_3_scale_c;
- vp8_last_vertical_band_2_3_scale = vp8cx_last_vertical_band_2_3_scale_c;
-
-
-
vp8_vertical_band_5_4_scale = vertical_band_5_4_scale_mmx;
vp8_vertical_band_5_3_scale = vertical_band_5_3_scale_mmx;
vp8_vertical_band_2_1_scale = vertical_band_2_1_scale_mmx;
@@ -1711,8 +522,4 @@ register_mmxscalers(void) {
vp8_horizontal_line_2_1_scale = horizontal_line_2_1_scale_mmx;
vp8_horizontal_line_5_3_scale = horizontal_line_5_3_scale_mmx;
vp8_horizontal_line_5_4_scale = horizontal_line_5_4_scale_mmx;
-
-
-
-
}
diff --git a/libvpx/vpx_scale/win32/scalesystemdependent.c b/libvpx/vpx_scale/win32/scalesystemdependent.c
deleted file mode 100644
index 98913d1..0000000
--- a/libvpx/vpx_scale/win32/scalesystemdependent.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/****************************************************************************
-*
-* Module Title : scalesystemdependent.c
-*
-* Description : Miscellaneous system dependent functions
-*
-****************************************************************************/
-
-/****************************************************************************
-* Header Files
-****************************************************************************/
-#include "vpx_scale/vpxscale.h"
-#include "cpuidlib.h"
-
-/****************************************************************************
-* Imports
-*****************************************************************************/
-extern void register_generic_scalers(void);
-extern void register_mmxscalers(void);
-
-/****************************************************************************
- *
- * ROUTINE : post_proc_machine_specific_config
- *
- * INPUTS : UINT32 Version : Codec version number.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Checks for machine specific features such as MMX support
- * and sets appropriate flags and function pointers.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-void
-vp8_scale_machine_specific_config(void) {
- // If MMX is supported then use the MMX versions of the functions,
- // else use the original 'C' versions.
- int mmx_enabled;
- int xmm_enabled;
- int wmt_enabled;
-
- vpx_get_processor_flags(&mmx_enabled, &xmm_enabled, &wmt_enabled);
-
- if (mmx_enabled || xmm_enabled || wmt_enabled) {
- register_mmxscalers();
- } else {
- vp8_horizontal_line_1_2_scale = vp8cx_horizontal_line_1_2_scale_c;
- vp8_vertical_band_1_2_scale = vp8cx_vertical_band_1_2_scale_c;
- vp8_last_vertical_band_1_2_scale = vp8cx_last_vertical_band_1_2_scale_c;
- vp8_horizontal_line_3_5_scale = vp8cx_horizontal_line_3_5_scale_c;
- vp8_vertical_band_3_5_scale = vp8cx_vertical_band_3_5_scale_c;
- vp8_last_vertical_band_3_5_scale = vp8cx_last_vertical_band_3_5_scale_c;
- vp8_horizontal_line_3_4_scale = vp8cx_horizontal_line_3_4_scale_c;
- vp8_vertical_band_3_4_scale = vp8cx_vertical_band_3_4_scale_c;
- vp8_last_vertical_band_3_4_scale = vp8cx_last_vertical_band_3_4_scale_c;
- vp8_horizontal_line_2_3_scale = vp8cx_horizontal_line_2_3_scale_c;
- vp8_vertical_band_2_3_scale = vp8cx_vertical_band_2_3_scale_c;
- vp8_last_vertical_band_2_3_scale = vp8cx_last_vertical_band_2_3_scale_c;
- vp8_horizontal_line_4_5_scale = vp8cx_horizontal_line_4_5_scale_c;
- vp8_vertical_band_4_5_scale = vp8cx_vertical_band_4_5_scale_c;
- vp8_last_vertical_band_4_5_scale = vp8cx_last_vertical_band_4_5_scale_c;
-
-
- vp8_vertical_band_5_4_scale = vp8cx_vertical_band_5_4_scale_c;
- vp8_vertical_band_5_3_scale = vp8cx_vertical_band_5_3_scale_c;
- vp8_vertical_band_2_1_scale = vp8cx_vertical_band_2_1_scale_c;
- vp8_vertical_band_2_1_scale_i = vp8cx_vertical_band_2_1_scale_i_c;
- vp8_horizontal_line_2_1_scale = vp8cx_horizontal_line_2_1_scale_c;
- vp8_horizontal_line_5_3_scale = vp8cx_horizontal_line_5_3_scale_c;
- vp8_horizontal_line_5_4_scale = vp8cx_horizontal_line_5_4_scale_c;
-
- }
-}
diff --git a/libvpx/vpx_scale/yv12config.h b/libvpx/vpx_scale/yv12config.h
index 6a8a1fc..0e950fb 100644
--- a/libvpx/vpx_scale/yv12config.h
+++ b/libvpx/vpx_scale/yv12config.h
@@ -8,66 +8,69 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef YV12_CONFIG_H
#define YV12_CONFIG_H
+
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
+#include "vpx/vpx_integer.h"
+
#define VP8BORDERINPIXELS 32
-#define VP9BORDERINPIXELS 64
+#define VP9INNERBORDERINPIXELS 96
+#define VP9BORDERINPIXELS 160
#define VP9_INTERP_EXTEND 4
- /*************************************
- For INT_YUV:
-
- Y = (R+G*2+B)/4;
- U = (R-B)/2;
- V = (G*2 - R - B)/4;
- And
- R = Y+U-V;
- G = Y+V;
- B = Y-U-V;
- ************************************/
- typedef enum
- {
- REG_YUV = 0, /* Regular yuv */
- INT_YUV = 1 /* The type of yuv that can be transferred to and from RGB through an integer transform */
- }
- YUV_TYPE;
-
typedef struct yv12_buffer_config {
int y_width;
int y_height;
+ int y_crop_width;
+ int y_crop_height;
int y_stride;
/* int yinternal_width; */
int uv_width;
int uv_height;
+ int uv_crop_width;
+ int uv_crop_height;
int uv_stride;
/* int uvinternal_width; */
- unsigned char *y_buffer;
- unsigned char *u_buffer;
- unsigned char *v_buffer;
+ int alpha_width;
+ int alpha_height;
+ int alpha_stride;
+
+ uint8_t *y_buffer;
+ uint8_t *u_buffer;
+ uint8_t *v_buffer;
+ uint8_t *alpha_buffer;
- unsigned char *buffer_alloc;
+ uint8_t *buffer_alloc;
+ int buffer_alloc_sz;
int border;
int frame_size;
- YUV_TYPE clrtype;
int corrupted;
int flags;
} YV12_BUFFER_CONFIG;
- int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, int border);
+ int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int border);
+ int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int border);
int vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf);
+ int vp9_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int ss_x, int ss_y,
+ int border);
+ int vp9_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int ss_x, int ss_y,
+ int border);
+ int vp9_free_frame_buffer(YV12_BUFFER_CONFIG *ybf);
+
#ifdef __cplusplus
}
#endif
-
-#endif /*YV12_CONFIG_H*/
+#endif // YV12_CONFIG_H
diff --git a/libvpx/vpxdec.c b/libvpx/vpxdec.c
index 9b728bf..513d7bd 100644
--- a/libvpx/vpxdec.c
+++ b/libvpx/vpxdec.c
@@ -12,6 +12,7 @@
/* This is a simple program that reads ivf files and decodes them
* using the new interface. Decoded frames are output as YV12 raw.
*/
+#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
@@ -22,7 +23,7 @@
#include "vpx_config.h"
#include "vpx/vpx_decoder.h"
#include "vpx_ports/vpx_timer.h"
-#if CONFIG_VP8_DECODER
+#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER
#include "vpx/vp8dx.h"
#endif
#if CONFIG_MD5
@@ -30,6 +31,7 @@
#endif
#include "tools_common.h"
#include "nestegg/include/nestegg/nestegg.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
#if CONFIG_OS_SUPPORT
#if defined(_MSC_VER)
@@ -49,175 +51,175 @@
static const char *exec_name;
#define VP8_FOURCC (0x00385056)
-static const struct
-{
- char const *name;
- vpx_codec_iface_t *iface;
- unsigned int fourcc;
- unsigned int fourcc_mask;
-} ifaces[] =
-{
+#define VP9_FOURCC (0x00395056)
+static const struct {
+ char const *name;
+ const vpx_codec_iface_t *(*iface)(void);
+ unsigned int fourcc;
+ unsigned int fourcc_mask;
+} ifaces[] = {
#if CONFIG_VP8_DECODER
- {"vp8", &vpx_codec_vp8_dx_algo, VP8_FOURCC, 0x00FFFFFF},
+ {"vp8", vpx_codec_vp8_dx, VP8_FOURCC, 0x00FFFFFF},
+#endif
+#if CONFIG_VP9_DECODER
+ {"vp9", vpx_codec_vp9_dx, VP9_FOURCC, 0x00FFFFFF},
#endif
};
#include "args.h"
+static const arg_def_t looparg = ARG_DEF(NULL, "loops", 1,
+ "Number of times to decode the file");
static const arg_def_t codecarg = ARG_DEF(NULL, "codec", 1,
- "Codec to use");
+ "Codec to use");
static const arg_def_t use_yv12 = ARG_DEF(NULL, "yv12", 0,
- "Output raw YV12 frames");
+ "Output raw YV12 frames");
static const arg_def_t use_i420 = ARG_DEF(NULL, "i420", 0,
- "Output raw I420 frames");
+ "Output raw I420 frames");
static const arg_def_t flipuvarg = ARG_DEF(NULL, "flipuv", 0,
- "Flip the chroma planes in the output");
+ "Flip the chroma planes in the output");
static const arg_def_t noblitarg = ARG_DEF(NULL, "noblit", 0,
- "Don't process the decoded frames");
+ "Don't process the decoded frames");
static const arg_def_t progressarg = ARG_DEF(NULL, "progress", 0,
- "Show progress after each frame decodes");
+ "Show progress after each frame decodes");
static const arg_def_t limitarg = ARG_DEF(NULL, "limit", 1,
- "Stop decoding after n frames");
+ "Stop decoding after n frames");
+static const arg_def_t skiparg = ARG_DEF(NULL, "skip", 1,
+ "Skip the first n input frames");
static const arg_def_t postprocarg = ARG_DEF(NULL, "postproc", 0,
- "Postprocess decoded frames");
+ "Postprocess decoded frames");
static const arg_def_t summaryarg = ARG_DEF(NULL, "summary", 0,
- "Show timing summary");
+ "Show timing summary");
static const arg_def_t outputfile = ARG_DEF("o", "output", 1,
- "Output file name pattern (see below)");
+ "Output file name pattern (see below)");
static const arg_def_t threadsarg = ARG_DEF("t", "threads", 1,
- "Max threads to use");
+ "Max threads to use");
static const arg_def_t verbosearg = ARG_DEF("v", "verbose", 0,
- "Show version string");
+ "Show version string");
static const arg_def_t error_concealment = ARG_DEF(NULL, "error-concealment", 0,
- "Enable decoder error-concealment");
+ "Enable decoder error-concealment");
+static const arg_def_t scalearg = ARG_DEF("S", "scale", 0,
+ "Scale output frames uniformly");
#if CONFIG_MD5
static const arg_def_t md5arg = ARG_DEF(NULL, "md5", 0,
"Compute the MD5 sum of the decoded frame");
#endif
-static const arg_def_t *all_args[] =
-{
- &codecarg, &use_yv12, &use_i420, &flipuvarg, &noblitarg,
- &progressarg, &limitarg, &postprocarg, &summaryarg, &outputfile,
- &threadsarg, &verbosearg,
+static const arg_def_t *all_args[] = {
+ &codecarg, &use_yv12, &use_i420, &flipuvarg, &noblitarg,
+ &progressarg, &limitarg, &skiparg, &postprocarg, &summaryarg, &outputfile,
+ &threadsarg, &verbosearg, &scalearg,
#if CONFIG_MD5
- &md5arg,
+ &md5arg,
#endif
- &error_concealment,
- NULL
+ &error_concealment,
+ NULL
};
#if CONFIG_VP8_DECODER
static const arg_def_t addnoise_level = ARG_DEF(NULL, "noise-level", 1,
- "Enable VP8 postproc add noise");
+ "Enable VP8 postproc add noise");
static const arg_def_t deblock = ARG_DEF(NULL, "deblock", 0,
- "Enable VP8 deblocking");
+ "Enable VP8 deblocking");
static const arg_def_t demacroblock_level = ARG_DEF(NULL, "demacroblock-level", 1,
- "Enable VP8 demacroblocking, w/ level");
+ "Enable VP8 demacroblocking, w/ level");
static const arg_def_t pp_debug_info = ARG_DEF(NULL, "pp-debug-info", 1,
- "Enable VP8 visible debug info");
+ "Enable VP8 visible debug info");
static const arg_def_t pp_disp_ref_frame = ARG_DEF(NULL, "pp-dbg-ref-frame", 1,
- "Display only selected reference frame per macro block");
+ "Display only selected reference frame per macro block");
static const arg_def_t pp_disp_mb_modes = ARG_DEF(NULL, "pp-dbg-mb-modes", 1,
- "Display only selected macro block modes");
+ "Display only selected macro block modes");
static const arg_def_t pp_disp_b_modes = ARG_DEF(NULL, "pp-dbg-b-modes", 1,
- "Display only selected block modes");
+ "Display only selected block modes");
static const arg_def_t pp_disp_mvs = ARG_DEF(NULL, "pp-dbg-mvs", 1,
- "Draw only selected motion vectors");
+ "Draw only selected motion vectors");
static const arg_def_t mfqe = ARG_DEF(NULL, "mfqe", 0,
- "Enable multiframe quality enhancement");
+ "Enable multiframe quality enhancement");
-static const arg_def_t *vp8_pp_args[] =
-{
- &addnoise_level, &deblock, &demacroblock_level, &pp_debug_info,
- &pp_disp_ref_frame, &pp_disp_mb_modes, &pp_disp_b_modes, &pp_disp_mvs, &mfqe,
- NULL
+static const arg_def_t *vp8_pp_args[] = {
+ &addnoise_level, &deblock, &demacroblock_level, &pp_debug_info,
+ &pp_disp_ref_frame, &pp_disp_mb_modes, &pp_disp_b_modes, &pp_disp_mvs, &mfqe,
+ NULL
};
#endif
-static void usage_exit()
-{
- int i;
+static void usage_exit() {
+ int i;
- fprintf(stderr, "Usage: %s <options> filename\n\n"
- "Options:\n", exec_name);
- arg_show_usage(stderr, all_args);
+ fprintf(stderr, "Usage: %s <options> filename\n\n"
+ "Options:\n", exec_name);
+ arg_show_usage(stderr, all_args);
#if CONFIG_VP8_DECODER
- fprintf(stderr, "\nVP8 Postprocessing Options:\n");
- arg_show_usage(stderr, vp8_pp_args);
+ fprintf(stderr, "\nVP8 Postprocessing Options:\n");
+ arg_show_usage(stderr, vp8_pp_args);
#endif
- fprintf(stderr,
- "\nOutput File Patterns:\n\n"
- " The -o argument specifies the name of the file(s) to "
- "write to. If the\n argument does not include any escape "
- "characters, the output will be\n written to a single file. "
- "Otherwise, the filename will be calculated by\n expanding "
- "the following escape characters:\n");
- fprintf(stderr,
- "\n\t%%w - Frame width"
- "\n\t%%h - Frame height"
- "\n\t%%<n> - Frame number, zero padded to <n> places (1..9)"
- "\n\n Pattern arguments are only supported in conjunction "
- "with the --yv12 and\n --i420 options. If the -o option is "
- "not specified, the output will be\n directed to stdout.\n"
- );
- fprintf(stderr, "\nIncluded decoders:\n\n");
-
- for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
- fprintf(stderr, " %-6s - %s\n",
- ifaces[i].name,
- vpx_codec_iface_name(ifaces[i].iface));
-
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "\nOutput File Patterns:\n\n"
+ " The -o argument specifies the name of the file(s) to "
+ "write to. If the\n argument does not include any escape "
+ "characters, the output will be\n written to a single file. "
+ "Otherwise, the filename will be calculated by\n expanding "
+ "the following escape characters:\n");
+ fprintf(stderr,
+ "\n\t%%w - Frame width"
+ "\n\t%%h - Frame height"
+ "\n\t%%<n> - Frame number, zero padded to <n> places (1..9)"
+ "\n\n Pattern arguments are only supported in conjunction "
+ "with the --yv12 and\n --i420 options. If the -o option is "
+ "not specified, the output will be\n directed to stdout.\n"
+ );
+ fprintf(stderr, "\nIncluded decoders:\n\n");
+
+ for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
+ fprintf(stderr, " %-6s - %s\n",
+ ifaces[i].name,
+ vpx_codec_iface_name(ifaces[i].iface()));
+
+ exit(EXIT_FAILURE);
}
-void die(const char *fmt, ...)
-{
- va_list ap;
- va_start(ap, fmt);
- vfprintf(stderr, fmt, ap);
- fprintf(stderr, "\n");
- usage_exit();
+void die(const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ usage_exit();
}
-static unsigned int mem_get_le16(const void *vmem)
-{
- unsigned int val;
- const unsigned char *mem = (const unsigned char *)vmem;
+static unsigned int mem_get_le16(const void *vmem) {
+ unsigned int val;
+ const unsigned char *mem = (const unsigned char *)vmem;
- val = mem[1] << 8;
- val |= mem[0];
- return val;
+ val = mem[1] << 8;
+ val |= mem[0];
+ return val;
}
-static unsigned int mem_get_le32(const void *vmem)
-{
- unsigned int val;
- const unsigned char *mem = (const unsigned char *)vmem;
+static unsigned int mem_get_le32(const void *vmem) {
+ unsigned int val;
+ const unsigned char *mem = (const unsigned char *)vmem;
- val = mem[3] << 24;
- val |= mem[2] << 16;
- val |= mem[1] << 8;
- val |= mem[0];
- return val;
+ val = mem[3] << 24;
+ val |= mem[2] << 16;
+ val |= mem[1] << 8;
+ val |= mem[0];
+ return val;
}
-enum file_kind
-{
- RAW_FILE,
- IVF_FILE,
- WEBM_FILE
+enum file_kind {
+ RAW_FILE,
+ IVF_FILE,
+ WEBM_FILE
};
-struct input_ctx
-{
- enum file_kind kind;
- FILE *infile;
- nestegg *nestegg_ctx;
- nestegg_packet *pkt;
- unsigned int chunk;
- unsigned int chunks;
- unsigned int video_track;
+struct input_ctx {
+ enum file_kind kind;
+ FILE *infile;
+ nestegg *nestegg_ctx;
+ nestegg_packet *pkt;
+ unsigned int chunk;
+ unsigned int chunks;
+ unsigned int video_track;
};
#define IVF_FRAME_HDR_SZ (sizeof(uint32_t) + sizeof(uint64_t))
@@ -225,163 +227,136 @@ struct input_ctx
static int read_frame(struct input_ctx *input,
uint8_t **buf,
size_t *buf_sz,
- size_t *buf_alloc_sz)
-{
- char raw_hdr[IVF_FRAME_HDR_SZ];
- size_t new_buf_sz;
- FILE *infile = input->infile;
- enum file_kind kind = input->kind;
- if(kind == WEBM_FILE)
- {
- if(input->chunk >= input->chunks)
- {
- unsigned int track;
-
- do
- {
- /* End of this packet, get another. */
- if(input->pkt)
- nestegg_free_packet(input->pkt);
-
- if(nestegg_read_packet(input->nestegg_ctx, &input->pkt) <= 0
- || nestegg_packet_track(input->pkt, &track))
- return 1;
-
- } while(track != input->video_track);
-
- if(nestegg_packet_count(input->pkt, &input->chunks))
- return 1;
- input->chunk = 0;
- }
+ size_t *buf_alloc_sz) {
+ char raw_hdr[IVF_FRAME_HDR_SZ];
+ size_t new_buf_sz;
+ FILE *infile = input->infile;
+ enum file_kind kind = input->kind;
+ if (kind == WEBM_FILE) {
+ if (input->chunk >= input->chunks) {
+ unsigned int track;
+
+ do {
+ /* End of this packet, get another. */
+ if (input->pkt)
+ nestegg_free_packet(input->pkt);
+
+ if (nestegg_read_packet(input->nestegg_ctx, &input->pkt) <= 0
+ || nestegg_packet_track(input->pkt, &track))
+ return 1;
+
+ } while (track != input->video_track);
+
+ if (nestegg_packet_count(input->pkt, &input->chunks))
+ return 1;
+ input->chunk = 0;
+ }
- if(nestegg_packet_data(input->pkt, input->chunk, buf, buf_sz))
- return 1;
- input->chunk++;
+ if (nestegg_packet_data(input->pkt, input->chunk, buf, buf_sz))
+ return 1;
+ input->chunk++;
- return 0;
- }
- /* For both the raw and ivf formats, the frame size is the first 4 bytes
- * of the frame header. We just need to special case on the header
- * size.
- */
- else if (fread(raw_hdr, kind==IVF_FILE
- ? IVF_FRAME_HDR_SZ : RAW_FRAME_HDR_SZ, 1, infile) != 1)
- {
- if (!feof(infile))
- fprintf(stderr, "Failed to read frame size\n");
+ return 0;
+ }
+ /* For both the raw and ivf formats, the frame size is the first 4 bytes
+ * of the frame header. We just need to special case on the header
+ * size.
+ */
+ else if (fread(raw_hdr, kind == IVF_FILE
+ ? IVF_FRAME_HDR_SZ : RAW_FRAME_HDR_SZ, 1, infile) != 1) {
+ if (!feof(infile))
+ fprintf(stderr, "Failed to read frame size\n");
- new_buf_sz = 0;
- }
- else
- {
- new_buf_sz = mem_get_le32(raw_hdr);
-
- if (new_buf_sz > 256 * 1024 * 1024)
- {
- fprintf(stderr, "Error: Read invalid frame size (%u)\n",
- (unsigned int)new_buf_sz);
- new_buf_sz = 0;
- }
+ new_buf_sz = 0;
+ } else {
+ new_buf_sz = mem_get_le32(raw_hdr);
- if (kind == RAW_FILE && new_buf_sz > 256 * 1024)
- fprintf(stderr, "Warning: Read invalid frame size (%u)"
- " - not a raw file?\n", (unsigned int)new_buf_sz);
-
- if (new_buf_sz > *buf_alloc_sz)
- {
- uint8_t *new_buf = realloc(*buf, 2 * new_buf_sz);
-
- if (new_buf)
- {
- *buf = new_buf;
- *buf_alloc_sz = 2 * new_buf_sz;
- }
- else
- {
- fprintf(stderr, "Failed to allocate compressed data buffer\n");
- new_buf_sz = 0;
- }
- }
+ if (new_buf_sz > 256 * 1024 * 1024) {
+ fprintf(stderr, "Error: Read invalid frame size (%u)\n",
+ (unsigned int)new_buf_sz);
+ new_buf_sz = 0;
}
- *buf_sz = new_buf_sz;
+ if (kind == RAW_FILE && new_buf_sz > 256 * 1024)
+ fprintf(stderr, "Warning: Read invalid frame size (%u)"
+ " - not a raw file?\n", (unsigned int)new_buf_sz);
- if (!feof(infile))
- {
- if (fread(*buf, 1, *buf_sz, infile) != *buf_sz)
- {
- fprintf(stderr, "Failed to read full frame\n");
- return 1;
- }
+ if (new_buf_sz > *buf_alloc_sz) {
+ uint8_t *new_buf = realloc(*buf, 2 * new_buf_sz);
- return 0;
+ if (new_buf) {
+ *buf = new_buf;
+ *buf_alloc_sz = 2 * new_buf_sz;
+ } else {
+ fprintf(stderr, "Failed to allocate compressed data buffer\n");
+ new_buf_sz = 0;
+ }
}
+ }
- return 1;
+ *buf_sz = new_buf_sz;
+
+ if (!feof(infile)) {
+ if (fread(*buf, 1, *buf_sz, infile) != *buf_sz) {
+ fprintf(stderr, "Failed to read full frame\n");
+ return 1;
+ }
+
+ return 0;
+ }
+
+ return 1;
}
-void *out_open(const char *out_fn, int do_md5)
-{
- void *out = NULL;
+void *out_open(const char *out_fn, int do_md5) {
+ void *out = NULL;
- if (do_md5)
- {
+ if (do_md5) {
#if CONFIG_MD5
- MD5Context *md5_ctx = out = malloc(sizeof(MD5Context));
- (void)out_fn;
- MD5Init(md5_ctx);
+ MD5Context *md5_ctx = out = malloc(sizeof(MD5Context));
+ (void)out_fn;
+ MD5Init(md5_ctx);
#endif
+ } else {
+ FILE *outfile = out = strcmp("-", out_fn) ? fopen(out_fn, "wb")
+ : set_binary_mode(stdout);
+
+ if (!outfile) {
+ fprintf(stderr, "Failed to output file");
+ exit(EXIT_FAILURE);
}
- else
- {
- FILE *outfile = out = strcmp("-", out_fn) ? fopen(out_fn, "wb")
- : set_binary_mode(stdout);
-
- if (!outfile)
- {
- fprintf(stderr, "Failed to output file");
- exit(EXIT_FAILURE);
- }
- }
+ }
- return out;
+ return out;
}
-void out_put(void *out, const uint8_t *buf, unsigned int len, int do_md5)
-{
- if (do_md5)
- {
+void out_put(void *out, const uint8_t *buf, unsigned int len, int do_md5) {
+ if (do_md5) {
#if CONFIG_MD5
- MD5Update(out, buf, len);
+ MD5Update(out, buf, len);
#endif
- }
- else
- {
- (void) fwrite(buf, 1, len, out);
- }
+ } else {
+ (void) fwrite(buf, 1, len, out);
+ }
}
-void out_close(void *out, const char *out_fn, int do_md5)
-{
- if (do_md5)
- {
+void out_close(void *out, const char *out_fn, int do_md5) {
+ if (do_md5) {
#if CONFIG_MD5
- uint8_t md5[16];
- int i;
+ uint8_t md5[16];
+ int i;
- MD5Final(md5, out);
- free(out);
+ MD5Final(md5, out);
+ free(out);
- for (i = 0; i < 16; i++)
- printf("%02x", md5[i]);
+ for (i = 0; i < 16; i++)
+ printf("%02x", md5[i]);
- printf(" %s\n", out_fn);
+ printf(" %s\n", out_fn);
#endif
- }
- else
- {
- fclose(out);
- }
+ } else {
+ fclose(out);
+ }
}
unsigned int file_is_ivf(FILE *infile,
@@ -389,56 +364,50 @@ unsigned int file_is_ivf(FILE *infile,
unsigned int *width,
unsigned int *height,
unsigned int *fps_den,
- unsigned int *fps_num)
-{
- char raw_hdr[32];
- int is_ivf = 0;
-
- if (fread(raw_hdr, 1, 32, infile) == 32)
- {
- if (raw_hdr[0] == 'D' && raw_hdr[1] == 'K'
- && raw_hdr[2] == 'I' && raw_hdr[3] == 'F')
- {
- is_ivf = 1;
-
- if (mem_get_le16(raw_hdr + 4) != 0)
- fprintf(stderr, "Error: Unrecognized IVF version! This file may not"
- " decode properly.");
-
- *fourcc = mem_get_le32(raw_hdr + 8);
- *width = mem_get_le16(raw_hdr + 12);
- *height = mem_get_le16(raw_hdr + 14);
- *fps_num = mem_get_le32(raw_hdr + 16);
- *fps_den = mem_get_le32(raw_hdr + 20);
-
- /* Some versions of vpxenc used 1/(2*fps) for the timebase, so
- * we can guess the framerate using only the timebase in this
- * case. Other files would require reading ahead to guess the
- * timebase, like we do for webm.
- */
- if(*fps_num < 1000)
- {
- /* Correct for the factor of 2 applied to the timebase in the
- * encoder.
- */
- if(*fps_num&1)*fps_den<<=1;
- else *fps_num>>=1;
- }
- else
- {
- /* Don't know FPS for sure, and don't have readahead code
- * (yet?), so just default to 30fps.
- */
- *fps_num = 30;
- *fps_den = 1;
- }
- }
+ unsigned int *fps_num) {
+ char raw_hdr[32];
+ int is_ivf = 0;
+
+ if (fread(raw_hdr, 1, 32, infile) == 32) {
+ if (raw_hdr[0] == 'D' && raw_hdr[1] == 'K'
+ && raw_hdr[2] == 'I' && raw_hdr[3] == 'F') {
+ is_ivf = 1;
+
+ if (mem_get_le16(raw_hdr + 4) != 0)
+ fprintf(stderr, "Error: Unrecognized IVF version! This file may not"
+ " decode properly.");
+
+ *fourcc = mem_get_le32(raw_hdr + 8);
+ *width = mem_get_le16(raw_hdr + 12);
+ *height = mem_get_le16(raw_hdr + 14);
+ *fps_num = mem_get_le32(raw_hdr + 16);
+ *fps_den = mem_get_le32(raw_hdr + 20);
+
+ /* Some versions of vpxenc used 1/(2*fps) for the timebase, so
+ * we can guess the framerate using only the timebase in this
+ * case. Other files would require reading ahead to guess the
+ * timebase, like we do for webm.
+ */
+ if (*fps_num < 1000) {
+ /* Correct for the factor of 2 applied to the timebase in the
+ * encoder.
+ */
+ if (*fps_num & 1)*fps_den <<= 1;
+ else *fps_num >>= 1;
+ } else {
+ /* Don't know FPS for sure, and don't have readahead code
+ * (yet?), so just default to 30fps.
+ */
+ *fps_num = 30;
+ *fps_den = 1;
+ }
}
+ }
- if (!is_ivf)
- rewind(infile);
+ if (!is_ivf)
+ rewind(infile);
- return is_ivf;
+ return is_ivf;
}
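
To make the timebase correction concrete: an old vpxenc writing a 1/(2*fps) timebase for a 30 fps file stores rate/scale = 60/1 in the IVF header; 60 is below 1000 and even, so the rate is halved to 30/1. An odd rate such as 59/1 instead doubles the denominator, yielding 59/2 = 29.5 fps.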
@@ -447,126 +416,121 @@ unsigned int file_is_raw(FILE *infile,
unsigned int *width,
unsigned int *height,
unsigned int *fps_den,
- unsigned int *fps_num)
-{
- unsigned char buf[32];
- int is_raw = 0;
- vpx_codec_stream_info_t si;
-
- si.sz = sizeof(si);
-
- if (fread(buf, 1, 32, infile) == 32)
- {
- int i;
-
- if(mem_get_le32(buf) < 256 * 1024 * 1024)
- for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
- if(!vpx_codec_peek_stream_info(ifaces[i].iface,
- buf + 4, 32 - 4, &si))
- {
- is_raw = 1;
- *fourcc = ifaces[i].fourcc;
- *width = si.w;
- *height = si.h;
- *fps_num = 30;
- *fps_den = 1;
- break;
- }
- }
+ unsigned int *fps_num) {
+ unsigned char buf[32];
+ int is_raw = 0;
+ vpx_codec_stream_info_t si;
- rewind(infile);
- return is_raw;
+ si.sz = sizeof(si);
+
+ if (fread(buf, 1, 32, infile) == 32) {
+ int i;
+
+ if (mem_get_le32(buf) < 256 * 1024 * 1024)
+ for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
+ if (!vpx_codec_peek_stream_info(ifaces[i].iface(),
+ buf + 4, 32 - 4, &si)) {
+ is_raw = 1;
+ *fourcc = ifaces[i].fourcc;
+ *width = si.w;
+ *height = si.h;
+ *fps_num = 30;
+ *fps_den = 1;
+ break;
+ }
+ }
+
+ rewind(infile);
+ return is_raw;
}
static int
-nestegg_read_cb(void *buffer, size_t length, void *userdata)
-{
- FILE *f = userdata;
-
- if(fread(buffer, 1, length, f) < length)
- {
- if (ferror(f))
- return -1;
- if (feof(f))
- return 0;
- }
- return 1;
+nestegg_read_cb(void *buffer, size_t length, void *userdata) {
+ FILE *f = userdata;
+
+ if (fread(buffer, 1, length, f) < length) {
+ if (ferror(f))
+ return -1;
+ if (feof(f))
+ return 0;
+ }
+ return 1;
}
static int
-nestegg_seek_cb(int64_t offset, int whence, void * userdata)
-{
- switch(whence) {
- case NESTEGG_SEEK_SET: whence = SEEK_SET; break;
- case NESTEGG_SEEK_CUR: whence = SEEK_CUR; break;
- case NESTEGG_SEEK_END: whence = SEEK_END; break;
- };
- return fseek(userdata, (long)offset, whence)? -1 : 0;
+nestegg_seek_cb(int64_t offset, int whence, void *userdata) {
+ switch (whence) {
+ case NESTEGG_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ case NESTEGG_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case NESTEGG_SEEK_END:
+ whence = SEEK_END;
+ break;
+ };
+ return fseek(userdata, (long)offset, whence) ? -1 : 0;
}
static int64_t
-nestegg_tell_cb(void * userdata)
-{
- return ftell(userdata);
+nestegg_tell_cb(void *userdata) {
+ return ftell(userdata);
}
static void
-nestegg_log_cb(nestegg * context, unsigned int severity, char const * format,
- ...)
-{
- va_list ap;
-
- va_start(ap, format);
- vfprintf(stderr, format, ap);
- fprintf(stderr, "\n");
- va_end(ap);
+nestegg_log_cb(nestegg *context, unsigned int severity, char const *format,
+ ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(stderr, format, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
}
static int
webm_guess_framerate(struct input_ctx *input,
unsigned int *fps_den,
- unsigned int *fps_num)
-{
- unsigned int i;
- uint64_t tstamp=0;
-
- /* Guess the framerate. Read up to 1 second, or 50 video packets,
- * whichever comes first.
- */
- for(i=0; tstamp < 1000000000 && i < 50;)
- {
- nestegg_packet * pkt;
- unsigned int track;
-
- if(nestegg_read_packet(input->nestegg_ctx, &pkt) <= 0)
- break;
-
- nestegg_packet_track(pkt, &track);
- if(track == input->video_track)
- {
- nestegg_packet_tstamp(pkt, &tstamp);
- i++;
- }
+ unsigned int *fps_num) {
+ unsigned int i;
+ uint64_t tstamp = 0;
+
+ /* Guess the framerate. Read up to 1 second, or 50 video packets,
+ * whichever comes first.
+ */
+ for (i = 0; tstamp < 1000000000 && i < 50;) {
+ nestegg_packet *pkt;
+ unsigned int track;
+
+ if (nestegg_read_packet(input->nestegg_ctx, &pkt) <= 0)
+ break;
- nestegg_free_packet(pkt);
+ nestegg_packet_track(pkt, &track);
+ if (track == input->video_track) {
+ nestegg_packet_tstamp(pkt, &tstamp);
+ i++;
}
- if(nestegg_track_seek(input->nestegg_ctx, input->video_track, 0))
- goto fail;
+ nestegg_free_packet(pkt);
+ }
- *fps_num = (i - 1) * 1000000;
- *fps_den = (unsigned int)(tstamp / 1000);
- return 0;
+ if (nestegg_track_seek(input->nestegg_ctx, input->video_track, 0))
+ goto fail;
+
+ *fps_num = (i - 1) * 1000000;
+ *fps_den = (unsigned int)(tstamp / 1000);
+ return 0;
fail:
- nestegg_destroy(input->nestegg_ctx);
- input->nestegg_ctx = NULL;
- rewind(input->infile);
- return 1;
+ nestegg_destroy(input->nestegg_ctx);
+ input->nestegg_ctx = NULL;
+ rewind(input->infile);
+ return 1;
}
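
As a sanity check of the estimate: a 30 fps stream crosses the 1-second cutoff after 31 video packets, giving fps_num = 30 * 1000000 over fps_den = 1000000 us, i.e. exactly 30 fps; a 60 fps stream hits the 50-packet cap first, giving 49000000 / 816666, which is 60.00 fps to two places.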
@@ -576,586 +540,641 @@ file_is_webm(struct input_ctx *input,
unsigned int *width,
unsigned int *height,
unsigned int *fps_den,
- unsigned int *fps_num)
-{
- unsigned int i, n;
- int track_type = -1;
+ unsigned int *fps_num) {
+ unsigned int i, n;
+ int track_type = -1;
+ int codec_id;
- nestegg_io io = {nestegg_read_cb, nestegg_seek_cb, nestegg_tell_cb, 0};
- nestegg_video_params params;
+ nestegg_io io = {nestegg_read_cb, nestegg_seek_cb, nestegg_tell_cb, 0};
+ nestegg_video_params params;
- io.userdata = input->infile;
- if(nestegg_init(&input->nestegg_ctx, io, NULL))
- goto fail;
+ io.userdata = input->infile;
+ if (nestegg_init(&input->nestegg_ctx, io, NULL))
+ goto fail;
- if(nestegg_track_count(input->nestegg_ctx, &n))
- goto fail;
+ if (nestegg_track_count(input->nestegg_ctx, &n))
+ goto fail;
- for(i=0; i<n; i++)
- {
- track_type = nestegg_track_type(input->nestegg_ctx, i);
+ for (i = 0; i < n; i++) {
+ track_type = nestegg_track_type(input->nestegg_ctx, i);
- if(track_type == NESTEGG_TRACK_VIDEO)
- break;
- else if(track_type < 0)
- goto fail;
- }
-
- if(nestegg_track_codec_id(input->nestegg_ctx, i) != NESTEGG_CODEC_VP8)
- {
- fprintf(stderr, "Not VP8 video, quitting.\n");
- exit(1);
- }
+ if (track_type == NESTEGG_TRACK_VIDEO)
+ break;
+ else if (track_type < 0)
+ goto fail;
+ }
- input->video_track = i;
-
- if(nestegg_track_video_params(input->nestegg_ctx, i, &params))
- goto fail;
-
- *fps_den = 0;
- *fps_num = 0;
+ codec_id = nestegg_track_codec_id(input->nestegg_ctx, i);
+ if (codec_id == NESTEGG_CODEC_VP8) {
*fourcc = VP8_FOURCC;
- *width = params.width;
- *height = params.height;
- return 1;
+ } else if (codec_id == NESTEGG_CODEC_VP9) {
+ *fourcc = VP9_FOURCC;
+ } else {
+ fprintf(stderr, "Not VPx video, quitting.\n");
+ exit(1);
+ }
+
+ input->video_track = i;
+
+ if (nestegg_track_video_params(input->nestegg_ctx, i, &params))
+ goto fail;
+
+ *fps_den = 0;
+ *fps_num = 0;
+ *width = params.width;
+ *height = params.height;
+ return 1;
fail:
- input->nestegg_ctx = NULL;
- rewind(input->infile);
- return 0;
+ input->nestegg_ctx = NULL;
+ rewind(input->infile);
+ return 0;
}
-void show_progress(int frame_in, int frame_out, unsigned long dx_time)
-{
- fprintf(stderr, "%d decoded frames/%d showed frames in %lu us (%.2f fps)\r",
- frame_in, frame_out, dx_time,
- (float)frame_out * 1000000.0 / (float)dx_time);
+void show_progress(int frame_in, int frame_out, unsigned long dx_time) {
+ fprintf(stderr, "%d decoded frames/%d showed frames in %lu us (%.2f fps)\r",
+ frame_in, frame_out, dx_time,
+ (float)frame_out * 1000000.0 / (float)dx_time);
}
void generate_filename(const char *pattern, char *out, size_t q_len,
unsigned int d_w, unsigned int d_h,
- unsigned int frame_in)
-{
- const char *p = pattern;
- char *q = out;
-
- do
- {
- char *next_pat = strchr(p, '%');
-
- if(p == next_pat)
- {
- size_t pat_len;
-
- /* parse the pattern */
- q[q_len - 1] = '\0';
- switch(p[1])
- {
- case 'w': snprintf(q, q_len - 1, "%d", d_w); break;
- case 'h': snprintf(q, q_len - 1, "%d", d_h); break;
- case '1': snprintf(q, q_len - 1, "%d", frame_in); break;
- case '2': snprintf(q, q_len - 1, "%02d", frame_in); break;
- case '3': snprintf(q, q_len - 1, "%03d", frame_in); break;
- case '4': snprintf(q, q_len - 1, "%04d", frame_in); break;
- case '5': snprintf(q, q_len - 1, "%05d", frame_in); break;
- case '6': snprintf(q, q_len - 1, "%06d", frame_in); break;
- case '7': snprintf(q, q_len - 1, "%07d", frame_in); break;
- case '8': snprintf(q, q_len - 1, "%08d", frame_in); break;
- case '9': snprintf(q, q_len - 1, "%09d", frame_in); break;
- default:
- die("Unrecognized pattern %%%c\n", p[1]);
- }
-
- pat_len = strlen(q);
- if(pat_len >= q_len - 1)
- die("Output filename too long.\n");
- q += pat_len;
- p += 2;
- q_len -= pat_len;
- }
- else
- {
- size_t copy_len;
-
- /* copy the next segment */
- if(!next_pat)
- copy_len = strlen(p);
- else
- copy_len = next_pat - p;
-
- if(copy_len >= q_len - 1)
- die("Output filename too long.\n");
-
- memcpy(q, p, copy_len);
- q[copy_len] = '\0';
- q += copy_len;
- p += copy_len;
- q_len -= copy_len;
- }
- } while(*p);
+ unsigned int frame_in) {
+ const char *p = pattern;
+ char *q = out;
+
+ do {
+ char *next_pat = strchr(p, '%');
+
+ if (p == next_pat) {
+ size_t pat_len;
+
+ /* parse the pattern */
+ q[q_len - 1] = '\0';
+ switch (p[1]) {
+ case 'w':
+ snprintf(q, q_len - 1, "%d", d_w);
+ break;
+ case 'h':
+ snprintf(q, q_len - 1, "%d", d_h);
+ break;
+ case '1':
+ snprintf(q, q_len - 1, "%d", frame_in);
+ break;
+ case '2':
+ snprintf(q, q_len - 1, "%02d", frame_in);
+ break;
+ case '3':
+ snprintf(q, q_len - 1, "%03d", frame_in);
+ break;
+ case '4':
+ snprintf(q, q_len - 1, "%04d", frame_in);
+ break;
+ case '5':
+ snprintf(q, q_len - 1, "%05d", frame_in);
+ break;
+ case '6':
+ snprintf(q, q_len - 1, "%06d", frame_in);
+ break;
+ case '7':
+ snprintf(q, q_len - 1, "%07d", frame_in);
+ break;
+ case '8':
+ snprintf(q, q_len - 1, "%08d", frame_in);
+ break;
+ case '9':
+ snprintf(q, q_len - 1, "%09d", frame_in);
+ break;
+ default:
+ die("Unrecognized pattern %%%c\n", p[1]);
+ }
+
+ pat_len = strlen(q);
+ if (pat_len >= q_len - 1)
+ die("Output filename too long.\n");
+ q += pat_len;
+ p += 2;
+ q_len -= pat_len;
+ } else {
+ size_t copy_len;
+
+ /* copy the next segment */
+ if (!next_pat)
+ copy_len = strlen(p);
+ else
+ copy_len = next_pat - p;
+
+ if (copy_len >= q_len - 1)
+ die("Output filename too long.\n");
+
+ memcpy(q, p, copy_len);
+ q[copy_len] = '\0';
+ q += copy_len;
+ p += copy_len;
+ q_len -= copy_len;
+ }
+ } while (*p);
}
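
For reference, the pattern "frame-%4.i420" with frame_in = 7 expands to "frame-0007.i420", and "out_%wx%h-%3.yuv" for a 320x240 stream expands to "out_320x240-007.yuv".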
-int main(int argc, const char **argv_)
-{
- vpx_codec_ctx_t decoder;
- char *fn = NULL;
- int i;
- uint8_t *buf = NULL;
- size_t buf_sz = 0, buf_alloc_sz = 0;
- FILE *infile;
- int frame_in = 0, frame_out = 0, flipuv = 0, noblit = 0, do_md5 = 0, progress = 0;
- int stop_after = 0, postproc = 0, summary = 0, quiet = 1;
- int ec_enabled = 0;
- vpx_codec_iface_t *iface = NULL;
- unsigned int fourcc;
- unsigned long dx_time = 0;
- struct arg arg;
- char **argv, **argi, **argj;
- const char *outfile_pattern = 0;
- char outfile[PATH_MAX];
- int single_file;
- int use_y4m = 1;
- unsigned int width;
- unsigned int height;
- unsigned int fps_den;
- unsigned int fps_num;
- void *out = NULL;
- vpx_codec_dec_cfg_t cfg = {0};
+int main_loop(int argc, const char **argv_) {
+ vpx_codec_ctx_t decoder;
+ char *fn = NULL;
+ int i;
+ uint8_t *buf = NULL;
+ size_t buf_sz = 0, buf_alloc_sz = 0;
+ FILE *infile;
+ int frame_in = 0, frame_out = 0, flipuv = 0, noblit = 0, do_md5 = 0, progress = 0;
+ int stop_after = 0, postproc = 0, summary = 0, quiet = 1;
+ int arg_skip = 0;
+ int ec_enabled = 0;
+ vpx_codec_iface_t *iface = NULL;
+ unsigned int fourcc;
+ unsigned long dx_time = 0;
+ struct arg arg;
+ char **argv, **argi, **argj;
+ const char *outfile_pattern = 0;
+ char outfile[PATH_MAX];
+ int single_file;
+ int use_y4m = 1;
+ unsigned int width;
+ unsigned int height;
+ unsigned int fps_den;
+ unsigned int fps_num;
+ void *out = NULL;
+ vpx_codec_dec_cfg_t cfg = {0};
#if CONFIG_VP8_DECODER
- vp8_postproc_cfg_t vp8_pp_cfg = {0};
- int vp8_dbg_color_ref_frame = 0;
- int vp8_dbg_color_mb_modes = 0;
- int vp8_dbg_color_b_modes = 0;
- int vp8_dbg_display_mv = 0;
+ vp8_postproc_cfg_t vp8_pp_cfg = {0};
+ int vp8_dbg_color_ref_frame = 0;
+ int vp8_dbg_color_mb_modes = 0;
+ int vp8_dbg_color_b_modes = 0;
+ int vp8_dbg_display_mv = 0;
#endif
- struct input_ctx input = {0};
- int frames_corrupted = 0;
- int dec_flags = 0;
-
- /* Parse command line */
- exec_name = argv_[0];
- argv = argv_dup(argc - 1, argv_ + 1);
-
- for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step)
- {
- memset(&arg, 0, sizeof(arg));
- arg.argv_step = 1;
-
- if (arg_match(&arg, &codecarg, argi))
- {
- int j, k = -1;
-
- for (j = 0; j < sizeof(ifaces) / sizeof(ifaces[0]); j++)
- if (!strcmp(ifaces[j].name, arg.val))
- k = j;
-
- if (k >= 0)
- iface = ifaces[k].iface;
- else
- die("Error: Unrecognized argument (%s) to --codec\n",
- arg.val);
- }
- else if (arg_match(&arg, &outputfile, argi))
- outfile_pattern = arg.val;
- else if (arg_match(&arg, &use_yv12, argi))
- {
- use_y4m = 0;
- flipuv = 1;
- }
- else if (arg_match(&arg, &use_i420, argi))
- {
- use_y4m = 0;
- flipuv = 0;
- }
- else if (arg_match(&arg, &flipuvarg, argi))
- flipuv = 1;
- else if (arg_match(&arg, &noblitarg, argi))
- noblit = 1;
- else if (arg_match(&arg, &progressarg, argi))
- progress = 1;
- else if (arg_match(&arg, &limitarg, argi))
- stop_after = arg_parse_uint(&arg);
- else if (arg_match(&arg, &postprocarg, argi))
- postproc = 1;
- else if (arg_match(&arg, &md5arg, argi))
- do_md5 = 1;
- else if (arg_match(&arg, &summaryarg, argi))
- summary = 1;
- else if (arg_match(&arg, &threadsarg, argi))
- cfg.threads = arg_parse_uint(&arg);
- else if (arg_match(&arg, &verbosearg, argi))
- quiet = 0;
+ struct input_ctx input = {0};
+ int frames_corrupted = 0;
+ int dec_flags = 0;
+ int do_scale = 0;
+ int stream_w = 0, stream_h = 0;
+ vpx_image_t *scaled_img = NULL;
+ int frame_avail, got_data;
+
+ /* Parse command line */
+ exec_name = argv_[0];
+ argv = argv_dup(argc - 1, argv_ + 1);
+
+ for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
+ memset(&arg, 0, sizeof(arg));
+ arg.argv_step = 1;
+
+ if (arg_match(&arg, &codecarg, argi)) {
+ int j, k = -1;
+
+ for (j = 0; j < sizeof(ifaces) / sizeof(ifaces[0]); j++)
+ if (!strcmp(ifaces[j].name, arg.val))
+ k = j;
+
+ if (k >= 0)
+ iface = ifaces[k].iface();
+ else
+ die("Error: Unrecognized argument (%s) to --codec\n",
+ arg.val);
+ } else if (arg_match(&arg, &looparg, argi)) {
+ // no-op
+ } else if (arg_match(&arg, &outputfile, argi))
+ outfile_pattern = arg.val;
+ else if (arg_match(&arg, &use_yv12, argi)) {
+ use_y4m = 0;
+ flipuv = 1;
+ } else if (arg_match(&arg, &use_i420, argi)) {
+ use_y4m = 0;
+ flipuv = 0;
+ } else if (arg_match(&arg, &flipuvarg, argi))
+ flipuv = 1;
+ else if (arg_match(&arg, &noblitarg, argi))
+ noblit = 1;
+ else if (arg_match(&arg, &progressarg, argi))
+ progress = 1;
+ else if (arg_match(&arg, &limitarg, argi))
+ stop_after = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &skiparg, argi))
+ arg_skip = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &postprocarg, argi))
+ postproc = 1;
+ else if (arg_match(&arg, &md5arg, argi))
+ do_md5 = 1;
+ else if (arg_match(&arg, &summaryarg, argi))
+ summary = 1;
+ else if (arg_match(&arg, &threadsarg, argi))
+ cfg.threads = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &verbosearg, argi))
+ quiet = 0;
+ else if (arg_match(&arg, &scalearg, argi))
+ do_scale = 1;
#if CONFIG_VP8_DECODER
- else if (arg_match(&arg, &addnoise_level, argi))
- {
- postproc = 1;
- vp8_pp_cfg.post_proc_flag |= VP8_ADDNOISE;
- vp8_pp_cfg.noise_level = arg_parse_uint(&arg);
- }
- else if (arg_match(&arg, &demacroblock_level, argi))
- {
- postproc = 1;
- vp8_pp_cfg.post_proc_flag |= VP8_DEMACROBLOCK;
- vp8_pp_cfg.deblocking_level = arg_parse_uint(&arg);
- }
- else if (arg_match(&arg, &deblock, argi))
- {
- postproc = 1;
- vp8_pp_cfg.post_proc_flag |= VP8_DEBLOCK;
- }
- else if (arg_match(&arg, &mfqe, argi))
- {
- postproc = 1;
- vp8_pp_cfg.post_proc_flag |= VP8_MFQE;
- }
- else if (arg_match(&arg, &pp_debug_info, argi))
- {
- unsigned int level = arg_parse_uint(&arg);
-
- postproc = 1;
- vp8_pp_cfg.post_proc_flag &= ~0x7;
-
- if (level)
- vp8_pp_cfg.post_proc_flag |= level;
- }
- else if (arg_match(&arg, &pp_disp_ref_frame, argi))
- {
- unsigned int flags = arg_parse_int(&arg);
- if (flags)
- {
- postproc = 1;
- vp8_dbg_color_ref_frame = flags;
- }
- }
- else if (arg_match(&arg, &pp_disp_mb_modes, argi))
- {
- unsigned int flags = arg_parse_int(&arg);
- if (flags)
- {
- postproc = 1;
- vp8_dbg_color_mb_modes = flags;
- }
- }
- else if (arg_match(&arg, &pp_disp_b_modes, argi))
- {
- unsigned int flags = arg_parse_int(&arg);
- if (flags)
- {
- postproc = 1;
- vp8_dbg_color_b_modes = flags;
- }
- }
- else if (arg_match(&arg, &pp_disp_mvs, argi))
- {
- unsigned int flags = arg_parse_int(&arg);
- if (flags)
- {
- postproc = 1;
- vp8_dbg_display_mv = flags;
- }
- }
- else if (arg_match(&arg, &error_concealment, argi))
- {
- ec_enabled = 1;
- }
+ else if (arg_match(&arg, &addnoise_level, argi)) {
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag |= VP8_ADDNOISE;
+ vp8_pp_cfg.noise_level = arg_parse_uint(&arg);
+ } else if (arg_match(&arg, &demacroblock_level, argi)) {
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag |= VP8_DEMACROBLOCK;
+ vp8_pp_cfg.deblocking_level = arg_parse_uint(&arg);
+ } else if (arg_match(&arg, &deblock, argi)) {
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag |= VP8_DEBLOCK;
+ } else if (arg_match(&arg, &mfqe, argi)) {
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag |= VP8_MFQE;
+ } else if (arg_match(&arg, &pp_debug_info, argi)) {
+ unsigned int level = arg_parse_uint(&arg);
+
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag &= ~0x7;
+
+ if (level)
+ vp8_pp_cfg.post_proc_flag |= level;
+ } else if (arg_match(&arg, &pp_disp_ref_frame, argi)) {
+ unsigned int flags = arg_parse_int(&arg);
+ if (flags) {
+ postproc = 1;
+ vp8_dbg_color_ref_frame = flags;
+ }
+ } else if (arg_match(&arg, &pp_disp_mb_modes, argi)) {
+ unsigned int flags = arg_parse_int(&arg);
+ if (flags) {
+ postproc = 1;
+ vp8_dbg_color_mb_modes = flags;
+ }
+ } else if (arg_match(&arg, &pp_disp_b_modes, argi)) {
+ unsigned int flags = arg_parse_int(&arg);
+ if (flags) {
+ postproc = 1;
+ vp8_dbg_color_b_modes = flags;
+ }
+ } else if (arg_match(&arg, &pp_disp_mvs, argi)) {
+ unsigned int flags = arg_parse_int(&arg);
+ if (flags) {
+ postproc = 1;
+ vp8_dbg_display_mv = flags;
+ }
+ } else if (arg_match(&arg, &error_concealment, argi)) {
+ ec_enabled = 1;
+ }
#endif
- else
- argj++;
- }
+ else
+ argj++;
+ }
- /* Check for unrecognized options */
- for (argi = argv; *argi; argi++)
- if (argi[0][0] == '-' && strlen(argi[0]) > 1)
- die("Error: Unrecognized option %s\n", *argi);
+ /* Check for unrecognized options */
+ for (argi = argv; *argi; argi++)
+ if (argi[0][0] == '-' && strlen(argi[0]) > 1)
+ die("Error: Unrecognized option %s\n", *argi);
- /* Handle non-option arguments */
- fn = argv[0];
+ /* Handle non-option arguments */
+ fn = argv[0];
- if (!fn)
- usage_exit();
+ if (!fn)
+ usage_exit();
- /* Open file */
- infile = strcmp(fn, "-") ? fopen(fn, "rb") : set_binary_mode(stdin);
+ /* Open file */
+ infile = strcmp(fn, "-") ? fopen(fn, "rb") : set_binary_mode(stdin);
- if (!infile)
- {
- fprintf(stderr, "Failed to open file '%s'",
- strcmp(fn, "-") ? fn : "stdin");
- return EXIT_FAILURE;
- }
+ if (!infile) {
+    fprintf(stderr, "Failed to open file '%s'\n",
+ strcmp(fn, "-") ? fn : "stdin");
+ return EXIT_FAILURE;
+ }
#if CONFIG_OS_SUPPORT
- /* Make sure we don't dump to the terminal, unless forced to with -o - */
- if(!outfile_pattern && isatty(fileno(stdout)) && !do_md5 && !noblit)
- {
- fprintf(stderr,
- "Not dumping raw video to your terminal. Use '-o -' to "
- "override.\n");
- return EXIT_FAILURE;
- }
+ /* Make sure we don't dump to the terminal, unless forced to with -o - */
+ if (!outfile_pattern && isatty(fileno(stdout)) && !do_md5 && !noblit) {
+ fprintf(stderr,
+ "Not dumping raw video to your terminal. Use '-o -' to "
+ "override.\n");
+ return EXIT_FAILURE;
+ }
#endif
- input.infile = infile;
- if(file_is_ivf(infile, &fourcc, &width, &height, &fps_den,
- &fps_num))
- input.kind = IVF_FILE;
- else if(file_is_webm(&input, &fourcc, &width, &height, &fps_den, &fps_num))
- input.kind = WEBM_FILE;
- else if(file_is_raw(infile, &fourcc, &width, &height, &fps_den, &fps_num))
- input.kind = RAW_FILE;
- else
- {
- fprintf(stderr, "Unrecognized input file type.\n");
- return EXIT_FAILURE;
+ input.infile = infile;
+ if (file_is_ivf(infile, &fourcc, &width, &height, &fps_den,
+ &fps_num))
+ input.kind = IVF_FILE;
+ else if (file_is_webm(&input, &fourcc, &width, &height, &fps_den, &fps_num))
+ input.kind = WEBM_FILE;
+ else if (file_is_raw(infile, &fourcc, &width, &height, &fps_den, &fps_num))
+ input.kind = RAW_FILE;
+ else {
+ fprintf(stderr, "Unrecognized input file type.\n");
+ return EXIT_FAILURE;
+ }
+
+ /* If the output file is not set or doesn't have a sequence number in
+ * it, then we only open it once.
+ */
+ outfile_pattern = outfile_pattern ? outfile_pattern : "-";
+ single_file = 1;
+ {
+ const char *p = outfile_pattern;
+ do {
+ p = strchr(p, '%');
+ if (p && p[1] >= '1' && p[1] <= '9') {
+ /* pattern contains sequence number, so it's not unique. */
+ single_file = 0;
+ break;
+ }
+ if (p)
+ p++;
+ } while (p);
+ }
+
+ if (single_file && !noblit) {
+ generate_filename(outfile_pattern, outfile, sizeof(outfile) - 1,
+ width, height, 0);
+ out = out_open(outfile, do_md5);
+ }
+
+ if (use_y4m && !noblit) {
+ char buffer[128];
+
+ if (!single_file) {
+ fprintf(stderr, "YUV4MPEG2 not supported with output patterns,"
+ " try --i420 or --yv12.\n");
+ return EXIT_FAILURE;
}
- /* If the output file is not set or doesn't have a sequence number in
- * it, then we only open it once.
- */
- outfile_pattern = outfile_pattern ? outfile_pattern : "-";
- single_file = 1;
- {
- const char *p = outfile_pattern;
- do
- {
- p = strchr(p, '%');
- if(p && p[1] >= '1' && p[1] <= '9')
- {
- /* pattern contains sequence number, so it's not unique. */
- single_file = 0;
- break;
- }
- if(p)
- p++;
- } while(p);
- }
-
- if(single_file && !noblit)
- {
- generate_filename(outfile_pattern, outfile, sizeof(outfile)-1,
- width, height, 0);
- out = out_open(outfile, do_md5);
- }
-
- if (use_y4m && !noblit)
- {
- char buffer[128];
- if (!single_file)
- {
- fprintf(stderr, "YUV4MPEG2 not supported with output patterns,"
- " try --i420 or --yv12.\n");
- return EXIT_FAILURE;
- }
-
- if(input.kind == WEBM_FILE)
- if(webm_guess_framerate(&input, &fps_den, &fps_num))
- {
- fprintf(stderr, "Failed to guess framerate -- error parsing "
- "webm file?\n");
- return EXIT_FAILURE;
- }
-
-
- /*Note: We can't output an aspect ratio here because IVF doesn't
- store one, and neither does VP8.
- That will have to wait until these tools support WebM natively.*/
- sprintf(buffer, "YUV4MPEG2 C%s W%u H%u F%u:%u I%c\n",
- "420jpeg", width, height, fps_num, fps_den, 'p');
- out_put(out, (unsigned char *)buffer,
- (unsigned int)strlen(buffer), do_md5);
- }
-
- /* Try to determine the codec from the fourcc. */
- for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
- if ((fourcc & ifaces[i].fourcc_mask) == ifaces[i].fourcc)
- {
- vpx_codec_iface_t *ivf_iface = ifaces[i].iface;
-
- if (iface && iface != ivf_iface)
- fprintf(stderr, "Notice -- IVF header indicates codec: %s\n",
- ifaces[i].name);
- else
- iface = ivf_iface;
-
- break;
- }
-
- dec_flags = (postproc ? VPX_CODEC_USE_POSTPROC : 0) |
- (ec_enabled ? VPX_CODEC_USE_ERROR_CONCEALMENT : 0);
- if (vpx_codec_dec_init(&decoder, iface ? iface : ifaces[0].iface, &cfg,
- dec_flags))
- {
- fprintf(stderr, "Failed to initialize decoder: %s\n", vpx_codec_error(&decoder));
+ if (input.kind == WEBM_FILE)
+ if (webm_guess_framerate(&input, &fps_den, &fps_num)) {
+ fprintf(stderr, "Failed to guess framerate -- error parsing "
+ "webm file?\n");
return EXIT_FAILURE;
+ }
+
+  /* Note: We can't output an aspect ratio here because IVF doesn't
+     store one, and neither does VP8. That will have to wait until
+     these tools support WebM natively. */
+ snprintf(buffer, sizeof(buffer), "YUV4MPEG2 W%u H%u F%u:%u I%c ",
+ width, height, fps_num, fps_den, 'p');
+ out_put(out, (unsigned char *)buffer,
+ (unsigned int)strlen(buffer), do_md5);
+ }
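/* For reference, the stream this produces is the header line above,
 * terminated later by the colorspace tag once the first decoded frame
 * reveals the image format, then one "FRAME\n" marker plus raw planar
 * data per picture. A 320x240, 25 fps progressive stream would begin
 * (sizes illustrative):
 *
 *   YUV4MPEG2 W320 H240 F25:1 Ip C420jpeg\n
 *   FRAME\n
 *   <Y plane><U plane><V plane>
 */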
+
+ /* Try to determine the codec from the fourcc. */
+ for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
+ if ((fourcc & ifaces[i].fourcc_mask) == ifaces[i].fourcc) {
+ vpx_codec_iface_t *ivf_iface = ifaces[i].iface();
+
+ if (iface && iface != ivf_iface)
+ fprintf(stderr, "Notice -- IVF header indicates codec: %s\n",
+ ifaces[i].name);
+ else
+ iface = ivf_iface;
+
+ break;
}
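/* The comparison above matches the four ASCII codec bytes stored
 * little-endian in the file header; for VP8, "VP80" is
 *
 *   'V' | ('P' << 8) | ('8' << 16) | ('0' << 24) == 0x30385056
 *
 * i.e. the VP8_FOURCC value defined on the encoder side of this patch.
 * fourcc_mask presumably lets an entry ignore version bytes when
 * matching.
 */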
- if (!quiet)
- fprintf(stderr, "%s\n", decoder.name);
+ dec_flags = (postproc ? VPX_CODEC_USE_POSTPROC : 0) |
+ (ec_enabled ? VPX_CODEC_USE_ERROR_CONCEALMENT : 0);
+ if (vpx_codec_dec_init(&decoder, iface ? iface : ifaces[0].iface(), &cfg,
+ dec_flags)) {
+ fprintf(stderr, "Failed to initialize decoder: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (!quiet)
+ fprintf(stderr, "%s\n", decoder.name);
#if CONFIG_VP8_DECODER
- if (vp8_pp_cfg.post_proc_flag
- && vpx_codec_control(&decoder, VP8_SET_POSTPROC, &vp8_pp_cfg))
- {
- fprintf(stderr, "Failed to configure postproc: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ if (vp8_pp_cfg.post_proc_flag
+ && vpx_codec_control(&decoder, VP8_SET_POSTPROC, &vp8_pp_cfg)) {
+ fprintf(stderr, "Failed to configure postproc: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (vp8_dbg_color_ref_frame
+ && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_REF_FRAME, vp8_dbg_color_ref_frame)) {
+ fprintf(stderr, "Failed to configure reference block visualizer: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (vp8_dbg_color_mb_modes
+ && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_MB_MODES, vp8_dbg_color_mb_modes)) {
+ fprintf(stderr, "Failed to configure macro block visualizer: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (vp8_dbg_color_b_modes
+ && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_B_MODES, vp8_dbg_color_b_modes)) {
+ fprintf(stderr, "Failed to configure block visualizer: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (vp8_dbg_display_mv
+ && vpx_codec_control(&decoder, VP8_SET_DBG_DISPLAY_MV, vp8_dbg_display_mv)) {
+ fprintf(stderr, "Failed to configure motion vector visualizer: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+#endif
- if (vp8_dbg_color_ref_frame
- && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_REF_FRAME, vp8_dbg_color_ref_frame))
- {
- fprintf(stderr, "Failed to configure reference block visualizer: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
- if (vp8_dbg_color_mb_modes
- && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_MB_MODES, vp8_dbg_color_mb_modes))
- {
- fprintf(stderr, "Failed to configure macro block visualizer: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+  if (arg_skip)
+    fprintf(stderr, "Skipping first %d frames.\n", arg_skip);
+ while (arg_skip) {
+ if (read_frame(&input, &buf, &buf_sz, &buf_alloc_sz))
+ break;
+ arg_skip--;
+ }
- if (vp8_dbg_color_b_modes
- && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_B_MODES, vp8_dbg_color_b_modes))
- {
- fprintf(stderr, "Failed to configure block visualizer: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ frame_avail = 1;
+ got_data = 0;
- if (vp8_dbg_display_mv
- && vpx_codec_control(&decoder, VP8_SET_DBG_DISPLAY_MV, vp8_dbg_display_mv))
- {
- fprintf(stderr, "Failed to configure motion vector visualizer: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
-#endif
+ /* Decode file */
+ while (frame_avail || got_data) {
+ vpx_codec_iter_t iter = NULL;
+ vpx_image_t *img;
+ struct vpx_usec_timer timer;
+ int corrupted;
- /* Decode file */
- while (!read_frame(&input, &buf, &buf_sz, &buf_alloc_sz))
- {
- vpx_codec_iter_t iter = NULL;
- vpx_image_t *img;
- struct vpx_usec_timer timer;
- int corrupted;
+ frame_avail = 0;
+ if (!stop_after || frame_in < stop_after) {
+      if (!read_frame(&input, &buf, &buf_sz, &buf_alloc_sz)) {
+ frame_avail = 1;
+ frame_in++;
vpx_usec_timer_start(&timer);
- if (vpx_codec_decode(&decoder, buf, (unsigned int)buf_sz, NULL, 0))
- {
- const char *detail = vpx_codec_error_detail(&decoder);
- fprintf(stderr, "Failed to decode frame: %s\n", vpx_codec_error(&decoder));
+ if (vpx_codec_decode(&decoder, buf, (unsigned int)buf_sz, NULL, 0)) {
+ const char *detail = vpx_codec_error_detail(&decoder);
+ fprintf(stderr, "Failed to decode frame: %s\n",
+ vpx_codec_error(&decoder));
- if (detail)
- fprintf(stderr, " Additional information: %s\n", detail);
-
- goto fail;
+ if (detail)
+ fprintf(stderr, " Additional information: %s\n", detail);
+ goto fail;
}
vpx_usec_timer_mark(&timer);
dx_time += (unsigned int)vpx_usec_timer_elapsed(&timer);
+ }
+ }
- ++frame_in;
+ vpx_usec_timer_start(&timer);
- if (vpx_codec_control(&decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted))
- {
- fprintf(stderr, "Failed VP8_GET_FRAME_CORRUPTED: %s\n",
- vpx_codec_error(&decoder));
- goto fail;
+ got_data = 0;
+ if ((img = vpx_codec_get_frame(&decoder, &iter))) {
+ ++frame_out;
+ got_data = 1;
+ }
+
+ vpx_usec_timer_mark(&timer);
+ dx_time += (unsigned int)vpx_usec_timer_elapsed(&timer);
+
+ if (vpx_codec_control(&decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted)) {
+      fprintf(stderr, "Failed VP8D_GET_FRAME_CORRUPTED: %s\n",
+ vpx_codec_error(&decoder));
+ goto fail;
+ }
+ frames_corrupted += corrupted;
+
+ if (progress)
+ show_progress(frame_in, frame_out, dx_time);
+
+ if (!noblit) {
+ if (frame_out == 1 && img && use_y4m) {
+ /* Write out the color format to terminate the header line */
+ const char *color =
+ img->fmt == VPX_IMG_FMT_444A ? "C444alpha\n" :
+ img->fmt == VPX_IMG_FMT_I444 ? "C444\n" :
+ img->fmt == VPX_IMG_FMT_I422 ? "C422\n" :
+ "C420jpeg\n";
+
+ out_put(out, (const unsigned char*)color, strlen(color), do_md5);
+ }
+
+ if (do_scale) {
+ if (img && frame_out == 1) {
+ stream_w = img->d_w;
+ stream_h = img->d_h;
+ scaled_img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420,
+ stream_w, stream_h, 16);
+ }
+ if (img && (img->d_w != stream_w || img->d_h != stream_h)) {
+ assert(img->fmt == VPX_IMG_FMT_I420);
+ I420Scale(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ img->d_w, img->d_h,
+ scaled_img->planes[VPX_PLANE_Y],
+ scaled_img->stride[VPX_PLANE_Y],
+ scaled_img->planes[VPX_PLANE_U],
+ scaled_img->stride[VPX_PLANE_U],
+ scaled_img->planes[VPX_PLANE_V],
+ scaled_img->stride[VPX_PLANE_V],
+ stream_w, stream_h,
+ kFilterBox);
+ img = scaled_img;
+ }
+ }
+
+ if (img) {
+ unsigned int y;
+ char out_fn[PATH_MAX];
+ uint8_t *buf;
+ unsigned int c_w =
+ img->x_chroma_shift ? (1 + img->d_w) >> img->x_chroma_shift
+ : img->d_w;
+ unsigned int c_h =
+ img->y_chroma_shift ? (1 + img->d_h) >> img->y_chroma_shift
+ : img->d_h;
+
+ if (!single_file) {
+ size_t len = sizeof(out_fn) - 1;
+
+ out_fn[len] = '\0';
+ generate_filename(outfile_pattern, out_fn, len - 1,
+ img->d_w, img->d_h, frame_in);
+ out = out_open(out_fn, do_md5);
+ } else if (use_y4m)
+ out_put(out, (unsigned char *)"FRAME\n", 6, do_md5);
+
+ buf = img->planes[VPX_PLANE_Y];
+
+ for (y = 0; y < img->d_h; y++) {
+ out_put(out, buf, img->d_w, do_md5);
+ buf += img->stride[VPX_PLANE_Y];
}
- frames_corrupted += corrupted;
- vpx_usec_timer_start(&timer);
+ buf = img->planes[flipuv ? VPX_PLANE_V : VPX_PLANE_U];
- if ((img = vpx_codec_get_frame(&decoder, &iter)))
- ++frame_out;
+ for (y = 0; y < c_h; y++) {
+ out_put(out, buf, c_w, do_md5);
+ buf += img->stride[VPX_PLANE_U];
+ }
- vpx_usec_timer_mark(&timer);
- dx_time += (unsigned int)vpx_usec_timer_elapsed(&timer);
+ buf = img->planes[flipuv ? VPX_PLANE_U : VPX_PLANE_V];
- if (progress)
- show_progress(frame_in, frame_out, dx_time);
-
- if (!noblit)
- {
- if (img)
- {
- unsigned int y;
- char out_fn[PATH_MAX];
- uint8_t *buf;
-
- if (!single_file)
- {
- size_t len = sizeof(out_fn)-1;
-
- out_fn[len] = '\0';
- generate_filename(outfile_pattern, out_fn, len-1,
- img->d_w, img->d_h, frame_in);
- out = out_open(out_fn, do_md5);
- }
- else if(use_y4m)
- out_put(out, (unsigned char *)"FRAME\n", 6, do_md5);
-
- buf = img->planes[VPX_PLANE_Y];
-
- for (y = 0; y < img->d_h; y++)
- {
- out_put(out, buf, img->d_w, do_md5);
- buf += img->stride[VPX_PLANE_Y];
- }
-
- buf = img->planes[flipuv?VPX_PLANE_V:VPX_PLANE_U];
-
- for (y = 0; y < (1 + img->d_h) / 2; y++)
- {
- out_put(out, buf, (1 + img->d_w) / 2, do_md5);
- buf += img->stride[VPX_PLANE_U];
- }
-
- buf = img->planes[flipuv?VPX_PLANE_U:VPX_PLANE_V];
-
- for (y = 0; y < (1 + img->d_h) / 2; y++)
- {
- out_put(out, buf, (1 + img->d_w) / 2, do_md5);
- buf += img->stride[VPX_PLANE_V];
- }
-
- if (!single_file)
- out_close(out, out_fn, do_md5);
- }
+ for (y = 0; y < c_h; y++) {
+ out_put(out, buf, c_w, do_md5);
+ buf += img->stride[VPX_PLANE_V];
}
- if (stop_after && frame_in >= stop_after)
- break;
+ if (!single_file)
+ out_close(out, out_fn, do_md5);
+ }
}
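/* Note on the chroma loops above: for subsampled formats c_w and c_h
 * round up, since (1 + d) >> shift adds half a unit before truncating.
 * With 4:2:0 (both shifts == 1) a 101x75 image therefore writes
 * (1 + 101) >> 1 == 51 chroma columns and (1 + 75) >> 1 == 38 chroma
 * rows per plane. (Arithmetic illustrative only.) */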
- if (summary || progress)
- {
- show_progress(frame_in, frame_out, dx_time);
- fprintf(stderr, "\n");
- }
+ if (stop_after && frame_in >= stop_after)
+ break;
+ }
- if (frames_corrupted)
- fprintf(stderr, "WARNING: %d frames corrupted.\n",frames_corrupted);
+ if (summary || progress) {
+ show_progress(frame_in, frame_out, dx_time);
+ fprintf(stderr, "\n");
+ }
+
+ if (frames_corrupted)
+ fprintf(stderr, "WARNING: %d frames corrupted.\n", frames_corrupted);
fail:
- if (vpx_codec_destroy(&decoder))
- {
- fprintf(stderr, "Failed to destroy decoder: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ if (vpx_codec_destroy(&decoder)) {
+ fprintf(stderr, "Failed to destroy decoder: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (single_file && !noblit)
+ out_close(out, outfile, do_md5);
- if (single_file && !noblit)
- out_close(out, outfile, do_md5);
+ if (input.nestegg_ctx)
+ nestegg_destroy(input.nestegg_ctx);
+ if (input.kind != WEBM_FILE)
+ free(buf);
+ fclose(infile);
+ free(argv);
- if(input.nestegg_ctx)
- nestegg_destroy(input.nestegg_ctx);
- if(input.kind != WEBM_FILE)
- free(buf);
- fclose(infile);
- free(argv);
+ return frames_corrupted ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
+int main(int argc, const char **argv_) {
+ unsigned int loops = 1, i;
+ char **argv, **argi, **argj;
+ struct arg arg;
+ int error = 0;
- return frames_corrupted ? EXIT_FAILURE : EXIT_SUCCESS;
+ argv = argv_dup(argc - 1, argv_ + 1);
+ for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
+ memset(&arg, 0, sizeof(arg));
+ arg.argv_step = 1;
+
+ if (arg_match(&arg, &looparg, argi)) {
+ loops = arg_parse_uint(&arg);
+ break;
+ }
+ }
+ free(argv);
+ for (i = 0; !error && i < loops; i++)
+ error = main_loop(argc, argv_);
+ return error;
}
diff --git a/libvpx/vpxenc.c b/libvpx/vpxenc.c
index 7449e6c..0c742ca 100644
--- a/libvpx/vpxenc.c
+++ b/libvpx/vpxenc.c
@@ -8,11 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "vpx_config.h"
-/* This is a simple program that encodes YV12 files and generates ivf
- * files using the new interface.
- */
-#if defined(_WIN32) || !CONFIG_OS_SUPPORT
+#if defined(_WIN32) || defined(__OS2__) || !CONFIG_OS_SUPPORT
#define USE_POSIX_MMAP 0
#else
#define USE_POSIX_MMAP 1
@@ -25,6 +23,9 @@
#include <limits.h>
#include <assert.h>
#include "vpx/vpx_encoder.h"
+#if CONFIG_DECODERS
+#include "vpx/vpx_decoder.h"
+#endif
#if USE_POSIX_MMAP
#include <sys/types.h>
#include <sys/stat.h>
@@ -32,13 +33,21 @@
#include <fcntl.h>
#include <unistd.h>
#endif
+
+#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER
#include "vpx/vp8cx.h"
+#endif
+#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER
+#include "vpx/vp8dx.h"
+#endif
+
#include "vpx_ports/mem_ops.h"
#include "vpx_ports/vpx_timer.h"
#include "tools_common.h"
#include "y4minput.h"
#include "libmkv/EbmlWriter.h"
#include "libmkv/EbmlIDs.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
/* Need special handling of these functions on Windows */
#if defined(_MSC_VER)
@@ -66,643 +75,610 @@ typedef long off_t;
/* Swallow warnings about unused results of fread/fwrite */
static size_t wrap_fread(void *ptr, size_t size, size_t nmemb,
- FILE *stream)
-{
- return fread(ptr, size, nmemb, stream);
+ FILE *stream) {
+ return fread(ptr, size, nmemb, stream);
}
#define fread wrap_fread
static size_t wrap_fwrite(const void *ptr, size_t size, size_t nmemb,
- FILE *stream)
-{
- return fwrite(ptr, size, nmemb, stream);
+ FILE *stream) {
+ return fwrite(ptr, size, nmemb, stream);
}
#define fwrite wrap_fwrite
static const char *exec_name;
-static const struct codec_item
-{
- char const *name;
- vpx_codec_iface_t *iface;
- unsigned int fourcc;
-} codecs[] =
-{
-#if CONFIG_VP8_ENCODER
- {"vp8", &vpx_codec_vp8_cx_algo, 0x30385056},
+#define VP8_FOURCC (0x30385056)
+#define VP9_FOURCC (0x30395056)
+static const struct codec_item {
+ char const *name;
+ const vpx_codec_iface_t *(*iface)(void);
+ const vpx_codec_iface_t *(*dx_iface)(void);
+ unsigned int fourcc;
+} codecs[] = {
+#if CONFIG_VP8_ENCODER && CONFIG_VP8_DECODER
+ {"vp8", &vpx_codec_vp8_cx, &vpx_codec_vp8_dx, VP8_FOURCC},
+#elif CONFIG_VP8_ENCODER && !CONFIG_VP8_DECODER
+ {"vp8", &vpx_codec_vp8_cx, NULL, VP8_FOURCC},
+#endif
+#if CONFIG_VP9_ENCODER && CONFIG_VP9_DECODER
+ {"vp9", &vpx_codec_vp9_cx, &vpx_codec_vp9_dx, VP9_FOURCC},
+#elif CONFIG_VP9_ENCODER && !CONFIG_VP9_DECODER
+ {"vp9", &vpx_codec_vp9_cx, NULL, VP9_FOURCC},
#endif
};
static void usage_exit();
#define LOG_ERROR(label) do \
-{\
+ {\
const char *l=label;\
va_list ap;\
va_start(ap, fmt);\
if(l)\
- fprintf(stderr, "%s: ", l);\
+ fprintf(stderr, "%s: ", l);\
vfprintf(stderr, fmt, ap);\
fprintf(stderr, "\n");\
va_end(ap);\
-} while(0)
+ } while(0)
-void die(const char *fmt, ...)
-{
- LOG_ERROR(NULL);
- usage_exit();
+void die(const char *fmt, ...) {
+ LOG_ERROR(NULL);
+ usage_exit();
}
-void fatal(const char *fmt, ...)
-{
- LOG_ERROR("Fatal");
- exit(EXIT_FAILURE);
+void fatal(const char *fmt, ...) {
+ LOG_ERROR("Fatal");
+ exit(EXIT_FAILURE);
}
-void warn(const char *fmt, ...)
-{
- LOG_ERROR("Warning");
+void warn(const char *fmt, ...) {
+ LOG_ERROR("Warning");
}
-static void ctx_exit_on_error(vpx_codec_ctx_t *ctx, const char *s, ...)
-{
- va_list ap;
+static void warn_or_exit_on_errorv(vpx_codec_ctx_t *ctx, int fatal,
+ const char *s, va_list ap) {
+ if (ctx->err) {
+ const char *detail = vpx_codec_error_detail(ctx);
- va_start(ap, s);
- if (ctx->err)
- {
- const char *detail = vpx_codec_error_detail(ctx);
+ vfprintf(stderr, s, ap);
+ fprintf(stderr, ": %s\n", vpx_codec_error(ctx));
- vfprintf(stderr, s, ap);
- fprintf(stderr, ": %s\n", vpx_codec_error(ctx));
+ if (detail)
+ fprintf(stderr, " %s\n", detail);
- if (detail)
- fprintf(stderr, " %s\n", detail);
+ if (fatal)
+ exit(EXIT_FAILURE);
+ }
+}
- exit(EXIT_FAILURE);
- }
+static void ctx_exit_on_error(vpx_codec_ctx_t *ctx, const char *s, ...) {
+ va_list ap;
+
+ va_start(ap, s);
+ warn_or_exit_on_errorv(ctx, 1, s, ap);
+ va_end(ap);
+}
+
+static void warn_or_exit_on_error(vpx_codec_ctx_t *ctx, int fatal,
+ const char *s, ...) {
+ va_list ap;
+
+ va_start(ap, s);
+ warn_or_exit_on_errorv(ctx, fatal, s, ap);
+ va_end(ap);
}
/* This structure is used to abstract the different ways of handling
* first pass statistics.
*/
-typedef struct
-{
- vpx_fixed_buf_t buf;
- int pass;
- FILE *file;
- char *buf_ptr;
- size_t buf_alloc_sz;
+typedef struct {
+ vpx_fixed_buf_t buf;
+ int pass;
+ FILE *file;
+ char *buf_ptr;
+ size_t buf_alloc_sz;
} stats_io_t;
-int stats_open_file(stats_io_t *stats, const char *fpf, int pass)
-{
- int res;
+int stats_open_file(stats_io_t *stats, const char *fpf, int pass) {
+ int res;
- stats->pass = pass;
+ stats->pass = pass;
- if (pass == 0)
- {
- stats->file = fopen(fpf, "wb");
- stats->buf.sz = 0;
- stats->buf.buf = NULL,
- res = (stats->file != NULL);
- }
- else
- {
+ if (pass == 0) {
+ stats->file = fopen(fpf, "wb");
+ stats->buf.sz = 0;
+    stats->buf.buf = NULL;
+ res = (stats->file != NULL);
+ } else {
#if 0
#elif USE_POSIX_MMAP
- struct stat stat_buf;
- int fd;
-
- fd = open(fpf, O_RDONLY);
- stats->file = fdopen(fd, "rb");
- fstat(fd, &stat_buf);
- stats->buf.sz = stat_buf.st_size;
- stats->buf.buf = mmap(NULL, stats->buf.sz, PROT_READ, MAP_PRIVATE,
- fd, 0);
- res = (stats->buf.buf != NULL);
+ struct stat stat_buf;
+ int fd;
+
+ fd = open(fpf, O_RDONLY);
+ stats->file = fdopen(fd, "rb");
+ fstat(fd, &stat_buf);
+ stats->buf.sz = stat_buf.st_size;
+ stats->buf.buf = mmap(NULL, stats->buf.sz, PROT_READ, MAP_PRIVATE,
+ fd, 0);
+ res = (stats->buf.buf != NULL);
#else
- size_t nbytes;
+ size_t nbytes;
- stats->file = fopen(fpf, "rb");
+ stats->file = fopen(fpf, "rb");
- if (fseek(stats->file, 0, SEEK_END))
- fatal("First-pass stats file must be seekable!");
+ if (fseek(stats->file, 0, SEEK_END))
+ fatal("First-pass stats file must be seekable!");
- stats->buf.sz = stats->buf_alloc_sz = ftell(stats->file);
- rewind(stats->file);
+ stats->buf.sz = stats->buf_alloc_sz = ftell(stats->file);
+ rewind(stats->file);
- stats->buf.buf = malloc(stats->buf_alloc_sz);
+ stats->buf.buf = malloc(stats->buf_alloc_sz);
- if (!stats->buf.buf)
- fatal("Failed to allocate first-pass stats buffer (%lu bytes)",
- (unsigned long)stats->buf_alloc_sz);
+ if (!stats->buf.buf)
+ fatal("Failed to allocate first-pass stats buffer (%lu bytes)",
+ (unsigned long)stats->buf_alloc_sz);
- nbytes = fread(stats->buf.buf, 1, stats->buf.sz, stats->file);
- res = (nbytes == stats->buf.sz);
+ nbytes = fread(stats->buf.buf, 1, stats->buf.sz, stats->file);
+ res = (nbytes == stats->buf.sz);
#endif
- }
+ }
- return res;
+ return res;
}
-int stats_open_mem(stats_io_t *stats, int pass)
-{
- int res;
- stats->pass = pass;
+int stats_open_mem(stats_io_t *stats, int pass) {
+ int res;
+ stats->pass = pass;
- if (!pass)
- {
- stats->buf.sz = 0;
- stats->buf_alloc_sz = 64 * 1024;
- stats->buf.buf = malloc(stats->buf_alloc_sz);
- }
+ if (!pass) {
+ stats->buf.sz = 0;
+ stats->buf_alloc_sz = 64 * 1024;
+ stats->buf.buf = malloc(stats->buf_alloc_sz);
+ }
- stats->buf_ptr = stats->buf.buf;
- res = (stats->buf.buf != NULL);
- return res;
+ stats->buf_ptr = stats->buf.buf;
+ res = (stats->buf.buf != NULL);
+ return res;
}
-void stats_close(stats_io_t *stats, int last_pass)
-{
- if (stats->file)
- {
- if (stats->pass == last_pass)
- {
+void stats_close(stats_io_t *stats, int last_pass) {
+ if (stats->file) {
+ if (stats->pass == last_pass) {
#if 0
#elif USE_POSIX_MMAP
- munmap(stats->buf.buf, stats->buf.sz);
+ munmap(stats->buf.buf, stats->buf.sz);
#else
- free(stats->buf.buf);
+ free(stats->buf.buf);
#endif
- }
-
- fclose(stats->file);
- stats->file = NULL;
- }
- else
- {
- if (stats->pass == last_pass)
- free(stats->buf.buf);
}
+
+ fclose(stats->file);
+ stats->file = NULL;
+ } else {
+ if (stats->pass == last_pass)
+ free(stats->buf.buf);
+ }
}
-void stats_write(stats_io_t *stats, const void *pkt, size_t len)
-{
- if (stats->file)
- {
- (void) fwrite(pkt, 1, len, stats->file);
+void stats_write(stats_io_t *stats, const void *pkt, size_t len) {
+ if (stats->file) {
+ (void) fwrite(pkt, 1, len, stats->file);
+ } else {
+ if (stats->buf.sz + len > stats->buf_alloc_sz) {
+ size_t new_sz = stats->buf_alloc_sz + 64 * 1024;
+ char *new_ptr = realloc(stats->buf.buf, new_sz);
+
+ if (new_ptr) {
+ stats->buf_ptr = new_ptr + (stats->buf_ptr - (char *)stats->buf.buf);
+ stats->buf.buf = new_ptr;
+ stats->buf_alloc_sz = new_sz;
+ } else
+ fatal("Failed to realloc firstpass stats buffer.");
}
- else
- {
- if (stats->buf.sz + len > stats->buf_alloc_sz)
- {
- size_t new_sz = stats->buf_alloc_sz + 64 * 1024;
- char *new_ptr = realloc(stats->buf.buf, new_sz);
-
- if (new_ptr)
- {
- stats->buf_ptr = new_ptr + (stats->buf_ptr - (char *)stats->buf.buf);
- stats->buf.buf = new_ptr;
- stats->buf_alloc_sz = new_sz;
- }
- else
- fatal("Failed to realloc firstpass stats buffer.");
- }
- memcpy(stats->buf_ptr, pkt, len);
- stats->buf.sz += len;
- stats->buf_ptr += len;
- }
+ memcpy(stats->buf_ptr, pkt, len);
+ stats->buf.sz += len;
+ stats->buf_ptr += len;
+ }
}
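/* The in-memory stats path above grows the buffer linearly in 64 KB
 * steps; buf_ptr is rebased relative to the new allocation after
 * realloc() so that writes continue at the same offset even when the
 * block moves. */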
-vpx_fixed_buf_t stats_get(stats_io_t *stats)
-{
- return stats->buf;
+vpx_fixed_buf_t stats_get(stats_io_t *stats) {
+ return stats->buf;
}
/* Stereo 3D packed frame format */
-typedef enum stereo_format
-{
- STEREO_FORMAT_MONO = 0,
- STEREO_FORMAT_LEFT_RIGHT = 1,
- STEREO_FORMAT_BOTTOM_TOP = 2,
- STEREO_FORMAT_TOP_BOTTOM = 3,
- STEREO_FORMAT_RIGHT_LEFT = 11
+typedef enum stereo_format {
+ STEREO_FORMAT_MONO = 0,
+ STEREO_FORMAT_LEFT_RIGHT = 1,
+ STEREO_FORMAT_BOTTOM_TOP = 2,
+ STEREO_FORMAT_TOP_BOTTOM = 3,
+ STEREO_FORMAT_RIGHT_LEFT = 11
} stereo_format_t;
-enum video_file_type
-{
- FILE_TYPE_RAW,
- FILE_TYPE_IVF,
- FILE_TYPE_Y4M
+enum video_file_type {
+ FILE_TYPE_RAW,
+ FILE_TYPE_IVF,
+ FILE_TYPE_Y4M
};
struct detect_buffer {
- char buf[4];
- size_t buf_read;
- size_t position;
+ char buf[4];
+ size_t buf_read;
+ size_t position;
};
-struct input_state
-{
- char *fn;
- FILE *file;
- y4m_input y4m;
- struct detect_buffer detect;
- enum video_file_type file_type;
- unsigned int w;
- unsigned int h;
- struct vpx_rational framerate;
- int use_i420;
+struct input_state {
+ char *fn;
+ FILE *file;
+ off_t length;
+ y4m_input y4m;
+ struct detect_buffer detect;
+ enum video_file_type file_type;
+ unsigned int w;
+ unsigned int h;
+ struct vpx_rational framerate;
+ int use_i420;
+ int only_i420;
};
#define IVF_FRAME_HDR_SZ (4+8) /* 4 byte size + 8 byte timestamp */
-static int read_frame(struct input_state *input, vpx_image_t *img)
-{
- FILE *f = input->file;
- enum video_file_type file_type = input->file_type;
- y4m_input *y4m = &input->y4m;
- struct detect_buffer *detect = &input->detect;
- int plane = 0;
- int shortread = 0;
-
- if (file_type == FILE_TYPE_Y4M)
- {
- if (y4m_input_fetch_frame(y4m, f, img) < 1)
- return 0;
+static int read_frame(struct input_state *input, vpx_image_t *img) {
+ FILE *f = input->file;
+ enum video_file_type file_type = input->file_type;
+ y4m_input *y4m = &input->y4m;
+ struct detect_buffer *detect = &input->detect;
+ int plane = 0;
+ int shortread = 0;
+
+ if (file_type == FILE_TYPE_Y4M) {
+ if (y4m_input_fetch_frame(y4m, f, img) < 1)
+ return 0;
+ } else {
+ if (file_type == FILE_TYPE_IVF) {
+ char junk[IVF_FRAME_HDR_SZ];
+
+ /* Skip the frame header. We know how big the frame should be. See
+ * write_ivf_frame_header() for documentation on the frame header
+ * layout.
+ */
+ (void) fread(junk, 1, IVF_FRAME_HDR_SZ, f);
}
- else
- {
- if (file_type == FILE_TYPE_IVF)
- {
- char junk[IVF_FRAME_HDR_SZ];
- /* Skip the frame header. We know how big the frame should be. See
- * write_ivf_frame_header() for documentation on the frame header
- * layout.
- */
- (void) fread(junk, 1, IVF_FRAME_HDR_SZ, f);
+ for (plane = 0; plane < 3; plane++) {
+ unsigned char *ptr;
+ int w = (plane ? (1 + img->d_w) / 2 : img->d_w);
+ int h = (plane ? (1 + img->d_h) / 2 : img->d_h);
+ int r;
+
+ /* Determine the correct plane based on the image format. The for-loop
+ * always counts in Y,U,V order, but this may not match the order of
+ * the data on disk.
+ */
+ switch (plane) {
+ case 1:
+ ptr = img->planes[img->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_V : VPX_PLANE_U];
+ break;
+ case 2:
+ ptr = img->planes[img->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_U : VPX_PLANE_V];
+ break;
+ default:
+ ptr = img->planes[plane];
+ }
+
+ for (r = 0; r < h; r++) {
+ size_t needed = w;
+ size_t buf_position = 0;
+ const size_t left = detect->buf_read - detect->position;
+ if (left > 0) {
+ const size_t more = (left < needed) ? left : needed;
+ memcpy(ptr, detect->buf + detect->position, more);
+ buf_position = more;
+ needed -= more;
+ detect->position += more;
}
-
- for (plane = 0; plane < 3; plane++)
- {
- unsigned char *ptr;
- int w = (plane ? (1 + img->d_w) / 2 : img->d_w);
- int h = (plane ? (1 + img->d_h) / 2 : img->d_h);
- int r;
-
- /* Determine the correct plane based on the image format. The for-loop
- * always counts in Y,U,V order, but this may not match the order of
- * the data on disk.
- */
- switch (plane)
- {
- case 1:
- ptr = img->planes[img->fmt==VPX_IMG_FMT_YV12? VPX_PLANE_V : VPX_PLANE_U];
- break;
- case 2:
- ptr = img->planes[img->fmt==VPX_IMG_FMT_YV12?VPX_PLANE_U : VPX_PLANE_V];
- break;
- default:
- ptr = img->planes[plane];
- }
-
- for (r = 0; r < h; r++)
- {
- size_t needed = w;
- size_t buf_position = 0;
- const size_t left = detect->buf_read - detect->position;
- if (left > 0)
- {
- const size_t more = (left < needed) ? left : needed;
- memcpy(ptr, detect->buf + detect->position, more);
- buf_position = more;
- needed -= more;
- detect->position += more;
- }
- if (needed > 0)
- {
- shortread |= (fread(ptr + buf_position, 1, needed, f) < needed);
- }
-
- ptr += img->stride[plane];
- }
+ if (needed > 0) {
+ shortread |= (fread(ptr + buf_position, 1, needed, f) < needed);
}
+
+ ptr += img->stride[plane];
+ }
}
+ }
- return !shortread;
+ return !shortread;
}
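/* Plane-order note for the switch above: a YV12 file lays its planes
 * out as Y,V,U on disk while I420 uses Y,U,V, so the second plane read
 * is stored through VPX_PLANE_V when img->fmt == VPX_IMG_FMT_YV12 and
 * through VPX_PLANE_U otherwise, with the third plane swapped to
 * match. */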
unsigned int file_is_y4m(FILE *infile,
y4m_input *y4m,
- char detect[4])
-{
- if(memcmp(detect, "YUV4", 4) == 0)
- {
- return 1;
- }
- return 0;
+ char detect[4]) {
+ if (memcmp(detect, "YUV4", 4) == 0) {
+ return 1;
+ }
+ return 0;
}
#define IVF_FILE_HDR_SZ (32)
unsigned int file_is_ivf(struct input_state *input,
- unsigned int *fourcc)
-{
- char raw_hdr[IVF_FILE_HDR_SZ];
- int is_ivf = 0;
- FILE *infile = input->file;
- unsigned int *width = &input->w;
- unsigned int *height = &input->h;
- struct detect_buffer *detect = &input->detect;
-
- if(memcmp(detect->buf, "DKIF", 4) != 0)
- return 0;
-
- /* See write_ivf_file_header() for more documentation on the file header
- * layout.
- */
- if (fread(raw_hdr + 4, 1, IVF_FILE_HDR_SZ - 4, infile)
- == IVF_FILE_HDR_SZ - 4)
+ unsigned int *fourcc) {
+ char raw_hdr[IVF_FILE_HDR_SZ];
+ int is_ivf = 0;
+ FILE *infile = input->file;
+ unsigned int *width = &input->w;
+ unsigned int *height = &input->h;
+ struct detect_buffer *detect = &input->detect;
+
+ if (memcmp(detect->buf, "DKIF", 4) != 0)
+ return 0;
+
+ /* See write_ivf_file_header() for more documentation on the file header
+ * layout.
+ */
+ if (fread(raw_hdr + 4, 1, IVF_FILE_HDR_SZ - 4, infile)
+ == IVF_FILE_HDR_SZ - 4) {
{
- {
- is_ivf = 1;
+ is_ivf = 1;
- if (mem_get_le16(raw_hdr + 4) != 0)
- warn("Unrecognized IVF version! This file may not decode "
- "properly.");
+ if (mem_get_le16(raw_hdr + 4) != 0)
+ warn("Unrecognized IVF version! This file may not decode "
+ "properly.");
- *fourcc = mem_get_le32(raw_hdr + 8);
- }
+ *fourcc = mem_get_le32(raw_hdr + 8);
}
+ }
- if (is_ivf)
- {
- *width = mem_get_le16(raw_hdr + 12);
- *height = mem_get_le16(raw_hdr + 14);
- detect->position = 4;
- }
+ if (is_ivf) {
+ *width = mem_get_le16(raw_hdr + 12);
+ *height = mem_get_le16(raw_hdr + 14);
+ detect->position = 4;
+ }
- return is_ivf;
+ return is_ivf;
}
static void write_ivf_file_header(FILE *outfile,
const vpx_codec_enc_cfg_t *cfg,
unsigned int fourcc,
- int frame_cnt)
-{
- char header[32];
-
- if (cfg->g_pass != VPX_RC_ONE_PASS && cfg->g_pass != VPX_RC_LAST_PASS)
- return;
-
- header[0] = 'D';
- header[1] = 'K';
- header[2] = 'I';
- header[3] = 'F';
- mem_put_le16(header + 4, 0); /* version */
- mem_put_le16(header + 6, 32); /* headersize */
- mem_put_le32(header + 8, fourcc); /* headersize */
- mem_put_le16(header + 12, cfg->g_w); /* width */
- mem_put_le16(header + 14, cfg->g_h); /* height */
- mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */
- mem_put_le32(header + 20, cfg->g_timebase.num); /* scale */
- mem_put_le32(header + 24, frame_cnt); /* length */
- mem_put_le32(header + 28, 0); /* unused */
-
- (void) fwrite(header, 1, 32, outfile);
+ int frame_cnt) {
+ char header[32];
+
+ if (cfg->g_pass != VPX_RC_ONE_PASS && cfg->g_pass != VPX_RC_LAST_PASS)
+ return;
+
+ header[0] = 'D';
+ header[1] = 'K';
+ header[2] = 'I';
+ header[3] = 'F';
+ mem_put_le16(header + 4, 0); /* version */
+ mem_put_le16(header + 6, 32); /* headersize */
+  mem_put_le32(header + 8, fourcc);        /* fourcc */
+ mem_put_le16(header + 12, cfg->g_w); /* width */
+ mem_put_le16(header + 14, cfg->g_h); /* height */
+ mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */
+ mem_put_le32(header + 20, cfg->g_timebase.num); /* scale */
+ mem_put_le32(header + 24, frame_cnt); /* length */
+ mem_put_le32(header + 28, 0); /* unused */
+
+ (void) fwrite(header, 1, 32, outfile);
}
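/* Byte layout of the 32-byte IVF file header written above (all
 * multi-byte fields little-endian; offsets for reference):
 *
 *    0-3   "DKIF" signature       16-19  timebase denominator (rate)
 *    4-5   version (0)            20-23  timebase numerator (scale)
 *    6-7   header size (32)       24-27  frame count
 *    8-11  codec fourcc           28-31  unused
 *   12-13  width
 *   14-15  height
 */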
static void write_ivf_frame_header(FILE *outfile,
- const vpx_codec_cx_pkt_t *pkt)
-{
- char header[12];
- vpx_codec_pts_t pts;
+ const vpx_codec_cx_pkt_t *pkt) {
+ char header[12];
+ vpx_codec_pts_t pts;
- if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
- return;
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
+ return;
- pts = pkt->data.frame.pts;
- mem_put_le32(header, (int)pkt->data.frame.sz);
- mem_put_le32(header + 4, pts & 0xFFFFFFFF);
- mem_put_le32(header + 8, pts >> 32);
+ pts = pkt->data.frame.pts;
+ mem_put_le32(header, (int)pkt->data.frame.sz);
+ mem_put_le32(header + 4, pts & 0xFFFFFFFF);
+ mem_put_le32(header + 8, pts >> 32);
- (void) fwrite(header, 1, 12, outfile);
+ (void) fwrite(header, 1, 12, outfile);
}
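/* Each frame payload is then preceded by the 12-byte header above: a
 * 4-byte little-endian frame size followed by the 64-bit PTS split into
 * two 32-bit little-endian words, low word first. */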
-static void write_ivf_frame_size(FILE *outfile, size_t size)
-{
- char header[4];
- mem_put_le32(header, (int)size);
- (void) fwrite(header, 1, 4, outfile);
+static void write_ivf_frame_size(FILE *outfile, size_t size) {
+ char header[4];
+ mem_put_le32(header, (int)size);
+ (void) fwrite(header, 1, 4, outfile);
}
typedef off_t EbmlLoc;
-struct cue_entry
-{
- unsigned int time;
- uint64_t loc;
+struct cue_entry {
+ unsigned int time;
+ uint64_t loc;
};
-struct EbmlGlobal
-{
- int debug;
+struct EbmlGlobal {
+ int debug;
- FILE *stream;
- int64_t last_pts_ms;
- vpx_rational_t framerate;
+ FILE *stream;
+ int64_t last_pts_ms;
+ vpx_rational_t framerate;
- /* These pointers are to the start of an element */
- off_t position_reference;
- off_t seek_info_pos;
- off_t segment_info_pos;
- off_t track_pos;
- off_t cue_pos;
- off_t cluster_pos;
+ /* These pointers are to the start of an element */
+ off_t position_reference;
+ off_t seek_info_pos;
+ off_t segment_info_pos;
+ off_t track_pos;
+ off_t cue_pos;
+ off_t cluster_pos;
- /* This pointer is to a specific element to be serialized */
- off_t track_id_pos;
+ /* This pointer is to a specific element to be serialized */
+ off_t track_id_pos;
- /* These pointers are to the size field of the element */
- EbmlLoc startSegment;
- EbmlLoc startCluster;
+ /* These pointers are to the size field of the element */
+ EbmlLoc startSegment;
+ EbmlLoc startCluster;
- uint32_t cluster_timecode;
- int cluster_open;
+ uint32_t cluster_timecode;
+ int cluster_open;
- struct cue_entry *cue_list;
- unsigned int cues;
+ struct cue_entry *cue_list;
+ unsigned int cues;
};
-void Ebml_Write(EbmlGlobal *glob, const void *buffer_in, unsigned long len)
-{
- (void) fwrite(buffer_in, 1, len, glob->stream);
+void Ebml_Write(EbmlGlobal *glob, const void *buffer_in, unsigned long len) {
+ (void) fwrite(buffer_in, 1, len, glob->stream);
}
#define WRITE_BUFFER(s) \
-for(i = len-1; i>=0; i--)\
-{ \
+  for (i = len - 1; i >= 0; i--)\
+  { \
x = (char)(*(const s *)buffer_in >> (i * CHAR_BIT)); \
Ebml_Write(glob, &x, 1); \
-}
-void Ebml_Serialize(EbmlGlobal *glob, const void *buffer_in, int buffer_size, unsigned long len)
-{
- char x;
- int i;
-
- /* buffer_size:
- * 1 - int8_t;
- * 2 - int16_t;
- * 3 - int32_t;
- * 4 - int64_t;
- */
- switch (buffer_size)
- {
- case 1:
- WRITE_BUFFER(int8_t)
- break;
- case 2:
- WRITE_BUFFER(int16_t)
- break;
- case 4:
- WRITE_BUFFER(int32_t)
- break;
- case 8:
- WRITE_BUFFER(int64_t)
- break;
- default:
- break;
- }
+ }
+void Ebml_Serialize(EbmlGlobal *glob, const void *buffer_in, int buffer_size, unsigned long len) {
+ char x;
+ int i;
+
+  /* buffer_size:
+   * 1 - int8_t;
+   * 2 - int16_t;
+   * 4 - int32_t;
+   * 8 - int64_t;
+   */
+ switch (buffer_size) {
+ case 1:
+ WRITE_BUFFER(int8_t)
+ break;
+ case 2:
+ WRITE_BUFFER(int16_t)
+ break;
+ case 4:
+ WRITE_BUFFER(int32_t)
+ break;
+ case 8:
+ WRITE_BUFFER(int64_t)
+ break;
+ default:
+ break;
+ }
}
#undef WRITE_BUFFER
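/* WRITE_BUFFER emits the value most-significant byte first, matching
 * EBML's big-endian wire format. A sketch with illustrative values:
 *
 *   uint16_t v = 0x1234;
 *   Ebml_Serialize(glob, &v, sizeof(v), 2);
 *
 * writes 0x12 followed by 0x34. buffer_size selects the integer width
 * read from buffer_in; len is the number of bytes actually emitted. */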
/* Need a fixed size serializer for the track ID. libmkv provides a 64 bit
* one, but not a 32 bit one.
*/
-static void Ebml_SerializeUnsigned32(EbmlGlobal *glob, unsigned long class_id, uint64_t ui)
-{
- unsigned char sizeSerialized = 4 | 0x80;
- Ebml_WriteID(glob, class_id);
- Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
- Ebml_Serialize(glob, &ui, sizeof(ui), 4);
+static void Ebml_SerializeUnsigned32(EbmlGlobal *glob, unsigned long class_id, uint64_t ui) {
+ unsigned char sizeSerialized = 4 | 0x80;
+ Ebml_WriteID(glob, class_id);
+ Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
+ Ebml_Serialize(glob, &ui, sizeof(ui), 4);
}
static void
Ebml_StartSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc,
- unsigned long class_id)
-{
- /* todo this is always taking 8 bytes, this may need later optimization */
- /* this is a key that says length unknown */
- uint64_t unknownLen = LITERALU64(0x01FFFFFF, 0xFFFFFFFF);
-
- Ebml_WriteID(glob, class_id);
- *ebmlLoc = ftello(glob->stream);
- Ebml_Serialize(glob, &unknownLen, sizeof(unknownLen), 8);
+ unsigned long class_id) {
+  /* TODO: this always takes 8 bytes; it may need optimization later. */
+  /* This is the EBML marker meaning "length unknown". */
+ uint64_t unknownLen = LITERALU64(0x01FFFFFF, 0xFFFFFFFF);
+
+ Ebml_WriteID(glob, class_id);
+ *ebmlLoc = ftello(glob->stream);
+ Ebml_Serialize(glob, &unknownLen, sizeof(unknownLen), 8);
}
static void
-Ebml_EndSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc)
-{
- off_t pos;
- uint64_t size;
+Ebml_EndSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc) {
+ off_t pos;
+ uint64_t size;
- /* Save the current stream pointer */
- pos = ftello(glob->stream);
+ /* Save the current stream pointer */
+ pos = ftello(glob->stream);
- /* Calculate the size of this element */
- size = pos - *ebmlLoc - 8;
- size |= LITERALU64(0x01000000,0x00000000);
+ /* Calculate the size of this element */
+ size = pos - *ebmlLoc - 8;
+ size |= LITERALU64(0x01000000, 0x00000000);
- /* Seek back to the beginning of the element and write the new size */
- fseeko(glob->stream, *ebmlLoc, SEEK_SET);
- Ebml_Serialize(glob, &size, sizeof(size), 8);
+ /* Seek back to the beginning of the element and write the new size */
+ fseeko(glob->stream, *ebmlLoc, SEEK_SET);
+ Ebml_Serialize(glob, &size, sizeof(size), 8);
- /* Reset the stream pointer */
- fseeko(glob->stream, pos, SEEK_SET);
+ /* Reset the stream pointer */
+ fseeko(glob->stream, pos, SEEK_SET);
}
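/* How the pair above works: Ebml_StartSubElement() reserves an 8-byte
 * size field holding EBML's "length unknown" marker
 * (0x01FFFFFFFFFFFFFF), and Ebml_EndSubElement() later computes
 *
 *   size = (end_pos - element_start - 8) | 0x0100000000000000
 *
 * then seeks back, overwrites the placeholder, and restores the stream
 * position. The 0x01 top byte is the EBML length descriptor marking an
 * 8-byte size integer. */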
static void
-write_webm_seek_element(EbmlGlobal *ebml, unsigned long id, off_t pos)
-{
- uint64_t offset = pos - ebml->position_reference;
- EbmlLoc start;
- Ebml_StartSubElement(ebml, &start, Seek);
- Ebml_SerializeBinary(ebml, SeekID, id);
- Ebml_SerializeUnsigned64(ebml, SeekPosition, offset);
- Ebml_EndSubElement(ebml, &start);
+write_webm_seek_element(EbmlGlobal *ebml, unsigned long id, off_t pos) {
+ uint64_t offset = pos - ebml->position_reference;
+ EbmlLoc start;
+ Ebml_StartSubElement(ebml, &start, Seek);
+ Ebml_SerializeBinary(ebml, SeekID, id);
+ Ebml_SerializeUnsigned64(ebml, SeekPosition, offset);
+ Ebml_EndSubElement(ebml, &start);
}
static void
-write_webm_seek_info(EbmlGlobal *ebml)
-{
+write_webm_seek_info(EbmlGlobal *ebml) {
- off_t pos;
+ off_t pos;
- /* Save the current stream pointer */
- pos = ftello(ebml->stream);
+ /* Save the current stream pointer */
+ pos = ftello(ebml->stream);
- if(ebml->seek_info_pos)
- fseeko(ebml->stream, ebml->seek_info_pos, SEEK_SET);
- else
- ebml->seek_info_pos = pos;
+ if (ebml->seek_info_pos)
+ fseeko(ebml->stream, ebml->seek_info_pos, SEEK_SET);
+ else
+ ebml->seek_info_pos = pos;
- {
- EbmlLoc start;
+ {
+ EbmlLoc start;
- Ebml_StartSubElement(ebml, &start, SeekHead);
- write_webm_seek_element(ebml, Tracks, ebml->track_pos);
- write_webm_seek_element(ebml, Cues, ebml->cue_pos);
- write_webm_seek_element(ebml, Info, ebml->segment_info_pos);
- Ebml_EndSubElement(ebml, &start);
+ Ebml_StartSubElement(ebml, &start, SeekHead);
+ write_webm_seek_element(ebml, Tracks, ebml->track_pos);
+ write_webm_seek_element(ebml, Cues, ebml->cue_pos);
+ write_webm_seek_element(ebml, Info, ebml->segment_info_pos);
+ Ebml_EndSubElement(ebml, &start);
+ }
+ {
+ /* segment info */
+ EbmlLoc startInfo;
+ uint64_t frame_time;
+ char version_string[64];
+
+ /* Assemble version string */
+ if (ebml->debug)
+ strcpy(version_string, "vpxenc");
+ else {
+ strcpy(version_string, "vpxenc ");
+ strncat(version_string,
+ vpx_codec_version_str(),
+ sizeof(version_string) - 1 - strlen(version_string));
}
- {
- /* segment info */
- EbmlLoc startInfo;
- uint64_t frame_time;
- char version_string[64];
-
- /* Assemble version string */
- if(ebml->debug)
- strcpy(version_string, "vpxenc");
- else
- {
- strcpy(version_string, "vpxenc ");
- strncat(version_string,
- vpx_codec_version_str(),
- sizeof(version_string) - 1 - strlen(version_string));
- }
- frame_time = (uint64_t)1000 * ebml->framerate.den
- / ebml->framerate.num;
- ebml->segment_info_pos = ftello(ebml->stream);
- Ebml_StartSubElement(ebml, &startInfo, Info);
- Ebml_SerializeUnsigned(ebml, TimecodeScale, 1000000);
- Ebml_SerializeFloat(ebml, Segment_Duration,
- (double)(ebml->last_pts_ms + frame_time));
- Ebml_SerializeString(ebml, 0x4D80, version_string);
- Ebml_SerializeString(ebml, 0x5741, version_string);
- Ebml_EndSubElement(ebml, &startInfo);
- }
+ frame_time = (uint64_t)1000 * ebml->framerate.den
+ / ebml->framerate.num;
+ ebml->segment_info_pos = ftello(ebml->stream);
+ Ebml_StartSubElement(ebml, &startInfo, Info);
+ Ebml_SerializeUnsigned(ebml, TimecodeScale, 1000000);
+ Ebml_SerializeFloat(ebml, Segment_Duration,
+ (double)(ebml->last_pts_ms + frame_time));
+ Ebml_SerializeString(ebml, 0x4D80, version_string);
+ Ebml_SerializeString(ebml, 0x5741, version_string);
+ Ebml_EndSubElement(ebml, &startInfo);
+ }
}
@@ -710,1858 +686,2116 @@ static void
write_webm_file_header(EbmlGlobal *glob,
const vpx_codec_enc_cfg_t *cfg,
const struct vpx_rational *fps,
- stereo_format_t stereo_fmt)
-{
- {
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, EBML);
- Ebml_SerializeUnsigned(glob, EBMLVersion, 1);
- Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1);
- Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4);
- Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8);
- Ebml_SerializeString(glob, DocType, "webm");
- Ebml_SerializeUnsigned(glob, DocTypeVersion, 2);
- Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2);
- Ebml_EndSubElement(glob, &start);
- }
+ stereo_format_t stereo_fmt,
+ unsigned int fourcc) {
+ {
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, EBML);
+ Ebml_SerializeUnsigned(glob, EBMLVersion, 1);
+ Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1);
+ Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4);
+ Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8);
+ Ebml_SerializeString(glob, DocType, "webm");
+ Ebml_SerializeUnsigned(glob, DocTypeVersion, 2);
+ Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2);
+ Ebml_EndSubElement(glob, &start);
+ }
+ {
+ Ebml_StartSubElement(glob, &glob->startSegment, Segment);
+ glob->position_reference = ftello(glob->stream);
+ glob->framerate = *fps;
+ write_webm_seek_info(glob);
+
{
- Ebml_StartSubElement(glob, &glob->startSegment, Segment);
- glob->position_reference = ftello(glob->stream);
- glob->framerate = *fps;
- write_webm_seek_info(glob);
+ EbmlLoc trackStart;
+ glob->track_pos = ftello(glob->stream);
+ Ebml_StartSubElement(glob, &trackStart, Tracks);
+ {
+ unsigned int trackNumber = 1;
+ uint64_t trackID = 0;
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, TrackEntry);
+ Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
+ glob->track_id_pos = ftello(glob->stream);
+ Ebml_SerializeUnsigned32(glob, TrackUID, trackID);
+ Ebml_SerializeUnsigned(glob, TrackType, 1);
+ Ebml_SerializeString(glob, CodecID,
+ fourcc == VP8_FOURCC ? "V_VP8" : "V_VP9");
{
- EbmlLoc trackStart;
- glob->track_pos = ftello(glob->stream);
- Ebml_StartSubElement(glob, &trackStart, Tracks);
- {
- unsigned int trackNumber = 1;
- uint64_t trackID = 0;
-
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, TrackEntry);
- Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
- glob->track_id_pos = ftello(glob->stream);
- Ebml_SerializeUnsigned32(glob, TrackUID, trackID);
- Ebml_SerializeUnsigned(glob, TrackType, 1);
- Ebml_SerializeString(glob, CodecID, "V_VP8");
- {
- unsigned int pixelWidth = cfg->g_w;
- unsigned int pixelHeight = cfg->g_h;
- float frameRate = (float)fps->num/(float)fps->den;
-
- EbmlLoc videoStart;
- Ebml_StartSubElement(glob, &videoStart, Video);
- Ebml_SerializeUnsigned(glob, PixelWidth, pixelWidth);
- Ebml_SerializeUnsigned(glob, PixelHeight, pixelHeight);
- Ebml_SerializeUnsigned(glob, StereoMode, stereo_fmt);
- Ebml_SerializeFloat(glob, FrameRate, frameRate);
- Ebml_EndSubElement(glob, &videoStart);
- }
- Ebml_EndSubElement(glob, &start); /* Track Entry */
- }
- Ebml_EndSubElement(glob, &trackStart);
+ unsigned int pixelWidth = cfg->g_w;
+ unsigned int pixelHeight = cfg->g_h;
+ float frameRate = (float)fps->num / (float)fps->den;
+
+ EbmlLoc videoStart;
+ Ebml_StartSubElement(glob, &videoStart, Video);
+ Ebml_SerializeUnsigned(glob, PixelWidth, pixelWidth);
+ Ebml_SerializeUnsigned(glob, PixelHeight, pixelHeight);
+ Ebml_SerializeUnsigned(glob, StereoMode, stereo_fmt);
+ Ebml_SerializeFloat(glob, FrameRate, frameRate);
+ Ebml_EndSubElement(glob, &videoStart);
}
- /* segment element is open */
+ Ebml_EndSubElement(glob, &start); /* Track Entry */
+ }
+ Ebml_EndSubElement(glob, &trackStart);
}
+ /* segment element is open */
+ }
}
static void
write_webm_block(EbmlGlobal *glob,
const vpx_codec_enc_cfg_t *cfg,
- const vpx_codec_cx_pkt_t *pkt)
-{
- unsigned long block_length;
- unsigned char track_number;
- unsigned short block_timecode = 0;
- unsigned char flags;
- int64_t pts_ms;
- int start_cluster = 0, is_keyframe;
-
- /* Calculate the PTS of this frame in milliseconds */
- pts_ms = pkt->data.frame.pts * 1000
- * (uint64_t)cfg->g_timebase.num / (uint64_t)cfg->g_timebase.den;
- if(pts_ms <= glob->last_pts_ms)
- pts_ms = glob->last_pts_ms + 1;
- glob->last_pts_ms = pts_ms;
-
- /* Calculate the relative time of this block */
- if(pts_ms - glob->cluster_timecode > SHRT_MAX)
- start_cluster = 1;
- else
- block_timecode = (unsigned short)pts_ms - glob->cluster_timecode;
-
- is_keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY);
- if(start_cluster || is_keyframe)
- {
- if(glob->cluster_open)
- Ebml_EndSubElement(glob, &glob->startCluster);
-
- /* Open the new cluster */
- block_timecode = 0;
- glob->cluster_open = 1;
- glob->cluster_timecode = (uint32_t)pts_ms;
- glob->cluster_pos = ftello(glob->stream);
- Ebml_StartSubElement(glob, &glob->startCluster, Cluster); /* cluster */
- Ebml_SerializeUnsigned(glob, Timecode, glob->cluster_timecode);
-
- /* Save a cue point if this is a keyframe. */
- if(is_keyframe)
- {
- struct cue_entry *cue, *new_cue_list;
-
- new_cue_list = realloc(glob->cue_list,
- (glob->cues+1) * sizeof(struct cue_entry));
- if(new_cue_list)
- glob->cue_list = new_cue_list;
- else
- fatal("Failed to realloc cue list.");
-
- cue = &glob->cue_list[glob->cues];
- cue->time = glob->cluster_timecode;
- cue->loc = glob->cluster_pos;
- glob->cues++;
- }
+ const vpx_codec_cx_pkt_t *pkt) {
+ unsigned long block_length;
+ unsigned char track_number;
+ unsigned short block_timecode = 0;
+ unsigned char flags;
+ int64_t pts_ms;
+ int start_cluster = 0, is_keyframe;
+
+ /* Calculate the PTS of this frame in milliseconds */
+ pts_ms = pkt->data.frame.pts * 1000
+ * (uint64_t)cfg->g_timebase.num / (uint64_t)cfg->g_timebase.den;
+ if (pts_ms <= glob->last_pts_ms)
+ pts_ms = glob->last_pts_ms + 1;
+ glob->last_pts_ms = pts_ms;
+
+ /* Calculate the relative time of this block */
+ if (pts_ms - glob->cluster_timecode > SHRT_MAX)
+ start_cluster = 1;
+ else
+ block_timecode = (unsigned short)pts_ms - glob->cluster_timecode;
+
+ is_keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY);
+ if (start_cluster || is_keyframe) {
+ if (glob->cluster_open)
+ Ebml_EndSubElement(glob, &glob->startCluster);
+
+ /* Open the new cluster */
+ block_timecode = 0;
+ glob->cluster_open = 1;
+ glob->cluster_timecode = (uint32_t)pts_ms;
+ glob->cluster_pos = ftello(glob->stream);
+ Ebml_StartSubElement(glob, &glob->startCluster, Cluster); /* cluster */
+ Ebml_SerializeUnsigned(glob, Timecode, glob->cluster_timecode);
+
+ /* Save a cue point if this is a keyframe. */
+ if (is_keyframe) {
+ struct cue_entry *cue, *new_cue_list;
+
+ new_cue_list = realloc(glob->cue_list,
+ (glob->cues + 1) * sizeof(struct cue_entry));
+ if (new_cue_list)
+ glob->cue_list = new_cue_list;
+ else
+ fatal("Failed to realloc cue list.");
+
+ cue = &glob->cue_list[glob->cues];
+ cue->time = glob->cluster_timecode;
+ cue->loc = glob->cluster_pos;
+ glob->cues++;
}
+ }
- /* Write the Simple Block */
- Ebml_WriteID(glob, SimpleBlock);
+ /* Write the Simple Block */
+ Ebml_WriteID(glob, SimpleBlock);
- block_length = (unsigned long)pkt->data.frame.sz + 4;
- block_length |= 0x10000000;
- Ebml_Serialize(glob, &block_length, sizeof(block_length), 4);
+ block_length = (unsigned long)pkt->data.frame.sz + 4;
+ block_length |= 0x10000000;
+ Ebml_Serialize(glob, &block_length, sizeof(block_length), 4);
- track_number = 1;
- track_number |= 0x80;
- Ebml_Write(glob, &track_number, 1);
+ track_number = 1;
+ track_number |= 0x80;
+ Ebml_Write(glob, &track_number, 1);
- Ebml_Serialize(glob, &block_timecode, sizeof(block_timecode), 2);
+ Ebml_Serialize(glob, &block_timecode, sizeof(block_timecode), 2);
- flags = 0;
- if(is_keyframe)
- flags |= 0x80;
- if(pkt->data.frame.flags & VPX_FRAME_IS_INVISIBLE)
- flags |= 0x08;
- Ebml_Write(glob, &flags, 1);
+ flags = 0;
+ if (is_keyframe)
+ flags |= 0x80;
+ if (pkt->data.frame.flags & VPX_FRAME_IS_INVISIBLE)
+ flags |= 0x08;
+ Ebml_Write(glob, &flags, 1);
- Ebml_Write(glob, pkt->data.frame.buf, (unsigned long)pkt->data.frame.sz);
+ Ebml_Write(glob, pkt->data.frame.buf, (unsigned long)pkt->data.frame.sz);
}
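/* Resulting SimpleBlock layout, after the element ID written above:
 *
 *   4 bytes  block length OR'd with 0x10000000 (a 4-byte EBML size)
 *   1 byte   track number | 0x80
 *   2 bytes  timecode relative to the open cluster (big-endian)
 *   1 byte   flags: 0x80 = keyframe, 0x08 = invisible frame
 *   N bytes  compressed frame payload
 */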
static void
-write_webm_file_footer(EbmlGlobal *glob, long hash)
-{
+write_webm_file_footer(EbmlGlobal *glob, long hash) {
- if(glob->cluster_open)
- Ebml_EndSubElement(glob, &glob->startCluster);
+ if (glob->cluster_open)
+ Ebml_EndSubElement(glob, &glob->startCluster);
- {
- EbmlLoc start;
- unsigned int i;
+ {
+ EbmlLoc start;
+ unsigned int i;
- glob->cue_pos = ftello(glob->stream);
- Ebml_StartSubElement(glob, &start, Cues);
- for(i=0; i<glob->cues; i++)
- {
- struct cue_entry *cue = &glob->cue_list[i];
- EbmlLoc start;
+ glob->cue_pos = ftello(glob->stream);
+ Ebml_StartSubElement(glob, &start, Cues);
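+ /* Matroska cue positions are stored relative to the start of the
+ * segment payload, hence the position_reference subtraction below.
+ */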
+ for (i = 0; i < glob->cues; i++) {
+ struct cue_entry *cue = &glob->cue_list[i];
+ EbmlLoc start;
- Ebml_StartSubElement(glob, &start, CuePoint);
- {
- EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, CuePoint);
+ {
+ EbmlLoc start;
- Ebml_SerializeUnsigned(glob, CueTime, cue->time);
+ Ebml_SerializeUnsigned(glob, CueTime, cue->time);
- Ebml_StartSubElement(glob, &start, CueTrackPositions);
- Ebml_SerializeUnsigned(glob, CueTrack, 1);
- Ebml_SerializeUnsigned64(glob, CueClusterPosition,
- cue->loc - glob->position_reference);
- Ebml_EndSubElement(glob, &start);
- }
- Ebml_EndSubElement(glob, &start);
- }
+ Ebml_StartSubElement(glob, &start, CueTrackPositions);
+ Ebml_SerializeUnsigned(glob, CueTrack, 1);
+ Ebml_SerializeUnsigned64(glob, CueClusterPosition,
+ cue->loc - glob->position_reference);
Ebml_EndSubElement(glob, &start);
+ }
+ Ebml_EndSubElement(glob, &start);
}
+ Ebml_EndSubElement(glob, &start);
+ }
- Ebml_EndSubElement(glob, &glob->startSegment);
+ Ebml_EndSubElement(glob, &glob->startSegment);
- /* Patch up the seek info block */
- write_webm_seek_info(glob);
+ /* Patch up the seek info block */
+ write_webm_seek_info(glob);
- /* Patch up the track id */
- fseeko(glob->stream, glob->track_id_pos, SEEK_SET);
- Ebml_SerializeUnsigned32(glob, TrackUID, glob->debug ? 0xDEADBEEF : hash);
+ /* Patch up the track id */
+ fseeko(glob->stream, glob->track_id_pos, SEEK_SET);
+ Ebml_SerializeUnsigned32(glob, TrackUID, glob->debug ? 0xDEADBEEF : hash);
- fseeko(glob->stream, 0, SEEK_END);
+ fseeko(glob->stream, 0, SEEK_END);
}
/* Murmur hash derived from public domain reference implementation at
- * http://sites.google.com/site/murmurhash/
+ * http://sites.google.com/site/murmurhash/
*/
-static unsigned int murmur ( const void * key, int len, unsigned int seed )
-{
- const unsigned int m = 0x5bd1e995;
- const int r = 24;
-
- unsigned int h = seed ^ len;
+static unsigned int murmur(const void *key, int len, unsigned int seed) {
+ const unsigned int m = 0x5bd1e995;
+ const int r = 24;
- const unsigned char * data = (const unsigned char *)key;
-
- while(len >= 4)
- {
- unsigned int k;
+ unsigned int h = seed ^ len;
- k = data[0];
- k |= data[1] << 8;
- k |= data[2] << 16;
- k |= data[3] << 24;
+ const unsigned char *data = (const unsigned char *)key;
- k *= m;
- k ^= k >> r;
- k *= m;
+ while (len >= 4) {
+ unsigned int k;
- h *= m;
- h ^= k;
+ k = data[0];
+ k |= data[1] << 8;
+ k |= data[2] << 16;
+ k |= data[3] << 24;
- data += 4;
- len -= 4;
- }
-
- switch(len)
- {
- case 3: h ^= data[2] << 16;
- case 2: h ^= data[1] << 8;
- case 1: h ^= data[0];
- h *= m;
- };
+ k *= m;
+ k ^= k >> r;
+ k *= m;
- h ^= h >> 13;
h *= m;
- h ^= h >> 15;
-
- return h;
+ h ^= k;
+
+ data += 4;
+ len -= 4;
+ }
+
+ switch (len) {
+ case 3:
+ h ^= data[2] << 16;
+ /* fall through */
+ case 2:
+ h ^= data[1] << 8;
+ /* fall through */
+ case 1:
+ h ^= data[0];
+ h *= m;
+ }
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+
+ return h;
}
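+/* Illustrative use (assumed from the TrackUID patching above): hash each
+ * encoded frame into a running per-stream value and hand the result to
+ * write_webm_file_footer(), e.g.
+ *   stream->hash = murmur(pkt->data.frame.buf,
+ *                         (int)pkt->data.frame.sz, stream->hash);
+ */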
#include "math.h"
+#define MAX_PSNR 100
+static double vp8_mse2psnr(double Samples, double Peak, double Mse) {
+ double psnr;
-static double vp8_mse2psnr(double Samples, double Peak, double Mse)
-{
- double psnr;
-
- if ((double)Mse > 0.0)
- psnr = 10.0 * log10(Peak * Peak * Samples / Mse);
- else
- psnr = 60; /* Limit to prevent / 0 */
+ if ((double)Mse > 0.0)
+ psnr = 10.0 * log10(Peak * Peak * Samples / Mse);
+ else
+ psnr = MAX_PSNR; /* Limit to prevent / 0 */
- if (psnr > 60)
- psnr = 60;
+ if (psnr > MAX_PSNR)
+ psnr = MAX_PSNR;
- return psnr;
+ return psnr;
}
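+/* Sanity check (illustrative): with Samples == 1, Peak == 255 and
+ * Mse == 1.0 this reduces to 10 * log10(255 * 255), about 48.13 dB;
+ * Mse == 0.0 clamps the result to MAX_PSNR instead of dividing by zero.
+ */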
#include "args.h"
static const arg_def_t debugmode = ARG_DEF("D", "debug", 0,
- "Debug mode (makes output deterministic)");
+ "Debug mode (makes output deterministic)");
static const arg_def_t outputfile = ARG_DEF("o", "output", 1,
- "Output filename");
+ "Output filename");
static const arg_def_t use_yv12 = ARG_DEF(NULL, "yv12", 0,
- "Input file is YV12 ");
+ "Input file is YV12 ");
static const arg_def_t use_i420 = ARG_DEF(NULL, "i420", 0,
- "Input file is I420 (default)");
+ "Input file is I420 (default)");
static const arg_def_t codecarg = ARG_DEF(NULL, "codec", 1,
- "Codec to use");
+ "Codec to use");
static const arg_def_t passes = ARG_DEF("p", "passes", 1,
- "Number of passes (1/2)");
+ "Number of passes (1/2)");
static const arg_def_t pass_arg = ARG_DEF(NULL, "pass", 1,
- "Pass to execute (1/2)");
+ "Pass to execute (1/2)");
static const arg_def_t fpf_name = ARG_DEF(NULL, "fpf", 1,
- "First pass statistics file name");
+ "First pass statistics file name");
static const arg_def_t limit = ARG_DEF(NULL, "limit", 1,
"Stop encoding after n input frames");
+static const arg_def_t skip = ARG_DEF(NULL, "skip", 1,
+ "Skip the first n input frames");
static const arg_def_t deadline = ARG_DEF("d", "deadline", 1,
- "Deadline per frame (usec)");
+ "Deadline per frame (usec)");
static const arg_def_t best_dl = ARG_DEF(NULL, "best", 0,
- "Use Best Quality Deadline");
+ "Use Best Quality Deadline");
static const arg_def_t good_dl = ARG_DEF(NULL, "good", 0,
- "Use Good Quality Deadline");
+ "Use Good Quality Deadline");
static const arg_def_t rt_dl = ARG_DEF(NULL, "rt", 0,
- "Use Realtime Quality Deadline");
+ "Use Realtime Quality Deadline");
static const arg_def_t quietarg = ARG_DEF("q", "quiet", 0,
- "Do not print encode progress");
+ "Do not print encode progress");
static const arg_def_t verbosearg = ARG_DEF("v", "verbose", 0,
- "Show encoder parameters");
+ "Show encoder parameters");
static const arg_def_t psnrarg = ARG_DEF(NULL, "psnr", 0,
- "Show PSNR in status line");
+ "Show PSNR in status line");
+enum TestDecodeFatality {
+ TEST_DECODE_OFF,
+ TEST_DECODE_FATAL,
+ TEST_DECODE_WARN,
+};
+static const struct arg_enum_list test_decode_enum[] = {
+ {"off", TEST_DECODE_OFF},
+ {"fatal", TEST_DECODE_FATAL},
+ {"warn", TEST_DECODE_WARN},
+ {NULL, 0}
+};
+static const arg_def_t recontest = ARG_DEF_ENUM(NULL, "test-decode", 1,
+ "Test encode/decode mismatch",
+ test_decode_enum);
static const arg_def_t framerate = ARG_DEF(NULL, "fps", 1,
- "Stream frame rate (rate/scale)");
+ "Stream frame rate (rate/scale)");
static const arg_def_t use_ivf = ARG_DEF(NULL, "ivf", 0,
- "Output IVF (default is WebM)");
+ "Output IVF (default is WebM)");
static const arg_def_t out_part = ARG_DEF("P", "output-partitions", 0,
- "Makes encoder output partitions. Requires IVF output!");
+ "Makes encoder output partitions. Requires IVF output!");
static const arg_def_t q_hist_n = ARG_DEF(NULL, "q-hist", 1,
- "Show quantizer histogram (n-buckets)");
+ "Show quantizer histogram (n-buckets)");
static const arg_def_t rate_hist_n = ARG_DEF(NULL, "rate-hist", 1,
- "Show rate histogram (n-buckets)");
-static const arg_def_t *main_args[] =
-{
- &debugmode,
- &outputfile, &codecarg, &passes, &pass_arg, &fpf_name, &limit, &deadline,
- &best_dl, &good_dl, &rt_dl,
- &quietarg, &verbosearg, &psnrarg, &use_ivf, &out_part, &q_hist_n, &rate_hist_n,
- NULL
+ "Show rate histogram (n-buckets)");
+static const arg_def_t *main_args[] = {
+ &debugmode,
+ &outputfile, &codecarg, &passes, &pass_arg, &fpf_name, &limit, &skip,
+ &deadline, &best_dl, &good_dl, &rt_dl,
+ &quietarg, &verbosearg, &psnrarg, &use_ivf, &out_part, &q_hist_n, &rate_hist_n,
+ NULL
};
static const arg_def_t usage = ARG_DEF("u", "usage", 1,
- "Usage profile number to use");
+ "Usage profile number to use");
static const arg_def_t threads = ARG_DEF("t", "threads", 1,
- "Max number of threads to use");
+ "Max number of threads to use");
static const arg_def_t profile = ARG_DEF(NULL, "profile", 1,
- "Bitstream profile number to use");
+ "Bitstream profile number to use");
static const arg_def_t width = ARG_DEF("w", "width", 1,
- "Frame width");
+ "Frame width");
static const arg_def_t height = ARG_DEF("h", "height", 1,
- "Frame height");
+ "Frame height");
static const struct arg_enum_list stereo_mode_enum[] = {
- {"mono" , STEREO_FORMAT_MONO},
- {"left-right", STEREO_FORMAT_LEFT_RIGHT},
- {"bottom-top", STEREO_FORMAT_BOTTOM_TOP},
- {"top-bottom", STEREO_FORMAT_TOP_BOTTOM},
- {"right-left", STEREO_FORMAT_RIGHT_LEFT},
- {NULL, 0}
+ {"mono", STEREO_FORMAT_MONO},
+ {"left-right", STEREO_FORMAT_LEFT_RIGHT},
+ {"bottom-top", STEREO_FORMAT_BOTTOM_TOP},
+ {"top-bottom", STEREO_FORMAT_TOP_BOTTOM},
+ {"right-left", STEREO_FORMAT_RIGHT_LEFT},
+ {NULL, 0}
};
static const arg_def_t stereo_mode = ARG_DEF_ENUM(NULL, "stereo-mode", 1,
- "Stereo 3D video format", stereo_mode_enum);
+ "Stereo 3D video format", stereo_mode_enum);
static const arg_def_t timebase = ARG_DEF(NULL, "timebase", 1,
- "Output timestamp precision (fractional seconds)");
+ "Output timestamp precision (fractional seconds)");
static const arg_def_t error_resilient = ARG_DEF(NULL, "error-resilient", 1,
- "Enable error resiliency features");
+ "Enable error resiliency features");
static const arg_def_t lag_in_frames = ARG_DEF(NULL, "lag-in-frames", 1,
- "Max number of frames to lag");
+ "Max number of frames to lag");
-static const arg_def_t *global_args[] =
-{
- &use_yv12, &use_i420, &usage, &threads, &profile,
- &width, &height, &stereo_mode, &timebase, &framerate, &error_resilient,
- &lag_in_frames, NULL
+static const arg_def_t *global_args[] = {
+ &use_yv12, &use_i420, &usage, &threads, &profile,
+ &width, &height, &stereo_mode, &timebase, &framerate,
+ &error_resilient,
+ &lag_in_frames, NULL
};
static const arg_def_t dropframe_thresh = ARG_DEF(NULL, "drop-frame", 1,
- "Temporal resampling threshold (buf %)");
+ "Temporal resampling threshold (buf %)");
static const arg_def_t resize_allowed = ARG_DEF(NULL, "resize-allowed", 1,
- "Spatial resampling enabled (bool)");
+ "Spatial resampling enabled (bool)");
static const arg_def_t resize_up_thresh = ARG_DEF(NULL, "resize-up", 1,
- "Upscale threshold (buf %)");
+ "Upscale threshold (buf %)");
static const arg_def_t resize_down_thresh = ARG_DEF(NULL, "resize-down", 1,
- "Downscale threshold (buf %)");
+ "Downscale threshold (buf %)");
static const struct arg_enum_list end_usage_enum[] = {
- {"vbr", VPX_VBR},
- {"cbr", VPX_CBR},
- {"cq", VPX_CQ},
- {NULL, 0}
+ {"vbr", VPX_VBR},
+ {"cbr", VPX_CBR},
+ {"cq", VPX_CQ},
+ {"q", VPX_Q},
+ {NULL, 0}
};
static const arg_def_t end_usage = ARG_DEF_ENUM(NULL, "end-usage", 1,
- "Rate control mode", end_usage_enum);
+ "Rate control mode", end_usage_enum);
static const arg_def_t target_bitrate = ARG_DEF(NULL, "target-bitrate", 1,
- "Bitrate (kbps)");
+ "Bitrate (kbps)");
static const arg_def_t min_quantizer = ARG_DEF(NULL, "min-q", 1,
- "Minimum (best) quantizer");
+ "Minimum (best) quantizer");
static const arg_def_t max_quantizer = ARG_DEF(NULL, "max-q", 1,
- "Maximum (worst) quantizer");
+ "Maximum (worst) quantizer");
static const arg_def_t undershoot_pct = ARG_DEF(NULL, "undershoot-pct", 1,
- "Datarate undershoot (min) target (%)");
+ "Datarate undershoot (min) target (%)");
static const arg_def_t overshoot_pct = ARG_DEF(NULL, "overshoot-pct", 1,
- "Datarate overshoot (max) target (%)");
+ "Datarate overshoot (max) target (%)");
static const arg_def_t buf_sz = ARG_DEF(NULL, "buf-sz", 1,
- "Client buffer size (ms)");
+ "Client buffer size (ms)");
static const arg_def_t buf_initial_sz = ARG_DEF(NULL, "buf-initial-sz", 1,
- "Client initial buffer size (ms)");
+ "Client initial buffer size (ms)");
static const arg_def_t buf_optimal_sz = ARG_DEF(NULL, "buf-optimal-sz", 1,
- "Client optimal buffer size (ms)");
-static const arg_def_t *rc_args[] =
-{
- &dropframe_thresh, &resize_allowed, &resize_up_thresh, &resize_down_thresh,
- &end_usage, &target_bitrate, &min_quantizer, &max_quantizer,
- &undershoot_pct, &overshoot_pct, &buf_sz, &buf_initial_sz, &buf_optimal_sz,
- NULL
+ "Client optimal buffer size (ms)");
+static const arg_def_t *rc_args[] = {
+ &dropframe_thresh, &resize_allowed, &resize_up_thresh, &resize_down_thresh,
+ &end_usage, &target_bitrate, &min_quantizer, &max_quantizer,
+ &undershoot_pct, &overshoot_pct, &buf_sz, &buf_initial_sz, &buf_optimal_sz,
+ NULL
};
static const arg_def_t bias_pct = ARG_DEF(NULL, "bias-pct", 1,
- "CBR/VBR bias (0=CBR, 100=VBR)");
+ "CBR/VBR bias (0=CBR, 100=VBR)");
static const arg_def_t minsection_pct = ARG_DEF(NULL, "minsection-pct", 1,
- "GOP min bitrate (% of target)");
+ "GOP min bitrate (% of target)");
static const arg_def_t maxsection_pct = ARG_DEF(NULL, "maxsection-pct", 1,
- "GOP max bitrate (% of target)");
-static const arg_def_t *rc_twopass_args[] =
-{
- &bias_pct, &minsection_pct, &maxsection_pct, NULL
+ "GOP max bitrate (% of target)");
+static const arg_def_t *rc_twopass_args[] = {
+ &bias_pct, &minsection_pct, &maxsection_pct, NULL
};
static const arg_def_t kf_min_dist = ARG_DEF(NULL, "kf-min-dist", 1,
- "Minimum keyframe interval (frames)");
+ "Minimum keyframe interval (frames)");
static const arg_def_t kf_max_dist = ARG_DEF(NULL, "kf-max-dist", 1,
- "Maximum keyframe interval (frames)");
+ "Maximum keyframe interval (frames)");
static const arg_def_t kf_disabled = ARG_DEF(NULL, "disable-kf", 0,
- "Disable keyframe placement");
-static const arg_def_t *kf_args[] =
-{
- &kf_min_dist, &kf_max_dist, &kf_disabled, NULL
+ "Disable keyframe placement");
+static const arg_def_t *kf_args[] = {
+ &kf_min_dist, &kf_max_dist, &kf_disabled, NULL
};
-#if CONFIG_VP8_ENCODER
static const arg_def_t noise_sens = ARG_DEF(NULL, "noise-sensitivity", 1,
- "Noise sensitivity (frames to blur)");
+ "Noise sensitivity (frames to blur)");
static const arg_def_t sharpness = ARG_DEF(NULL, "sharpness", 1,
- "Filter sharpness (0-7)");
+ "Filter sharpness (0-7)");
static const arg_def_t static_thresh = ARG_DEF(NULL, "static-thresh", 1,
- "Motion detection threshold");
-#endif
-
-#if CONFIG_VP8_ENCODER
+ "Motion detection threshold");
static const arg_def_t cpu_used = ARG_DEF(NULL, "cpu-used", 1,
- "CPU Used (-16..16)");
-#endif
-
-
-#if CONFIG_VP8_ENCODER
+ "CPU Used (-16..16)");
static const arg_def_t token_parts = ARG_DEF(NULL, "token-parts", 1,
"Number of token partitions to use, log2");
+static const arg_def_t tile_cols = ARG_DEF(NULL, "tile-columns", 1,
+ "Number of tile columns to use, log2");
+static const arg_def_t tile_rows = ARG_DEF(NULL, "tile-rows", 1,
+ "Number of tile rows to use, log2");
static const arg_def_t auto_altref = ARG_DEF(NULL, "auto-alt-ref", 1,
- "Enable automatic alt reference frames");
+ "Enable automatic alt reference frames");
static const arg_def_t arnr_maxframes = ARG_DEF(NULL, "arnr-maxframes", 1,
- "AltRef Max Frames");
+ "AltRef Max Frames");
static const arg_def_t arnr_strength = ARG_DEF(NULL, "arnr-strength", 1,
- "AltRef Strength");
+ "AltRef Strength");
static const arg_def_t arnr_type = ARG_DEF(NULL, "arnr-type", 1,
- "AltRef Type");
+ "AltRef Type");
static const struct arg_enum_list tuning_enum[] = {
- {"psnr", VP8_TUNE_PSNR},
- {"ssim", VP8_TUNE_SSIM},
- {NULL, 0}
+ {"psnr", VP8_TUNE_PSNR},
+ {"ssim", VP8_TUNE_SSIM},
+ {NULL, 0}
};
static const arg_def_t tune_ssim = ARG_DEF_ENUM(NULL, "tune", 1,
- "Material to favor", tuning_enum);
+ "Material to favor", tuning_enum);
static const arg_def_t cq_level = ARG_DEF(NULL, "cq-level", 1,
- "Constrained Quality Level");
+ "Constant/Constrained Quality level");
static const arg_def_t max_intra_rate_pct = ARG_DEF(NULL, "max-intra-rate", 1,
- "Max I-frame bitrate (pct)");
+ "Max I-frame bitrate (pct)");
+static const arg_def_t lossless = ARG_DEF(NULL, "lossless", 1, "Lossless mode");
+#if CONFIG_VP9_ENCODER
+static const arg_def_t frame_parallel_decoding = ARG_DEF(
+ NULL, "frame-parallel", 1, "Enable frame parallel decodability features");
+#endif
-static const arg_def_t *vp8_args[] =
-{
- &cpu_used, &auto_altref, &noise_sens, &sharpness, &static_thresh,
- &token_parts, &arnr_maxframes, &arnr_strength, &arnr_type,
- &tune_ssim, &cq_level, &max_intra_rate_pct, NULL
+#if CONFIG_VP8_ENCODER
+static const arg_def_t *vp8_args[] = {
+ &cpu_used, &auto_altref, &noise_sens, &sharpness, &static_thresh,
+ &token_parts, &arnr_maxframes, &arnr_strength, &arnr_type,
+ &tune_ssim, &cq_level, &max_intra_rate_pct,
+ NULL
};
-static const int vp8_arg_ctrl_map[] =
-{
- VP8E_SET_CPUUSED, VP8E_SET_ENABLEAUTOALTREF,
- VP8E_SET_NOISE_SENSITIVITY, VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD,
- VP8E_SET_TOKEN_PARTITIONS,
- VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH , VP8E_SET_ARNR_TYPE,
- VP8E_SET_TUNING, VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT, 0
+static const int vp8_arg_ctrl_map[] = {
+ VP8E_SET_CPUUSED, VP8E_SET_ENABLEAUTOALTREF,
+ VP8E_SET_NOISE_SENSITIVITY, VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD,
+ VP8E_SET_TOKEN_PARTITIONS,
+ VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH, VP8E_SET_ARNR_TYPE,
+ VP8E_SET_TUNING, VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ 0
+};
+#endif
+
+#if CONFIG_VP9_ENCODER
+static const arg_def_t *vp9_args[] = {
+ &cpu_used, &auto_altref, &noise_sens, &sharpness, &static_thresh,
+ &tile_cols, &tile_rows, &arnr_maxframes, &arnr_strength, &arnr_type,
+ &tune_ssim, &cq_level, &max_intra_rate_pct, &lossless,
+ &frame_parallel_decoding,
+ NULL
+};
+static const int vp9_arg_ctrl_map[] = {
+ VP8E_SET_CPUUSED, VP8E_SET_ENABLEAUTOALTREF,
+ VP8E_SET_NOISE_SENSITIVITY, VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD,
+ VP9E_SET_TILE_COLUMNS, VP9E_SET_TILE_ROWS,
+ VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH, VP8E_SET_ARNR_TYPE,
+ VP8E_SET_TUNING, VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ VP9E_SET_LOSSLESS, VP9E_SET_FRAME_PARALLEL_DECODING,
+ 0
};
#endif
static const arg_def_t *no_args[] = { NULL };
-static void usage_exit()
-{
- int i;
-
- fprintf(stderr, "Usage: %s <options> -o dst_filename src_filename \n",
- exec_name);
-
- fprintf(stderr, "\nOptions:\n");
- arg_show_usage(stdout, main_args);
- fprintf(stderr, "\nEncoder Global Options:\n");
- arg_show_usage(stdout, global_args);
- fprintf(stderr, "\nRate Control Options:\n");
- arg_show_usage(stdout, rc_args);
- fprintf(stderr, "\nTwopass Rate Control Options:\n");
- arg_show_usage(stdout, rc_twopass_args);
- fprintf(stderr, "\nKeyframe Placement Options:\n");
- arg_show_usage(stdout, kf_args);
+static void usage_exit() {
+ int i;
+
+ fprintf(stderr, "Usage: %s <options> -o dst_filename src_filename \n",
+ exec_name);
+
+ fprintf(stderr, "\nOptions:\n");
+ arg_show_usage(stderr, main_args);
+ fprintf(stderr, "\nEncoder Global Options:\n");
+ arg_show_usage(stderr, global_args);
+ fprintf(stderr, "\nRate Control Options:\n");
+ arg_show_usage(stderr, rc_args);
+ fprintf(stderr, "\nTwopass Rate Control Options:\n");
+ arg_show_usage(stderr, rc_twopass_args);
+ fprintf(stderr, "\nKeyframe Placement Options:\n");
+ arg_show_usage(stderr, kf_args);
#if CONFIG_VP8_ENCODER
- fprintf(stderr, "\nVP8 Specific Options:\n");
- arg_show_usage(stdout, vp8_args);
+ fprintf(stderr, "\nVP8 Specific Options:\n");
+ arg_show_usage(stderr, vp8_args);
+#endif
+#if CONFIG_VP9_ENCODER
+ fprintf(stderr, "\nVP9 Specific Options:\n");
+ arg_show_usage(stderr, vp9_args);
#endif
- fprintf(stderr, "\nStream timebase (--timebase):\n"
- " The desired precision of timestamps in the output, expressed\n"
- " in fractional seconds. Default is 1/1000.\n");
- fprintf(stderr, "\n"
- "Included encoders:\n"
- "\n");
-
- for (i = 0; i < sizeof(codecs) / sizeof(codecs[0]); i++)
- fprintf(stderr, " %-6s - %s\n",
- codecs[i].name,
- vpx_codec_iface_name(codecs[i].iface));
-
- exit(EXIT_FAILURE);
+ fprintf(stderr, "\nStream timebase (--timebase):\n"
+ " The desired precision of timestamps in the output, expressed\n"
+ " in fractional seconds. Default is 1/1000.\n");
+ fprintf(stderr, "\n"
+ "Included encoders:\n"
+ "\n");
+
+ for (i = 0; i < sizeof(codecs) / sizeof(codecs[0]); i++)
+ fprintf(stderr, " %-6s - %s\n",
+ codecs[i].name,
+ vpx_codec_iface_name(codecs[i].iface()));
+
+ exit(EXIT_FAILURE);
}
#define HIST_BAR_MAX 40
-struct hist_bucket
-{
- int low, high, count;
+struct hist_bucket {
+ int low, high, count;
};
static int merge_hist_buckets(struct hist_bucket *bucket,
int *buckets_,
- int max_buckets)
-{
- int small_bucket = 0, merge_bucket = INT_MAX, big_bucket=0;
- int buckets = *buckets_;
- int i;
-
- /* Find the extrema for this list of buckets */
- big_bucket = small_bucket = 0;
- for(i=0; i < buckets; i++)
- {
- if(bucket[i].count < bucket[small_bucket].count)
- small_bucket = i;
- if(bucket[i].count > bucket[big_bucket].count)
- big_bucket = i;
+ int max_buckets) {
+ int small_bucket = 0, merge_bucket = INT_MAX, big_bucket = 0;
+ int buckets = *buckets_;
+ int i;
+
+ /* Find the extrema for this list of buckets */
+ big_bucket = small_bucket = 0;
+ for (i = 0; i < buckets; i++) {
+ if (bucket[i].count < bucket[small_bucket].count)
+ small_bucket = i;
+ if (bucket[i].count > bucket[big_bucket].count)
+ big_bucket = i;
+ }
+
+ /* If we have too many buckets, merge the smallest with an adjacent
+ * bucket.
+ */
+ while (buckets > max_buckets) {
+ int last_bucket = buckets - 1;
+
+ /* merge the small bucket with an adjacent one. */
+ if (small_bucket == 0)
+ merge_bucket = 1;
+ else if (small_bucket == last_bucket)
+ merge_bucket = last_bucket - 1;
+ else if (bucket[small_bucket - 1].count < bucket[small_bucket + 1].count)
+ merge_bucket = small_bucket - 1;
+ else
+ merge_bucket = small_bucket + 1;
+
+ assert(abs(merge_bucket - small_bucket) <= 1);
+ assert(small_bucket < buckets);
+ assert(big_bucket < buckets);
+ assert(merge_bucket < buckets);
+
+ if (merge_bucket < small_bucket) {
+ bucket[merge_bucket].high = bucket[small_bucket].high;
+ bucket[merge_bucket].count += bucket[small_bucket].count;
+ } else {
+ bucket[small_bucket].high = bucket[merge_bucket].high;
+ bucket[small_bucket].count += bucket[merge_bucket].count;
+ merge_bucket = small_bucket;
}
- /* If we have too many buckets, merge the smallest with an adjacent
- * bucket.
- */
- while(buckets > max_buckets)
- {
- int last_bucket = buckets - 1;
-
- /* merge the small bucket with an adjacent one. */
- if(small_bucket == 0)
- merge_bucket = 1;
- else if(small_bucket == last_bucket)
- merge_bucket = last_bucket - 1;
- else if(bucket[small_bucket - 1].count < bucket[small_bucket + 1].count)
- merge_bucket = small_bucket - 1;
- else
- merge_bucket = small_bucket + 1;
-
- assert(abs(merge_bucket - small_bucket) <= 1);
- assert(small_bucket < buckets);
- assert(big_bucket < buckets);
- assert(merge_bucket < buckets);
-
- if(merge_bucket < small_bucket)
- {
- bucket[merge_bucket].high = bucket[small_bucket].high;
- bucket[merge_bucket].count += bucket[small_bucket].count;
- }
- else
- {
- bucket[small_bucket].high = bucket[merge_bucket].high;
- bucket[small_bucket].count += bucket[merge_bucket].count;
- merge_bucket = small_bucket;
- }
-
- assert(bucket[merge_bucket].low != bucket[merge_bucket].high);
-
- buckets--;
+ assert(bucket[merge_bucket].low != bucket[merge_bucket].high);
- /* Remove the merge_bucket from the list, and find the new small
- * and big buckets while we're at it
- */
- big_bucket = small_bucket = 0;
- for(i=0; i < buckets; i++)
- {
- if(i > merge_bucket)
- bucket[i] = bucket[i+1];
-
- if(bucket[i].count < bucket[small_bucket].count)
- small_bucket = i;
- if(bucket[i].count > bucket[big_bucket].count)
- big_bucket = i;
- }
+ buckets--;
+ /* Remove the merge_bucket from the list, and find the new small
+ * and big buckets while we're at it
+ */
+ big_bucket = small_bucket = 0;
+ for (i = 0; i < buckets; i++) {
+ if (i > merge_bucket)
+ bucket[i] = bucket[i + 1];
+
+ if (bucket[i].count < bucket[small_bucket].count)
+ small_bucket = i;
+ if (bucket[i].count > bucket[big_bucket].count)
+ big_bucket = i;
}
- *buckets_ = buckets;
- return bucket[big_bucket].count;
+ }
+
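+ /* The count of the most-populated surviving bucket is returned so that
+ * show_histogram() can use it as the scale for its bars.
+ */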
+ *buckets_ = buckets;
+ return bucket[big_bucket].count;
}
static void show_histogram(const struct hist_bucket *bucket,
int buckets,
int total,
- int scale)
-{
- const char *pat1, *pat2;
- int i;
-
- switch((int)(log(bucket[buckets-1].high)/log(10))+1)
- {
- case 1:
- case 2:
- pat1 = "%4d %2s: ";
- pat2 = "%4d-%2d: ";
- break;
- case 3:
- pat1 = "%5d %3s: ";
- pat2 = "%5d-%3d: ";
- break;
- case 4:
- pat1 = "%6d %4s: ";
- pat2 = "%6d-%4d: ";
- break;
- case 5:
- pat1 = "%7d %5s: ";
- pat2 = "%7d-%5d: ";
- break;
- case 6:
- pat1 = "%8d %6s: ";
- pat2 = "%8d-%6d: ";
- break;
- case 7:
- pat1 = "%9d %7s: ";
- pat2 = "%9d-%7d: ";
- break;
- default:
- pat1 = "%12d %10s: ";
- pat2 = "%12d-%10d: ";
- break;
- }
+ int scale) {
+ const char *pat1, *pat2;
+ int i;
+
+ switch ((int)(log(bucket[buckets - 1].high) / log(10)) + 1) {
+ case 1:
+ case 2:
+ pat1 = "%4d %2s: ";
+ pat2 = "%4d-%2d: ";
+ break;
+ case 3:
+ pat1 = "%5d %3s: ";
+ pat2 = "%5d-%3d: ";
+ break;
+ case 4:
+ pat1 = "%6d %4s: ";
+ pat2 = "%6d-%4d: ";
+ break;
+ case 5:
+ pat1 = "%7d %5s: ";
+ pat2 = "%7d-%5d: ";
+ break;
+ case 6:
+ pat1 = "%8d %6s: ";
+ pat2 = "%8d-%6d: ";
+ break;
+ case 7:
+ pat1 = "%9d %7s: ";
+ pat2 = "%9d-%7d: ";
+ break;
+ default:
+ pat1 = "%12d %10s: ";
+ pat2 = "%12d-%10d: ";
+ break;
+ }
+
+ for (i = 0; i < buckets; i++) {
+ int len;
+ int j;
+ float pct;
+
+ pct = (float)(100.0 * bucket[i].count / total);
+ len = HIST_BAR_MAX * bucket[i].count / scale;
+ if (len < 1)
+ len = 1;
+ assert(len <= HIST_BAR_MAX);
+
+ if (bucket[i].low == bucket[i].high)
+ fprintf(stderr, pat1, bucket[i].low, "");
+ else
+ fprintf(stderr, pat2, bucket[i].low, bucket[i].high);
- for(i=0; i<buckets; i++)
- {
- int len;
- int j;
- float pct;
-
- pct = (float)(100.0 * bucket[i].count / total);
- len = HIST_BAR_MAX * bucket[i].count / scale;
- if(len < 1)
- len = 1;
- assert(len <= HIST_BAR_MAX);
-
- if(bucket[i].low == bucket[i].high)
- fprintf(stderr, pat1, bucket[i].low, "");
- else
- fprintf(stderr, pat2, bucket[i].low, bucket[i].high);
-
- for(j=0; j<HIST_BAR_MAX; j++)
- fprintf(stderr, j<len?"=":" ");
- fprintf(stderr, "\t%5d (%6.2f%%)\n",bucket[i].count,pct);
- }
+ for (j = 0; j < HIST_BAR_MAX; j++)
+ fprintf(stderr, j < len ? "=" : " ");
+ fprintf(stderr, "\t%5d (%6.2f%%)\n", bucket[i].count, pct);
+ }
}
-static void show_q_histogram(const int counts[64], int max_buckets)
-{
- struct hist_bucket bucket[64];
- int buckets = 0;
- int total = 0;
- int scale;
- int i;
+static void show_q_histogram(const int counts[64], int max_buckets) {
+ struct hist_bucket bucket[64];
+ int buckets = 0;
+ int total = 0;
+ int scale;
+ int i;
- for(i=0; i<64; i++)
- {
- if(counts[i])
- {
- bucket[buckets].low = bucket[buckets].high = i;
- bucket[buckets].count = counts[i];
- buckets++;
- total += counts[i];
- }
+ for (i = 0; i < 64; i++) {
+ if (counts[i]) {
+ bucket[buckets].low = bucket[buckets].high = i;
+ bucket[buckets].count = counts[i];
+ buckets++;
+ total += counts[i];
}
+ }
- fprintf(stderr, "\nQuantizer Selection:\n");
- scale = merge_hist_buckets(bucket, &buckets, max_buckets);
- show_histogram(bucket, buckets, total, scale);
+ fprintf(stderr, "\nQuantizer Selection:\n");
+ scale = merge_hist_buckets(bucket, &buckets, max_buckets);
+ show_histogram(bucket, buckets, total, scale);
}
#define RATE_BINS (100)
-struct rate_hist
-{
- int64_t *pts;
- int *sz;
- int samples;
- int frames;
- struct hist_bucket bucket[RATE_BINS];
- int total;
+struct rate_hist {
+ int64_t *pts;
+ int *sz;
+ int samples;
+ int frames;
+ struct hist_bucket bucket[RATE_BINS];
+ int total;
};
static void init_rate_histogram(struct rate_hist *hist,
const vpx_codec_enc_cfg_t *cfg,
- const vpx_rational_t *fps)
-{
- int i;
-
- /* Determine the number of samples in the buffer. Use the file's framerate
- * to determine the number of frames in rc_buf_sz milliseconds, with an
- * adjustment (5/4) to account for alt-refs
- */
- hist->samples = cfg->rc_buf_sz * 5 / 4 * fps->num / fps->den / 1000;
-
- /* prevent division by zero */
- if (hist->samples == 0)
- hist->samples=1;
-
- hist->pts = calloc(hist->samples, sizeof(*hist->pts));
- hist->sz = calloc(hist->samples, sizeof(*hist->sz));
- for(i=0; i<RATE_BINS; i++)
- {
- hist->bucket[i].low = INT_MAX;
- hist->bucket[i].high = 0;
- hist->bucket[i].count = 0;
- }
+ const vpx_rational_t *fps) {
+ int i;
+
+ /* Determine the number of samples in the buffer. Use the file's framerate
+ * to determine the number of frames in rc_buf_sz milliseconds, with an
+ * adjustment (5/4) to account for alt-refs
+ */
+ hist->samples = cfg->rc_buf_sz * 5 / 4 * fps->num / fps->den / 1000;
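+ /* e.g. (illustrative) rc_buf_sz = 6000 ms at 30/1 fps:
+ * 6000 * 5 / 4 * 30 / 1 / 1000 = 225 samples.
+ */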
+
+ /* prevent division by zero */
+ if (hist->samples == 0)
+ hist->samples = 1;
+
+ hist->pts = calloc(hist->samples, sizeof(*hist->pts));
+ hist->sz = calloc(hist->samples, sizeof(*hist->sz));
+ for (i = 0; i < RATE_BINS; i++) {
+ hist->bucket[i].low = INT_MAX;
+ hist->bucket[i].high = 0;
+ hist->bucket[i].count = 0;
+ }
}
-static void destroy_rate_histogram(struct rate_hist *hist)
-{
- free(hist->pts);
- free(hist->sz);
+static void destroy_rate_histogram(struct rate_hist *hist) {
+ free(hist->pts);
+ free(hist->sz);
}
static void update_rate_histogram(struct rate_hist *hist,
const vpx_codec_enc_cfg_t *cfg,
- const vpx_codec_cx_pkt_t *pkt)
-{
- int i, idx;
- int64_t now, then, sum_sz = 0, avg_bitrate;
-
- now = pkt->data.frame.pts * 1000
- * (uint64_t)cfg->g_timebase.num / (uint64_t)cfg->g_timebase.den;
-
- idx = hist->frames++ % hist->samples;
- hist->pts[idx] = now;
- hist->sz[idx] = (int)pkt->data.frame.sz;
-
- if(now < cfg->rc_buf_initial_sz)
- return;
+ const vpx_codec_cx_pkt_t *pkt) {
+ int i, idx;
+ int64_t now, then, sum_sz = 0, avg_bitrate;
+
+ now = pkt->data.frame.pts * 1000
+ * (uint64_t)cfg->g_timebase.num / (uint64_t)cfg->g_timebase.den;
+
+ idx = hist->frames++ % hist->samples;
+ hist->pts[idx] = now;
+ hist->sz[idx] = (int)pkt->data.frame.sz;
+
+ if (now < cfg->rc_buf_initial_sz)
+ return;
+
+ then = now;
+
+ /* Sum the size over the past rc_buf_sz ms */
+ for (i = hist->frames; i > 0 && hist->frames - i < hist->samples; i--) {
+ int i_idx = (i - 1) % hist->samples;
+
+ then = hist->pts[i_idx];
+ if (now - then > cfg->rc_buf_sz)
+ break;
+ sum_sz += hist->sz[i_idx];
+ }
+
+ if (now == then)
+ return;
+
+ avg_bitrate = sum_sz * 8 * 1000 / (now - then);
+ idx = (int)(avg_bitrate * (RATE_BINS / 2) / (cfg->rc_target_bitrate * 1000));
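+ /* avg_bitrate is in bits/s (bytes * 8 * 1000 / ms) and rc_target_bitrate
+ * is in kbps, so the target rate lands in the middle bucket and roughly
+ * twice the target maps to the top of the range.
+ */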
+ if (idx < 0)
+ idx = 0;
+ if (idx > RATE_BINS - 1)
+ idx = RATE_BINS - 1;
+ if (hist->bucket[idx].low > avg_bitrate)
+ hist->bucket[idx].low = (int)avg_bitrate;
+ if (hist->bucket[idx].high < avg_bitrate)
+ hist->bucket[idx].high = (int)avg_bitrate;
+ hist->bucket[idx].count++;
+ hist->total++;
+}
- then = now;
- /* Sum the size over the past rc_buf_sz ms */
- for(i = hist->frames; i > 0 && hist->frames - i < hist->samples; i--)
- {
- int i_idx = (i-1) % hist->samples;
+static void show_rate_histogram(struct rate_hist *hist,
+ const vpx_codec_enc_cfg_t *cfg,
+ int max_buckets) {
+ int i, scale;
+ int buckets = 0;
+
+ for (i = 0; i < RATE_BINS; i++) {
+ if (hist->bucket[i].low == INT_MAX)
+ continue;
+ hist->bucket[buckets++] = hist->bucket[i];
+ }
+
+ fprintf(stderr, "\nRate (over %dms window):\n", cfg->rc_buf_sz);
+ scale = merge_hist_buckets(hist->bucket, &buckets, max_buckets);
+ show_histogram(hist->bucket, buckets, hist->total, scale);
+}
- then = hist->pts[i_idx];
- if(now - then > cfg->rc_buf_sz)
+#define mmin(a, b) ((a) < (b) ? (a) : (b))
+static void find_mismatch(vpx_image_t *img1, vpx_image_t *img2,
+ int yloc[4], int uloc[4], int vloc[4]) {
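+ /* Each of yloc/uloc/vloc reports the first mismatching sample in its
+ * plane as {row, col, img1 sample, img2 sample}, or four -1s when the
+ * plane matches.
+ */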
+ const unsigned int bsize = 64;
+ const unsigned int bsizey = bsize >> img1->y_chroma_shift;
+ const unsigned int bsizex = bsize >> img1->x_chroma_shift;
+ const int c_w = (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift;
+ const int c_h = (img1->d_h + img1->y_chroma_shift) >> img1->y_chroma_shift;
+ unsigned int match = 1;
+ unsigned int i, j;
+ yloc[0] = yloc[1] = yloc[2] = yloc[3] = -1;
+ for (i = 0, match = 1; match && i < img1->d_h; i += bsize) {
+ for (j = 0; match && j < img1->d_w; j += bsize) {
+ int k, l;
+ int si = mmin(i + bsize, img1->d_h) - i;
+ int sj = mmin(j + bsize, img1->d_w) - j;
+ for (k = 0; match && k < si; k++)
+ for (l = 0; match && l < sj; l++) {
+ if (*(img1->planes[VPX_PLANE_Y] +
+ (i + k) * img1->stride[VPX_PLANE_Y] + j + l) !=
+ *(img2->planes[VPX_PLANE_Y] +
+ (i + k) * img2->stride[VPX_PLANE_Y] + j + l)) {
+ yloc[0] = i + k;
+ yloc[1] = j + l;
+ yloc[2] = *(img1->planes[VPX_PLANE_Y] +
+ (i + k) * img1->stride[VPX_PLANE_Y] + j + l);
+ yloc[3] = *(img2->planes[VPX_PLANE_Y] +
+ (i + k) * img2->stride[VPX_PLANE_Y] + j + l);
+ match = 0;
break;
- sum_sz += hist->sz[i_idx];
+ }
+ }
}
-
- if (now == then)
- return;
-
- avg_bitrate = sum_sz * 8 * 1000 / (now - then);
- idx = (int)(avg_bitrate * (RATE_BINS/2) / (cfg->rc_target_bitrate * 1000));
- if(idx < 0)
- idx = 0;
- if(idx > RATE_BINS-1)
- idx = RATE_BINS-1;
- if(hist->bucket[idx].low > avg_bitrate)
- hist->bucket[idx].low = (int)avg_bitrate;
- if(hist->bucket[idx].high < avg_bitrate)
- hist->bucket[idx].high = (int)avg_bitrate;
- hist->bucket[idx].count++;
- hist->total++;
+ }
+
+ uloc[0] = uloc[1] = uloc[2] = uloc[3] = -1;
+ for (i = 0, match = 1; match && i < c_h; i += bsizey) {
+ for (j = 0; match && j < c_w; j += bsizex) {
+ int k, l;
+ int si = mmin(i + bsizey, c_h) - i;
+ int sj = mmin(j + bsizex, c_w) - j;
+ for (k = 0; match && k < si; k++)
+ for (l = 0; match && l < sj; l++) {
+ if (*(img1->planes[VPX_PLANE_U] +
+ (i + k) * img1->stride[VPX_PLANE_U] + j + l) !=
+ *(img2->planes[VPX_PLANE_U] +
+ (i + k) * img2->stride[VPX_PLANE_U] + j + l)) {
+ uloc[0] = i + k;
+ uloc[1] = j + l;
+ uloc[2] = *(img1->planes[VPX_PLANE_U] +
+ (i + k) * img1->stride[VPX_PLANE_U] + j + l);
+ uloc[3] = *(img2->planes[VPX_PLANE_U] +
+ (i + k) * img2->stride[VPX_PLANE_U] + j + l);
+ match = 0;
+ break;
+ }
+ }
+ }
+ }
+ vloc[0] = vloc[1] = vloc[2] = vloc[3] = -1;
+ for (i = 0, match = 1; match && i < c_h; i += bsizey) {
+ for (j = 0; match && j < c_w; j += bsizex) {
+ int k, l;
+ int si = mmin(i + bsizey, c_h) - i;
+ int sj = mmin(j + bsizex, c_w) - j;
+ for (k = 0; match && k < si; k++)
+ for (l = 0; match && l < sj; l++) {
+ if (*(img1->planes[VPX_PLANE_V] +
+ (i + k) * img1->stride[VPX_PLANE_V] + j + l) !=
+ *(img2->planes[VPX_PLANE_V] +
+ (i + k) * img2->stride[VPX_PLANE_V] + j + l)) {
+ vloc[0] = i + k;
+ vloc[1] = j + l;
+ vloc[2] = *(img1->planes[VPX_PLANE_V] +
+ (i + k) * img1->stride[VPX_PLANE_V] + j + l);
+ vloc[3] = *(img2->planes[VPX_PLANE_V] +
+ (i + k) * img2->stride[VPX_PLANE_V] + j + l);
+ match = 0;
+ break;
+ }
+ }
+ }
+ }
}
-
-static void show_rate_histogram(struct rate_hist *hist,
- const vpx_codec_enc_cfg_t *cfg,
- int max_buckets)
+static int compare_img(vpx_image_t *img1, vpx_image_t *img2)
{
- int i, scale;
- int buckets = 0;
+ const int c_w = (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift;
+ const int c_h = (img1->d_h + img1->y_chroma_shift) >> img1->y_chroma_shift;
+ int match = 1;
+ unsigned int i;
- for(i = 0; i < RATE_BINS; i++)
- {
- if(hist->bucket[i].low == INT_MAX)
- continue;
- hist->bucket[buckets++] = hist->bucket[i];
- }
+ match &= (img1->fmt == img2->fmt);
+ match &= (img1->w == img2->w);
+ match &= (img1->h == img2->h);
+
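+ /* A nonzero result means the two images agree in format, dimensions and
+ * in every displayed Y/U/V sample.
+ */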
+ for (i = 0; i < img1->d_h; i++)
+ match &= (memcmp(img1->planes[VPX_PLANE_Y]+i*img1->stride[VPX_PLANE_Y],
+ img2->planes[VPX_PLANE_Y]+i*img2->stride[VPX_PLANE_Y],
+ img1->d_w) == 0);
+
+ for (i = 0; i < c_h; i++)
+ match &= (memcmp(img1->planes[VPX_PLANE_U]+i*img1->stride[VPX_PLANE_U],
+ img2->planes[VPX_PLANE_U]+i*img2->stride[VPX_PLANE_U],
+ c_w) == 0);
+
+ for (i = 0; i < c_h; i++)
+ match &= (memcmp(img1->planes[VPX_PLANE_V]+i*img1->stride[VPX_PLANE_V],
+ img2->planes[VPX_PLANE_V]+i*img2->stride[VPX_PLANE_V],
+ c_w) == 0);
- fprintf(stderr, "\nRate (over %dms window):\n", cfg->rc_buf_sz);
- scale = merge_hist_buckets(hist->bucket, &buckets, max_buckets);
- show_histogram(hist->bucket, buckets, hist->total, scale);
+ return match;
}
+
#define NELEMENTS(x) (sizeof(x)/sizeof(x[0]))
+#define MAX(x,y) ((x)>(y)?(x):(y))
+#if CONFIG_VP8_ENCODER && !CONFIG_VP9_ENCODER
#define ARG_CTRL_CNT_MAX NELEMENTS(vp8_arg_ctrl_map)
-
+#elif !CONFIG_VP8_ENCODER && CONFIG_VP9_ENCODER
+#define ARG_CTRL_CNT_MAX NELEMENTS(vp9_arg_ctrl_map)
+#else
+#define ARG_CTRL_CNT_MAX MAX(NELEMENTS(vp8_arg_ctrl_map), \
+ NELEMENTS(vp9_arg_ctrl_map))
+#endif
/* Configuration elements common to all streams */
-struct global_config
-{
- const struct codec_item *codec;
- int passes;
- int pass;
- int usage;
- int deadline;
- int use_i420;
- int quiet;
- int verbose;
- int limit;
- int show_psnr;
- int have_framerate;
- struct vpx_rational framerate;
- int out_part;
- int debug;
- int show_q_hist_buckets;
- int show_rate_hist_buckets;
+struct global_config {
+ const struct codec_item *codec;
+ int passes;
+ int pass;
+ int usage;
+ int deadline;
+ int use_i420;
+ int quiet;
+ int verbose;
+ int limit;
+ int skip_frames;
+ int show_psnr;
+ enum TestDecodeFatality test_decode;
+ int have_framerate;
+ struct vpx_rational framerate;
+ int out_part;
+ int debug;
+ int show_q_hist_buckets;
+ int show_rate_hist_buckets;
};
/* Per-stream configuration */
-struct stream_config
-{
- struct vpx_codec_enc_cfg cfg;
- const char *out_fn;
- const char *stats_fn;
- stereo_format_t stereo_fmt;
- int arg_ctrls[ARG_CTRL_CNT_MAX][2];
- int arg_ctrl_cnt;
- int write_webm;
- int have_kf_max_dist;
+struct stream_config {
+ struct vpx_codec_enc_cfg cfg;
+ const char *out_fn;
+ const char *stats_fn;
+ stereo_format_t stereo_fmt;
+ int arg_ctrls[ARG_CTRL_CNT_MAX][2];
+ int arg_ctrl_cnt;
+ int write_webm;
+ int have_kf_max_dist;
};
-struct stream_state
-{
- int index;
- struct stream_state *next;
- struct stream_config config;
- FILE *file;
- struct rate_hist rate_hist;
- EbmlGlobal ebml;
- uint32_t hash;
- uint64_t psnr_sse_total;
- uint64_t psnr_samples_total;
- double psnr_totals[4];
- int psnr_count;
- int counts[64];
- vpx_codec_ctx_t encoder;
- unsigned int frames_out;
- uint64_t cx_time;
- size_t nbytes;
- stats_io_t stats;
+struct stream_state {
+ int index;
+ struct stream_state *next;
+ struct stream_config config;
+ FILE *file;
+ struct rate_hist rate_hist;
+ EbmlGlobal ebml;
+ uint32_t hash;
+ uint64_t psnr_sse_total;
+ uint64_t psnr_samples_total;
+ double psnr_totals[4];
+ int psnr_count;
+ int counts[64];
+ vpx_codec_ctx_t encoder;
+ unsigned int frames_out;
+ uint64_t cx_time;
+ size_t nbytes;
+ stats_io_t stats;
+ struct vpx_image *img;
+ vpx_codec_ctx_t decoder;
+ int mismatch_seen;
};
void validate_positive_rational(const char *msg,
- struct vpx_rational *rat)
-{
- if (rat->den < 0)
- {
- rat->num *= -1;
- rat->den *= -1;
- }
+ struct vpx_rational *rat) {
+ if (rat->den < 0) {
+ rat->num *= -1;
+ rat->den *= -1;
+ }
- if (rat->num < 0)
- die("Error: %s must be positive\n", msg);
+ if (rat->num < 0)
+ die("Error: %s must be positive\n", msg);
- if (!rat->den)
- die("Error: %s has zero denominator\n", msg);
+ if (!rat->den)
+ die("Error: %s has zero denominator\n", msg);
}
-static void parse_global_config(struct global_config *global, char **argv)
-{
- char **argi, **argj;
- struct arg arg;
-
- /* Initialize default parameters */
- memset(global, 0, sizeof(*global));
- global->codec = codecs;
- global->passes = 1;
- global->use_i420 = 1;
-
- for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step)
- {
- arg.argv_step = 1;
-
- if (arg_match(&arg, &codecarg, argi))
- {
- int j, k = -1;
-
- for (j = 0; j < sizeof(codecs) / sizeof(codecs[0]); j++)
- if (!strcmp(codecs[j].name, arg.val))
- k = j;
-
- if (k >= 0)
- global->codec = codecs + k;
- else
- die("Error: Unrecognized argument (%s) to --codec\n",
- arg.val);
-
- }
- else if (arg_match(&arg, &passes, argi))
- {
- global->passes = arg_parse_uint(&arg);
-
- if (global->passes < 1 || global->passes > 2)
- die("Error: Invalid number of passes (%d)\n", global->passes);
- }
- else if (arg_match(&arg, &pass_arg, argi))
- {
- global->pass = arg_parse_uint(&arg);
-
- if (global->pass < 1 || global->pass > 2)
- die("Error: Invalid pass selected (%d)\n",
- global->pass);
- }
- else if (arg_match(&arg, &usage, argi))
- global->usage = arg_parse_uint(&arg);
- else if (arg_match(&arg, &deadline, argi))
- global->deadline = arg_parse_uint(&arg);
- else if (arg_match(&arg, &best_dl, argi))
- global->deadline = VPX_DL_BEST_QUALITY;
- else if (arg_match(&arg, &good_dl, argi))
- global->deadline = VPX_DL_GOOD_QUALITY;
- else if (arg_match(&arg, &rt_dl, argi))
- global->deadline = VPX_DL_REALTIME;
- else if (arg_match(&arg, &use_yv12, argi))
- global->use_i420 = 0;
- else if (arg_match(&arg, &use_i420, argi))
- global->use_i420 = 1;
- else if (arg_match(&arg, &quietarg, argi))
- global->quiet = 1;
- else if (arg_match(&arg, &verbosearg, argi))
- global->verbose = 1;
- else if (arg_match(&arg, &limit, argi))
- global->limit = arg_parse_uint(&arg);
- else if (arg_match(&arg, &psnrarg, argi))
- global->show_psnr = 1;
- else if (arg_match(&arg, &framerate, argi))
- {
- global->framerate = arg_parse_rational(&arg);
- validate_positive_rational(arg.name, &global->framerate);
- global->have_framerate = 1;
- }
- else if (arg_match(&arg,&out_part, argi))
- global->out_part = 1;
- else if (arg_match(&arg, &debugmode, argi))
- global->debug = 1;
- else if (arg_match(&arg, &q_hist_n, argi))
- global->show_q_hist_buckets = arg_parse_uint(&arg);
- else if (arg_match(&arg, &rate_hist_n, argi))
- global->show_rate_hist_buckets = arg_parse_uint(&arg);
- else
- argj++;
- }
-
- /* Validate global config */
-
- if (global->pass)
- {
- /* DWIM: Assume the user meant passes=2 if pass=2 is specified */
- if (global->pass > global->passes)
- {
- warn("Assuming --pass=%d implies --passes=%d\n",
- global->pass, global->pass);
- global->passes = global->pass;
- }
+static void parse_global_config(struct global_config *global, char **argv) {
+ char **argi, **argj;
+ struct arg arg;
+
+ /* Initialize default parameters */
+ memset(global, 0, sizeof(*global));
+ global->codec = codecs;
+ global->passes = 0;
+ global->use_i420 = 1;
+ /* Assign default deadline to good quality */
+ global->deadline = VPX_DL_GOOD_QUALITY;
+
+ for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
+ arg.argv_step = 1;
+
+ if (arg_match(&arg, &codecarg, argi)) {
+ int j, k = -1;
+
+ for (j = 0; j < sizeof(codecs) / sizeof(codecs[0]); j++)
+ if (!strcmp(codecs[j].name, arg.val))
+ k = j;
+
+ if (k >= 0)
+ global->codec = codecs + k;
+ else
+ die("Error: Unrecognized argument (%s) to --codec\n",
+ arg.val);
+
+ } else if (arg_match(&arg, &passes, argi)) {
+ global->passes = arg_parse_uint(&arg);
+
+ if (global->passes < 1 || global->passes > 2)
+ die("Error: Invalid number of passes (%d)\n", global->passes);
+ } else if (arg_match(&arg, &pass_arg, argi)) {
+ global->pass = arg_parse_uint(&arg);
+
+ if (global->pass < 1 || global->pass > 2)
+ die("Error: Invalid pass selected (%d)\n",
+ global->pass);
+ } else if (arg_match(&arg, &usage, argi))
+ global->usage = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &deadline, argi))
+ global->deadline = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &best_dl, argi))
+ global->deadline = VPX_DL_BEST_QUALITY;
+ else if (arg_match(&arg, &good_dl, argi))
+ global->deadline = VPX_DL_GOOD_QUALITY;
+ else if (arg_match(&arg, &rt_dl, argi))
+ global->deadline = VPX_DL_REALTIME;
+ else if (arg_match(&arg, &use_yv12, argi))
+ global->use_i420 = 0;
+ else if (arg_match(&arg, &use_i420, argi))
+ global->use_i420 = 1;
+ else if (arg_match(&arg, &quietarg, argi))
+ global->quiet = 1;
+ else if (arg_match(&arg, &verbosearg, argi))
+ global->verbose = 1;
+ else if (arg_match(&arg, &limit, argi))
+ global->limit = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &skip, argi))
+ global->skip_frames = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &psnrarg, argi))
+ global->show_psnr = 1;
+ else if (arg_match(&arg, &recontest, argi))
+ global->test_decode = arg_parse_enum_or_int(&arg);
+ else if (arg_match(&arg, &framerate, argi)) {
+ global->framerate = arg_parse_rational(&arg);
+ validate_positive_rational(arg.name, &global->framerate);
+ global->have_framerate = 1;
+ } else if (arg_match(&arg, &out_part, argi))
+ global->out_part = 1;
+ else if (arg_match(&arg, &debugmode, argi))
+ global->debug = 1;
+ else if (arg_match(&arg, &q_hist_n, argi))
+ global->show_q_hist_buckets = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &rate_hist_n, argi))
+ global->show_rate_hist_buckets = arg_parse_uint(&arg);
+ else
+ argj++;
+ }
+
+ /* Validate global config */
+ if (global->passes == 0) {
+ /* Make default VP9 passes = 2 until there is a better quality 1-pass
+ * encoder.
+ */
+ global->passes = (global->codec->iface == vpx_codec_vp9_cx ? 2 : 1);
+ }
+
+ if (global->pass) {
+ /* DWIM: Assume the user meant passes=2 if pass=2 is specified */
+ if (global->pass > global->passes) {
+ warn("Assuming --pass=%d implies --passes=%d\n",
+ global->pass, global->pass);
+ global->passes = global->pass;
}
+ }
}
-void open_input_file(struct input_state *input)
-{
- unsigned int fourcc;
+void open_input_file(struct input_state *input) {
+ unsigned int fourcc;
- /* Parse certain options from the input file, if possible */
- input->file = strcmp(input->fn, "-") ? fopen(input->fn, "rb")
- : set_binary_mode(stdin);
+ /* Parse certain options from the input file, if possible */
+ input->file = strcmp(input->fn, "-") ? fopen(input->fn, "rb")
+ : set_binary_mode(stdin);
- if (!input->file)
- fatal("Failed to open input file");
+ if (!input->file)
+ fatal("Failed to open input file");
- /* For RAW input sources, these bytes will applied on the first frame
- * in read_frame().
+ if (!fseeko(input->file, 0, SEEK_END)) {
+ /* Input file is seekable. Figure out how long it is, so we can get
+ * progress info.
*/
- input->detect.buf_read = fread(input->detect.buf, 1, 4, input->file);
- input->detect.position = 0;
-
- if (input->detect.buf_read == 4
- && file_is_y4m(input->file, &input->y4m, input->detect.buf))
- {
- if (y4m_input_open(&input->y4m, input->file, input->detect.buf, 4) >= 0)
- {
- input->file_type = FILE_TYPE_Y4M;
- input->w = input->y4m.pic_w;
- input->h = input->y4m.pic_h;
- input->framerate.num = input->y4m.fps_n;
- input->framerate.den = input->y4m.fps_d;
- input->use_i420 = 0;
- }
- else
- fatal("Unsupported Y4M stream.");
- }
- else if (input->detect.buf_read == 4 && file_is_ivf(input, &fourcc))
- {
- input->file_type = FILE_TYPE_IVF;
- switch (fourcc)
- {
- case 0x32315659:
- input->use_i420 = 0;
- break;
- case 0x30323449:
- input->use_i420 = 1;
- break;
- default:
- fatal("Unsupported fourcc (%08x) in IVF", fourcc);
- }
- }
- else
- {
- input->file_type = FILE_TYPE_RAW;
+ input->length = ftello(input->file);
+ rewind(input->file);
+ }
+
+ /* For RAW input sources, these bytes will be applied on the first frame
+ * in read_frame().
+ */
+ input->detect.buf_read = fread(input->detect.buf, 1, 4, input->file);
+ input->detect.position = 0;
+
+ if (input->detect.buf_read == 4
+ && file_is_y4m(input->file, &input->y4m, input->detect.buf)) {
+ if (y4m_input_open(&input->y4m, input->file, input->detect.buf, 4,
+ input->only_i420) >= 0) {
+ input->file_type = FILE_TYPE_Y4M;
+ input->w = input->y4m.pic_w;
+ input->h = input->y4m.pic_h;
+ input->framerate.num = input->y4m.fps_n;
+ input->framerate.den = input->y4m.fps_d;
+ input->use_i420 = 0;
+ } else
+ fatal("Unsupported Y4M stream.");
+ } else if (input->detect.buf_read == 4 && file_is_ivf(input, &fourcc)) {
+ input->file_type = FILE_TYPE_IVF;
+ switch (fourcc) {
+ case 0x32315659:
+ input->use_i420 = 0;
+ break;
+ case 0x30323449:
+ input->use_i420 = 1;
+ break;
+ default:
+ fatal("Unsupported fourcc (%08x) in IVF", fourcc);
}
+ } else {
+ input->file_type = FILE_TYPE_RAW;
+ }
}
-static void close_input_file(struct input_state *input)
-{
- fclose(input->file);
- if (input->file_type == FILE_TYPE_Y4M)
- y4m_input_close(&input->y4m);
+static void close_input_file(struct input_state *input) {
+ fclose(input->file);
+ if (input->file_type == FILE_TYPE_Y4M)
+ y4m_input_close(&input->y4m);
}
static struct stream_state *new_stream(struct global_config *global,
- struct stream_state *prev)
-{
- struct stream_state *stream;
+ struct stream_state *prev) {
+ struct stream_state *stream;
+
+ stream = calloc(1, sizeof(*stream));
+ if (!stream)
+ fatal("Failed to allocate new stream.");
+ if (prev) {
+ memcpy(stream, prev, sizeof(*stream));
+ stream->index++;
+ prev->next = stream;
+ } else {
+ vpx_codec_err_t res;
+
+ /* Populate encoder configuration */
+ res = vpx_codec_enc_config_default(global->codec->iface(),
+ &stream->config.cfg,
+ global->usage);
+ if (res)
+ fatal("Failed to get config: %s\n", vpx_codec_err_to_string(res));
+
+ /* Change the default timebase to a high enough value so that the
+ * encoder will always create strictly increasing timestamps.
+ */
+ stream->config.cfg.g_timebase.den = 1000;
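+ /* Note: with a 1/1000 timebase the pts values are already in
+ * milliseconds, which lines up with the millisecond cluster/block
+ * timecode arithmetic in the WebM writer above.
+ */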
- stream = calloc(1, sizeof(*stream));
- if(!stream)
- fatal("Failed to allocate new stream.");
- if(prev)
- {
- memcpy(stream, prev, sizeof(*stream));
- stream->index++;
- prev->next = stream;
- }
- else
- {
- vpx_codec_err_t res;
-
- /* Populate encoder configuration */
- res = vpx_codec_enc_config_default(global->codec->iface,
- &stream->config.cfg,
- global->usage);
- if (res)
- fatal("Failed to get config: %s\n", vpx_codec_err_to_string(res));
-
- /* Change the default timebase to a high enough value so that the
- * encoder will always create strictly increasing timestamps.
- */
- stream->config.cfg.g_timebase.den = 1000;
-
- /* Never use the library's default resolution, require it be parsed
- * from the file or set on the command line.
- */
- stream->config.cfg.g_w = 0;
- stream->config.cfg.g_h = 0;
-
- /* Initialize remaining stream parameters */
- stream->config.stereo_fmt = STEREO_FORMAT_MONO;
- stream->config.write_webm = 1;
- stream->ebml.last_pts_ms = -1;
-
- /* Allows removal of the application version from the EBML tags */
- stream->ebml.debug = global->debug;
- }
+ /* Never use the library's default resolution, require it be parsed
+ * from the file or set on the command line.
+ */
+ stream->config.cfg.g_w = 0;
+ stream->config.cfg.g_h = 0;
+
+ /* Initialize remaining stream parameters */
+ stream->config.stereo_fmt = STEREO_FORMAT_MONO;
+ stream->config.write_webm = 1;
+ stream->ebml.last_pts_ms = -1;
- /* Output files must be specified for each stream */
- stream->config.out_fn = NULL;
+ /* Allows removal of the application version from the EBML tags */
+ stream->ebml.debug = global->debug;
+ }
- stream->next = NULL;
- return stream;
+ /* Output files must be specified for each stream */
+ stream->config.out_fn = NULL;
+
+ stream->next = NULL;
+ return stream;
}
static int parse_stream_params(struct global_config *global,
struct stream_state *stream,
- char **argv)
-{
- char **argi, **argj;
- struct arg arg;
- static const arg_def_t **ctrl_args = no_args;
- static const int *ctrl_args_map = NULL;
- struct stream_config *config = &stream->config;
- int eos_mark_found = 0;
-
- /* Handle codec specific options */
- if (global->codec->iface == &vpx_codec_vp8_cx_algo)
- {
- ctrl_args = vp8_args;
- ctrl_args_map = vp8_arg_ctrl_map;
- }
-
- for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step)
- {
- arg.argv_step = 1;
-
- /* Once we've found an end-of-stream marker (--) we want to continue
- * shifting arguments but not consuming them.
- */
- if (eos_mark_found)
- {
- argj++;
- continue;
- }
- else if (!strcmp(*argj, "--"))
- {
- eos_mark_found = 1;
- continue;
- }
+ char **argv) {
+ char **argi, **argj;
+ struct arg arg;
+ static const arg_def_t **ctrl_args = no_args;
+ static const int *ctrl_args_map = NULL;
+ struct stream_config *config = &stream->config;
+ int eos_mark_found = 0;
+
+ /* Handle codec specific options */
+ if (0) {
+#if CONFIG_VP8_ENCODER
+ } else if (global->codec->iface == vpx_codec_vp8_cx) {
+ ctrl_args = vp8_args;
+ ctrl_args_map = vp8_arg_ctrl_map;
+#endif
+#if CONFIG_VP9_ENCODER
+ } else if (global->codec->iface == vpx_codec_vp9_cx) {
+ ctrl_args = vp9_args;
+ ctrl_args_map = vp9_arg_ctrl_map;
+#endif
+ }
- if (0);
- else if (arg_match(&arg, &outputfile, argi))
- config->out_fn = arg.val;
- else if (arg_match(&arg, &fpf_name, argi))
- config->stats_fn = arg.val;
- else if (arg_match(&arg, &use_ivf, argi))
- config->write_webm = 0;
- else if (arg_match(&arg, &threads, argi))
- config->cfg.g_threads = arg_parse_uint(&arg);
- else if (arg_match(&arg, &profile, argi))
- config->cfg.g_profile = arg_parse_uint(&arg);
- else if (arg_match(&arg, &width, argi))
- config->cfg.g_w = arg_parse_uint(&arg);
- else if (arg_match(&arg, &height, argi))
- config->cfg.g_h = arg_parse_uint(&arg);
- else if (arg_match(&arg, &stereo_mode, argi))
- config->stereo_fmt = arg_parse_enum_or_int(&arg);
- else if (arg_match(&arg, &timebase, argi))
- {
- config->cfg.g_timebase = arg_parse_rational(&arg);
- validate_positive_rational(arg.name, &config->cfg.g_timebase);
- }
- else if (arg_match(&arg, &error_resilient, argi))
- config->cfg.g_error_resilient = arg_parse_uint(&arg);
- else if (arg_match(&arg, &lag_in_frames, argi))
- config->cfg.g_lag_in_frames = arg_parse_uint(&arg);
- else if (arg_match(&arg, &dropframe_thresh, argi))
- config->cfg.rc_dropframe_thresh = arg_parse_uint(&arg);
- else if (arg_match(&arg, &resize_allowed, argi))
- config->cfg.rc_resize_allowed = arg_parse_uint(&arg);
- else if (arg_match(&arg, &resize_up_thresh, argi))
- config->cfg.rc_resize_up_thresh = arg_parse_uint(&arg);
- else if (arg_match(&arg, &resize_down_thresh, argi))
- config->cfg.rc_resize_down_thresh = arg_parse_uint(&arg);
- else if (arg_match(&arg, &end_usage, argi))
- config->cfg.rc_end_usage = arg_parse_enum_or_int(&arg);
- else if (arg_match(&arg, &target_bitrate, argi))
- config->cfg.rc_target_bitrate = arg_parse_uint(&arg);
- else if (arg_match(&arg, &min_quantizer, argi))
- config->cfg.rc_min_quantizer = arg_parse_uint(&arg);
- else if (arg_match(&arg, &max_quantizer, argi))
- config->cfg.rc_max_quantizer = arg_parse_uint(&arg);
- else if (arg_match(&arg, &undershoot_pct, argi))
- config->cfg.rc_undershoot_pct = arg_parse_uint(&arg);
- else if (arg_match(&arg, &overshoot_pct, argi))
- config->cfg.rc_overshoot_pct = arg_parse_uint(&arg);
- else if (arg_match(&arg, &buf_sz, argi))
- config->cfg.rc_buf_sz = arg_parse_uint(&arg);
- else if (arg_match(&arg, &buf_initial_sz, argi))
- config->cfg.rc_buf_initial_sz = arg_parse_uint(&arg);
- else if (arg_match(&arg, &buf_optimal_sz, argi))
- config->cfg.rc_buf_optimal_sz = arg_parse_uint(&arg);
- else if (arg_match(&arg, &bias_pct, argi))
- {
- config->cfg.rc_2pass_vbr_bias_pct = arg_parse_uint(&arg);
+ for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
+ arg.argv_step = 1;
- if (global->passes < 2)
- warn("option %s ignored in one-pass mode.\n", arg.name);
- }
- else if (arg_match(&arg, &minsection_pct, argi))
- {
- config->cfg.rc_2pass_vbr_minsection_pct = arg_parse_uint(&arg);
+ /* Once we've found an end-of-stream marker (--) we want to continue
+ * shifting arguments but not consuming them.
+ */
+ if (eos_mark_found) {
+ argj++;
+ continue;
+ } else if (!strcmp(*argj, "--")) {
+ eos_mark_found = 1;
+ continue;
+ }
- if (global->passes < 2)
- warn("option %s ignored in one-pass mode.\n", arg.name);
- }
- else if (arg_match(&arg, &maxsection_pct, argi))
- {
- config->cfg.rc_2pass_vbr_maxsection_pct = arg_parse_uint(&arg);
+ if (0);
+ else if (arg_match(&arg, &outputfile, argi))
+ config->out_fn = arg.val;
+ else if (arg_match(&arg, &fpf_name, argi))
+ config->stats_fn = arg.val;
+ else if (arg_match(&arg, &use_ivf, argi))
+ config->write_webm = 0;
+ else if (arg_match(&arg, &threads, argi))
+ config->cfg.g_threads = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &profile, argi))
+ config->cfg.g_profile = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &width, argi))
+ config->cfg.g_w = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &height, argi))
+ config->cfg.g_h = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &stereo_mode, argi))
+ config->stereo_fmt = arg_parse_enum_or_int(&arg);
+ else if (arg_match(&arg, &timebase, argi)) {
+ config->cfg.g_timebase = arg_parse_rational(&arg);
+ validate_positive_rational(arg.name, &config->cfg.g_timebase);
+ } else if (arg_match(&arg, &error_resilient, argi))
+ config->cfg.g_error_resilient = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &lag_in_frames, argi))
+ config->cfg.g_lag_in_frames = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &dropframe_thresh, argi))
+ config->cfg.rc_dropframe_thresh = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &resize_allowed, argi))
+ config->cfg.rc_resize_allowed = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &resize_up_thresh, argi))
+ config->cfg.rc_resize_up_thresh = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &resize_down_thresh, argi))
+ config->cfg.rc_resize_down_thresh = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &end_usage, argi))
+ config->cfg.rc_end_usage = arg_parse_enum_or_int(&arg);
+ else if (arg_match(&arg, &target_bitrate, argi))
+ config->cfg.rc_target_bitrate = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &min_quantizer, argi))
+ config->cfg.rc_min_quantizer = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &max_quantizer, argi))
+ config->cfg.rc_max_quantizer = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &undershoot_pct, argi))
+ config->cfg.rc_undershoot_pct = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &overshoot_pct, argi))
+ config->cfg.rc_overshoot_pct = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &buf_sz, argi))
+ config->cfg.rc_buf_sz = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &buf_initial_sz, argi))
+ config->cfg.rc_buf_initial_sz = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &buf_optimal_sz, argi))
+ config->cfg.rc_buf_optimal_sz = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &bias_pct, argi)) {
+ config->cfg.rc_2pass_vbr_bias_pct = arg_parse_uint(&arg);
+
+ if (global->passes < 2)
+ warn("option %s ignored in one-pass mode.\n", arg.name);
+ } else if (arg_match(&arg, &minsection_pct, argi)) {
+ config->cfg.rc_2pass_vbr_minsection_pct = arg_parse_uint(&arg);
+
+ if (global->passes < 2)
+ warn("option %s ignored in one-pass mode.\n", arg.name);
+ } else if (arg_match(&arg, &maxsection_pct, argi)) {
+ config->cfg.rc_2pass_vbr_maxsection_pct = arg_parse_uint(&arg);
+
+ if (global->passes < 2)
+ warn("option %s ignored in one-pass mode.\n", arg.name);
+ } else if (arg_match(&arg, &kf_min_dist, argi))
+ config->cfg.kf_min_dist = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &kf_max_dist, argi)) {
+ config->cfg.kf_max_dist = arg_parse_uint(&arg);
+ config->have_kf_max_dist = 1;
+ } else if (arg_match(&arg, &kf_disabled, argi))
+ config->cfg.kf_mode = VPX_KF_DISABLED;
+ else {
+ int i, match = 0;
+
+ for (i = 0; ctrl_args[i]; i++) {
+ if (arg_match(&arg, ctrl_args[i], argi)) {
+ int j;
+ match = 1;
+
+ /* Point either to the next free element or the first
+ * instance of this control.
+ */
+ for (j = 0; j < config->arg_ctrl_cnt; j++)
+ if (config->arg_ctrls[j][0] == ctrl_args_map[i])
+ break;
+
+ /* Update/insert */
+ assert(j < ARG_CTRL_CNT_MAX);
+ if (j < ARG_CTRL_CNT_MAX) {
+ config->arg_ctrls[j][0] = ctrl_args_map[i];
+ config->arg_ctrls[j][1] = arg_parse_enum_or_int(&arg);
+ if (j == config->arg_ctrl_cnt)
+ config->arg_ctrl_cnt++;
+ }
- if (global->passes < 2)
- warn("option %s ignored in one-pass mode.\n", arg.name);
- }
- else if (arg_match(&arg, &kf_min_dist, argi))
- config->cfg.kf_min_dist = arg_parse_uint(&arg);
- else if (arg_match(&arg, &kf_max_dist, argi))
- {
- config->cfg.kf_max_dist = arg_parse_uint(&arg);
- config->have_kf_max_dist = 1;
}
- else if (arg_match(&arg, &kf_disabled, argi))
- config->cfg.kf_mode = VPX_KF_DISABLED;
- else
- {
- int i, match = 0;
-
- for (i = 0; ctrl_args[i]; i++)
- {
- if (arg_match(&arg, ctrl_args[i], argi))
- {
- int j;
- match = 1;
-
- /* Point either to the next free element or the first
- * instance of this control.
- */
- for(j=0; j<config->arg_ctrl_cnt; j++)
- if(config->arg_ctrls[j][0] == ctrl_args_map[i])
- break;
-
- /* Update/insert */
- assert(j < ARG_CTRL_CNT_MAX);
- if (j < ARG_CTRL_CNT_MAX)
- {
- config->arg_ctrls[j][0] = ctrl_args_map[i];
- config->arg_ctrls[j][1] = arg_parse_enum_or_int(&arg);
- if(j == config->arg_ctrl_cnt)
- config->arg_ctrl_cnt++;
- }
-
- }
- }
+ }
- if (!match)
- argj++;
- }
+ if (!match)
+ argj++;
}
+ }
- return eos_mark_found;
+ return eos_mark_found;
}
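Worth noting: the loop above compacts argv in place. The condition `(*argj = *argi)` copies every argument down over the ones already consumed; recognized options leave argj untouched so their slot is reused, and once "--" is seen everything remaining is shifted through unparsed. A minimal standalone sketch of the idiom (hypothetical option name, not the vpxenc parser):

    #include <stdio.h>
    #include <string.h>

    /* Consumed options vanish, unknown ones shift down, and anything
     * after "--" is preserved verbatim. */
    static void compact_args(char **argv) {
      char **argi, **argj;
      int eos_mark_found = 0;

      for (argi = argj = argv; (*argj = *argi) != NULL; argi++) {
        if (eos_mark_found)
          argj++;                  /* past "--": shift, don't parse */
        else if (!strcmp(*argj, "--"))
          eos_mark_found = 1;      /* drop the marker itself */
        else if (!strcmp(*argj, "--known"))
          ;                        /* consumed: argj stays, slot is reused */
        else
          argj++;                  /* unrecognized: keep it */
      }
    }

    int main(void) {
      char *args[] = {"--known", "in.y4m", "--", "--known", NULL};
      compact_args(args);
      for (int i = 0; args[i]; i++)
        printf("%s\n", args[i]);   /* prints: in.y4m, --known */
      return 0;
    }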
#define FOREACH_STREAM(func)\
-do\
-{\
+ do\
+ {\
struct stream_state *stream;\
-\
+ \
for(stream = streams; stream; stream = stream->next)\
- func;\
-}while(0)
-
-
-static void validate_stream_config(struct stream_state *stream)
-{
- struct stream_state *streami;
-
- if(!stream->config.cfg.g_w || !stream->config.cfg.g_h)
- fatal("Stream %d: Specify stream dimensions with --width (-w) "
- " and --height (-h)", stream->index);
-
- for(streami = stream; streami; streami = streami->next)
- {
- /* All streams require output files */
- if(!streami->config.out_fn)
- fatal("Stream %d: Output file is required (specify with -o)",
- streami->index);
-
- /* Check for two streams outputting to the same file */
- if(streami != stream)
- {
- const char *a = stream->config.out_fn;
- const char *b = streami->config.out_fn;
- if(!strcmp(a,b) && strcmp(a, "/dev/null") && strcmp(a, ":nul"))
- fatal("Stream %d: duplicate output file (from stream %d)",
- streami->index, stream->index);
- }
+ func;\
+ }while(0)
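The reformatted macro keeps the classic do { ... } while(0) wrapper, which is what lets a multi-statement macro expand safely as a single statement. A self-contained illustration with a hypothetical macro, same idiom:

    #include <stdio.h>

    #define LOG_TWICE(msg) \
      do {                 \
        puts(msg);         \
        puts(msg);         \
      } while (0)

    int main(void) {
      int ok = 0;
      if (ok)
        LOG_TWICE("yes");  /* expands as one statement... */
      else
        LOG_TWICE("no");   /* ...so this else still binds to the if */
      return 0;
    }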
+
+
+static void validate_stream_config(struct stream_state *stream) {
+ struct stream_state *streami;
+
+ if (!stream->config.cfg.g_w || !stream->config.cfg.g_h)
+ fatal("Stream %d: Specify stream dimensions with --width (-w) "
+ " and --height (-h)", stream->index);
+
+ for (streami = stream; streami; streami = streami->next) {
+ /* All streams require output files */
+ if (!streami->config.out_fn)
+ fatal("Stream %d: Output file is required (specify with -o)",
+ streami->index);
+
+ /* Check for two streams outputting to the same file */
+ if (streami != stream) {
+ const char *a = stream->config.out_fn;
+ const char *b = streami->config.out_fn;
+ if (!strcmp(a, b) && strcmp(a, "/dev/null") && strcmp(a, ":nul"))
+ fatal("Stream %d: duplicate output file (from stream %d)",
+ streami->index, stream->index);
+ }
- /* Check for two streams sharing a stats file. */
- if(streami != stream)
- {
- const char *a = stream->config.stats_fn;
- const char *b = streami->config.stats_fn;
- if(a && b && !strcmp(a,b))
- fatal("Stream %d: duplicate stats file (from stream %d)",
- streami->index, stream->index);
- }
+ /* Check for two streams sharing a stats file. */
+ if (streami != stream) {
+ const char *a = stream->config.stats_fn;
+ const char *b = streami->config.stats_fn;
+ if (a && b && !strcmp(a, b))
+ fatal("Stream %d: duplicate stats file (from stream %d)",
+ streami->index, stream->index);
}
+ }
}
static void set_stream_dimensions(struct stream_state *stream,
unsigned int w,
- unsigned int h)
-{
- if ((stream->config.cfg.g_w && stream->config.cfg.g_w != w)
- ||(stream->config.cfg.g_h && stream->config.cfg.g_h != h))
- fatal("Stream %d: Resizing not yet supported", stream->index);
- stream->config.cfg.g_w = w;
- stream->config.cfg.g_h = h;
+ unsigned int h) {
+ if (!stream->config.cfg.g_w) {
+ if (!stream->config.cfg.g_h)
+ stream->config.cfg.g_w = w;
+ else
+ stream->config.cfg.g_w = w * stream->config.cfg.g_h / h;
+ }
+ if (!stream->config.cfg.g_h) {
+ stream->config.cfg.g_h = h * stream->config.cfg.g_w / w;
+ }
}
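Behavior change worth flagging: the old set_stream_dimensions() rejected any mismatch between stream and input sizes ("Resizing not yet supported"), while the new one fills in whichever of g_w/g_h the user left unset, deriving the missing value from the input's aspect ratio with integer arithmetic. A minimal sketch of the rule:

    #include <stdio.h>

    /* Whichever dimension is 0 is derived from the input size, keeping
     * the aspect ratio (integer math, so odd ratios truncate). */
    static void infer_dims(unsigned int *g_w, unsigned int *g_h,
                           unsigned int w, unsigned int h) {
      if (!*g_w)
        *g_w = *g_h ? w * *g_h / h : w;
      if (!*g_h)
        *g_h = h * *g_w / w;
    }

    int main(void) {
      unsigned int g_w = 0, g_h = 360;     /* user passed only --height 360 */
      infer_dims(&g_w, &g_h, 1920, 1080);  /* 1920x1080 source */
      printf("%ux%u\n", g_w, g_h);         /* prints 640x360 */
      return 0;
    }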
static void set_default_kf_interval(struct stream_state *stream,
- struct global_config *global)
-{
- /* Use a max keyframe interval of 5 seconds, if none was
- * specified on the command line.
- */
- if (!stream->config.have_kf_max_dist)
- {
- double framerate = (double)global->framerate.num/global->framerate.den;
- if (framerate > 0.0)
- stream->config.cfg.kf_max_dist = (unsigned int)(5.0*framerate);
- }
+ struct global_config *global) {
+ /* Use a max keyframe interval of 5 seconds, if none was
+ * specified on the command line.
+ */
+ if (!stream->config.have_kf_max_dist) {
+ double framerate = (double)global->framerate.num / global->framerate.den;
+ if (framerate > 0.0)
+ stream->config.cfg.kf_max_dist = (unsigned int)(5.0 * framerate);
+ }
}
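The default keyframe spacing is simply five seconds' worth of frames, truncated to an integer. For example, at NTSC 30000/1001 fps:

    #include <stdio.h>

    int main(void) {
      double framerate = 30000.0 / 1001.0;        /* ~29.97 fps */
      unsigned int kf_max_dist = (unsigned int)(5.0 * framerate);
      printf("kf_max_dist = %u\n", kf_max_dist);  /* prints 149 */
      return 0;
    }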
static void show_stream_config(struct stream_state *stream,
struct global_config *global,
- struct input_state *input)
-{
+ struct input_state *input) {
#define SHOW(field) \
- fprintf(stderr, " %-28s = %d\n", #field, stream->config.cfg.field)
-
- if(stream->index == 0)
- {
- fprintf(stderr, "Codec: %s\n",
- vpx_codec_iface_name(global->codec->iface));
- fprintf(stderr, "Source file: %s Format: %s\n", input->fn,
- input->use_i420 ? "I420" : "YV12");
- }
- if(stream->next || stream->index)
- fprintf(stderr, "\nStream Index: %d\n", stream->index);
- fprintf(stderr, "Destination file: %s\n", stream->config.out_fn);
- fprintf(stderr, "Encoder parameters:\n");
-
- SHOW(g_usage);
- SHOW(g_threads);
- SHOW(g_profile);
- SHOW(g_w);
- SHOW(g_h);
- SHOW(g_timebase.num);
- SHOW(g_timebase.den);
- SHOW(g_error_resilient);
- SHOW(g_pass);
- SHOW(g_lag_in_frames);
- SHOW(rc_dropframe_thresh);
- SHOW(rc_resize_allowed);
- SHOW(rc_resize_up_thresh);
- SHOW(rc_resize_down_thresh);
- SHOW(rc_end_usage);
- SHOW(rc_target_bitrate);
- SHOW(rc_min_quantizer);
- SHOW(rc_max_quantizer);
- SHOW(rc_undershoot_pct);
- SHOW(rc_overshoot_pct);
- SHOW(rc_buf_sz);
- SHOW(rc_buf_initial_sz);
- SHOW(rc_buf_optimal_sz);
- SHOW(rc_2pass_vbr_bias_pct);
- SHOW(rc_2pass_vbr_minsection_pct);
- SHOW(rc_2pass_vbr_maxsection_pct);
- SHOW(kf_mode);
- SHOW(kf_min_dist);
- SHOW(kf_max_dist);
+ fprintf(stderr, " %-28s = %d\n", #field, stream->config.cfg.field)
+
+ if (stream->index == 0) {
+ fprintf(stderr, "Codec: %s\n",
+ vpx_codec_iface_name(global->codec->iface()));
+ fprintf(stderr, "Source file: %s Format: %s\n", input->fn,
+ input->use_i420 ? "I420" : "YV12");
+ }
+ if (stream->next || stream->index)
+ fprintf(stderr, "\nStream Index: %d\n", stream->index);
+ fprintf(stderr, "Destination file: %s\n", stream->config.out_fn);
+ fprintf(stderr, "Encoder parameters:\n");
+
+ SHOW(g_usage);
+ SHOW(g_threads);
+ SHOW(g_profile);
+ SHOW(g_w);
+ SHOW(g_h);
+ SHOW(g_timebase.num);
+ SHOW(g_timebase.den);
+ SHOW(g_error_resilient);
+ SHOW(g_pass);
+ SHOW(g_lag_in_frames);
+ SHOW(rc_dropframe_thresh);
+ SHOW(rc_resize_allowed);
+ SHOW(rc_resize_up_thresh);
+ SHOW(rc_resize_down_thresh);
+ SHOW(rc_end_usage);
+ SHOW(rc_target_bitrate);
+ SHOW(rc_min_quantizer);
+ SHOW(rc_max_quantizer);
+ SHOW(rc_undershoot_pct);
+ SHOW(rc_overshoot_pct);
+ SHOW(rc_buf_sz);
+ SHOW(rc_buf_initial_sz);
+ SHOW(rc_buf_optimal_sz);
+ SHOW(rc_2pass_vbr_bias_pct);
+ SHOW(rc_2pass_vbr_minsection_pct);
+ SHOW(rc_2pass_vbr_maxsection_pct);
+ SHOW(kf_mode);
+ SHOW(kf_min_dist);
+ SHOW(kf_max_dist);
}
static void open_output_file(struct stream_state *stream,
- struct global_config *global)
-{
- const char *fn = stream->config.out_fn;
-
- stream->file = strcmp(fn, "-") ? fopen(fn, "wb") : set_binary_mode(stdout);
-
- if (!stream->file)
- fatal("Failed to open output file");
-
- if(stream->config.write_webm && fseek(stream->file, 0, SEEK_CUR))
- fatal("WebM output to pipes not supported.");
-
- if(stream->config.write_webm)
- {
- stream->ebml.stream = stream->file;
- write_webm_file_header(&stream->ebml, &stream->config.cfg,
- &global->framerate,
- stream->config.stereo_fmt);
- }
- else
- write_ivf_file_header(stream->file, &stream->config.cfg,
- global->codec->fourcc, 0);
+ struct global_config *global) {
+ const char *fn = stream->config.out_fn;
+
+ stream->file = strcmp(fn, "-") ? fopen(fn, "wb") : set_binary_mode(stdout);
+
+ if (!stream->file)
+ fatal("Failed to open output file");
+
+ if (stream->config.write_webm && fseek(stream->file, 0, SEEK_CUR))
+ fatal("WebM output to pipes not supported.");
+
+ if (stream->config.write_webm) {
+ stream->ebml.stream = stream->file;
+ write_webm_file_header(&stream->ebml, &stream->config.cfg,
+ &global->framerate,
+ stream->config.stereo_fmt,
+ global->codec->fourcc);
+ } else
+ write_ivf_file_header(stream->file, &stream->config.cfg,
+ global->codec->fourcc, 0);
}
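The fseek(stream->file, 0, SEEK_CUR) call is a portable seekability probe: it is a no-op on a regular file but fails on a pipe or FIFO, and WebM output must seek back later to patch headers and cues. A tiny standalone check of the same trick:

    #include <stdio.h>

    int main(void) {
      if (fseek(stdout, 0, SEEK_CUR) != 0)
        fprintf(stderr, "stdout is not seekable (pipe or terminal)\n");
      else
        fprintf(stderr, "stdout is seekable\n");
      return 0;
    }

    /* Try: ./a.out > out.bin   vs.   ./a.out | cat */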
static void close_output_file(struct stream_state *stream,
- unsigned int fourcc)
-{
- if(stream->config.write_webm)
- {
- write_webm_file_footer(&stream->ebml, stream->hash);
- free(stream->ebml.cue_list);
- stream->ebml.cue_list = NULL;
- }
- else
- {
- if (!fseek(stream->file, 0, SEEK_SET))
- write_ivf_file_header(stream->file, &stream->config.cfg,
- fourcc,
- stream->frames_out);
- }
-
- fclose(stream->file);
+ unsigned int fourcc) {
+ if (stream->config.write_webm) {
+ write_webm_file_footer(&stream->ebml, stream->hash);
+ free(stream->ebml.cue_list);
+ stream->ebml.cue_list = NULL;
+ } else {
+ if (!fseek(stream->file, 0, SEEK_SET))
+ write_ivf_file_header(stream->file, &stream->config.cfg,
+ fourcc,
+ stream->frames_out);
+ }
+
+ fclose(stream->file);
}
static void setup_pass(struct stream_state *stream,
struct global_config *global,
- int pass)
-{
- if (stream->config.stats_fn)
- {
- if (!stats_open_file(&stream->stats, stream->config.stats_fn,
- pass))
- fatal("Failed to open statistics store");
- }
- else
- {
- if (!stats_open_mem(&stream->stats, pass))
- fatal("Failed to open statistics store");
- }
-
- stream->config.cfg.g_pass = global->passes == 2
- ? pass ? VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS
- : VPX_RC_ONE_PASS;
- if (pass)
- stream->config.cfg.rc_twopass_stats_in = stats_get(&stream->stats);
-
- stream->cx_time = 0;
- stream->nbytes = 0;
- stream->frames_out = 0;
+ int pass) {
+ if (stream->config.stats_fn) {
+ if (!stats_open_file(&stream->stats, stream->config.stats_fn,
+ pass))
+ fatal("Failed to open statistics store");
+ } else {
+ if (!stats_open_mem(&stream->stats, pass))
+ fatal("Failed to open statistics store");
+ }
+
+ stream->config.cfg.g_pass = global->passes == 2
+ ? pass ? VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS
+ : VPX_RC_ONE_PASS;
+ if (pass)
+ stream->config.cfg.rc_twopass_stats_in = stats_get(&stream->stats);
+
+ stream->cx_time = 0;
+ stream->nbytes = 0;
+ stream->frames_out = 0;
}
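The nested conditional for g_pass is dense; restated, passes==1 always means one-pass, and in two-pass mode the loop index selects first vs. last pass. A sketch with hypothetical enum values standing in for the VPX_RC_* constants:

    #include <stdio.h>

    enum rc_pass { ONE_PASS, FIRST_PASS, LAST_PASS };

    static enum rc_pass select_pass(int passes, int pass) {
      return passes == 2 ? (pass ? LAST_PASS : FIRST_PASS) : ONE_PASS;
    }

    int main(void) {
      printf("%d %d %d\n",
             select_pass(1, 0),   /* 0: ONE_PASS   */
             select_pass(2, 0),   /* 1: FIRST_PASS */
             select_pass(2, 1));  /* 2: LAST_PASS  */
      return 0;
    }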
static void initialize_encoder(struct stream_state *stream,
- struct global_config *global)
-{
- int i;
- int flags = 0;
-
- flags |= global->show_psnr ? VPX_CODEC_USE_PSNR : 0;
- flags |= global->out_part ? VPX_CODEC_USE_OUTPUT_PARTITION : 0;
-
- /* Construct Encoder Context */
- vpx_codec_enc_init(&stream->encoder, global->codec->iface,
- &stream->config.cfg, flags);
- ctx_exit_on_error(&stream->encoder, "Failed to initialize encoder");
-
- /* Note that we bypass the vpx_codec_control wrapper macro because
- * we're being clever to store the control IDs in an array. Real
- * applications will want to make use of the enumerations directly
- */
- for (i = 0; i < stream->config.arg_ctrl_cnt; i++)
- {
- int ctrl = stream->config.arg_ctrls[i][0];
- int value = stream->config.arg_ctrls[i][1];
- if (vpx_codec_control_(&stream->encoder, ctrl, value))
- fprintf(stderr, "Error: Tried to set control %d = %d\n",
- ctrl, value);
-
- ctx_exit_on_error(&stream->encoder, "Failed to control codec");
- }
+ struct global_config *global) {
+ int i;
+ int flags = 0;
+
+ flags |= global->show_psnr ? VPX_CODEC_USE_PSNR : 0;
+ flags |= global->out_part ? VPX_CODEC_USE_OUTPUT_PARTITION : 0;
+
+ /* Construct Encoder Context */
+ vpx_codec_enc_init(&stream->encoder, global->codec->iface(),
+ &stream->config.cfg, flags);
+ ctx_exit_on_error(&stream->encoder, "Failed to initialize encoder");
+
+ /* Note that we bypass the vpx_codec_control wrapper macro because
+ * we're being clever to store the control IDs in an array. Real
+ * applications will want to make use of the enumerations directly
+ */
+ for (i = 0; i < stream->config.arg_ctrl_cnt; i++) {
+ int ctrl = stream->config.arg_ctrls[i][0];
+ int value = stream->config.arg_ctrls[i][1];
+ if (vpx_codec_control_(&stream->encoder, ctrl, value))
+ fprintf(stderr, "Error: Tried to set control %d = %d\n",
+ ctrl, value);
+
+ ctx_exit_on_error(&stream->encoder, "Failed to control codec");
+ }
+
+#if CONFIG_DECODERS
+ if (global->test_decode != TEST_DECODE_OFF) {
+ vpx_codec_dec_init(&stream->decoder, global->codec->dx_iface(), NULL, 0);
+ }
+#endif
}
static void encode_frame(struct stream_state *stream,
struct global_config *global,
struct vpx_image *img,
- unsigned int frames_in)
-{
- vpx_codec_pts_t frame_start, next_frame_start;
- struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
- struct vpx_usec_timer timer;
-
- frame_start = (cfg->g_timebase.den * (int64_t)(frames_in - 1)
- * global->framerate.den)
- / cfg->g_timebase.num / global->framerate.num;
- next_frame_start = (cfg->g_timebase.den * (int64_t)(frames_in)
- * global->framerate.den)
- / cfg->g_timebase.num / global->framerate.num;
- vpx_usec_timer_start(&timer);
- vpx_codec_encode(&stream->encoder, img, frame_start,
- (unsigned long)(next_frame_start - frame_start),
- 0, global->deadline);
- vpx_usec_timer_mark(&timer);
- stream->cx_time += vpx_usec_timer_elapsed(&timer);
- ctx_exit_on_error(&stream->encoder, "Stream %d: Failed to encode frame",
- stream->index);
+ unsigned int frames_in) {
+ vpx_codec_pts_t frame_start, next_frame_start;
+ struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
+ struct vpx_usec_timer timer;
+
+ frame_start = (cfg->g_timebase.den * (int64_t)(frames_in - 1)
+ * global->framerate.den)
+ / cfg->g_timebase.num / global->framerate.num;
+ next_frame_start = (cfg->g_timebase.den * (int64_t)(frames_in)
+ * global->framerate.den)
+ / cfg->g_timebase.num / global->framerate.num;
+
+ /* Scale if necessary */
+ if (img && (img->d_w != cfg->g_w || img->d_h != cfg->g_h)) {
+ if (!stream->img)
+ stream->img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420,
+ cfg->g_w, cfg->g_h, 16);
+ I420Scale(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ img->d_w, img->d_h,
+ stream->img->planes[VPX_PLANE_Y],
+ stream->img->stride[VPX_PLANE_Y],
+ stream->img->planes[VPX_PLANE_U],
+ stream->img->stride[VPX_PLANE_U],
+ stream->img->planes[VPX_PLANE_V],
+ stream->img->stride[VPX_PLANE_V],
+ stream->img->d_w, stream->img->d_h,
+ kFilterBox);
+
+ img = stream->img;
+ }
+
+ vpx_usec_timer_start(&timer);
+ vpx_codec_encode(&stream->encoder, img, frame_start,
+ (unsigned long)(next_frame_start - frame_start),
+ 0, global->deadline);
+ vpx_usec_timer_mark(&timer);
+ stream->cx_time += vpx_usec_timer_elapsed(&timer);
+ ctx_exit_on_error(&stream->encoder, "Stream %d: Failed to encode frame",
+ stream->index);
}
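The timestamp math above deserves a worked example: frame boundaries are computed in timebase units from the frame index, and each duration is the difference of consecutive boundaries, so rounding error never accumulates across frames. Assuming a 1/1000 timebase and 30000/1001 fps:

    #include <stdint.h>
    #include <stdio.h>

    static int64_t frame_pts(int frame, int tb_num, int tb_den,
                             int fr_num, int fr_den) {
      return (int64_t)tb_den * (frame - 1) * fr_den / tb_num / fr_num;
    }

    int main(void) {
      for (int f = 1; f <= 4; f++) {
        int64_t start = frame_pts(f, 1, 1000, 30000, 1001);
        int64_t next = frame_pts(f + 1, 1, 1000, 30000, 1001);
        /* durations come out 33, 33, 34, 33 ms: the error never drifts */
        printf("frame %d: pts=%lld dur=%lld\n",
               f, (long long)start, (long long)(next - start));
      }
      return 0;
    }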
-static void update_quantizer_histogram(struct stream_state *stream)
-{
- if(stream->config.cfg.g_pass != VPX_RC_FIRST_PASS)
- {
- int q;
+static void update_quantizer_histogram(struct stream_state *stream) {
+ if (stream->config.cfg.g_pass != VPX_RC_FIRST_PASS) {
+ int q;
- vpx_codec_control(&stream->encoder, VP8E_GET_LAST_QUANTIZER_64, &q);
- ctx_exit_on_error(&stream->encoder, "Failed to read quantizer");
- stream->counts[q]++;
- }
+ vpx_codec_control(&stream->encoder, VP8E_GET_LAST_QUANTIZER_64, &q);
+ ctx_exit_on_error(&stream->encoder, "Failed to read quantizer");
+ stream->counts[q]++;
+ }
}
static void get_cx_data(struct stream_state *stream,
struct global_config *global,
- int *got_data)
-{
- const vpx_codec_cx_pkt_t *pkt;
- const struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
- vpx_codec_iter_t iter = NULL;
+ int *got_data) {
+ const vpx_codec_cx_pkt_t *pkt;
+ const struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
+ vpx_codec_iter_t iter = NULL;
+
+ *got_data = 0;
+ while ((pkt = vpx_codec_get_cx_data(&stream->encoder, &iter))) {
+ static size_t fsize = 0;
+ static off_t ivf_header_pos = 0;
+
+ switch (pkt->kind) {
+ case VPX_CODEC_CX_FRAME_PKT:
+ if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT)) {
+ stream->frames_out++;
+ }
+ if (!global->quiet)
+ fprintf(stderr, " %6luF", (unsigned long)pkt->data.frame.sz);
+
+ update_rate_histogram(&stream->rate_hist, cfg, pkt);
+ if (stream->config.write_webm) {
+ /* Update the hash */
+ if (!stream->ebml.debug)
+ stream->hash = murmur(pkt->data.frame.buf,
+ (int)pkt->data.frame.sz,
+ stream->hash);
+
+ write_webm_block(&stream->ebml, cfg, pkt);
+ } else {
+ if (pkt->data.frame.partition_id <= 0) {
+ ivf_header_pos = ftello(stream->file);
+ fsize = pkt->data.frame.sz;
+
+ write_ivf_frame_header(stream->file, pkt);
+ } else {
+ fsize += pkt->data.frame.sz;
+
+ if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT)) {
+ off_t currpos = ftello(stream->file);
+ fseeko(stream->file, ivf_header_pos, SEEK_SET);
+ write_ivf_frame_size(stream->file, fsize);
+ fseeko(stream->file, currpos, SEEK_SET);
+ }
+ }
- while ((pkt = vpx_codec_get_cx_data(&stream->encoder, &iter)))
- {
- static size_t fsize = 0;
- static off_t ivf_header_pos = 0;
+ (void) fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz,
+ stream->file);
+ }
+ stream->nbytes += pkt->data.raw.sz;
*got_data = 1;
-
- switch (pkt->kind)
- {
- case VPX_CODEC_CX_FRAME_PKT:
- if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT))
- {
- stream->frames_out++;
- }
+#if CONFIG_DECODERS
+ if (global->test_decode != TEST_DECODE_OFF && !stream->mismatch_seen) {
+ vpx_codec_decode(&stream->decoder, pkt->data.frame.buf,
+ pkt->data.frame.sz, NULL, 0);
+ if (stream->decoder.err) {
+ warn_or_exit_on_error(&stream->decoder,
+ global->test_decode == TEST_DECODE_FATAL,
+ "Failed to decode frame %d in stream %d",
+ stream->frames_out + 1, stream->index);
+ stream->mismatch_seen = stream->frames_out + 1;
+ }
+ }
+#endif
+ break;
+ case VPX_CODEC_STATS_PKT:
+ stream->frames_out++;
+ stats_write(&stream->stats,
+ pkt->data.twopass_stats.buf,
+ pkt->data.twopass_stats.sz);
+ stream->nbytes += pkt->data.raw.sz;
+ break;
+ case VPX_CODEC_PSNR_PKT:
+
+ if (global->show_psnr) {
+ int i;
+
+ stream->psnr_sse_total += pkt->data.psnr.sse[0];
+ stream->psnr_samples_total += pkt->data.psnr.samples[0];
+ for (i = 0; i < 4; i++) {
if (!global->quiet)
- fprintf(stderr, " %6luF",
- (unsigned long)pkt->data.frame.sz);
-
- update_rate_histogram(&stream->rate_hist, cfg, pkt);
- if(stream->config.write_webm)
- {
- /* Update the hash */
- if(!stream->ebml.debug)
- stream->hash = murmur(pkt->data.frame.buf,
- (int)pkt->data.frame.sz,
- stream->hash);
-
- write_webm_block(&stream->ebml, cfg, pkt);
- }
- else
- {
- if (pkt->data.frame.partition_id <= 0)
- {
- ivf_header_pos = ftello(stream->file);
- fsize = pkt->data.frame.sz;
-
- write_ivf_frame_header(stream->file, pkt);
- }
- else
- {
- fsize += pkt->data.frame.sz;
-
- if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT))
- {
- off_t currpos = ftello(stream->file);
- fseeko(stream->file, ivf_header_pos, SEEK_SET);
- write_ivf_frame_size(stream->file, fsize);
- fseeko(stream->file, currpos, SEEK_SET);
- }
- }
-
- (void) fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz,
- stream->file);
- }
- stream->nbytes += pkt->data.raw.sz;
- break;
- case VPX_CODEC_STATS_PKT:
- stream->frames_out++;
- fprintf(stderr, " %6luS",
- (unsigned long)pkt->data.twopass_stats.sz);
- stats_write(&stream->stats,
- pkt->data.twopass_stats.buf,
- pkt->data.twopass_stats.sz);
- stream->nbytes += pkt->data.raw.sz;
- break;
- case VPX_CODEC_PSNR_PKT:
-
- if (global->show_psnr)
- {
- int i;
-
- stream->psnr_sse_total += pkt->data.psnr.sse[0];
- stream->psnr_samples_total += pkt->data.psnr.samples[0];
- for (i = 0; i < 4; i++)
- {
- if (!global->quiet)
- fprintf(stderr, "%.3f ", pkt->data.psnr.psnr[i]);
- stream->psnr_totals[i] += pkt->data.psnr.psnr[i];
- }
- stream->psnr_count++;
- }
-
- break;
- default:
- break;
+ fprintf(stderr, "%.3f ", pkt->data.psnr.psnr[i]);
+ stream->psnr_totals[i] += pkt->data.psnr.psnr[i];
+ }
+ stream->psnr_count++;
}
+
+ break;
+ default:
+ break;
}
+ }
}
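The IVF path above handles partitioned output by back-patching: remember where the frame header was written, stream the fragments, then seek back and store the accumulated size once the final fragment arrives. A stripped-down sketch of the same pattern (hypothetical 4-byte size field in native endianness, not the real 12-byte IVF frame header):

    #include <stdio.h>

    int main(void) {
      FILE *f = fopen("frame.bin", "wb+");
      unsigned char frag1[3] = {1, 2, 3}, frag2[2] = {4, 5};
      unsigned int fsize = 0;
      long header_pos;

      if (!f) return 1;
      header_pos = ftell(f);
      fwrite(&fsize, 4, 1, f);             /* placeholder size */
      fwrite(frag1, 1, sizeof(frag1), f);  /* partition 0 */
      fsize += sizeof(frag1);
      fwrite(frag2, 1, sizeof(frag2), f);  /* final partition */
      fsize += sizeof(frag2);

      fseek(f, header_pos, SEEK_SET);      /* back-patch the real size */
      fwrite(&fsize, 4, 1, f);
      fclose(f);
      return 0;
    }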
-static void show_psnr(struct stream_state *stream)
-{
- int i;
- double ovpsnr;
+static void show_psnr(struct stream_state *stream) {
+ int i;
+ double ovpsnr;
- if (!stream->psnr_count)
- return;
+ if (!stream->psnr_count)
+ return;
- fprintf(stderr, "Stream %d PSNR (Overall/Avg/Y/U/V)", stream->index);
- ovpsnr = vp8_mse2psnr((double)stream->psnr_samples_total, 255.0,
- (double)stream->psnr_sse_total);
- fprintf(stderr, " %.3f", ovpsnr);
+ fprintf(stderr, "Stream %d PSNR (Overall/Avg/Y/U/V)", stream->index);
+ ovpsnr = vp8_mse2psnr((double)stream->psnr_samples_total, 255.0,
+ (double)stream->psnr_sse_total);
+ fprintf(stderr, " %.3f", ovpsnr);
- for (i = 0; i < 4; i++)
- {
- fprintf(stderr, " %.3f", stream->psnr_totals[i]/stream->psnr_count);
- }
- fprintf(stderr, "\n");
+ for (i = 0; i < 4; i++) {
+ fprintf(stderr, " %.3f", stream->psnr_totals[i] / stream->psnr_count);
+ }
+ fprintf(stderr, "\n");
}
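The "Overall" number is PSNR recomputed from the summed SSE and sample counts, not an average of per-frame PSNRs, since averaging dB values would overweight easy frames. A minimal sketch of the formula (the 100 dB clamp for a zero-error case mirrors common practice; exact constants are the library's business):

    #include <math.h>
    #include <stdio.h>

    static double mse2psnr(double samples, double peak, double sse) {
      if (sse == 0.0) return 100.0;
      return 10.0 * log10(peak * peak * samples / sse);
    }

    int main(void) {
      /* e.g. 100 frames of 320x240 I420 (1.5 samples per pixel) */
      double samples = 100 * 320 * 240 * 1.5;
      printf("%.3f dB\n", mse2psnr(samples, 255.0, 5.0e8));  /* ~31.756 */
      return 0;
    }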
-float usec_to_fps(uint64_t usec, unsigned int frames)
-{
- return (float)(usec > 0 ? frames * 1000000.0 / (float)usec : 0);
+static float usec_to_fps(uint64_t usec, unsigned int frames) {
+ return (float)(usec > 0 ? frames * 1000000.0 / (float)usec : 0);
}
-int main(int argc, const char **argv_)
-{
- int pass;
- vpx_image_t raw;
- int frame_avail, got_data;
-
- struct input_state input = {0};
- struct global_config global;
- struct stream_state *streams = NULL;
- char **argv, **argi;
- unsigned long cx_time = 0;
- int stream_cnt = 0;
-
- exec_name = argv_[0];
-
- if (argc < 3)
- usage_exit();
-
- /* Setup default input stream settings */
- input.framerate.num = 30;
- input.framerate.den = 1;
- input.use_i420 = 1;
-
- /* First parse the global configuration values, because we want to apply
- * other parameters on top of the default configuration provided by the
- * codec.
- */
- argv = argv_dup(argc - 1, argv_ + 1);
- parse_global_config(&global, argv);
+static void test_decode(struct stream_state *stream,
+ enum TestDecodeFatality fatal,
+ const struct codec_item *codec) {
+ vpx_image_t enc_img, dec_img;
+
+ if (stream->mismatch_seen)
+ return;
+
+ /* Get the internal reference frame */
+ if (codec->fourcc == VP8_FOURCC) {
+ struct vpx_ref_frame ref_enc, ref_dec;
+ int width, height;
+
+ width = (stream->config.cfg.g_w + 15) & ~15;
+ height = (stream->config.cfg.g_h + 15) & ~15;
+ vpx_img_alloc(&ref_enc.img, VPX_IMG_FMT_I420, width, height, 1);
+ enc_img = ref_enc.img;
+ vpx_img_alloc(&ref_dec.img, VPX_IMG_FMT_I420, width, height, 1);
+ dec_img = ref_dec.img;
+
+ ref_enc.frame_type = VP8_LAST_FRAME;
+ ref_dec.frame_type = VP8_LAST_FRAME;
+ vpx_codec_control(&stream->encoder, VP8_COPY_REFERENCE, &ref_enc);
+ vpx_codec_control(&stream->decoder, VP8_COPY_REFERENCE, &ref_dec);
+ } else {
+ struct vp9_ref_frame ref;
+
+ ref.idx = 0;
+ vpx_codec_control(&stream->encoder, VP9_GET_REFERENCE, &ref);
+ enc_img = ref.img;
+ vpx_codec_control(&stream->decoder, VP9_GET_REFERENCE, &ref);
+ dec_img = ref.img;
+ }
+ ctx_exit_on_error(&stream->encoder, "Failed to get encoder reference frame");
+ ctx_exit_on_error(&stream->decoder, "Failed to get decoder reference frame");
+
+ if (!compare_img(&enc_img, &dec_img)) {
+ int y[4], u[4], v[4];
+ find_mismatch(&enc_img, &dec_img, y, u, v);
+ stream->decoder.err = 1;
+ warn_or_exit_on_error(&stream->decoder, fatal == TEST_DECODE_FATAL,
+ "Stream %d: Encode/decode mismatch on frame %d at"
+ " Y[%d, %d] {%d/%d},"
+ " U[%d, %d] {%d/%d},"
+ " V[%d, %d] {%d/%d}",
+ stream->index, stream->frames_out,
+ y[0], y[1], y[2], y[3],
+ u[0], u[1], u[2], u[3],
+ v[0], v[1], v[2], v[3]);
+ stream->mismatch_seen = stream->frames_out;
+ }
+
+ vpx_img_free(&enc_img);
+ vpx_img_free(&dec_img);
+}
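test_decode() pulls the encoder's and decoder's reference frames and compares them pixel by pixel. A hedged sketch of what a compare_img()-style walk has to get right: strides usually exceed the visible width, so comparison is per row, never one memcmp over a whole plane (hypothetical image struct, not vpx_image_t):

    #include <string.h>

    struct img {
      unsigned char *planes[3];
      int stride[3];
      int w[3], h[3];
    };

    static int images_match(const struct img *a, const struct img *b) {
      for (int p = 0; p < 3; p++)
        for (int y = 0; y < a->h[p]; y++)
          if (memcmp(a->planes[p] + y * a->stride[p],
                     b->planes[p] + y * b->stride[p], a->w[p]))
            return 0;
      return 1;
    }

    int main(void) {
      unsigned char buf[2][8] = {{0}};
      struct img a = {{buf[0], buf[0], buf[0]}, {4, 4, 4}, {3, 3, 3}, {2, 2, 2}};
      struct img b = {{buf[1], buf[1], buf[1]}, {4, 4, 4}, {3, 3, 3}, {2, 2, 2}};
      return images_match(&a, &b) ? 0 : 1;  /* both zeroed, so they match */
    }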
- {
- /* Now parse each stream's parameters. Using a local scope here
- * due to the use of 'stream' as loop variable in FOREACH_STREAM
- * loops
- */
- struct stream_state *stream = NULL;
- do
- {
- stream = new_stream(&global, stream);
- stream_cnt++;
- if(!streams)
- streams = stream;
- } while(parse_stream_params(&global, stream, argv));
- }
+static void print_time(const char *label, int64_t etl) {
+ int hours, mins, secs;
- /* Check for unrecognized options */
- for (argi = argv; *argi; argi++)
- if (argi[0][0] == '-' && argi[0][1])
- die("Error: Unrecognized option %s\n", *argi);
+ if (etl >= 0) {
+ hours = etl / 3600;
+ etl -= hours * 3600;
+ mins = etl / 60;
+ etl -= mins * 60;
+ secs = etl;
- /* Handle non-option arguments */
- input.fn = argv[0];
+ fprintf(stderr, "[%3s %2d:%02d:%02d] ",
+ label, hours, mins, secs);
+ } else {
+ fprintf(stderr, "[%3s unknown] ", label);
+ }
+}
- if (!input.fn)
- usage_exit();
+int main(int argc, const char **argv_) {
+ int pass;
+ vpx_image_t raw;
+ int frame_avail, got_data;
- for (pass = global.pass ? global.pass - 1 : 0; pass < global.passes; pass++)
- {
- int frames_in = 0;
-
- open_input_file(&input);
-
- /* If the input file doesn't specify its w/h (raw files), try to get
- * the data from the first stream's configuration.
- */
- if(!input.w || !input.h)
- FOREACH_STREAM({
- if(stream->config.cfg.g_w && stream->config.cfg.g_h)
- {
- input.w = stream->config.cfg.g_w;
- input.h = stream->config.cfg.g_h;
- break;
- }
- });
-
- /* Update stream configurations from the input file's parameters */
- FOREACH_STREAM(set_stream_dimensions(stream, input.w, input.h));
- FOREACH_STREAM(validate_stream_config(stream));
-
- /* Ensure that --passes and --pass are consistent. If --pass is set and
- * --passes=2, ensure --fpf was set.
- */
- if (global.pass && global.passes == 2)
- FOREACH_STREAM({
- if(!stream->config.stats_fn)
- die("Stream %d: Must specify --fpf when --pass=%d"
- " and --passes=2\n", stream->index, global.pass);
- });
-
-
- /* Use the frame rate from the file only if none was specified
- * on the command-line.
- */
- if (!global.have_framerate)
- global.framerate = input.framerate;
-
- FOREACH_STREAM(set_default_kf_interval(stream, &global));
-
- /* Show configuration */
- if (global.verbose && pass == 0)
- FOREACH_STREAM(show_stream_config(stream, &global, &input));
-
- if(pass == (global.pass ? global.pass - 1 : 0)) {
- if (input.file_type == FILE_TYPE_Y4M)
- /*The Y4M reader does its own allocation.
- Just initialize this here to avoid problems if we never read any
- frames.*/
- memset(&raw, 0, sizeof(raw));
- else
- vpx_img_alloc(&raw,
- input.use_i420 ? VPX_IMG_FMT_I420
- : VPX_IMG_FMT_YV12,
- input.w, input.h, 32);
-
- FOREACH_STREAM(init_rate_histogram(&stream->rate_hist,
- &stream->config.cfg,
- &global.framerate));
- }
+ struct input_state input = {0};
+ struct global_config global;
+ struct stream_state *streams = NULL;
+ char **argv, **argi;
+ uint64_t cx_time = 0;
+ int stream_cnt = 0;
+ int res = 0;
- FOREACH_STREAM(open_output_file(stream, &global));
- FOREACH_STREAM(setup_pass(stream, &global, pass));
- FOREACH_STREAM(initialize_encoder(stream, &global));
+ exec_name = argv_[0];
- frame_avail = 1;
- got_data = 0;
+ if (argc < 3)
+ usage_exit();
- while (frame_avail || got_data)
- {
- struct vpx_usec_timer timer;
-
- if (!global.limit || frames_in < global.limit)
- {
- frame_avail = read_frame(&input, &raw);
-
- if (frame_avail)
- frames_in++;
-
- if (!global.quiet)
- {
- if(stream_cnt == 1)
- fprintf(stderr,
- "\rPass %d/%d frame %4d/%-4d %7"PRId64"B \033[K",
- pass + 1, global.passes, frames_in,
- streams->frames_out, (int64_t)streams->nbytes);
- else
- fprintf(stderr,
- "\rPass %d/%d frame %4d %7lu %s (%.2f fps)\033[K",
- pass + 1, global.passes, frames_in,
- cx_time > 9999999 ? cx_time / 1000 : cx_time,
- cx_time > 9999999 ? "ms" : "us",
- usec_to_fps(cx_time, frames_in));
- }
+ /* Set up default input stream settings */
+ input.framerate.num = 30;
+ input.framerate.den = 1;
+ input.use_i420 = 1;
+ input.only_i420 = 1;
+
+ /* First parse the global configuration values, because we want to apply
+ * other parameters on top of the default configuration provided by the
+ * codec.
+ */
+ argv = argv_dup(argc - 1, argv_ + 1);
+ parse_global_config(&global, argv);
+
+ {
+ /* Now parse each stream's parameters. Using a local scope here
+ * due to the use of 'stream' as loop variable in FOREACH_STREAM
+ * loops
+ */
+ struct stream_state *stream = NULL;
- }
- else
- frame_avail = 0;
+ do {
+ stream = new_stream(&global, stream);
+ stream_cnt++;
+ if (!streams)
+ streams = stream;
+ } while (parse_stream_params(&global, stream, argv));
+ }
- vpx_usec_timer_start(&timer);
- FOREACH_STREAM(encode_frame(stream, &global,
- frame_avail ? &raw : NULL,
- frames_in));
- vpx_usec_timer_mark(&timer);
- cx_time += (unsigned long)vpx_usec_timer_elapsed(&timer);
+ /* Check for unrecognized options */
+ for (argi = argv; *argi; argi++)
+ if (argi[0][0] == '-' && argi[0][1])
+ die("Error: Unrecognized option %s\n", *argi);
- FOREACH_STREAM(update_quantizer_histogram(stream));
+ /* Handle non-option arguments */
+ input.fn = argv[0];
- got_data = 0;
- FOREACH_STREAM(get_cx_data(stream, &global, &got_data));
+ if (!input.fn)
+ usage_exit();
- fflush(stdout);
- }
+#if CONFIG_NON420
+ /* Decide whether chroma subsamplings other than 4:2:0 are supported */
+ if (global.codec->fourcc == VP9_FOURCC)
+ input.only_i420 = 0;
+#endif
+
+ for (pass = global.pass ? global.pass - 1 : 0; pass < global.passes; pass++) {
+ int frames_in = 0, seen_frames = 0;
+ int64_t estimated_time_left = -1;
+ int64_t average_rate = -1;
+ off_t lagged_count = 0;
- if(stream_cnt > 1)
- fprintf(stderr, "\n");
+ open_input_file(&input);
- if (!global.quiet)
- FOREACH_STREAM(fprintf(
- stderr,
- "\rPass %d/%d frame %4d/%-4d %7"PRId64"B %7lub/f %7"PRId64"b/s"
- " %7"PRId64" %s (%.2f fps)\033[K\n", pass + 1,
- global.passes, frames_in, stream->frames_out, (int64_t)stream->nbytes,
- frames_in ? (unsigned long)(stream->nbytes * 8 / frames_in) : 0,
- frames_in ? (int64_t)stream->nbytes * 8
- * (int64_t)global.framerate.num / global.framerate.den
- / frames_in
- : 0,
- stream->cx_time > 9999999 ? stream->cx_time / 1000 : stream->cx_time,
- stream->cx_time > 9999999 ? "ms" : "us",
- usec_to_fps(stream->cx_time, frames_in));
- );
+ /* If the input file doesn't specify its w/h (raw files), try to get
+ * the data from the first stream's configuration.
+ */
+ if (!input.w || !input.h)
+ FOREACH_STREAM( {
+ if (stream->config.cfg.g_w && stream->config.cfg.g_h) {
+ input.w = stream->config.cfg.g_w;
+ input.h = stream->config.cfg.g_h;
+ break;
+ }
+ });
+
+ /* Update stream configurations from the input file's parameters */
+ if (!input.w || !input.h)
+ fatal("Specify stream dimensions with --width (-w) "
+ " and --height (-h)");
+ FOREACH_STREAM(set_stream_dimensions(stream, input.w, input.h));
+ FOREACH_STREAM(validate_stream_config(stream));
+
+ /* Ensure that --passes and --pass are consistent. If --pass is set and
+ * --passes=2, ensure --fpf was set.
+ */
+ if (global.pass && global.passes == 2)
+ FOREACH_STREAM( {
+ if (!stream->config.stats_fn)
+ die("Stream %d: Must specify --fpf when --pass=%d"
+ " and --passes=2\n", stream->index, global.pass);
+ });
+
+ /* Use the frame rate from the file only if none was specified
+ * on the command-line.
+ */
+ if (!global.have_framerate)
+ global.framerate = input.framerate;
+
+ FOREACH_STREAM(set_default_kf_interval(stream, &global));
+
+ /* Show configuration */
+ if (global.verbose && pass == 0)
+ FOREACH_STREAM(show_stream_config(stream, &global, &input));
+
+ if (pass == (global.pass ? global.pass - 1 : 0)) {
+ if (input.file_type == FILE_TYPE_Y4M)
+ /*The Y4M reader does its own allocation.
+ Just initialize this here to avoid problems if we never read any
+ frames.*/
+ memset(&raw, 0, sizeof(raw));
+ else
+ vpx_img_alloc(&raw,
+ input.use_i420 ? VPX_IMG_FMT_I420
+ : VPX_IMG_FMT_YV12,
+ input.w, input.h, 32);
+
+ FOREACH_STREAM(init_rate_histogram(&stream->rate_hist,
+ &stream->config.cfg,
+ &global.framerate));
+ }
+
+ FOREACH_STREAM(setup_pass(stream, &global, pass));
+ FOREACH_STREAM(open_output_file(stream, &global));
+ FOREACH_STREAM(initialize_encoder(stream, &global));
+
+ frame_avail = 1;
+ got_data = 0;
+
+ while (frame_avail || got_data) {
+ struct vpx_usec_timer timer;
+
+ if (!global.limit || frames_in < global.limit) {
+ frame_avail = read_frame(&input, &raw);
+
+ if (frame_avail)
+ frames_in++;
+ seen_frames = frames_in > global.skip_frames ?
+ frames_in - global.skip_frames : 0;
+
+ if (!global.quiet) {
+ float fps = usec_to_fps(cx_time, seen_frames);
+ fprintf(stderr, "\rPass %d/%d ", pass + 1, global.passes);
+
+ if (stream_cnt == 1)
+ fprintf(stderr,
+ "frame %4d/%-4d %7"PRId64"B ",
+ frames_in, streams->frames_out, (int64_t)streams->nbytes);
+ else
+ fprintf(stderr, "frame %4d ", frames_in);
+
+ fprintf(stderr, "%7"PRId64" %s %.2f %s ",
+ cx_time > 9999999 ? cx_time / 1000 : cx_time,
+ cx_time > 9999999 ? "ms" : "us",
+ fps >= 1.0 ? fps : 1000.0 / fps,
+ fps >= 1.0 ? "fps" : "ms/f");
+ print_time("ETA", estimated_time_left);
+ fprintf(stderr, "\033[K");
+ }
+
+ } else
+ frame_avail = 0;
- if (global.show_psnr)
- FOREACH_STREAM(show_psnr(stream));
+ if (frames_in > global.skip_frames) {
+ vpx_usec_timer_start(&timer);
+ FOREACH_STREAM(encode_frame(stream, &global,
+ frame_avail ? &raw : NULL,
+ frames_in));
+ vpx_usec_timer_mark(&timer);
+ cx_time += vpx_usec_timer_elapsed(&timer);
- FOREACH_STREAM(vpx_codec_destroy(&stream->encoder));
+ FOREACH_STREAM(update_quantizer_histogram(stream));
- close_input_file(&input);
+ got_data = 0;
+ FOREACH_STREAM(get_cx_data(stream, &global, &got_data));
+
+ if (!got_data && input.length && !streams->frames_out) {
+ lagged_count = global.limit ? seen_frames : ftello(input.file);
+ } else if (input.length) {
+ int64_t remaining;
+ int64_t rate;
+
+ if (global.limit) {
+ int frame_in_lagged = (seen_frames - lagged_count) * 1000;
+
+ rate = cx_time ? frame_in_lagged * (int64_t)1000000 / cx_time : 0;
+ remaining = 1000 * (global.limit - global.skip_frames
+ - seen_frames + lagged_count);
+ } else {
+ off_t input_pos = ftello(input.file);
+ off_t input_pos_lagged = input_pos - lagged_count;
+ int64_t limit = input.length;
+
+ rate = cx_time ? input_pos_lagged * (int64_t)1000000 / cx_time : 0;
+ remaining = limit - input_pos + lagged_count;
+ }
+
+ average_rate = (average_rate <= 0)
+ ? rate
+ : (average_rate * 7 + rate) / 8;
+ estimated_time_left = average_rate ? remaining / average_rate : -1;
+ }
- FOREACH_STREAM(close_output_file(stream, global.codec->fourcc));
+ if (got_data && global.test_decode != TEST_DECODE_OFF)
+ FOREACH_STREAM(test_decode(stream, global.test_decode, global.codec));
+ }
- FOREACH_STREAM(stats_close(&stream->stats, global.passes-1));
+ fflush(stdout);
+ }
- if (global.pass)
- break;
+ if (stream_cnt > 1)
+ fprintf(stderr, "\n");
+
+ if (!global.quiet)
+ FOREACH_STREAM(fprintf(
+ stderr,
+ "\rPass %d/%d frame %4d/%-4d %7"PRId64"B %7lub/f %7"PRId64"b/s"
+ " %7"PRId64" %s (%.2f fps)\033[K\n", pass + 1,
+ global.passes, frames_in, stream->frames_out, (int64_t)stream->nbytes,
+ seen_frames ? (unsigned long)(stream->nbytes * 8 / seen_frames) : 0,
+ seen_frames ? (int64_t)stream->nbytes * 8
+ * (int64_t)global.framerate.num / global.framerate.den
+ / seen_frames
+ : 0,
+ stream->cx_time > 9999999 ? stream->cx_time / 1000 : stream->cx_time,
+ stream->cx_time > 9999999 ? "ms" : "us",
+ usec_to_fps(stream->cx_time, seen_frames));
+ );
+
+ if (global.show_psnr)
+ FOREACH_STREAM(show_psnr(stream));
+
+ FOREACH_STREAM(vpx_codec_destroy(&stream->encoder));
+
+ if (global.test_decode != TEST_DECODE_OFF) {
+ FOREACH_STREAM(vpx_codec_destroy(&stream->decoder));
}
- if (global.show_q_hist_buckets)
- FOREACH_STREAM(show_q_histogram(stream->counts,
- global.show_q_hist_buckets));
+ close_input_file(&input);
- if (global.show_rate_hist_buckets)
- FOREACH_STREAM(show_rate_histogram(&stream->rate_hist,
- &stream->config.cfg,
- global.show_rate_hist_buckets));
- FOREACH_STREAM(destroy_rate_histogram(&stream->rate_hist));
+ if (global.test_decode == TEST_DECODE_FATAL) {
+ FOREACH_STREAM(res |= stream->mismatch_seen);
+ }
+ FOREACH_STREAM(close_output_file(stream, global.codec->fourcc));
+
+ FOREACH_STREAM(stats_close(&stream->stats, global.passes - 1));
+
+ if (global.pass)
+ break;
+ }
+
+ if (global.show_q_hist_buckets)
+ FOREACH_STREAM(show_q_histogram(stream->counts,
+ global.show_q_hist_buckets));
+
+ if (global.show_rate_hist_buckets)
+ FOREACH_STREAM(show_rate_histogram(&stream->rate_hist,
+ &stream->config.cfg,
+ global.show_rate_hist_buckets));
+ FOREACH_STREAM(destroy_rate_histogram(&stream->rate_hist));
+
+#if CONFIG_INTERNAL_STATS
+ /* TODO(jkoleszar): This doesn't belong in this executable. Do it for now,
+ * to match some existing utilities.
+ */
+ FOREACH_STREAM({
+ FILE *f = fopen("opsnr.stt", "a");
+ if (stream->mismatch_seen) {
+ fprintf(f, "First mismatch occurred in frame %d\n",
+ stream->mismatch_seen);
+ } else {
+ fprintf(f, "No mismatch detected in recon buffers\n");
+ }
+ fclose(f);
+ });
+#endif
- vpx_img_free(&raw);
- free(argv);
- free(streams);
- return EXIT_SUCCESS;
+ vpx_img_free(&raw);
+ free(argv);
+ free(streams);
+ return res ? EXIT_FAILURE : EXIT_SUCCESS;
}
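One piece of the new main() worth a closer look is the ETA estimate: the instantaneous throughput is smoothed with an integer exponential moving average (weight 7/8 old, 1/8 new) before the remaining work is divided by it, so a single slow or fast burst barely moves the prediction. A minimal sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int64_t samples[] = {900, 1100, 1000, 4000, 1000};  /* units per sec */
      int64_t remaining = 100000;
      int64_t average_rate = -1;

      for (int i = 0; i < 5; i++) {
        int64_t rate = samples[i];
        average_rate = (average_rate <= 0) ? rate
                                           : (average_rate * 7 + rate) / 8;
        /* the 4000 spike only nudges the average from 934 to 1317 */
        printf("rate=%lld avg=%lld eta=%llds\n", (long long)rate,
               (long long)average_rate,
               (long long)(average_rate ? remaining / average_rate : -1));
      }
      return 0;
    }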
diff --git a/libvpx/y4minput.c b/libvpx/y4minput.c
index ff9ffbc..47f005a 100644
--- a/libvpx/y4minput.c
+++ b/libvpx/y4minput.c
@@ -14,7 +14,7 @@
#include <string.h>
#include "y4minput.h"
-static int y4m_parse_tags(y4m_input *_y4m,char *_tags){
+static int y4m_parse_tags(y4m_input *_y4m, char *_tags) {
int got_w;
int got_h;
int got_fps;
@@ -23,55 +23,61 @@ static int y4m_parse_tags(y4m_input *_y4m,char *_tags){
int got_chroma;
char *p;
char *q;
- got_w=got_h=got_fps=got_interlace=got_par=got_chroma=0;
- for(p=_tags;;p=q){
+ got_w = got_h = got_fps = got_interlace = got_par = got_chroma = 0;
+ for (p = _tags;; p = q) {
/*Skip any leading spaces.*/
- while(*p==' ')p++;
+ while (*p == ' ')p++;
/*If that's all we have, stop.*/
- if(p[0]=='\0')break;
+ if (p[0] == '\0')break;
/*Find the end of this tag.*/
- for(q=p+1;*q!='\0'&&*q!=' ';q++);
+ for (q = p + 1; *q != '\0' && *q != ' '; q++);
/*Process the tag.*/
- switch(p[0]){
- case 'W':{
- if(sscanf(p+1,"%d",&_y4m->pic_w)!=1)return -1;
- got_w=1;
- }break;
- case 'H':{
- if(sscanf(p+1,"%d",&_y4m->pic_h)!=1)return -1;
- got_h=1;
- }break;
- case 'F':{
- if(sscanf(p+1,"%d:%d",&_y4m->fps_n,&_y4m->fps_d)!=2){
+ switch (p[0]) {
+ case 'W': {
+ if (sscanf(p + 1, "%d", &_y4m->pic_w) != 1)return -1;
+ got_w = 1;
+ }
+ break;
+ case 'H': {
+ if (sscanf(p + 1, "%d", &_y4m->pic_h) != 1)return -1;
+ got_h = 1;
+ }
+ break;
+ case 'F': {
+ if (sscanf(p + 1, "%d:%d", &_y4m->fps_n, &_y4m->fps_d) != 2) {
return -1;
}
- got_fps=1;
- }break;
- case 'I':{
- _y4m->interlace=p[1];
- got_interlace=1;
- }break;
- case 'A':{
- if(sscanf(p+1,"%d:%d",&_y4m->par_n,&_y4m->par_d)!=2){
+ got_fps = 1;
+ }
+ break;
+ case 'I': {
+ _y4m->interlace = p[1];
+ got_interlace = 1;
+ }
+ break;
+ case 'A': {
+ if (sscanf(p + 1, "%d:%d", &_y4m->par_n, &_y4m->par_d) != 2) {
return -1;
}
- got_par=1;
- }break;
- case 'C':{
- if(q-p>16)return -1;
- memcpy(_y4m->chroma_type,p+1,q-p-1);
- _y4m->chroma_type[q-p-1]='\0';
- got_chroma=1;
- }break;
+ got_par = 1;
+ }
+ break;
+ case 'C': {
+ if (q - p > 16)return -1;
+ memcpy(_y4m->chroma_type, p + 1, q - p - 1);
+ _y4m->chroma_type[q - p - 1] = '\0';
+ got_chroma = 1;
+ }
+ break;
/*Ignore unknown tags.*/
}
}
- if(!got_w||!got_h||!got_fps)return -1;
- if(!got_interlace)_y4m->interlace='?';
- if(!got_par)_y4m->par_n=_y4m->par_d=0;
+ if (!got_w || !got_h || !got_fps)return -1;
+ if (!got_interlace)_y4m->interlace = '?';
+ if (!got_par)_y4m->par_n = _y4m->par_d = 0;
/*Chroma-type is not specified in older files, e.g., those generated by
mplayer.*/
- if(!got_chroma)strcpy(_y4m->chroma_type,"420");
+ if (!got_chroma)strcpy(_y4m->chroma_type, "420");
return 0;
}
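For reference, this function consumes the space-separated tag list of a Y4M stream header, presumably after the caller has stripped the leading "YUV4MPEG2" magic. A representative header line:

    YUV4MPEG2 W640 H480 F30000:1001 Ip A1:1 C420jpeg

Per the code above, W, H and F are mandatory; a missing I defaults to '?', a missing A to 0:0, and a missing C to "420".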
@@ -145,48 +151,48 @@ static int y4m_parse_tags(y4m_input *_y4m,char *_tags){
lines, and they are vertically co-sited with the luma samples in both the
mpeg2 and jpeg cases (thus requiring no vertical resampling).*/
static void y4m_42xmpeg2_42xjpeg_helper(unsigned char *_dst,
- const unsigned char *_src,int _c_w,int _c_h){
+ const unsigned char *_src, int _c_w, int _c_h) {
int y;
int x;
- for(y=0;y<_c_h;y++){
+ for (y = 0; y < _c_h; y++) {
/*Filter: [4 -17 114 35 -9 1]/128, derived from a 6-tap Lanczos
window.*/
- for(x=0;x<OC_MINI(_c_w,2);x++){
- _dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[0]-17*_src[OC_MAXI(x-1,0)]+
- 114*_src[x]+35*_src[OC_MINI(x+1,_c_w-1)]-9*_src[OC_MINI(x+2,_c_w-1)]+
- _src[OC_MINI(x+3,_c_w-1)]+64)>>7,255);
+ for (x = 0; x < OC_MINI(_c_w, 2); x++) {
+ _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[0] - 17 * _src[OC_MAXI(x - 1, 0)] +
+ 114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] +
+ _src[OC_MINI(x + 3, _c_w - 1)] + 64) >> 7, 255);
}
- for(;x<_c_w-3;x++){
- _dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[x-2]-17*_src[x-1]+
- 114*_src[x]+35*_src[x+1]-9*_src[x+2]+_src[x+3]+64)>>7,255);
+ for (; x < _c_w - 3; x++) {
+ _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] +
+ 114 * _src[x] + 35 * _src[x + 1] - 9 * _src[x + 2] + _src[x + 3] + 64) >> 7, 255);
}
- for(;x<_c_w;x++){
- _dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[x-2]-17*_src[x-1]+
- 114*_src[x]+35*_src[OC_MINI(x+1,_c_w-1)]-9*_src[OC_MINI(x+2,_c_w-1)]+
- _src[_c_w-1]+64)>>7,255);
+ for (; x < _c_w; x++) {
+ _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] +
+ 114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] +
+ _src[_c_w - 1] + 64) >> 7, 255);
}
- _dst+=_c_w;
- _src+=_c_w;
+ _dst += _c_w;
+ _src += _c_w;
}
}
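A quick sanity check on the filter above: the taps [4 -17 114 35 -9 1] sum to 128, so adding 64 and shifting right by 7 is a round-to-nearest divide by 128, and a flat region passes through unchanged (unity DC gain):

    #include <stdio.h>

    static int clampi(int lo, int v, int hi) {
      return v < lo ? lo : v > hi ? hi : v;
    }

    int main(void) {
      int s = 200;  /* constant chroma row */
      int out = clampi(0, (4*s - 17*s + 114*s + 35*s - 9*s + s + 64) >> 7, 255);
      printf("%d\n", out);  /* prints 200 */
      return 0;
    }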
/*Handles both 422 and 420mpeg2 to 422jpeg and 420jpeg, respectively.*/
-static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
int c_w;
int c_h;
int c_sz;
int pli;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- for(pli=1;pli<3;pli++){
- y4m_42xmpeg2_42xjpeg_helper(_dst,_aux,c_w,c_h);
- _dst+=c_sz;
- _aux+=c_sz;
+ c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ for (pli = 1; pli < 3; pli++) {
+ y4m_42xmpeg2_42xjpeg_helper(_dst, _aux, c_w, c_h);
+ _dst += c_sz;
+ _aux += c_sz;
}
}
@@ -233,8 +239,8 @@ static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
the chroma plane's resolution) to the right.
Then we use another filter to move the C_r location down one quarter pixel,
and the C_b location up one quarter pixel.*/
-static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
unsigned char *tmp;
int c_w;
int c_h;
@@ -243,69 +249,71 @@ static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
int y;
int x;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+1)/2;
- c_h=(_y4m->pic_h+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- c_sz=c_w*c_h;
- tmp=_aux+2*c_sz;
- for(pli=1;pli<3;pli++){
+ c_w = (_y4m->pic_w + 1) / 2;
+ c_h = (_y4m->pic_h + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ c_sz = c_w * c_h;
+ tmp = _aux + 2 * c_sz;
+ for (pli = 1; pli < 3; pli++) {
/*First do the horizontal re-sampling.
This is the same as the mpeg2 case, except that after the horizontal
case, we need to apply a second vertical filter.*/
- y4m_42xmpeg2_42xjpeg_helper(tmp,_aux,c_w,c_h);
- _aux+=c_sz;
- switch(pli){
- case 1:{
+ y4m_42xmpeg2_42xjpeg_helper(tmp, _aux, c_w, c_h);
+ _aux += c_sz;
+ switch (pli) {
+ case 1: {
/*Slide C_b up a quarter-pel.
This is the same filter used above, but in the other order.*/
- for(x=0;x<c_w;x++){
- for(y=0;y<OC_MINI(c_h,3);y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[0]
- -9*tmp[OC_MAXI(y-2,0)*c_w]+35*tmp[OC_MAXI(y-1,0)*c_w]
- +114*tmp[y*c_w]-17*tmp[OC_MINI(y+1,c_h-1)*c_w]
- +4*tmp[OC_MINI(y+2,c_h-1)*c_w]+64)>>7,255);
+ for (x = 0; x < c_w; x++) {
+ for (y = 0; y < OC_MINI(c_h, 3); y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[0]
+ - 9 * tmp[OC_MAXI(y - 2, 0) * c_w] + 35 * tmp[OC_MAXI(y - 1, 0) * c_w]
+ + 114 * tmp[y * c_w] - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w]
+ + 4 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + 64) >> 7, 255);
}
- for(;y<c_h-2;y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[(y-3)*c_w]
- -9*tmp[(y-2)*c_w]+35*tmp[(y-1)*c_w]+114*tmp[y*c_w]
- -17*tmp[(y+1)*c_w]+4*tmp[(y+2)*c_w]+64)>>7,255);
+ for (; y < c_h - 2; y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w]
+ - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w]
+ - 17 * tmp[(y + 1) * c_w] + 4 * tmp[(y + 2) * c_w] + 64) >> 7, 255);
}
- for(;y<c_h;y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[(y-3)*c_w]
- -9*tmp[(y-2)*c_w]+35*tmp[(y-1)*c_w]+114*tmp[y*c_w]
- -17*tmp[OC_MINI(y+1,c_h-1)*c_w]+4*tmp[(c_h-1)*c_w]+64)>>7,255);
+ for (; y < c_h; y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w]
+ - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w]
+ - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] + 4 * tmp[(c_h - 1) * c_w] + 64) >> 7, 255);
}
_dst++;
tmp++;
}
- _dst+=c_sz-c_w;
- tmp-=c_w;
- }break;
- case 2:{
+ _dst += c_sz - c_w;
+ tmp -= c_w;
+ }
+ break;
+ case 2: {
/*Slide C_r down a quarter-pel.
This is the same as the horizontal filter.*/
- for(x=0;x<c_w;x++){
- for(y=0;y<OC_MINI(c_h,2);y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[0]
- -17*tmp[OC_MAXI(y-1,0)*c_w]+114*tmp[y*c_w]
- +35*tmp[OC_MINI(y+1,c_h-1)*c_w]-9*tmp[OC_MINI(y+2,c_h-1)*c_w]
- +tmp[OC_MINI(y+3,c_h-1)*c_w]+64)>>7,255);
+ for (x = 0; x < c_w; x++) {
+ for (y = 0; y < OC_MINI(c_h, 2); y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[0]
+ - 17 * tmp[OC_MAXI(y - 1, 0) * c_w] + 114 * tmp[y * c_w]
+ + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w]
+ + tmp[OC_MINI(y + 3, c_h - 1) * c_w] + 64) >> 7, 255);
}
- for(;y<c_h-3;y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[(y-2)*c_w]
- -17*tmp[(y-1)*c_w]+114*tmp[y*c_w]+35*tmp[(y+1)*c_w]
- -9*tmp[(y+2)*c_w]+tmp[(y+3)*c_w]+64)>>7,255);
+ for (; y < c_h - 3; y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w]
+ - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[(y + 1) * c_w]
+ - 9 * tmp[(y + 2) * c_w] + tmp[(y + 3) * c_w] + 64) >> 7, 255);
}
- for(;y<c_h;y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[(y-2)*c_w]
- -17*tmp[(y-1)*c_w]+114*tmp[y*c_w]+35*tmp[OC_MINI(y+1,c_h-1)*c_w]
- -9*tmp[OC_MINI(y+2,c_h-1)*c_w]+tmp[(c_h-1)*c_w]+64)>>7,255);
+ for (; y < c_h; y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w]
+ - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w]
+ - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + tmp[(c_h - 1) * c_w] + 64) >> 7, 255);
}
_dst++;
tmp++;
}
- }break;
+ }
+ break;
}
/*For actual interlaced material, this would have to be done separately on
each field, and the shift amounts would be different.
@@ -320,27 +328,27 @@ static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
/*Perform vertical filtering to reduce a single plane from 4:2:2 to 4:2:0.
This is used as a helper by several conversion routines.*/
static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst,
- const unsigned char *_src,int _c_w,int _c_h){
+ const unsigned char *_src, int _c_w, int _c_h) {
int y;
int x;
/*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
- for(x=0;x<_c_w;x++){
- for(y=0;y<OC_MINI(_c_h,2);y+=2){
- _dst[(y>>1)*_c_w]=OC_CLAMPI(0,(64*_src[0]
- +78*_src[OC_MINI(1,_c_h-1)*_c_w]
- -17*_src[OC_MINI(2,_c_h-1)*_c_w]
- +3*_src[OC_MINI(3,_c_h-1)*_c_w]+64)>>7,255);
+ for (x = 0; x < _c_w; x++) {
+ for (y = 0; y < OC_MINI(_c_h, 2); y += 2) {
+ _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (64 * _src[0]
+ + 78 * _src[OC_MINI(1, _c_h - 1) * _c_w]
+ - 17 * _src[OC_MINI(2, _c_h - 1) * _c_w]
+ + 3 * _src[OC_MINI(3, _c_h - 1) * _c_w] + 64) >> 7, 255);
}
- for(;y<_c_h-3;y+=2){
- _dst[(y>>1)*_c_w]=OC_CLAMPI(0,(3*(_src[(y-2)*_c_w]+_src[(y+3)*_c_w])
- -17*(_src[(y-1)*_c_w]+_src[(y+2)*_c_w])
- +78*(_src[y*_c_w]+_src[(y+1)*_c_w])+64)>>7,255);
+ for (; y < _c_h - 3; y += 2) {
+ _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w] + _src[(y + 3) * _c_w])
+ - 17 * (_src[(y - 1) * _c_w] + _src[(y + 2) * _c_w])
+ + 78 * (_src[y * _c_w] + _src[(y + 1) * _c_w]) + 64) >> 7, 255);
}
- for(;y<_c_h;y+=2){
- _dst[(y>>1)*_c_w]=OC_CLAMPI(0,(3*(_src[(y-2)*_c_w]
- +_src[(_c_h-1)*_c_w])-17*(_src[(y-1)*_c_w]
- +_src[OC_MINI(y+2,_c_h-1)*_c_w])
- +78*(_src[y*_c_w]+_src[OC_MINI(y+1,_c_h-1)*_c_w])+64)>>7,255);
+ for (; y < _c_h; y += 2) {
+ _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w]
+ + _src[(_c_h - 1) * _c_w]) - 17 * (_src[(y - 1) * _c_w]
+ + _src[OC_MINI(y + 2, _c_h - 1) * _c_w])
+ + 78 * (_src[y * _c_w] + _src[OC_MINI(y + 1, _c_h - 1) * _c_w]) + 64) >> 7, 255);
}
_src++;
_dst++;
@@ -385,8 +393,8 @@ static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst,
We use a resampling filter to decimate the chroma planes by two in the
vertical direction.*/
-static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
int c_w;
int c_h;
int c_sz;
@@ -395,18 +403,18 @@ static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m,unsigned char *_dst,
int dst_c_sz;
int pli;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
- c_h=_y4m->pic_h;
- dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- dst_c_sz=dst_c_w*dst_c_h;
- for(pli=1;pli<3;pli++){
- y4m_422jpeg_420jpeg_helper(_dst,_aux,c_w,c_h);
- _aux+=c_sz;
- _dst+=dst_c_sz;
+ c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
+ c_h = _y4m->pic_h;
+ dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ dst_c_sz = dst_c_w * dst_c_h;
+ for (pli = 1; pli < 3; pli++) {
+ y4m_422jpeg_420jpeg_helper(_dst, _aux, c_w, c_h);
+ _aux += c_sz;
+ _dst += dst_c_sz;
}
}
@@ -450,8 +458,8 @@ static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m,unsigned char *_dst,
pixel (at the original chroma resolution) to the right.
Then we use a second resampling filter to decimate the chroma planes by two
in the vertical direction.*/
-static void y4m_convert_422_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_422_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
unsigned char *tmp;
int c_w;
int c_h;
@@ -460,24 +468,24 @@ static void y4m_convert_422_420jpeg(y4m_input *_y4m,unsigned char *_dst,
int dst_c_sz;
int pli;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
- c_h=_y4m->pic_h;
- dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- dst_c_sz=c_w*dst_c_h;
- tmp=_aux+2*c_sz;
- for(pli=1;pli<3;pli++){
+ c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
+ c_h = _y4m->pic_h;
+ dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ dst_c_sz = c_w * dst_c_h;
+ tmp = _aux + 2 * c_sz;
+ for (pli = 1; pli < 3; pli++) {
/*In reality, the horizontal and vertical steps could be pipelined, for
less memory consumption and better cache performance, but we do them
separately for simplicity.*/
/*First do horizontal filtering (convert to 422jpeg)*/
- y4m_42xmpeg2_42xjpeg_helper(tmp,_aux,c_w,c_h);
+ y4m_42xmpeg2_42xjpeg_helper(tmp, _aux, c_w, c_h);
/*Now do the vertical filtering.*/
- y4m_422jpeg_420jpeg_helper(_dst,tmp,c_w,c_h);
- _aux+=c_sz;
- _dst+=dst_c_sz;
+ y4m_422jpeg_420jpeg_helper(_dst, tmp, c_w, c_h);
+ _aux += c_sz;
+ _dst += dst_c_sz;
}
}
@@ -522,8 +530,8 @@ static void y4m_convert_422_420jpeg(y4m_input *_y4m,unsigned char *_dst,
right.
Then we use another filter to decimate the planes by 2 in the vertical
direction.*/
-static void y4m_convert_411_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
unsigned char *tmp;
int c_w;
int c_h;
@@ -536,57 +544,57 @@ static void y4m_convert_411_420jpeg(y4m_input *_y4m,unsigned char *_dst,
int y;
int x;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
- c_h=_y4m->pic_h;
- dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- dst_c_sz=dst_c_w*dst_c_h;
- tmp_sz=dst_c_w*c_h;
- tmp=_aux+2*c_sz;
- for(pli=1;pli<3;pli++){
+ c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
+ c_h = _y4m->pic_h;
+ dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ dst_c_sz = dst_c_w * dst_c_h;
+ tmp_sz = dst_c_w * c_h;
+ tmp = _aux + 2 * c_sz;
+ for (pli = 1; pli < 3; pli++) {
/*In reality, the horizontal and vertical steps could be pipelined, for
less memory consumption and better cache performance, but we do them
separately for simplicity.*/
/*First do horizontal filtering (convert to 422jpeg)*/
- for(y=0;y<c_h;y++){
+ for (y = 0; y < c_h; y++) {
/*Filters: [1 110 18 -1]/128 and [-3 50 86 -5]/128, both derived from a
4-tap Mitchell window.*/
- for(x=0;x<OC_MINI(c_w,1);x++){
- tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(111*_aux[0]
- +18*_aux[OC_MINI(1,c_w-1)]-_aux[OC_MINI(2,c_w-1)]+64)>>7,255);
- tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(47*_aux[0]
- +86*_aux[OC_MINI(1,c_w-1)]-5*_aux[OC_MINI(2,c_w-1)]+64)>>7,255);
+ for (x = 0; x < OC_MINI(c_w, 1); x++) {
+ tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (111 * _aux[0]
+ + 18 * _aux[OC_MINI(1, c_w - 1)] - _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255);
+ tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (47 * _aux[0]
+ + 86 * _aux[OC_MINI(1, c_w - 1)] - 5 * _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255);
}
- for(;x<c_w-2;x++){
- tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(_aux[x-1]+110*_aux[x]
- +18*_aux[x+1]-_aux[x+2]+64)>>7,255);
- tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(-3*_aux[x-1]+50*_aux[x]
- +86*_aux[x+1]-5*_aux[x+2]+64)>>7,255);
+ for (; x < c_w - 2; x++) {
+ tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x]
+ + 18 * _aux[x + 1] - _aux[x + 2] + 64) >> 7, 255);
+ tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x]
+ + 86 * _aux[x + 1] - 5 * _aux[x + 2] + 64) >> 7, 255);
}
- for(;x<c_w;x++){
- tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(_aux[x-1]+110*_aux[x]
- +18*_aux[OC_MINI(x+1,c_w-1)]-_aux[c_w-1]+64)>>7,255);
- if((x<<1|1)<dst_c_w){
- tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(-3*_aux[x-1]+50*_aux[x]
- +86*_aux[OC_MINI(x+1,c_w-1)]-5*_aux[c_w-1]+64)>>7,255);
+ for (; x < c_w; x++) {
+ tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x]
+ + 18 * _aux[OC_MINI(x + 1, c_w - 1)] - _aux[c_w - 1] + 64) >> 7, 255);
+ if ((x << 1 | 1) < dst_c_w) {
+ tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x]
+ + 86 * _aux[OC_MINI(x + 1, c_w - 1)] - 5 * _aux[c_w - 1] + 64) >> 7, 255);
}
}
- tmp+=dst_c_w;
- _aux+=c_w;
+ tmp += dst_c_w;
+ _aux += c_w;
}
- tmp-=tmp_sz;
+ tmp -= tmp_sz;
/*Now do the vertical filtering.*/
- y4m_422jpeg_420jpeg_helper(_dst,tmp,dst_c_w,c_h);
- _dst+=dst_c_sz;
+ y4m_422jpeg_420jpeg_helper(_dst, tmp, dst_c_w, c_h);
+ _dst += dst_c_sz;
}
}
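In this 4:1:1 path each source chroma sample x produces the output pair at indices 2x and 2x + 1 (the tmp[x << 1] and tmp[x << 1 | 1] stores above), doubling horizontal chroma resolution from one quarter to one half of luma before the shared vertical decimator runs. A sketch of just that index geometry, using nearest-neighbor duplication as a stand-in for the Mitchell-window taps:

    /* 4:1:1 -> 4:2:2 horizontal upsampling geometry: source sample x
       feeds output samples 2*x and 2*x + 1. The real converter computes
       the two outputs with different filter taps; duplication here only
       illustrates the index mapping. */
    static void upsample_2x_indices(const unsigned char *src,
                                    unsigned char *dst, int src_w) {
      int x;
      for (x = 0; x < src_w; x++) {
        dst[x << 1] = src[x];
        dst[x << 1 | 1] = src[x];
      }
    }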
/*Convert 444 to 420jpeg.*/
-static void y4m_convert_444_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_444_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
unsigned char *tmp;
int c_w;
int c_h;
@@ -599,218 +607,247 @@ static void y4m_convert_444_420jpeg(y4m_input *_y4m,unsigned char *_dst,
int y;
int x;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
- c_h=_y4m->pic_h;
- dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- dst_c_sz=dst_c_w*dst_c_h;
- tmp_sz=dst_c_w*c_h;
- tmp=_aux+2*c_sz;
- for(pli=1;pli<3;pli++){
+ c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
+ c_h = _y4m->pic_h;
+ dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ dst_c_sz = dst_c_w * dst_c_h;
+ tmp_sz = dst_c_w * c_h;
+ tmp = _aux + 2 * c_sz;
+ for (pli = 1; pli < 3; pli++) {
/*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
- for(y=0;y<c_h;y++){
- for(x=0;x<OC_MINI(c_w,2);x+=2){
- tmp[x>>1]=OC_CLAMPI(0,(64*_aux[0]+78*_aux[OC_MINI(1,c_w-1)]
- -17*_aux[OC_MINI(2,c_w-1)]
- +3*_aux[OC_MINI(3,c_w-1)]+64)>>7,255);
+ for (y = 0; y < c_h; y++) {
+ for (x = 0; x < OC_MINI(c_w, 2); x += 2) {
+ tmp[x >> 1] = OC_CLAMPI(0, (64 * _aux[0] + 78 * _aux[OC_MINI(1, c_w - 1)]
+ - 17 * _aux[OC_MINI(2, c_w - 1)]
+ + 3 * _aux[OC_MINI(3, c_w - 1)] + 64) >> 7, 255);
}
- for(;x<c_w-3;x+=2){
- tmp[x>>1]=OC_CLAMPI(0,(3*(_aux[x-2]+_aux[x+3])
- -17*(_aux[x-1]+_aux[x+2])+78*(_aux[x]+_aux[x+1])+64)>>7,255);
+ for (; x < c_w - 3; x += 2) {
+ tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[x + 3])
+ - 17 * (_aux[x - 1] + _aux[x + 2]) + 78 * (_aux[x] + _aux[x + 1]) + 64) >> 7, 255);
}
- for(;x<c_w;x+=2){
- tmp[x>>1]=OC_CLAMPI(0,(3*(_aux[x-2]+_aux[c_w-1])-
- 17*(_aux[x-1]+_aux[OC_MINI(x+2,c_w-1)])+
- 78*(_aux[x]+_aux[OC_MINI(x+1,c_w-1)])+64)>>7,255);
+ for (; x < c_w; x += 2) {
+ tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[c_w - 1]) -
+ 17 * (_aux[x - 1] + _aux[OC_MINI(x + 2, c_w - 1)]) +
+ 78 * (_aux[x] + _aux[OC_MINI(x + 1, c_w - 1)]) + 64) >> 7, 255);
}
- tmp+=dst_c_w;
- _aux+=c_w;
+ tmp += dst_c_w;
+ _aux += c_w;
}
- tmp-=tmp_sz;
+ tmp -= tmp_sz;
/*Now do the vertical filtering.*/
- y4m_422jpeg_420jpeg_helper(_dst,tmp,dst_c_w,c_h);
- _dst+=dst_c_sz;
+ y4m_422jpeg_420jpeg_helper(_dst, tmp, dst_c_w, c_h);
+ _dst += dst_c_sz;
}
}
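A quick sanity check on the 6-tap Lanczos kernel used here: its taps sum to 3 - 17 + 78 + 78 - 17 + 3 = 128, so after the >> 7 a flat (DC) input passes through unchanged. For example, with every sample equal to 100:

    sum = (3 - 17 + 78 + 78 - 17 + 3) * 100 = 12800
    (12800 + 64) >> 7 = 100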
/*The image is padded with empty chroma components at 4:2:0.*/
-static void y4m_convert_mono_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_mono_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
int c_sz;
- _dst+=_y4m->pic_w*_y4m->pic_h;
- c_sz=((_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h)*
- ((_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v);
- memset(_dst,128,c_sz*2);
+ _dst += _y4m->pic_w * _y4m->pic_h;
+ c_sz = ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
+ ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
+ memset(_dst, 128, c_sz * 2);
}
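The memset to 128 works because 128 is the neutral value of an 8-bit Cb or Cr sample; filling both chroma planes with it turns the mono input into a gray (colorless) 4:2:0 frame. A minimal illustration with a hypothetical 64x48 frame:

    #include <string.h>

    /* 64x48 4:2:0 layout: full-size luma plane followed by two
       quarter-size chroma planes. Cb = Cr = 128 encodes zero color. */
    enum { W = 64, H = 48 };
    static unsigned char frame[W * H + 2 * (W / 2) * (H / 2)];

    static void clear_chroma(void) {
      memset(frame + W * H, 128, 2 * (W / 2) * (H / 2));
    }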
/*No conversion function needed.*/
-static void y4m_convert_null(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_null(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
}
-int y4m_input_open(y4m_input *_y4m,FILE *_fin,char *_skip,int _nskip){
+int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
+ int only_420) {
char buffer[80];
int ret;
int i;
/*Read until newline, or 80 cols, whichever happens first.*/
- for(i=0;i<79;i++){
- if(_nskip>0){
- buffer[i]=*_skip++;
+ for (i = 0; i < 79; i++) {
+ if (_nskip > 0) {
+ buffer[i] = *_skip++;
_nskip--;
+ } else {
+ ret = (int)fread(buffer + i, 1, 1, _fin);
+ if (ret < 1) return -1;
}
- else{
- ret=(int)fread(buffer+i,1,1,_fin);
- if(ret<1)return -1;
- }
- if(buffer[i]=='\n')break;
+ if (buffer[i] == '\n') break;
}
/*We skipped too much header data.*/
- if(_nskip>0)return -1;
- if(i==79){
- fprintf(stderr,"Error parsing header; not a YUV2MPEG2 file?\n");
+ if (_nskip > 0) return -1;
+ if (i == 79) {
+ fprintf(stderr, "Error parsing header; not a YUV2MPEG2 file?\n");
return -1;
}
- buffer[i]='\0';
- if(memcmp(buffer,"YUV4MPEG",8)){
- fprintf(stderr,"Incomplete magic for YUV4MPEG file.\n");
+ buffer[i] = '\0';
+ if (memcmp(buffer, "YUV4MPEG", 8)) {
+ fprintf(stderr, "Incomplete magic for YUV4MPEG file.\n");
return -1;
}
- if(buffer[8]!='2'){
- fprintf(stderr,"Incorrect YUV input file version; YUV4MPEG2 required.\n");
+ if (buffer[8] != '2') {
+ fprintf(stderr, "Incorrect YUV input file version; YUV4MPEG2 required.\n");
}
- ret=y4m_parse_tags(_y4m,buffer+5);
- if(ret<0){
- fprintf(stderr,"Error parsing YUV4MPEG2 header.\n");
+ ret = y4m_parse_tags(_y4m, buffer + 5);
+ if (ret < 0) {
+ fprintf(stderr, "Error parsing YUV4MPEG2 header.\n");
return ret;
}
- if(_y4m->interlace=='?'){
- fprintf(stderr,"Warning: Input video interlacing format unknown; "
- "assuming progressive scan.\n");
- }
- else if(_y4m->interlace!='p'){
- fprintf(stderr,"Input video is interlaced; "
- "Only progressive scan handled.\n");
+ if (_y4m->interlace == '?') {
+ fprintf(stderr, "Warning: Input video interlacing format unknown; "
+ "assuming progressive scan.\n");
+ } else if (_y4m->interlace != 'p') {
+ fprintf(stderr, "Input video is interlaced; "
+ "Only progressive scan handled.\n");
return -1;
}
- if(strcmp(_y4m->chroma_type,"420")==0||
- strcmp(_y4m->chroma_type,"420jpeg")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h
- +2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
+ _y4m->vpx_fmt = VPX_IMG_FMT_I420;
+ _y4m->vpx_bps = 12;
+ if (strcmp(_y4m->chroma_type, "420") == 0 ||
+ strcmp(_y4m->chroma_type, "420jpeg") == 0) {
+ _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h
+ + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
/*Natively supported: no conversion required.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
- _y4m->convert=y4m_convert_null;
- }
- else if(strcmp(_y4m->chroma_type,"420mpeg2")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_null;
+ } else if (strcmp(_y4m->chroma_type, "420mpeg2") == 0) {
+ _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*Chroma filter required: read into the aux buf first.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=
- 2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
- _y4m->convert=y4m_convert_42xmpeg2_42xjpeg;
- }
- else if(strcmp(_y4m->chroma_type,"420paldv")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz =
+ 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
+ _y4m->convert = y4m_convert_42xmpeg2_42xjpeg;
+ } else if (strcmp(_y4m->chroma_type, "420paldv") == 0) {
+ _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*Chroma filter required: read into the aux buf first.
We need to make two filter passes, so we need some extra space in the
aux buffer.*/
- _y4m->aux_buf_sz=3*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
- _y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
- _y4m->convert=y4m_convert_42xpaldv_42xjpeg;
- }
- else if(strcmp(_y4m->chroma_type,"422jpeg")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_sz = 3 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
+ _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
+ _y4m->convert = y4m_convert_42xpaldv_42xjpeg;
+ } else if (strcmp(_y4m->chroma_type, "422jpeg") == 0) {
+ _y4m->src_c_dec_h = _y4m->dst_c_dec_h = 2;
+ _y4m->src_c_dec_v = 1;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*Chroma filter required: read into the aux buf first.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->convert=y4m_convert_422jpeg_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"422")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
- /*Chroma filter required: read into the aux buf first.
- We need to make two filter passes, so we need some extra space in the
- aux buffer.*/
- _y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->convert=y4m_convert_422_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"411")==0){
- _y4m->src_c_dec_h=4;
- _y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
- /*Chroma filter required: read into the aux buf first.
- We need to make two filter passes, so we need some extra space in the
- aux buffer.*/
- _y4m->aux_buf_read_sz=2*((_y4m->pic_w+3)/4)*_y4m->pic_h;
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->convert=y4m_convert_411_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"444")==0){
- _y4m->src_c_dec_h=1;
- _y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->convert = y4m_convert_422jpeg_420jpeg;
+ } else if (strcmp(_y4m->chroma_type, "422") == 0) {
+ _y4m->src_c_dec_h = 2;
+ _y4m->src_c_dec_v = 1;
+ if (only_420) {
+ _y4m->dst_c_dec_h = 2;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
+ /*Chroma filter required: read into the aux buf first.
+ We need to make two filter passes, so we need some extra space in the
+ aux buffer.*/
+ _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz +
+ ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->convert = y4m_convert_422_420jpeg;
+ } else {
+ _y4m->vpx_fmt = VPX_IMG_FMT_I422;
+ _y4m->vpx_bps = 16;
+ _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
+ _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h
+ + 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ /*Natively supported: no conversion required.*/
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_null;
+ }
+ } else if (strcmp(_y4m->chroma_type, "411") == 0) {
+ _y4m->src_c_dec_h = 4;
+ _y4m->dst_c_dec_h = 2;
+ _y4m->src_c_dec_v = 1;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*Chroma filter required: read into the aux buf first.
We need to make two filter passes, so we need some extra space in the
aux buffer.*/
- _y4m->aux_buf_read_sz=2*_y4m->pic_w*_y4m->pic_h;
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->convert=y4m_convert_444_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"444alpha")==0){
- _y4m->src_c_dec_h=1;
- _y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
- /*Chroma filter required: read into the aux buf first.
- We need to make two filter passes, so we need some extra space in the
- aux buffer.
- The extra plane also gets read into the aux buf.
- It will be discarded.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=3*_y4m->pic_w*_y4m->pic_h;
- _y4m->convert=y4m_convert_444_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"mono")==0){
- _y4m->src_c_dec_h=_y4m->src_c_dec_v=0;
- _y4m->dst_c_dec_h=_y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 3) / 4) * _y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->convert = y4m_convert_411_420jpeg;
+ } else if (strcmp(_y4m->chroma_type, "444") == 0) {
+ _y4m->src_c_dec_h = 1;
+ _y4m->src_c_dec_v = 1;
+ if (only_420) {
+ _y4m->dst_c_dec_h = 2;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
+ /*Chroma filter required: read into the aux buf first.
+ We need to make two filter passes, so we need some extra space in the
+ aux buffer.*/
+ _y4m->aux_buf_read_sz = 2 * _y4m->pic_w * _y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz +
+ ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->convert = y4m_convert_444_420jpeg;
+ } else {
+ _y4m->vpx_fmt = VPX_IMG_FMT_I444;
+ _y4m->vpx_bps = 24;
+ _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
+ _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
+ _y4m->dst_buf_read_sz = 3 * _y4m->pic_w * _y4m->pic_h;
+ /*Natively supported: no conversion required.*/
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_null;
+ }
+ } else if (strcmp(_y4m->chroma_type, "444alpha") == 0) {
+ _y4m->src_c_dec_h = 1;
+ _y4m->src_c_dec_v = 1;
+ if (only_420) {
+ _y4m->dst_c_dec_h = 2;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
+ /*Chroma filter required: read into the aux buf first.
+ We need to make two filter passes, so we need some extra space in the
+ aux buffer.
+ The extra plane also gets read into the aux buf.
+ It will be discarded.*/
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 3 * _y4m->pic_w * _y4m->pic_h;
+ _y4m->convert = y4m_convert_444_420jpeg;
+ } else {
+ _y4m->vpx_fmt = VPX_IMG_FMT_444A;
+ _y4m->vpx_bps = 32;
+ _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
+ _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
+ _y4m->dst_buf_read_sz = 4 * _y4m->pic_w * _y4m->pic_h;
+ /*Natively supported: no conversion required.*/
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_null;
+ }
+ } else if (strcmp(_y4m->chroma_type, "mono") == 0) {
+ _y4m->src_c_dec_h = _y4m->src_c_dec_v = 0;
+ _y4m->dst_c_dec_h = _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*No extra space required, but we need to clear the chroma planes.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
- _y4m->convert=y4m_convert_mono_420jpeg;
- }
- else{
- fprintf(stderr,"Unknown chroma sampling type: %s\n",_y4m->chroma_type);
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_mono_420jpeg;
+ } else {
+ fprintf(stderr, "Unknown chroma sampling type: %s\n", _y4m->chroma_type);
return -1;
}
/*The size of the final frame buffers is always computed from the
destination chroma decimation type.*/
- _y4m->dst_buf_sz=_y4m->pic_w*_y4m->pic_h
- +2*((_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h)*
- ((_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v);
- _y4m->dst_buf=(unsigned char *)malloc(_y4m->dst_buf_sz);
- _y4m->aux_buf=(unsigned char *)malloc(_y4m->aux_buf_sz);
+ _y4m->dst_buf_sz = _y4m->pic_w * _y4m->pic_h
+ + 2 * ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
+ ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
+ _y4m->dst_buf = (unsigned char *)malloc(_y4m->dst_buf_sz);
+ _y4m->aux_buf = (unsigned char *)malloc(_y4m->aux_buf_sz);
return 0;
}
-void y4m_input_close(y4m_input *_y4m){
+void y4m_input_close(y4m_input *_y4m) {
free(_y4m->dst_buf);
free(_y4m->aux_buf);
}
-int y4m_input_fetch_frame(y4m_input *_y4m,FILE *_fin,vpx_image_t *_img){
+int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *_img) {
char frame[6];
int pic_sz;
int c_w;
@@ -818,54 +855,55 @@ int y4m_input_fetch_frame(y4m_input *_y4m,FILE *_fin,vpx_image_t *_img){
int c_sz;
int ret;
/*Read and skip the frame header.*/
- ret=(int)fread(frame,1,6,_fin);
- if(ret<6)return 0;
- if(memcmp(frame,"FRAME",5)){
- fprintf(stderr,"Loss of framing in Y4M input data\n");
+ ret = (int)fread(frame, 1, 6, _fin);
+ if (ret < 6) return 0;
+ if (memcmp(frame, "FRAME", 5)) {
+ fprintf(stderr, "Loss of framing in Y4M input data\n");
return -1;
}
- if(frame[5]!='\n'){
+ if (frame[5] != '\n') {
char c;
int j;
- for(j=0;j<79&&fread(&c,1,1,_fin)&&c!='\n';j++);
- if(j==79){
- fprintf(stderr,"Error parsing Y4M frame header\n");
+ for (j = 0; j < 79 && fread(&c, 1, 1, _fin) && c != '\n'; j++);
+ if (j == 79) {
+ fprintf(stderr, "Error parsing Y4M frame header\n");
return -1;
}
}
/*Read the frame data that needs no conversion.*/
- if(fread(_y4m->dst_buf,1,_y4m->dst_buf_read_sz,_fin)!=_y4m->dst_buf_read_sz){
- fprintf(stderr,"Error reading Y4M frame data.\n");
+ if (fread(_y4m->dst_buf, 1, _y4m->dst_buf_read_sz, _fin) != _y4m->dst_buf_read_sz) {
+ fprintf(stderr, "Error reading Y4M frame data.\n");
return -1;
}
/*Read the frame data that does need conversion.*/
- if(fread(_y4m->aux_buf,1,_y4m->aux_buf_read_sz,_fin)!=_y4m->aux_buf_read_sz){
- fprintf(stderr,"Error reading Y4M frame data.\n");
+ if (fread(_y4m->aux_buf, 1, _y4m->aux_buf_read_sz, _fin) != _y4m->aux_buf_read_sz) {
+ fprintf(stderr, "Error reading Y4M frame data.\n");
return -1;
}
/*Now convert the just read frame.*/
- (*_y4m->convert)(_y4m,_y4m->dst_buf,_y4m->aux_buf);
+ (*_y4m->convert)(_y4m, _y4m->dst_buf, _y4m->aux_buf);
/*Fill in the frame buffer pointers.
We don't use vpx_img_wrap() because it forces padding for odd picture
sizes, which would require a separate fread call for every row.*/
- memset(_img,0,sizeof(*_img));
+ memset(_img, 0, sizeof(*_img));
/*Y4M has the planes in Y'CbCr order, which libvpx calls Y, U, and V.*/
- _img->fmt=IMG_FMT_I420;
- _img->w=_img->d_w=_y4m->pic_w;
- _img->h=_img->d_h=_y4m->pic_h;
- /*This is hard-coded to 4:2:0 for now, as that's all VP8 supports.*/
- _img->x_chroma_shift=1;
- _img->y_chroma_shift=1;
- _img->bps=12;
+ _img->fmt = _y4m->vpx_fmt;
+ _img->w = _img->d_w = _y4m->pic_w;
+ _img->h = _img->d_h = _y4m->pic_h;
+ _img->x_chroma_shift = _y4m->dst_c_dec_h >> 1;
+ _img->y_chroma_shift = _y4m->dst_c_dec_v >> 1;
+ _img->bps = _y4m->vpx_bps;
+
/*Set up the buffer pointers.*/
- pic_sz=_y4m->pic_w*_y4m->pic_h;
- c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- _img->stride[PLANE_Y]=_y4m->pic_w;
- _img->stride[PLANE_U]=_img->stride[PLANE_V]=c_w;
- _img->planes[PLANE_Y]=_y4m->dst_buf;
- _img->planes[PLANE_U]=_y4m->dst_buf+pic_sz;
- _img->planes[PLANE_V]=_y4m->dst_buf+pic_sz+c_sz;
+ pic_sz = _y4m->pic_w * _y4m->pic_h;
+ c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ _img->stride[PLANE_Y] = _img->stride[PLANE_ALPHA] = _y4m->pic_w;
+ _img->stride[PLANE_U] = _img->stride[PLANE_V] = c_w;
+ _img->planes[PLANE_Y] = _y4m->dst_buf;
+ _img->planes[PLANE_U] = _y4m->dst_buf + pic_sz;
+ _img->planes[PLANE_V] = _y4m->dst_buf + pic_sz + c_sz;
+ _img->planes[PLANE_ALPHA] = _y4m->dst_buf + pic_sz + 2 * c_sz;
return 1;
}
diff --git a/libvpx/y4minput.h b/libvpx/y4minput.h
index 1a01bcd..b2a390c 100644
--- a/libvpx/y4minput.h
+++ b/libvpx/y4minput.h
@@ -23,11 +23,11 @@ typedef struct y4m_input y4m_input;
/*The function used to perform chroma conversion.*/
typedef void (*y4m_convert_func)(y4m_input *_y4m,
- unsigned char *_dst,unsigned char *_src);
+ unsigned char *_dst, unsigned char *_src);
-struct y4m_input{
+struct y4m_input {
int pic_w;
int pic_h;
int fps_n;
@@ -51,10 +51,13 @@ struct y4m_input{
y4m_convert_func convert;
unsigned char *dst_buf;
unsigned char *aux_buf;
+ enum vpx_img_fmt vpx_fmt;
+ int vpx_bps;
};
-int y4m_input_open(y4m_input *_y4m,FILE *_fin,char *_skip,int _nskip);
+int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
+ int only_420);
void y4m_input_close(y4m_input *_y4m);
-int y4m_input_fetch_frame(y4m_input *_y4m,FILE *_fin,vpx_image_t *img);
+int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *img);
#endif
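Taken together, the updated interface is opened once with the new only_420 flag and then drained frame by frame until fetch returns 0 at end of stream (or -1 on error). A sketch of a caller under those assumptions; the function name is made up, and it assumes y4minput.h pulls in the vpx image type its prototypes use:

    #include <stdio.h>
    #include "y4minput.h"

    /* Count the frames in a .y4m file. only_420 = 0 keeps 4:2:2/4:4:4
       input in its native vpx format instead of converting to 4:2:0. */
    static int read_all_frames(const char *path) {
      y4m_input y4m;
      vpx_image_t img;
      int ret, n = 0;
      FILE *fin = fopen(path, "rb");
      if (!fin) return -1;
      if (y4m_input_open(&y4m, fin, NULL, 0, 0) < 0) {
        fclose(fin);
        return -1;
      }
      while ((ret = y4m_input_fetch_frame(&y4m, fin, &img)) == 1)
        n++;
      y4m_input_close(&y4m);
      fclose(fin);
      return ret < 0 ? -1 : n;
    }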
diff --git a/libwebm/mkvparser.cpp b/libwebm/mkvparser.cpp
index e48343f..894d470 100644
--- a/libwebm/mkvparser.cpp
+++ b/libwebm/mkvparser.cpp
@@ -5760,7 +5760,7 @@ long Tracks::ParseTrackEntry(
if (id == 0x60) // VideoSettings ID
{
- if (size <= 0)
+ if (size < 0)
return E_FILE_FORMAT_INVALID;
v.start = start;
@@ -5768,7 +5768,7 @@ long Tracks::ParseTrackEntry(
}
else if (id == 0x61) // AudioSettings ID
{
- if (size <= 0)
+ if (size < 0)
return E_FILE_FORMAT_INVALID;
a.start = start;
@@ -5776,7 +5776,7 @@ long Tracks::ParseTrackEntry(
}
else if (id == 0x2D80) // ContentEncodings ID
{
- if (size <= 0)
+ if (size < 0)
return E_FILE_FORMAT_INVALID;
e.start = start;
diff --git a/mips-dspr2/libvpx_srcs.txt b/mips-dspr2/libvpx_srcs.txt
index 8b5b727..897d207 100644
--- a/mips-dspr2/libvpx_srcs.txt
+++ b/mips-dspr2/libvpx_srcs.txt
@@ -4,7 +4,6 @@ CHANGELOG
libs.mk
vp8/common/alloccommon.c
vp8/common/alloccommon.h
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -68,7 +67,6 @@ vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -81,7 +79,6 @@ vp8/decoder/onyxd_if.c
vp8/decoder/onyxd_int.h
vp8/decoder/threading.c
vp8/decoder/treereader.h
-vp8/encoder/asm_enc_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -128,11 +125,93 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common_data.c
+vp9/common/vp9_common_data.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_scale.c
+vp9/common/vp9_scale.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_dsubexp.c
+vp9/decoder/vp9_dsubexp.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_thread.c
+vp9/decoder/vp9_thread.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -140,17 +219,20 @@ vpx_mem/vpx_mem.c
vpx_mem/vpx_mem.h
vpx_mem/vpx_mem.mk
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/mips-dspr2/vpx_rtcd.h b/mips-dspr2/vp8_rtcd.h
index 4e212f8..b9fc986 100644
--- a/mips-dspr2/vpx_rtcd.h
+++ b/mips-dspr2/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c();
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
#define vp8_dequantize_b vp8_dequantize_b_c
@@ -97,7 +102,7 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_c
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -302,82 +307,7 @@ void vp8_yv12_copy_partial_frame_c(struct yv12_buffer_config *src_ybc, struct yv
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_c
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
diff --git a/mips-dspr2/vp9_rtcd.h b/mips-dspr2/vp9_rtcd.h
new file mode 100644
index 0000000..b23f1a6
--- /dev/null
+++ b/mips-dspr2/vp9_rtcd.h
@@ -0,0 +1,316 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_4x4 vp9_d207_predictor_4x4_c
+
+void vp9_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_4x4 vp9_d45_predictor_4x4_c
+
+void vp9_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_4x4 vp9_d63_predictor_4x4_c
+
+void vp9_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_4x4 vp9_h_predictor_4x4_c
+
+void vp9_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_4x4 vp9_d117_predictor_4x4_c
+
+void vp9_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_4x4 vp9_d135_predictor_4x4_c
+
+void vp9_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_4x4 vp9_d153_predictor_4x4_c
+
+void vp9_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_4x4 vp9_v_predictor_4x4_c
+
+void vp9_tm_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_4x4 vp9_tm_predictor_4x4_c
+
+void vp9_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_4x4 vp9_dc_predictor_4x4_c
+
+void vp9_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_4x4 vp9_dc_top_predictor_4x4_c
+
+void vp9_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_4x4 vp9_dc_left_predictor_4x4_c
+
+void vp9_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_4x4 vp9_dc_128_predictor_4x4_c
+
+void vp9_d207_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_8x8 vp9_d207_predictor_8x8_c
+
+void vp9_d45_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_8x8 vp9_d45_predictor_8x8_c
+
+void vp9_d63_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_8x8 vp9_d63_predictor_8x8_c
+
+void vp9_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_8x8 vp9_h_predictor_8x8_c
+
+void vp9_d117_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_8x8 vp9_d117_predictor_8x8_c
+
+void vp9_d135_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_8x8 vp9_d135_predictor_8x8_c
+
+void vp9_d153_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_8x8 vp9_d153_predictor_8x8_c
+
+void vp9_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_8x8 vp9_v_predictor_8x8_c
+
+void vp9_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_8x8 vp9_tm_predictor_8x8_c
+
+void vp9_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_8x8 vp9_dc_predictor_8x8_c
+
+void vp9_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_8x8 vp9_dc_top_predictor_8x8_c
+
+void vp9_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_8x8 vp9_dc_left_predictor_8x8_c
+
+void vp9_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_8x8 vp9_dc_128_predictor_8x8_c
+
+void vp9_d207_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_16x16 vp9_d207_predictor_16x16_c
+
+void vp9_d45_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_16x16 vp9_d45_predictor_16x16_c
+
+void vp9_d63_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_16x16 vp9_d63_predictor_16x16_c
+
+void vp9_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_16x16 vp9_h_predictor_16x16_c
+
+void vp9_d117_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_16x16 vp9_d117_predictor_16x16_c
+
+void vp9_d135_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_16x16 vp9_d135_predictor_16x16_c
+
+void vp9_d153_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_16x16 vp9_d153_predictor_16x16_c
+
+void vp9_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_16x16 vp9_v_predictor_16x16_c
+
+void vp9_tm_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_16x16 vp9_tm_predictor_16x16_c
+
+void vp9_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_16x16 vp9_dc_predictor_16x16_c
+
+void vp9_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_16x16 vp9_dc_top_predictor_16x16_c
+
+void vp9_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_16x16 vp9_dc_left_predictor_16x16_c
+
+void vp9_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_16x16 vp9_dc_128_predictor_16x16_c
+
+void vp9_d207_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_32x32 vp9_d207_predictor_32x32_c
+
+void vp9_d45_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_32x32 vp9_d45_predictor_32x32_c
+
+void vp9_d63_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_32x32 vp9_d63_predictor_32x32_c
+
+void vp9_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_32x32 vp9_h_predictor_32x32_c
+
+void vp9_d117_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_32x32 vp9_d117_predictor_32x32_c
+
+void vp9_d135_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_32x32 vp9_d135_predictor_32x32_c
+
+void vp9_d153_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_32x32 vp9_d153_predictor_32x32_c
+
+void vp9_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_32x32 vp9_v_predictor_32x32_c
+
+void vp9_tm_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_32x32 vp9_tm_predictor_32x32_c
+
+void vp9_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_32x32 vp9_dc_predictor_32x32_c
+
+void vp9_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_32x32 vp9_dc_top_predictor_32x32_c
+
+void vp9_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_32x32 vp9_dc_left_predictor_32x32_c
+
+void vp9_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_32x32 vp9_dc_128_predictor_32x32_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_c
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_c
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_c
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_c
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_c
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_c
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_c
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_c
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_c
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_copy vp9_convolve_copy_c
+
+void vp9_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_avg vp9_convolve_avg_c
+
+void vp9_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_c
+
+void vp9_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_c
+
+void vp9_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_c
+
+void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_c
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_c
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_c
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_c
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_c
+
+void vp9_short_idct8x8_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_1_add vp9_short_idct8x8_1_add_c
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_c
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_c
+
+void vp9_short_idct16x16_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_1_add vp9_short_idct16x16_1_add_c
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_c
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_c
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_c
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_c
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_c
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+#if HAVE_DSPR2
+void dsputil_static_init();
+dsputil_static_init();
+#endif
+}
+#endif
+#endif
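These are all plain C entry points, so they can be exercised directly once the corresponding libvpx objects are linked in. A small sketch for the 4x4 DC predictor, which fills the block with the rounded mean of the four above and four left reference pixels (sample values are illustrative):

    #include <stdint.h>
    #include "vp9_rtcd.h"

    static void demo_dc_predict(void) {
      uint8_t dst[4 * 4];
      const uint8_t above[4] = { 100, 102, 104, 106 };
      const uint8_t left[4]  = { 90, 92, 94, 96 };
      /* Sum of the eight neighbors is 784, so every byte of dst
         becomes 784 / 8 = 98. Stride 4 packs the rows contiguously. */
      vp9_dc_predictor_4x4(dst, 4, above, left);
    }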
diff --git a/mips-dspr2/vpx_config.c b/mips-dspr2/vpx_config.c
index 42fc4cb..1036456 100644
--- a/mips-dspr2/vpx_config.c
+++ b/mips-dspr2/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=mips32-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-examples --disable-docs --enable-dspr2 --enable-realtime-only";
+static const char* const cfg = "--force-target=mips32-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/hkuang/Downloads/android-ndk-r8e --disable-vp9-encoder --enable-dspr2 --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
diff --git a/mips-dspr2/vpx_config.h b/mips-dspr2/vpx_config.h
index b03bd7c..e6cad01 100644
--- a/mips-dspr2/vpx_config.h
+++ b/mips-dspr2/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 0
#define ARCH_MIPS 1
#define ARCH_X86 0
@@ -34,10 +35,11 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
+#define CONFIG_USE_X86INC 1
#define CONFIG_DEBUG 0
#define CONFIG_GPROF 0
#define CONFIG_GCOV 0
@@ -57,11 +59,15 @@
#define CONFIG_DC_RECON 1
#define CONFIG_RUNTIME_CPU_DETECT 0
#define CONFIG_POSTPROC 0
+#define CONFIG_VP9_POSTPROC 0
#define CONFIG_MULTITHREAD 1
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +83,10 @@
#define CONFIG_UNIT_TESTS 0
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
#endif /* VPX_CONFIG_H */
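The new INLINE macro gives the tree one portable spelling for forced inlining on this target. A sketch of the typical consumption pattern in source files (the helper name is made up):

    #include "vpx_config.h"

    /* Hot-path helper; INLINE expands here to
       __inline__ __attribute__((always_inline)). */
    static INLINE int clamp_pixel(int v) {
      return v < 0 ? 0 : (v > 255 ? 255 : v);
    }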
diff --git a/mips-dspr2/vpx_scale_rtcd.h b/mips-dspr2/vpx_scale_rtcd.h
new file mode 100644
index 0000000..d9e41f3
--- /dev/null
+++ b/mips-dspr2/vpx_scale_rtcd.h
@@ -0,0 +1,61 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
+
+void vp8_yv12_copy_frame_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
+
+void vpx_yv12_copy_y_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vpx_yv12_copy_y vpx_yv12_copy_y_c
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vp9_extend_frame_inner_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_inner_borders vp9_extend_frame_inner_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+#if HAVE_DSPR2
+void dsputil_static_init();
+dsputil_static_init();
+#endif
+}
+#endif
+#endif
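As with the other rtcd headers, exactly one translation unit is expected to define RTCD_C before including this header, so the RTCD_EXTERN declarations become definitions and setup_rtcd_internal() is emitted there. Roughly how the matching .c file wires it up (a sketch; the real file may also guard the setup with a run-once helper):

    #include "vpx_config.h"
    #define RTCD_C
    #include "vpx_scale_rtcd.h"

    void vpx_scale_rtcd(void) {
      setup_rtcd_internal();
    }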
diff --git a/mips-dspr2/vpx_version.h b/mips-dspr2/vpx_version.h
index 663dd49..512851c 100644
--- a/mips-dspr2/vpx_version.h
+++ b/mips-dspr2/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"
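For the new numbers, VERSION_PACKED evaluates to

    (1 << 16) | (2 << 8) | 0 = 0x00010200 = 66048

which is greater than v1.1.0's 0x00010100 = 65792, so packed versions order correctly under plain integer comparison.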
diff --git a/mips/.bins b/mips/.bins
deleted file mode 100644
index e69de29..0000000
--- a/mips/.bins
+++ /dev/null
diff --git a/mips/.docs b/mips/.docs
deleted file mode 100644
index e69de29..0000000
--- a/mips/.docs
+++ /dev/null
diff --git a/mips/.libs b/mips/.libs
deleted file mode 100644
index e69de29..0000000
--- a/mips/.libs
+++ /dev/null
diff --git a/mips/libvpx_srcs.txt b/mips/libvpx_srcs.txt
index 5756427..8e6fad7 100644
--- a/mips/libvpx_srcs.txt
+++ b/mips/libvpx_srcs.txt
@@ -4,7 +4,6 @@ CHANGELOG
libs.mk
vp8/common/alloccommon.c
vp8/common/alloccommon.h
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -62,7 +61,6 @@ vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -75,7 +73,6 @@ vp8/decoder/onyxd_if.c
vp8/decoder/onyxd_int.h
vp8/decoder/threading.c
vp8/decoder/treereader.h
-vp8/encoder/asm_enc_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -122,11 +119,93 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common_data.c
+vp9/common/vp9_common_data.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_scale.c
+vp9/common/vp9_scale.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_dsubexp.c
+vp9/decoder/vp9_dsubexp.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_thread.c
+vp9/decoder/vp9_thread.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -134,17 +213,20 @@ vpx_mem/vpx_mem.c
vpx_mem/vpx_mem.h
vpx_mem/vpx_mem.mk
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/mips/vpx_rtcd.h b/mips/vp8_rtcd.h
index fe84d62..5f63677 100644
--- a/mips/vpx_rtcd.h
+++ b/mips/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c(void);
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
#define vp8_dequantize_b vp8_dequantize_b_c
@@ -83,7 +88,7 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_c
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -284,82 +289,7 @@ void vp8_yv12_copy_partial_frame_c(struct yv12_buffer_config *src_ybc, struct yv
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_c
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
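
The guard rename from VPX_RTCD_ to VP8_RTCD_H_ in this hunk is what lets the header coexist with the new vp9_rtcd.h introduced below: a file that touches both codecs includes both headers, and distinct guards keep the second include from being swallowed. A self-contained sketch with the two headers simulated inline (the function bodies are invented; the real vp8_rtcd()/vp9_rtcd() live in the generated sources):

    #include <stdio.h>

    /* -- stands in for vp8_rtcd.h -- */
    #ifndef VP8_RTCD_H_
    #define VP8_RTCD_H_
    static void vp8_rtcd(void) { puts("vp8 rtcd init"); }
    #endif

    /* -- stands in for vp9_rtcd.h -- */
    #ifndef VP9_RTCD_H_
    #define VP9_RTCD_H_
    static void vp9_rtcd(void) { puts("vp9 rtcd init"); }
    #endif

    int main(void) {
        vp8_rtcd();  /* formerly vpx_rtcd(); renamed in this patch */
        vp9_rtcd();
        return 0;
    }

Had both headers kept the old shared VPX_RTCD_ guard, whichever was included second would have been skipped entirely.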
diff --git a/mips/vp9_rtcd.h b/mips/vp9_rtcd.h
new file mode 100644
index 0000000..b23f1a6
--- /dev/null
+++ b/mips/vp9_rtcd.h
@@ -0,0 +1,316 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_4x4 vp9_d207_predictor_4x4_c
+
+void vp9_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_4x4 vp9_d45_predictor_4x4_c
+
+void vp9_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_4x4 vp9_d63_predictor_4x4_c
+
+void vp9_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_4x4 vp9_h_predictor_4x4_c
+
+void vp9_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_4x4 vp9_d117_predictor_4x4_c
+
+void vp9_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_4x4 vp9_d135_predictor_4x4_c
+
+void vp9_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_4x4 vp9_d153_predictor_4x4_c
+
+void vp9_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_4x4 vp9_v_predictor_4x4_c
+
+void vp9_tm_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_4x4 vp9_tm_predictor_4x4_c
+
+void vp9_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_4x4 vp9_dc_predictor_4x4_c
+
+void vp9_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_4x4 vp9_dc_top_predictor_4x4_c
+
+void vp9_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_4x4 vp9_dc_left_predictor_4x4_c
+
+void vp9_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_4x4 vp9_dc_128_predictor_4x4_c
+
+void vp9_d207_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_8x8 vp9_d207_predictor_8x8_c
+
+void vp9_d45_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_8x8 vp9_d45_predictor_8x8_c
+
+void vp9_d63_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_8x8 vp9_d63_predictor_8x8_c
+
+void vp9_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_8x8 vp9_h_predictor_8x8_c
+
+void vp9_d117_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_8x8 vp9_d117_predictor_8x8_c
+
+void vp9_d135_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_8x8 vp9_d135_predictor_8x8_c
+
+void vp9_d153_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_8x8 vp9_d153_predictor_8x8_c
+
+void vp9_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_8x8 vp9_v_predictor_8x8_c
+
+void vp9_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_8x8 vp9_tm_predictor_8x8_c
+
+void vp9_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_8x8 vp9_dc_predictor_8x8_c
+
+void vp9_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_8x8 vp9_dc_top_predictor_8x8_c
+
+void vp9_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_8x8 vp9_dc_left_predictor_8x8_c
+
+void vp9_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_8x8 vp9_dc_128_predictor_8x8_c
+
+void vp9_d207_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_16x16 vp9_d207_predictor_16x16_c
+
+void vp9_d45_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_16x16 vp9_d45_predictor_16x16_c
+
+void vp9_d63_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_16x16 vp9_d63_predictor_16x16_c
+
+void vp9_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_16x16 vp9_h_predictor_16x16_c
+
+void vp9_d117_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_16x16 vp9_d117_predictor_16x16_c
+
+void vp9_d135_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_16x16 vp9_d135_predictor_16x16_c
+
+void vp9_d153_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_16x16 vp9_d153_predictor_16x16_c
+
+void vp9_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_16x16 vp9_v_predictor_16x16_c
+
+void vp9_tm_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_16x16 vp9_tm_predictor_16x16_c
+
+void vp9_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_16x16 vp9_dc_predictor_16x16_c
+
+void vp9_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_16x16 vp9_dc_top_predictor_16x16_c
+
+void vp9_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_16x16 vp9_dc_left_predictor_16x16_c
+
+void vp9_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_16x16 vp9_dc_128_predictor_16x16_c
+
+void vp9_d207_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d207_predictor_32x32 vp9_d207_predictor_32x32_c
+
+void vp9_d45_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d45_predictor_32x32 vp9_d45_predictor_32x32_c
+
+void vp9_d63_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d63_predictor_32x32 vp9_d63_predictor_32x32_c
+
+void vp9_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_h_predictor_32x32 vp9_h_predictor_32x32_c
+
+void vp9_d117_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d117_predictor_32x32 vp9_d117_predictor_32x32_c
+
+void vp9_d135_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d135_predictor_32x32 vp9_d135_predictor_32x32_c
+
+void vp9_d153_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_d153_predictor_32x32 vp9_d153_predictor_32x32_c
+
+void vp9_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_v_predictor_32x32 vp9_v_predictor_32x32_c
+
+void vp9_tm_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_tm_predictor_32x32 vp9_tm_predictor_32x32_c
+
+void vp9_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_predictor_32x32 vp9_dc_predictor_32x32_c
+
+void vp9_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_top_predictor_32x32 vp9_dc_top_predictor_32x32_c
+
+void vp9_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_left_predictor_32x32 vp9_dc_left_predictor_32x32_c
+
+void vp9_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
+#define vp9_dc_128_predictor_32x32 vp9_dc_128_predictor_32x32_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_c
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_c
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_c
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_c
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_c
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_c
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_c
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_c
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_c
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_copy vp9_convolve_copy_c
+
+void vp9_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve_avg vp9_convolve_avg_c
+
+void vp9_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_c
+
+void vp9_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_c
+
+void vp9_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_c
+
+void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_c
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_c
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_c
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_c
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_c
+
+void vp9_short_idct8x8_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_1_add vp9_short_idct8x8_1_add_c
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_c
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_c
+
+void vp9_short_idct16x16_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_1_add vp9_short_idct16x16_1_add_c
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_c
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_c
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_c
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_c
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_c
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+#if HAVE_DSPR2
+void dsputil_static_init();
+dsputil_static_init();
+#endif
+}
+#endif
+#endif
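
The vp9_*_predictor_<size> entries above all share one signature: a destination block plus the reconstructed pixel rows above and column left of it. As a concrete illustration of the simplest case, here is a simplified DC predictor; the real vp9_dc_predictor_4x4_c lives in the vp9 common sources, and this standalone version only mirrors its rounded-average behaviour:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for vp9_dc_predictor_4x4_c: fill the 4x4 block
     * with the rounded average of the 4 above and 4 left neighbours. */
    static void dc_predictor_4x4(uint8_t *dst, ptrdiff_t y_stride,
                                 const uint8_t *above, const uint8_t *left) {
        int i, j, sum = 0;
        uint8_t dc;
        for (i = 0; i < 4; i++) sum += above[i] + left[i];
        dc = (uint8_t)((sum + 4) >> 3);  /* 8 samples, +4 rounds to nearest */
        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                dst[i * y_stride + j] = dc;
    }

    int main(void) {
        uint8_t above[4] = {10, 20, 30, 40}, left[4] = {10, 20, 30, 40};
        uint8_t block[16];
        dc_predictor_4x4(block, 4, above, left);
        printf("dc = %u\n", block[0]);  /* (200 + 4) >> 3 = 25 */
        return 0;
    }

The dc_top, dc_left, and dc_128 variants cover the frame edges where one or both neighbour arrays are unavailable, falling back to the remaining side or to the constant 128.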
diff --git a/mips/vpx_config.c b/mips/vpx_config.c
index fa93c2c..8c995fb 100644
--- a/mips/vpx_config.c
+++ b/mips/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=mips32-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-examples --disable-docs --enable-realtime-only";
+static const char* const cfg = "--force-target=mips32-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/hkuang/Downloads/android-ndk-r8e --disable-vp9-encoder --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
diff --git a/mips/vpx_config.h b/mips/vpx_config.h
index 9f51b09..8ead72e 100644
--- a/mips/vpx_config.h
+++ b/mips/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 0
#define ARCH_MIPS 1
#define ARCH_X86 0
@@ -34,10 +35,11 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
+#define CONFIG_USE_X86INC 1
#define CONFIG_DEBUG 0
#define CONFIG_GPROF 0
#define CONFIG_GCOV 0
@@ -57,11 +59,15 @@
#define CONFIG_DC_RECON 1
#define CONFIG_RUNTIME_CPU_DETECT 0
#define CONFIG_POSTPROC 0
+#define CONFIG_VP9_POSTPROC 0
#define CONFIG_MULTITHREAD 1
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +83,10 @@
#define CONFIG_UNIT_TESTS 0
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
#endif /* VPX_CONFIG_H */
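
These CONFIG_* values are consumed as preprocessor switches, so the VP9 decoder added here is compiled in while the encoder (disabled via --disable-vp9-encoder in the configure line above) never reaches the object files. A self-contained sketch with the two flags inlined rather than pulled from vpx_config.h:

    #include <stdio.h>

    /* Values mirror this mips target's vpx_config.h: */
    #define CONFIG_VP9_DECODER 1
    #define CONFIG_VP9_ENCODER 0

    int main(void) {
    #if CONFIG_VP9_DECODER
        puts("vp9 decode path compiled in");
    #endif
    #if CONFIG_VP9_ENCODER
        puts("vp9 encode path compiled in");  /* dead code on this target */
    #endif
        return 0;
    }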
diff --git a/mips/vpx_scale_rtcd.h b/mips/vpx_scale_rtcd.h
new file mode 100644
index 0000000..d9e41f3
--- /dev/null
+++ b/mips/vpx_scale_rtcd.h
@@ -0,0 +1,61 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
+
+void vp8_yv12_copy_frame_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
+
+void vpx_yv12_copy_y_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vpx_yv12_copy_y vpx_yv12_copy_y_c
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vp9_extend_frame_inner_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_inner_borders vp9_extend_frame_inner_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+#if HAVE_DSPR2
+void dsputil_static_init();
+dsputil_static_init();
+#endif
+}
+#endif
+#endif
diff --git a/mips/vpx_version.h b/mips/vpx_version.h
index 663dd49..512851c 100644
--- a/mips/vpx_version.h
+++ b/mips/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"